diff --git a/pyproject.toml b/pyproject.toml
index c719a8b8..6c6632d6 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -3,7 +3,7 @@ name = "elevenlabs"
[tool.poetry]
name = "elevenlabs"
-version = "1.51.0"
+version = "1.52.0"
description = ""
readme = "README.md"
authors = []
diff --git a/reference.md b/reference.md
index aac13c8d..1b716338 100644
--- a/reference.md
+++ b/reference.md
@@ -1,23 +1,8 @@
# Reference
-## History
-client.history.get_all(...)
-
--
-
-#### 📝 Description
-
-
--
-
+
client.claim_a_task_v_1_speech_to_text_reviews_tasks_task_id_claim_post(...)
-
-Returns metadata about all your generated audio.
-
-
-
-
-
#### 🔌 Usage
@@ -32,7 +17,9 @@ from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
-client.history.get_all()
+client.claim_a_task_v_1_speech_to_text_reviews_tasks_task_id_claim_post(
+ task_id="task_id",
+)
```
@@ -48,7 +35,7 @@ client.history.get_all()
-
-**page_size:** `typing.Optional[int]` — How many history items to return at maximum. Can not exceed 1000, defaults to 100.
+**task_id:** `str` — The ID of the task to claim.
@@ -56,23 +43,56 @@ client.history.get_all()
-
-**start_after_history_item_id:** `typing.Optional[str]` — After which ID to start fetching, use this parameter to paginate across a large collection of history items. In case this parameter is not provided history items will be fetched starting from the most recently created one ordered descending by their creation date.
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+client.submit_a_completed_task_v_1_speech_to_text_reviews_producers_user_id_tasks_task_id_submit_put(...)
-
-**voice_id:** `typing.Optional[str]` — Voice ID to be filtered for, you can use GET https://api.elevenlabs.io/v1/voices to receive a list of voices and their IDs.
-
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from elevenlabs import ElevenLabs
+
+client = ElevenLabs(
+ api_key="YOUR_API_KEY",
+)
+client.submit_a_completed_task_v_1_speech_to_text_reviews_producers_user_id_tasks_task_id_submit_put(
+ user_id="user_id",
+ task_id="task_id",
+)
+
+```
+
+
+#### ⚙️ Parameters
+
-
-**search:** `typing.Optional[str]` — search term used for filtering
+
+-
+
+**user_id:** `str`
@@ -80,7 +100,7 @@ client.history.get_all()
-
-**source:** `typing.Optional[HistoryGetAllRequestSource]` — Source of the generated history item
+**task_id:** `str` — The ID of the task review to claim.
@@ -100,24 +120,10 @@ client.history.get_all()
-client.history.get(...)
-
--
-
-#### 📝 Description
-
-
--
-
+
client.list_unclaimed_reviews_v_1_speech_to_text_reviews_tasks_post(...)
-
-Returns information about an history item by its ID.
-
-
-
-
-
#### 🔌 Usage
@@ -127,13 +133,20 @@ Returns information about an history item by its ID.
-
```python
-from elevenlabs import ElevenLabs
+from elevenlabs import ElevenLabs, TagModel
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
-client.history.get(
- history_item_id="HISTORY_ITEM_ID",
+client.list_unclaimed_reviews_v_1_speech_to_text_reviews_tasks_post(
+ tags=[
+ [
+ TagModel(
+ kind="lang",
+ value="value",
+ )
+ ]
+ ],
)
```
@@ -150,7 +163,7 @@ client.history.get(
-
-**history_item_id:** `str` — History item ID to be used, you can use GET https://api.elevenlabs.io/v1/history to receive a list of history items and their IDs.
+**tags:** `typing.Sequence[typing.Sequence[TagModel]]`
@@ -158,35 +171,53 @@ client.history.get(
-
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+**page_size:** `typing.Optional[int]` — The number of tasks to return per page.
-
-
+
+-
+**cursor:** `typing.Optional[str]` — Cursor for pagination, using the cursor from the previous page.
+
-
-client.history.delete(...)
-
-#### 📝 Description
+**unclaimed_only:** `typing.Optional[bool]`
+
+
+
-
+**include_instances:** `typing.Optional[bool]`
+
+
+
+
-
-Delete a history item by its ID
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+client.list_tasks_instances_for_a_user_v_1_speech_to_text_reviews_producers_user_id_tasks_get(...)
+
+-
#### 🔌 Usage
@@ -202,8 +233,8 @@ from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
-client.history.delete(
- history_item_id="HISTORY_ITEM_ID",
+client.list_tasks_instances_for_a_user_v_1_speech_to_text_reviews_producers_user_id_tasks_get(
+ user_id="user_id",
)
```
@@ -220,7 +251,7 @@ client.history.delete(
-
-**history_item_id:** `str` — History item ID to be used, you can use GET https://api.elevenlabs.io/v1/history to receive a list of history items and their IDs.
+**user_id:** `str`
@@ -228,35 +259,37 @@ client.history.delete(
-
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+**page_size:** `typing.Optional[int]` — The number of tasks to return per page.
-
-
-
-
-
-
-
-client.history.get_audio(...)
-
-#### 📝 Description
-
-
--
+**cursor:** `typing.Optional[str]` — Cursor for pagination, using the cursor from the previous page.
+
+
+
-
-Returns the audio of an history item.
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+client.compute_a_quote_for_a_asr_transcription_review_task_v_1_speech_to_text_reviews_get_quote_post(...)
+
+-
#### 🔌 Usage
@@ -267,13 +300,18 @@ Returns the audio of an history item.
-
```python
-from elevenlabs import ElevenLabs
+from elevenlabs import ElevenLabs, QuoteRequestModel
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
-client.history.get_audio(
- history_item_id="HISTORY_ITEM_ID",
+client.compute_a_quote_for_a_asr_transcription_review_task_v_1_speech_to_text_reviews_get_quote_post(
+ request=QuoteRequestModel(
+ content_hash="content_hash",
+ duration_s=1.1,
+ speaker_count=1,
+ language="language",
+ ),
)
```
@@ -290,7 +328,7 @@ client.history.get_audio(
-
-**history_item_id:** `str` — History item ID to be used, you can use GET https://api.elevenlabs.io/v1/history to receive a list of history items and their IDs.
+**request:** `QuoteRequestModel`
@@ -298,7 +336,7 @@ client.history.get_audio(
-
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. You can pass in configuration such as `chunk_size`, and more to customize the request and response.
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
@@ -310,7 +348,8 @@ client.history.get_audio(
-client.history.download(...)
+## History
+client.history.get_all(...)
-
@@ -322,7 +361,7 @@ client.history.get_audio(
-
-Download one or more history items. If one history item ID is provided, we will return a single audio file. If more than one history item IDs are provided, we will provide the history items packed into a .zip file.
+Returns metadata about all your generated audio.
@@ -342,9 +381,7 @@ from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
-client.history.download(
- history_item_ids=["HISTORY_ITEM_ID"],
-)
+client.history.get_all()
```
@@ -360,7 +397,7 @@ client.history.download(
-
-**history_item_ids:** `typing.Sequence[str]` — A list of history items to download, you can get IDs of history items and other metadata using the GET https://api.elevenlabs.io/v1/history endpoint.
+**page_size:** `typing.Optional[int]` — How many history items to return at maximum. Cannot exceed 1000; defaults to 100.
@@ -368,7 +405,31 @@ client.history.download(
-
-**output_format:** `typing.Optional[str]` — Output format to transcode the audio file, can be wav or default.
+**start_after_history_item_id:** `typing.Optional[str]` — After which ID to start fetching; use this parameter to paginate across a large collection of history items. If this parameter is not provided, history items will be fetched starting from the most recently created one, ordered descending by their creation date (see the pagination sketch after this parameter list).
+
+
+
+
+
+-
+
+**voice_id:** `typing.Optional[str]` — Voice ID to be filtered for; you can use GET https://api.elevenlabs.io/v1/voices to receive a list of voices and their IDs.
+
+
+
+
+
+-
+
+**search:** `typing.Optional[str]` — Search term used for filtering.
+
+
+
+
+
+-
+
+**source:** `typing.Optional[HistoryGetAllRequestSource]` — Source of the generated history item.
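+
+A minimal pagination sketch for `get_all`, assuming the response exposes `history`, `has_more`, and `last_history_item_id` fields (names assumed from the underlying GET /v1/history payload):
+
+```python
+from elevenlabs import ElevenLabs
+
+client = ElevenLabs(
+    api_key="YOUR_API_KEY",
+)
+
+last_id = None
+while True:
+    # Resume after the last item of the previous page.
+    page = client.history.get_all(
+        page_size=100,
+        start_after_history_item_id=last_id,
+    )
+    for item in page.history:  # assumed response field
+        print(item.history_item_id)
+    if not page.has_more:  # assumed response field
+        break
+    last_id = page.last_history_item_id  # assumed response field
+```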
@@ -388,8 +449,7 @@ client.history.download(
-## TextToSoundEffects
-client.text_to_sound_effects.convert(...)
+client.history.get(...)
-
@@ -401,7 +461,7 @@ client.history.download(
-
-Converts a text of your choice into sound
+Returns information about a history item by its ID.
@@ -421,8 +481,8 @@ from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
-client.text_to_sound_effects.convert(
- text="Spacious braam suitable for high-impact movie trailer moments",
+client.history.get(
+ history_item_id="HISTORY_ITEM_ID",
)
```
@@ -439,23 +499,7 @@ client.text_to_sound_effects.convert(
-
-**text:** `str` — The text that will get converted into a sound effect.
-
-
-
-
-
--
-
-**duration_seconds:** `typing.Optional[float]` — The duration of the sound which will be generated in seconds. Must be at least 0.5 and at most 22. If set to None we will guess the optimal duration using the prompt. Defaults to None.
-
-
-
-
-
--
-
-**prompt_influence:** `typing.Optional[float]` — A higher prompt influence makes your generation follow the prompt more closely while also making generations less variable. Must be a value between 0 and 1. Defaults to 0.3.
+**history_item_id:** `str` — History item ID to be used; you can use GET https://api.elevenlabs.io/v1/history to receive a list of history items and their IDs.
@@ -463,7 +507,7 @@ client.text_to_sound_effects.convert(
-
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. You can pass in configuration such as `chunk_size`, and more to customize the request and response.
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
@@ -475,9 +519,7 @@ client.text_to_sound_effects.convert(
-## AudioIsolation
-## samples
-client.samples.delete(...)
+client.history.delete(...)
-
@@ -489,7 +531,7 @@ client.text_to_sound_effects.convert(
-
-Removes a sample by its ID.
+Delete a history item by its ID.
@@ -509,9 +551,8 @@ from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
-client.samples.delete(
- voice_id="VOICE_ID",
- sample_id="SAMPLE_ID",
+client.history.delete(
+ history_item_id="HISTORY_ITEM_ID",
)
```
@@ -528,15 +569,7 @@ client.samples.delete(
-
-**voice_id:** `str` — Voice ID to be used, you can use https://api.elevenlabs.io/v1/voices to list all the available voices.
-
-
-
-
-
--
-
-**sample_id:** `str` — Sample ID to be used, you can use GET https://api.elevenlabs.io/v1/voices/{voice_id} to list all the available samples for a voice.
+**history_item_id:** `str` — History item ID to be used; you can use GET https://api.elevenlabs.io/v1/history to receive a list of history items and their IDs.
@@ -556,7 +589,7 @@ client.samples.delete(
-client.samples.get_audio(...)
+client.history.get_audio(...)
-
@@ -568,7 +601,7 @@ client.samples.delete(
-
-Returns the audio corresponding to a sample attached to a voice.
+Returns the audio of a history item.
@@ -588,9 +621,8 @@ from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
-client.samples.get_audio(
- voice_id="VOICE_ID",
- sample_id="SAMPLE_ID",
+client.history.get_audio(
+ history_item_id="HISTORY_ITEM_ID",
)
```
@@ -607,15 +639,7 @@ client.samples.get_audio(
-
-**voice_id:** `str` — Voice ID to be used, you can use https://api.elevenlabs.io/v1/voices to list all the available voices.
-
-
-
-
-
--
-
-**sample_id:** `str` — Sample ID to be used, you can use GET https://api.elevenlabs.io/v1/voices/{voice_id} to list all the available samples for a voice.
+**history_item_id:** `str` — History item ID to be used; you can use GET https://api.elevenlabs.io/v1/history to receive a list of history items and their IDs.
@@ -635,8 +659,7 @@ client.samples.get_audio(
-## TextToSpeech
-client.text_to_speech.convert(...)
+client.history.download(...)
-
@@ -648,7 +671,7 @@ client.samples.get_audio(
-
-Converts text into speech using a voice of your choice and returns audio.
+Download one or more history items. If one history item ID is provided, we will return a single audio file. If more than one history item ID is provided, we will return the history items packed into a .zip file (see the save-to-disk sketch after the usage example).
@@ -668,11 +691,8 @@ from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
-client.text_to_speech.convert(
- voice_id="JBFqnCBsd6RMkjVDRZzb",
- output_format="mp3_44100_128",
- text="The first move is what sets everything in motion.",
- model_id="eleven_multilingual_v2",
+client.history.download(
+ history_item_ids=["HISTORY_ITEM_ID"],
)
```
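+
+Since a single ID yields one audio file and several IDs yield a .zip, a sketch that saves the payload accordingly (assuming the SDK returns the file as an iterator of byte chunks):
+
+```python
+from elevenlabs import ElevenLabs
+
+client = ElevenLabs(
+    api_key="YOUR_API_KEY",
+)
+
+ids = ["HISTORY_ITEM_ID_1", "HISTORY_ITEM_ID_2"]
+data = client.history.download(history_item_ids=ids)
+# Two or more IDs come back packed into a .zip archive.
+out_path = "history.zip" if len(ids) > 1 else "history.mp3"
+with open(out_path, "wb") as f:
+    for chunk in data:  # assumed iterator of byte chunks
+        f.write(chunk)
+```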
@@ -689,7 +709,7 @@ client.text_to_speech.convert(
-
-**voice_id:** `str` — Voice ID to be used, you can use https://api.elevenlabs.io/v1/voices to list all the available voices.
+**history_item_ids:** `typing.Sequence[str]` — A list of history items to download; you can get IDs of history items and other metadata using the GET https://api.elevenlabs.io/v1/history endpoint.
@@ -697,7 +717,7 @@ client.text_to_speech.convert(
-
-**text:** `str` — The text that will get converted into speech.
+**output_format:** `typing.Optional[str]` — Output format to transcode the audio file to; can be wav or default.
@@ -705,40 +725,70 @@ client.text_to_speech.convert(
-
-**enable_logging:** `typing.Optional[bool]` — When enable_logging is set to false zero retention mode will be used for the request. This will mean history features are unavailable for this request, including request stitching. Zero retention mode may only be used by enterprise customers.
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+## TextToSoundEffects
+client.text_to_sound_effects.convert(...)
-
-**optimize_streaming_latency:** `typing.Optional[int]`
+#### 📝 Description
-You can turn on latency optimizations at some cost of quality. The best possible final latency varies by model. Possible values:
-0 - default mode (no latency optimizations)
-1 - normal latency optimizations (about 50% of possible latency improvement of option 3)
-2 - strong latency optimizations (about 75% of possible latency improvement of option 3)
-3 - max latency optimizations
-4 - max latency optimizations, but also with text normalizer turned off for even more latency savings (best latency, but can mispronounce eg numbers and dates).
+
+-
-Defaults to None.
-
+
+-
+
+Converts text of your choice into sound.
+
+
+#### 🔌 Usage
+
-
-**output_format:** `typing.Optional[OutputFormat]` — The output format of the generated audio.
-
+
+-
+
+```python
+from elevenlabs import ElevenLabs
+
+client = ElevenLabs(
+ api_key="YOUR_API_KEY",
+)
+client.text_to_sound_effects.convert(
+ text="Spacious braam suitable for high-impact movie trailer moments",
+)
+
+```
+
+
+#### ⚙️ Parameters
+
-
-**model_id:** `typing.Optional[str]` — Identifier of the model that will be used, you can query them using GET /v1/models. The model needs to have support for text to speech, you can check this using the can_do_text_to_speech property.
+
+-
+
+**text:** `str` — The text that will get converted into a sound effect.
@@ -746,7 +796,7 @@ Defaults to None.
-
-**language_code:** `typing.Optional[str]` — Language code (ISO 639-1) used to enforce a language for the model. Currently only Turbo v2.5 and Flash v2.5 support language enforcement. For other models, an error will be returned if language code is provided.
+**duration_seconds:** `typing.Optional[float]` — The duration of the sound which will be generated in seconds. Must be at least 0.5 and at most 22. If set to None we will guess the optimal duration using the prompt. Defaults to None.
@@ -754,7 +804,7 @@ Defaults to None.
-
-**voice_settings:** `typing.Optional[VoiceSettings]` — Voice settings overriding stored setttings for the given voice. They are applied only on the given request.
+**prompt_influence:** `typing.Optional[float]` — A higher prompt influence makes your generation follow the prompt more closely while also making generations less variable. Must be a value between 0 and 1. Defaults to 0.3.
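+
+Combining the two knobs above, a sketch that pins the duration and raises prompt adherence, then writes the audio to disk (assuming the call yields byte chunks):
+
+```python
+from elevenlabs import ElevenLabs
+
+client = ElevenLabs(
+    api_key="YOUR_API_KEY",
+)
+
+audio = client.text_to_sound_effects.convert(
+    text="Spacious braam suitable for high-impact movie trailer moments",
+    duration_seconds=4.0,   # must be between 0.5 and 22; None lets the model guess
+    prompt_influence=0.7,   # 0..1; higher follows the prompt more closely
+)
+with open("braam.mp3", "wb") as f:
+    for chunk in audio:  # assumed iterator of byte chunks
+        f.write(chunk)
+```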
@@ -762,31 +812,72 @@ Defaults to None.
-
-**pronunciation_dictionary_locators:** `typing.Optional[typing.Sequence[PronunciationDictionaryVersionLocator]]` — A list of pronunciation dictionary locators (id, version_id) to be applied to the text. They will be applied in order. You may have up to 3 locators per request
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. You can pass in configuration such as `chunk_size`, and more to customize the request and response.
+
+
+
+
+
+
+
+## AudioIsolation
+## samples
+client.samples.delete(...)
-
-**seed:** `typing.Optional[int]` — If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed. Must be integer between 0 and 4294967295.
-
+#### 📝 Description
+
+
+-
+
+
+-
+
+Removes a sample by its ID.
+
+
+#### 🔌 Usage
+
-
-**previous_text:** `typing.Optional[str]` — The text that came before the text of the current request. Can be used to improve the flow of prosody when concatenating together multiple generations or to influence the prosody in the current generation.
-
+
+-
+
+```python
+from elevenlabs import ElevenLabs
+
+client = ElevenLabs(
+ api_key="YOUR_API_KEY",
+)
+client.samples.delete(
+ voice_id="VOICE_ID",
+ sample_id="SAMPLE_ID",
+)
+
+```
+
+
+#### ⚙️ Parameters
+
+
+-
+
-
-**next_text:** `typing.Optional[str]` — The text that comes after the text of the current request. Can be used to improve the flow of prosody when concatenating together multiple generations or to influence the prosody in the current generation.
+**voice_id:** `str` — Voice ID to be used; you can use https://api.elevenlabs.io/v1/voices to list all the available voices.
@@ -794,7 +885,7 @@ Defaults to None.
-
-**previous_request_ids:** `typing.Optional[typing.Sequence[str]]` — A list of request_id of the samples that were generated before this generation. Can be used to improve the flow of prosody when splitting up a large task into multiple requests. The results will be best when the same model is used across the generations. In case both previous_text and previous_request_ids is send, previous_text will be ignored. A maximum of 3 request_ids can be send.
+**sample_id:** `str` — Sample ID to be used; you can use GET https://api.elevenlabs.io/v1/voices/{voice_id} to list all the available samples for a voice.
@@ -802,15 +893,70 @@ Defaults to None.
-
-**next_request_ids:** `typing.Optional[typing.Sequence[str]]` — A list of request_id of the samples that were generated before this generation. Can be used to improve the flow of prosody when splitting up a large task into multiple requests. The results will be best when the same model is used across the generations. In case both next_text and next_request_ids is send, next_text will be ignored. A maximum of 3 request_ids can be send.
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+client.samples.get_audio(...)
-
-**use_pvc_as_ivc:** `typing.Optional[bool]` — If true, we won't use PVC version of the voice for the generation but the IVC version. This is a temporary workaround for higher latency in PVC versions.
+#### 📝 Description
+
+
+-
+
+
+-
+
+Returns the audio corresponding to a sample attached to a voice.
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from elevenlabs import ElevenLabs
+
+client = ElevenLabs(
+ api_key="YOUR_API_KEY",
+)
+client.samples.get_audio(
+ voice_id="VOICE_ID",
+ sample_id="SAMPLE_ID",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**voice_id:** `str` — Voice ID to be used; you can use https://api.elevenlabs.io/v1/voices to list all the available voices.
@@ -818,7 +964,7 @@ Defaults to None.
-
-**apply_text_normalization:** `typing.Optional[BodyTextToSpeechV1TextToSpeechVoiceIdPostApplyTextNormalization]` — This parameter controls text normalization with three modes: 'auto', 'on', and 'off'. When set to 'auto', the system will automatically decide whether to apply text normalization (e.g., spelling out numbers). With 'on', text normalization will always be applied, while with 'off', it will be skipped. Cannot be turned on for 'eleven_turbo_v2_5' model.
+**sample_id:** `str` — Sample ID to be used; you can use GET https://api.elevenlabs.io/v1/voices/{voice_id} to list all the available samples for a voice.
@@ -838,7 +984,8 @@ Defaults to None.
-client.text_to_speech.convert_with_timestamps(...)
+## TextToSpeech
+client.text_to_speech.convert(...)
-
@@ -850,7 +997,7 @@ Defaults to None.
-
-Converts text into speech using a voice of your choice and returns JSON containing audio as a base64 encoded string together with information on when which character was spoken.
+Converts text into speech using a voice of your choice and returns audio.
@@ -870,7 +1017,7 @@ from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
-client.text_to_speech.convert_with_timestamps(
+client.text_to_speech.convert(
voice_id="JBFqnCBsd6RMkjVDRZzb",
output_format="mp3_44100_128",
text="The first move is what sets everything in motion.",
@@ -980,7 +1127,7 @@ Defaults to None.
-
-**previous_text:** `typing.Optional[str]` — The text that came before the text of the current request. Can be used to improve the flow of prosody when concatenating together multiple generations or to influence the prosody in the current generation.
+**previous_text:** `typing.Optional[str]` — The text that came before the text of the current request. Can be used to improve the speech's continuity when concatenating together multiple generations or to influence the speech's continuity in the current generation.
@@ -988,7 +1135,7 @@ Defaults to None.
-
-**next_text:** `typing.Optional[str]` — The text that comes after the text of the current request. Can be used to improve the flow of prosody when concatenating together multiple generations or to influence the prosody in the current generation.
+**next_text:** `typing.Optional[str]` — The text that comes after the text of the current request. Can be used to improve the speech's continuity when concatenating together multiple generations or to influence the speech's continuity in the current generation.
@@ -996,7 +1143,7 @@ Defaults to None.
-
-**previous_request_ids:** `typing.Optional[typing.Sequence[str]]` — A list of request_id of the samples that were generated before this generation. Can be used to improve the flow of prosody when splitting up a large task into multiple requests. The results will be best when the same model is used across the generations. In case both previous_text and previous_request_ids is send, previous_text will be ignored. A maximum of 3 request_ids can be send.
+**previous_request_ids:** `typing.Optional[typing.Sequence[str]]` — A list of request_id of the samples that were generated before this generation. Can be used to improve the speech's continuity when splitting up a large task into multiple requests. The results will be best when the same model is used across the generations. If both previous_text and previous_request_ids are sent, previous_text will be ignored. A maximum of 3 request_ids can be sent.
@@ -1004,7 +1151,7 @@ Defaults to None.
-
-**next_request_ids:** `typing.Optional[typing.Sequence[str]]` — A list of request_id of the samples that were generated before this generation. Can be used to improve the flow of prosody when splitting up a large task into multiple requests. The results will be best when the same model is used across the generations. In case both next_text and next_request_ids is send, next_text will be ignored. A maximum of 3 request_ids can be send.
+**next_request_ids:** `typing.Optional[typing.Sequence[str]]` — A list of request_id of the samples that come after this generation. next_request_ids is especially useful for maintaining the speech's continuity when regenerating a sample that has had some audio quality issues. For example, if you have generated 3 speech clips and you want to improve clip 2, passing the request id of clip 3 as a next_request_id (and that of clip 1 as a previous_request_id) will help maintain natural flow in the combined speech. The results will be best when the same model is used across the generations. If both next_text and next_request_ids are sent, next_text will be ignored. A maximum of 3 request_ids can be sent.
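+
+A sketch of splitting a longer passage into per-sentence requests while preserving continuity via `previous_text`/`next_text` (voice and model IDs reused from the usage example; the second and third sentences are illustrative):
+
+```python
+from elevenlabs import ElevenLabs
+
+client = ElevenLabs(
+    api_key="YOUR_API_KEY",
+)
+
+sentences = [
+    "The first move is what sets everything in motion.",
+    "Every move after that is a response.",
+    "The final move decides the game.",
+]
+clips = []
+for i, text in enumerate(sentences):
+    clips.append(
+        client.text_to_speech.convert(
+            voice_id="JBFqnCBsd6RMkjVDRZzb",
+            model_id="eleven_multilingual_v2",
+            text=text,
+            # Surrounding context keeps prosody consistent across requests.
+            previous_text=sentences[i - 1] if i > 0 else None,
+            next_text=sentences[i + 1] if i + 1 < len(sentences) else None,
+        )
+    )
+```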
@@ -1020,9 +1167,7 @@ Defaults to None.
-
-**apply_text_normalization:** `typing.Optional[
- BodyTextToSpeechWithTimestampsV1TextToSpeechVoiceIdWithTimestampsPostApplyTextNormalization
-]` — This parameter controls text normalization with three modes: 'auto', 'on', and 'off'. When set to 'auto', the system will automatically decide whether to apply text normalization (e.g., spelling out numbers). With 'on', text normalization will always be applied, while with 'off', it will be skipped. Cannot be turned on for 'eleven_turbo_v2_5' model.
+**apply_text_normalization:** `typing.Optional[BodyTextToSpeechV1TextToSpeechVoiceIdPostApplyTextNormalization]` — This parameter controls text normalization with three modes: 'auto', 'on', and 'off'. When set to 'auto', the system will automatically decide whether to apply text normalization (e.g., spelling out numbers). With 'on', text normalization will always be applied, while with 'off', it will be skipped. Cannot be turned on for 'eleven_turbo_v2_5' model.
@@ -1030,7 +1175,7 @@ Defaults to None.
-
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. You can pass in configuration such as `chunk_size`, and more to customize the request and response.
@@ -1042,7 +1187,7 @@ Defaults to None.
-client.text_to_speech.convert_as_stream(...)
+client.text_to_speech.convert_with_timestamps(...)
-
@@ -1054,7 +1199,7 @@ Defaults to None.
-
-Converts text into speech using a voice of your choice and returns audio as an audio stream.
+Generate speech from text with precise character-level timing information for audio-text synchronization.
@@ -1074,11 +1219,9 @@ from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
-client.text_to_speech.convert_as_stream(
- voice_id="JBFqnCBsd6RMkjVDRZzb",
- output_format="mp3_44100_128",
- text="The first move is what sets everything in motion.",
- model_id="eleven_multilingual_v2",
+client.text_to_speech.convert_with_timestamps(
+ voice_id="21m00Tcm4TlvDq8ikWAM",
+ text="This is a test for the API of ElevenLabs.",
)
```
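+
+To make use of the timing data, a sketch that prints each character with its start time (the `alignment` fields below, `characters` and `character_start_times_seconds`, are assumed from the endpoint's documented response shape):
+
+```python
+from elevenlabs import ElevenLabs
+
+client = ElevenLabs(
+    api_key="YOUR_API_KEY",
+)
+
+result = client.text_to_speech.convert_with_timestamps(
+    voice_id="21m00Tcm4TlvDq8ikWAM",
+    text="This is a test for the API of ElevenLabs.",
+)
+# Assumed response fields: base64 audio plus per-character alignment.
+alignment = result.alignment
+for char, start in zip(
+    alignment.characters,
+    alignment.character_start_times_seconds,
+):
+    print(f"{start:6.2f}s  {char!r}")
+```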
@@ -1184,7 +1327,7 @@ Defaults to None.
-
-**previous_text:** `typing.Optional[str]` — The text that came before the text of the current request. Can be used to improve the flow of prosody when concatenating together multiple generations or to influence the prosody in the current generation.
+**previous_text:** `typing.Optional[str]` — The text that came before the text of the current request. Can be used to improve the speech's continuity when concatenating together multiple generations or to influence the speech's continuity in the current generation.
@@ -1192,7 +1335,7 @@ Defaults to None.
-
-**next_text:** `typing.Optional[str]` — The text that comes after the text of the current request. Can be used to improve the flow of prosody when concatenating together multiple generations or to influence the prosody in the current generation.
+**next_text:** `typing.Optional[str]` — The text that comes after the text of the current request. Can be used to improve the speech's continuity when concatenating together multiple generations or to influence the speech's continuity in the current generation.
@@ -1200,7 +1343,7 @@ Defaults to None.
-
-**previous_request_ids:** `typing.Optional[typing.Sequence[str]]` — A list of request_id of the samples that were generated before this generation. Can be used to improve the flow of prosody when splitting up a large task into multiple requests. The results will be best when the same model is used across the generations. In case both previous_text and previous_request_ids is send, previous_text will be ignored. A maximum of 3 request_ids can be send.
+**previous_request_ids:** `typing.Optional[typing.Sequence[str]]` — A list of request_id of the samples that were generated before this generation. Can be used to improve the speech's continuity when splitting up a large task into multiple requests. The results will be best when the same model is used across the generations. If both previous_text and previous_request_ids are sent, previous_text will be ignored. A maximum of 3 request_ids can be sent.
@@ -1208,7 +1351,7 @@ Defaults to None.
-
-**next_request_ids:** `typing.Optional[typing.Sequence[str]]` — A list of request_id of the samples that were generated before this generation. Can be used to improve the flow of prosody when splitting up a large task into multiple requests. The results will be best when the same model is used across the generations. In case both next_text and next_request_ids is send, next_text will be ignored. A maximum of 3 request_ids can be send.
+**next_request_ids:** `typing.Optional[typing.Sequence[str]]` — A list of request_id of the samples that come after this generation. next_request_ids is especially useful for maintaining the speech's continuity when regenerating a sample that has had some audio quality issues. For example, if you have generated 3 speech clips and you want to improve clip 2, passing the request id of clip 3 as a next_request_id (and that of clip 1 as a previous_request_id) will help maintain natural flow in the combined speech. The results will be best when the same model is used across the generations. If both next_text and next_request_ids are sent, next_text will be ignored. A maximum of 3 request_ids can be sent.
@@ -1225,7 +1368,7 @@ Defaults to None.
-
**apply_text_normalization:** `typing.Optional[
- BodyTextToSpeechStreamingV1TextToSpeechVoiceIdStreamPostApplyTextNormalization
+ BodyTextToSpeechWithTimestampsV1TextToSpeechVoiceIdWithTimestampsPostApplyTextNormalization
]` — This parameter controls text normalization with three modes: 'auto', 'on', and 'off'. When set to 'auto', the system will automatically decide whether to apply text normalization (e.g., spelling out numbers). With 'on', text normalization will always be applied, while with 'off', it will be skipped. Cannot be turned on for 'eleven_turbo_v2_5' model.
@@ -1234,7 +1377,7 @@ Defaults to None.
-
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. You can pass in configuration such as `chunk_size`, and more to customize the request and response.
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
@@ -1246,7 +1389,7 @@ Defaults to None.
-client.text_to_speech.stream_with_timestamps(...)
+client.text_to_speech.convert_as_stream(...)
-
@@ -1258,7 +1401,7 @@ Defaults to None.
-
-Converts text into speech using a voice of your choice and returns a stream of JSONs containing audio as a base64 encoded string together with information on when which character was spoken.
+Converts text into speech using a voice of your choice and returns audio as an audio stream.
@@ -1278,14 +1421,12 @@ from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
-response = client.text_to_speech.stream_with_timestamps(
+client.text_to_speech.convert_as_stream(
voice_id="JBFqnCBsd6RMkjVDRZzb",
output_format="mp3_44100_128",
text="The first move is what sets everything in motion.",
model_id="eleven_multilingual_v2",
)
-for chunk in response:
- yield chunk
```
@@ -1390,7 +1531,7 @@ Defaults to None.
-
-**previous_text:** `typing.Optional[str]` — The text that came before the text of the current request. Can be used to improve the flow of prosody when concatenating together multiple generations or to influence the prosody in the current generation.
+**previous_text:** `typing.Optional[str]` — The text that came before the text of the current request. Can be used to improve the speech's continuity when concatenating together multiple generations or to influence the speech's continuity in the current generation.
@@ -1398,7 +1539,7 @@ Defaults to None.
-
-**next_text:** `typing.Optional[str]` — The text that comes after the text of the current request. Can be used to improve the flow of prosody when concatenating together multiple generations or to influence the prosody in the current generation.
+**next_text:** `typing.Optional[str]` — The text that comes after the text of the current request. Can be used to improve the speech's continuity when concatenating together multiple generations or to influence the speech's continuity in the current generation.
@@ -1406,7 +1547,7 @@ Defaults to None.
-
-**previous_request_ids:** `typing.Optional[typing.Sequence[str]]` — A list of request_id of the samples that were generated before this generation. Can be used to improve the flow of prosody when splitting up a large task into multiple requests. The results will be best when the same model is used across the generations. In case both previous_text and previous_request_ids is send, previous_text will be ignored. A maximum of 3 request_ids can be send.
+**previous_request_ids:** `typing.Optional[typing.Sequence[str]]` — A list of request_id of the samples that were generated before this generation. Can be used to improve the speech's continuity when splitting up a large task into multiple requests. The results will be best when the same model is used across the generations. If both previous_text and previous_request_ids are sent, previous_text will be ignored. A maximum of 3 request_ids can be sent.
@@ -1414,7 +1555,7 @@ Defaults to None.
-
-**next_request_ids:** `typing.Optional[typing.Sequence[str]]` — A list of request_id of the samples that were generated before this generation. Can be used to improve the flow of prosody when splitting up a large task into multiple requests. The results will be best when the same model is used across the generations. In case both next_text and next_request_ids is send, next_text will be ignored. A maximum of 3 request_ids can be send.
+**next_request_ids:** `typing.Optional[typing.Sequence[str]]` — A list of request_id of the samples that come after this generation. next_request_ids is especially useful for maintaining the speech's continuity when regenerating a sample that has had some audio quality issues. For example, if you have generated 3 speech clips and you want to improve clip 2, passing the request id of clip 3 as a next_request_id (and that of clip 1 as a previous_request_id) will help maintain natural flow in the combined speech. The results will be best when the same model is used across the generations. If both next_text and next_request_ids are sent, next_text will be ignored. A maximum of 3 request_ids can be sent.
@@ -1431,7 +1572,7 @@ Defaults to None.
-
**apply_text_normalization:** `typing.Optional[
- BodyTextToSpeechStreamingWithTimestampsV1TextToSpeechVoiceIdStreamWithTimestampsPostApplyTextNormalization
+ BodyTextToSpeechStreamingV1TextToSpeechVoiceIdStreamPostApplyTextNormalization
]` — This parameter controls text normalization with three modes: 'auto', 'on', and 'off'. When set to 'auto', the system will automatically decide whether to apply text normalization (e.g., spelling out numbers). With 'on', text normalization will always be applied, while with 'off', it will be skipped. Cannot be turned on for 'eleven_turbo_v2_5' model.
@@ -1440,7 +1581,7 @@ Defaults to None.
-
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. You can pass in configuration such as `chunk_size`, and more to customize the request and response.
@@ -1452,8 +1593,7 @@ Defaults to None.
-## SpeechToSpeech
-client.speech_to_speech.convert(...)
+client.text_to_speech.stream_with_timestamps(...)
-
@@ -1465,7 +1605,7 @@ Defaults to None.
-
-Create speech by combining the content and emotion of the uploaded audio with a voice of your choice.
+Converts text into speech using a voice of your choice and returns a stream of JSONs containing audio as a base64 encoded string together with information on when which character was spoken.
@@ -1485,11 +1625,14 @@ from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
-client.speech_to_speech.convert(
+response = client.text_to_speech.stream_with_timestamps(
voice_id="JBFqnCBsd6RMkjVDRZzb",
output_format="mp3_44100_128",
- model_id="eleven_multilingual_sts_v2",
+ text="The first move is what sets everything in motion.",
+ model_id="eleven_multilingual_v2",
)
+for chunk in response:
+    print(chunk)  # each chunk pairs base64 audio with character timing info
```
@@ -1513,9 +1656,7 @@ client.speech_to_speech.convert(
-
-**audio:** `from __future__ import annotations
-
-core.File` — See core.File for more documentation
+**text:** `str` — The text that will get converted into speech.
@@ -1556,7 +1697,7 @@ Defaults to None.
-
-**model_id:** `typing.Optional[str]` — Identifier of the model that will be used, you can query them using GET /v1/models. The model needs to have support for speech to speech, you can check this using the can_do_voice_conversion property.
+**model_id:** `typing.Optional[str]` — Identifier of the model that will be used; you can query them using GET /v1/models. The model needs to have support for text to speech; you can check this using the can_do_text_to_speech property.
@@ -1564,7 +1705,7 @@ Defaults to None.
-
-**voice_settings:** `typing.Optional[str]` — Voice settings overriding stored setttings for the given voice. They are applied only on the given request. Needs to be send as a JSON encoded string.
+**language_code:** `typing.Optional[str]` — Language code (ISO 639-1) used to enforce a language for the model. Currently only Turbo v2.5 and Flash v2.5 support language enforcement. For other models, an error will be returned if language code is provided.
@@ -1572,7 +1713,7 @@ Defaults to None.
-
-**seed:** `typing.Optional[int]` — If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed. Must be integer between 0 and 4294967295.
+**voice_settings:** `typing.Optional[VoiceSettings]` — Voice settings overriding stored settings for the given voice. They are applied only on the given request.
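+
+A sketch of a per-request override for this endpoint (the `VoiceSettings` fields used here, `stability` and `similarity_boost`, are assumed from the SDK's exported model):
+
+```python
+from elevenlabs import ElevenLabs, VoiceSettings
+
+client = ElevenLabs(
+    api_key="YOUR_API_KEY",
+)
+
+response = client.text_to_speech.stream_with_timestamps(
+    voice_id="JBFqnCBsd6RMkjVDRZzb",
+    text="The first move is what sets everything in motion.",
+    # Overrides apply to this request only; stored settings are untouched.
+    voice_settings=VoiceSettings(stability=0.5, similarity_boost=0.8),
+)
+for chunk in response:
+    print(chunk)
+```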
@@ -1580,7 +1721,7 @@ Defaults to None.
-
-**remove_background_noise:** `typing.Optional[bool]` — If set will remove the background noise from your audio input using our audio isolation model. Only applies to Voice Changer.
+**pronunciation_dictionary_locators:** `typing.Optional[typing.Sequence[PronunciationDictionaryVersionLocator]]` — A list of pronunciation dictionary locators (id, version_id) to be applied to the text. They will be applied in order. You may have up to 3 locators per request.
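+
+A sketch of applying a stored dictionary to a request (the IDs are reused from the pronunciation dictionary examples below; the `PronunciationDictionaryVersionLocator` field names are assumed):
+
+```python
+from elevenlabs import ElevenLabs, PronunciationDictionaryVersionLocator
+
+client = ElevenLabs(
+    api_key="YOUR_API_KEY",
+)
+
+response = client.text_to_speech.stream_with_timestamps(
+    voice_id="JBFqnCBsd6RMkjVDRZzb",
+    text="The first move is what sets everything in motion.",
+    pronunciation_dictionary_locators=[
+        # Applied in order; up to 3 locators per request.
+        PronunciationDictionaryVersionLocator(
+            pronunciation_dictionary_id="Fm6AvNgS53NXe6Kqxp3e",
+            version_id="KZFyRUq3R6kaqhKI146w",
+        )
+    ],
+)
+```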
@@ -1588,31 +1729,237 @@ Defaults to None.
-
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. You can pass in configuration such as `chunk_size`, and more to customize the request and response.
+**seed:** `typing.Optional[int]` — If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed. Must be an integer between 0 and 4294967295.
-
-
+
+-
+**previous_text:** `typing.Optional[str]` — The text that came before the text of the current request. Can be used to improve the speech's continuity when concatenating together multiple generations or to influence the speech's continuity in the current generation.
+
-
-client.speech_to_speech.convert_as_stream(...)
-
-#### 📝 Description
+**next_text:** `typing.Optional[str]` — The text that comes after the text of the current request. Can be used to improve the speech's continuity when concatenating together multiple generations or to influence the speech's continuity in the current generation.
+
+
+
-
+**previous_request_ids:** `typing.Optional[typing.Sequence[str]]` — A list of request_id of the samples that were generated before this generation. Can be used to improve the speech's continuity when splitting up a large task into multiple requests. The results will be best when the same model is used across the generations. If both previous_text and previous_request_ids are sent, previous_text will be ignored. A maximum of 3 request_ids can be sent.
+
+
+
+
-
-Create speech by combining the content and emotion of the uploaded audio with a voice of your choice and returns an audio stream.
+**next_request_ids:** `typing.Optional[typing.Sequence[str]]` — A list of request_id of the samples that come after this generation. next_request_ids is especially useful for maintaining the speech's continuity when regenerating a sample that has had some audio quality issues. For example, if you have generated 3 speech clips and you want to improve clip 2, passing the request id of clip 3 as a next_request_id (and that of clip 1 as a previous_request_id) will help maintain natural flow in the combined speech. The results will be best when the same model is used across the generations. If both next_text and next_request_ids are sent, next_text will be ignored. A maximum of 3 request_ids can be sent.
+
+
+
+
+
+-
+
+**use_pvc_as_ivc:** `typing.Optional[bool]` — If true, we won't use PVC version of the voice for the generation but the IVC version. This is a temporary workaround for higher latency in PVC versions.
+
+
+
+
+
+-
+
+**apply_text_normalization:** `typing.Optional[
+ BodyTextToSpeechStreamingWithTimestampsV1TextToSpeechVoiceIdStreamWithTimestampsPostApplyTextNormalization
+]` — This parameter controls text normalization with three modes: 'auto', 'on', and 'off'. When set to 'auto', the system will automatically decide whether to apply text normalization (e.g., spelling out numbers). With 'on', text normalization will always be applied, while with 'off', it will be skipped. Cannot be turned on for 'eleven_turbo_v2_5' model.
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+## SpeechToSpeech
+client.speech_to_speech.convert(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+Transform audio from one voice to another. Maintain full control over emotion, timing and delivery.
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from elevenlabs import ElevenLabs
+
+client = ElevenLabs(
+ api_key="YOUR_API_KEY",
+)
+client.speech_to_speech.convert(
+ voice_id="JBFqnCBsd6RMkjVDRZzb",
+ output_format="mp3_44100_128",
+ model_id="eleven_multilingual_sts_v2",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**voice_id:** `str` — Voice ID to be used; you can use https://api.elevenlabs.io/v1/voices to list all the available voices.
+
+
+
+
+
+-
+
+**audio:** `from __future__ import annotations
+
+core.File` — See core.File for more documentation
+
+
+
+
+
+-
+
+**enable_logging:** `typing.Optional[bool]` — When enable_logging is set to false, zero retention mode will be used for the request. This means history features are unavailable for this request, including request stitching. Zero retention mode may only be used by enterprise customers.
+
+
+
+
+
+-
+
+**optimize_streaming_latency:** `typing.Optional[int]`
+
+You can turn on latency optimizations at some cost of quality. The best possible final latency varies by model. Possible values:
+0 - default mode (no latency optimizations)
+1 - normal latency optimizations (about 50% of possible latency improvement of option 3)
+2 - strong latency optimizations (about 75% of possible latency improvement of option 3)
+3 - max latency optimizations
+4 - max latency optimizations, but also with text normalizer turned off for even more latency savings (best latency, but can mispronounce e.g. numbers and dates).
+
+Defaults to None.
+
+
+
+
+
+-
+
+**output_format:** `typing.Optional[OutputFormat]` — The output format of the generated audio.
+
+
+
+
+
+-
+
+**model_id:** `typing.Optional[str]` — Identifier of the model that will be used; you can query them using GET /v1/models. The model needs to have support for speech to speech; you can check this using the can_do_voice_conversion property.
+
+
+
+
+
+-
+
+**voice_settings:** `typing.Optional[str]` — Voice settings overriding stored settings for the given voice. They are applied only on the given request. Needs to be sent as a JSON-encoded string.
+
+
+
+
+
+-
+
+**seed:** `typing.Optional[int]` — If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed. Must be an integer between 0 and 4294967295.
+
+
+
+
+
+-
+
+**remove_background_noise:** `typing.Optional[bool]` — If set, will remove the background noise from your audio input using our audio isolation model. Only applies to Voice Changer.
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. You can pass in configuration such as `chunk_size`, and more to customize the request and response.
+
+
+
+
+
+
+
+
+
+
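+Because the usage example above omits the audio payload, here is a fuller sketch (assuming `audio` accepts any binary file object, per the core.File parameter):
+
+```python
+from elevenlabs import ElevenLabs
+
+client = ElevenLabs(
+    api_key="YOUR_API_KEY",
+)
+
+with open("input.mp3", "rb") as source:
+    converted = client.speech_to_speech.convert(
+        voice_id="JBFqnCBsd6RMkjVDRZzb",
+        audio=source,  # core.File: a binary file object is assumed to work
+        model_id="eleven_multilingual_sts_v2",
+        output_format="mp3_44100_128",
+        remove_background_noise=True,  # optional audio-isolation pass
+    )
+```
+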
+
+client.speech_to_speech.convert_as_stream(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+Stream audio from one voice to another. Maintain full control over emotion, timing and delivery.
@@ -3219,6 +3566,14 @@ client.voices.get_shared(
-
+**min_notice_period_days:** `typing.Optional[int]` — Filter voices with a minimum notice period of the given number of days.
+
+
+
+
+
+-
+
**reader_app_enabled:** `typing.Optional[bool]` — Filter voices that are enabled for the reader app
@@ -3460,7 +3815,7 @@ client = ElevenLabs(
api_key="YOUR_API_KEY",
)
client.studio.create_podcast(
- model_id="model_id",
+ model_id="21m00Tcm4TlvDq8ikWAM",
mode=BodyCreatePodcastV1StudioPodcastsPostMode_Conversation(
conversation=PodcastConversationModeData(
host_voice_id="host_voice_id",
@@ -3616,7 +3971,7 @@ client = ElevenLabs(
api_key="YOUR_API_KEY",
)
client.projects.create_podcast(
- model_id="model_id",
+ model_id="21m00Tcm4TlvDq8ikWAM",
mode=BodyCreatePodcastV1ProjectsPodcastCreatePostMode_Conversation(
conversation=PodcastConversationModeData(
host_voice_id="host_voice_id",
@@ -4021,14 +4376,6 @@ ultra lossless - ultra quality output format, 705.6kbps with 44.1kHz sample rate
-
-**quality_check_on:** `typing.Optional[bool]` — [Depracated] Whether to run quality check on the generated audio and regenerate if needed. Applies to individual block conversion.
-
-
-
-
-
--
-
**apply_text_normalization:** `typing.Optional[AddProjectV1ProjectsAddPostRequestApplyTextNormalization]`
@@ -4176,9 +4523,9 @@ client = ElevenLabs(
)
client.projects.edit_basic_project_info(
project_id="21m00Tcm4TlvDq8ikWAM",
- name="name",
- default_title_voice_id="default_title_voice_id",
- default_paragraph_voice_id="default_paragraph_voice_id",
+ name="Project 1",
+ default_title_voice_id="21m00Tcm4TlvDq8ikWAM",
+ default_paragraph_voice_id="21m00Tcm4TlvDq8ikWAM",
)
```
@@ -4259,14 +4606,6 @@ client.projects.edit_basic_project_info(
-
-**quality_check_on:** `typing.Optional[bool]` — [Depracated] Whether to run quality check on the generated audio and regenerate if needed. Applies to individual block conversion.
-
-
-
-
-
--
-
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
@@ -5021,7 +5360,7 @@ client = ElevenLabs(
)
client.projects.add_chapter_to_a_project(
project_id="21m00Tcm4TlvDq8ikWAM",
- name="name",
+ name="Chapter 1",
)
```
@@ -5400,6 +5739,14 @@ client.projects.update_pronunciation_dictionaries(
-
+**invalidate_affected_text:** `typing.Optional[bool]` — This will automatically mark text in this project for reconversion when the new dictionary applies or the old one no longer does.
+
+
+
+
+
+-
+
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
@@ -6022,7 +6369,7 @@ typing.Optional[core.File]` — See core.File for more documentation
-client.audio_native.update_content(...)
+client.audio_native.get_settings(...)
-
@@ -6034,7 +6381,7 @@ typing.Optional[core.File]` — See core.File for more documentation
-
-Updates content for the specific AudioNative Project.
+Get player settings for the specific project.
@@ -6054,7 +6401,7 @@ from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
-client.audio_native.update_content(
+client.audio_native.get_settings(
project_id="21m00Tcm4TlvDq8ikWAM",
)
@@ -6080,34 +6427,104 @@ client.audio_native.update_content(
-
-**file:** `from __future__ import annotations
-
-typing.Optional[core.File]` — See core.File for more documentation
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
-
--
-**auto_convert:** `typing.Optional[bool]` — Whether to auto convert the project to audio or not.
-
+
+client.audio_native.update_content(...)
-
-**auto_publish:** `typing.Optional[bool]` — Whether to auto publish the new project snapshot after it's converted.
-
-
-
+#### 📝 Description
-
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
+-
+
+Updates content for the specific AudioNative Project.
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from elevenlabs import ElevenLabs
+
+client = ElevenLabs(
+ api_key="YOUR_API_KEY",
+)
+client.audio_native.update_content(
+ project_id="21m00Tcm4TlvDq8ikWAM",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**project_id:** `str` — The ID of the Studio project.
+
+
+
+
+
+-
+
+**file:** `from __future__ import annotations
+
+typing.Optional[core.File]` — See core.File for more documentation
+
+
+
+
+
+-
+
+**auto_convert:** `typing.Optional[bool]` — Whether to auto convert the project to audio or not.
+
+
+
+
+
+-
+
+**auto_publish:** `typing.Optional[bool]` — Whether to auto publish the new project snapshot after it's converted.
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
@@ -6414,7 +6831,384 @@ List of pronunciation rules. Rule can be either:
-
-Remove rules from the pronunciation dictionary
+Remove rules from the pronunciation dictionary
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from elevenlabs import ElevenLabs
+
+client = ElevenLabs(
+ api_key="YOUR_API_KEY",
+)
+client.pronunciation_dictionary.remove_rules(
+ pronunciation_dictionary_id="21m00Tcm4TlvDq8ikWAM",
+ rule_strings=["rule_strings"],
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**pronunciation_dictionary_id:** `str` — The id of the pronunciation dictionary
+
+
+
+
+
+-
+
+**rule_strings:** `typing.Sequence[str]` — List of strings to remove from the pronunciation dictionary.
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.pronunciation_dictionary.download(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+Get a PLS file with the rules of a pronunciation dictionary version.
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from elevenlabs import ElevenLabs
+
+client = ElevenLabs(
+ api_key="YOUR_API_KEY",
+)
+client.pronunciation_dictionary.download(
+ dictionary_id="Fm6AvNgS53NXe6Kqxp3e",
+ version_id="KZFyRUq3R6kaqhKI146w",
+)
+
+```
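+
+To keep the PLS document locally, a sketch that writes the response to disk (assuming the call returns the file as an iterator of byte chunks):
+
+```python
+from elevenlabs import ElevenLabs
+
+client = ElevenLabs(
+    api_key="YOUR_API_KEY",
+)
+
+pls = client.pronunciation_dictionary.download(
+    dictionary_id="Fm6AvNgS53NXe6Kqxp3e",
+    version_id="KZFyRUq3R6kaqhKI146w",
+)
+with open("dictionary.pls", "wb") as f:
+    for chunk in pls:  # assumed iterator of byte chunks
+        f.write(chunk)
+```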
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**dictionary_id:** `str` — The id of the pronunciation dictionary
+
+
+
+
+
+-
+
+**version_id:** `str` — The id of the version of the pronunciation dictionary
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.pronunciation_dictionary.get(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+Get metadata for a pronunciation dictionary
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from elevenlabs import ElevenLabs
+
+client = ElevenLabs(
+ api_key="YOUR_API_KEY",
+)
+client.pronunciation_dictionary.get(
+ pronunciation_dictionary_id="Fm6AvNgS53NXe6Kqxp3e",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**pronunciation_dictionary_id:** `str` — The id of the pronunciation dictionary
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.pronunciation_dictionary.get_all(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+Get a list of the pronunciation dictionaries you have access to and their metadata
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from elevenlabs import ElevenLabs
+
+client = ElevenLabs(
+ api_key="YOUR_API_KEY",
+)
+client.pronunciation_dictionary.get_all(
+ page_size=1,
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**cursor:** `typing.Optional[str]` — Used for fetching next page. Cursor is returned in the response.
+
+
+
+
+
+-
+
+**page_size:** `typing.Optional[int]` — How many pronunciation dictionaries to return at maximum. Cannot exceed 100; defaults to 30 (see the pagination sketch at the end of this section).
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
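+A cursor-pagination sketch for `get_all` (the response fields `pronunciation_dictionaries` and `next_cursor` are assumed):
+
+```python
+from elevenlabs import ElevenLabs
+
+client = ElevenLabs(
+    api_key="YOUR_API_KEY",
+)
+
+cursor = None
+while True:
+    page = client.pronunciation_dictionary.get_all(page_size=30, cursor=cursor)
+    for dictionary in page.pronunciation_dictionaries:  # assumed field
+        print(dictionary.id)  # assumed field
+    cursor = page.next_cursor  # assumed field; cursor is returned in the response
+    if cursor is None:
+        break
+```
+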
+## Workspace
+client.workspace.search_user_groups(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+Searches for user groups in the workspace. Multiple or no groups may be returned.
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from elevenlabs import ElevenLabs
+
+client = ElevenLabs(
+ api_key="YOUR_API_KEY",
+)
+client.workspace.search_user_groups(
+ name="name",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**name:** `str` — Name of the group to find.
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.workspace.delete_member_from_user_group(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+Removes a member from the specified group. This endpoint may only be called by workspace administrators.
@@ -6434,9 +7228,9 @@ from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
-client.pronunciation_dictionary.remove_rules(
- pronunciation_dictionary_id="21m00Tcm4TlvDq8ikWAM",
- rule_strings=["rule_strings"],
+client.workspace.delete_member_from_user_group(
+ group_id="group_id",
+ email="email",
)
```
@@ -6453,7 +7247,7 @@ client.pronunciation_dictionary.remove_rules(
-
-**pronunciation_dictionary_id:** `str` — The id of the pronunciation dictionary
+**group_id:** `str` — The ID of the target group.
@@ -6461,7 +7255,7 @@ client.pronunciation_dictionary.remove_rules(
-
-**rule_strings:** `typing.Sequence[str]` — List of strings to remove from the pronunciation dictionary.
+**email:** `str` — The email of the target workspace member.
@@ -6481,7 +7275,7 @@ client.pronunciation_dictionary.remove_rules(
-client.pronunciation_dictionary.download(...)
+client.workspace.add_member_to_user_group(...)
-
@@ -6493,7 +7287,7 @@ client.pronunciation_dictionary.remove_rules(
-
-Get PLS file with a pronunciation dictionary version rules
+Adds a member of your workspace to the specified group. This endpoint may only be called by workspace administrators.
@@ -6513,9 +7307,9 @@ from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
-client.pronunciation_dictionary.download(
- dictionary_id="Fm6AvNgS53NXe6Kqxp3e",
- version_id="KZFyRUq3R6kaqhKI146w",
+client.workspace.add_member_to_user_group(
+ group_id="group_id",
+ email="email",
)
```
@@ -6532,7 +7326,7 @@ client.pronunciation_dictionary.download(
-
-**dictionary_id:** `str` — The id of the pronunciation dictionary
+**group_id:** `str` — The ID of the target group.
@@ -6540,7 +7334,7 @@ client.pronunciation_dictionary.download(
-
-**version_id:** `str` — The id of the version of the pronunciation dictionary
+**email:** `str` — The email of the target workspace member.
@@ -6560,7 +7354,7 @@ client.pronunciation_dictionary.download(
-client.pronunciation_dictionary.get(...)
+client.workspace.invite_user(...)
-
@@ -6572,7 +7366,7 @@ client.pronunciation_dictionary.download(
-
-Get metadata for a pronunciation dictionary
+Sends an email invitation to join your workspace to the provided email. If the user doesn't have an account they will be prompted to create one. If the user accepts this invite they will be added as a user to your workspace and your subscription using one of your seats. This endpoint may only be called by workspace administrators. If the user is already in the workspace a 400 error will be returned.
@@ -6592,8 +7386,8 @@ from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
-client.pronunciation_dictionary.get(
- pronunciation_dictionary_id="Fm6AvNgS53NXe6Kqxp3e",
+client.workspace.invite_user(
+ email="john.doe@testmail.com",
)
```
@@ -6610,7 +7404,15 @@ client.pronunciation_dictionary.get(
-
-**pronunciation_dictionary_id:** `str` — The id of the pronunciation dictionary
+**email:** `str` — The email of the customer
+
+
+
+
+
+-
+
+**group_ids:** `typing.Optional[typing.Sequence[str]]` — The group ids of the user
@@ -6630,7 +7432,7 @@ client.pronunciation_dictionary.get(
-client.pronunciation_dictionary.get_all(...)
+client.workspace.invite_multiple_users(...)
-
@@ -6642,7 +7444,7 @@ client.pronunciation_dictionary.get(
-
-Get a list of the pronunciation dictionaries you have access to and their metadata
+Sends email invitations to join your workspace to the provided emails. Requires all email addresses to be part of a verified domain. If the users don't have an account they will be prompted to create one. If the users accept these invites they will be added as users to your workspace and your subscription using one of your seats. This endpoint may only be called by workspace administrators.
@@ -6662,8 +7464,8 @@ from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
-client.pronunciation_dictionary.get_all(
- page_size=1,
+client.workspace.invite_multiple_users(
+ emails=["emails"],
)
```
@@ -6680,7 +7482,7 @@ client.pronunciation_dictionary.get_all(
-
-**cursor:** `typing.Optional[str]` — Used for fetching next page. Cursor is returned in the response.
+**emails:** `typing.Sequence[str]` — The email of the customer
@@ -6688,7 +7490,7 @@ client.pronunciation_dictionary.get_all(
-
-**page_size:** `typing.Optional[int]` — How many pronunciation dictionaries to return at maximum. Can not exceed 100, defaults to 30.
+**group_ids:** `typing.Optional[typing.Sequence[str]]` — The group ids of the user
@@ -6708,8 +7510,7 @@ client.pronunciation_dictionary.get_all(
-## Workspace
-client.workspace.search_user_groups(...)
+client.workspace.delete_existing_invitation(...)
-
@@ -6721,7 +7522,7 @@ client.pronunciation_dictionary.get_all(
-
-Searches for user groups in the workspace. Multiple or no groups may be returned.
+Invalidates an existing email invitation. The invitation will still show up in the inbox it has been delivered to, but activating it to join the workspace won't work. This endpoint may only be called by workspace administrators.
@@ -6741,8 +7542,8 @@ from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
-client.workspace.search_user_groups(
- name="name",
+client.workspace.delete_existing_invitation(
+ email="john.doe@testmail.com",
)
```
@@ -6759,7 +7560,7 @@ client.workspace.search_user_groups(
-
-**name:** `str` — Name of the group to find.
+**email:** `str` — The email of the customer
@@ -6779,7 +7580,7 @@ client.workspace.search_user_groups(
-client.workspace.delete_member_from_user_group(...)
+client.workspace.update_member(...)
-
@@ -6791,7 +7592,7 @@ client.workspace.search_user_groups(
-
-Removes a member from the specified group. This endpoint may only be called by workspace administrators.
+Updates attributes of a workspace member. Apart from the email identifier, all parameters will remain unchanged unless specified. This endpoint may only be called by workspace administrators.
@@ -6811,8 +7612,7 @@ from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
-client.workspace.delete_member_from_user_group(
- group_id="group_id",
+client.workspace.update_member(
email="email",
)
@@ -6830,7 +7630,7 @@ client.workspace.delete_member_from_user_group(
-
-**group_id:** `str` — The ID of the target group.
+**email:** `str` — Email of the target user.
@@ -6838,7 +7638,15 @@ client.workspace.delete_member_from_user_group(
-
-**email:** `str` — The email of the target workspace member.
+**is_locked:** `typing.Optional[bool]` — Whether to lock or unlock the user account.
+
+
+
+
+
+-
+
+**workspace_role:** `typing.Optional[BodyUpdateMemberV1WorkspaceMembersPostWorkspaceRole]` — Role dictating permissions in the workspace.
@@ -6858,7 +7666,8 @@ client.workspace.delete_member_from_user_group(
-client.workspace.add_member_to_user_group(...)
+## SpeechToText
+client.speech_to_text.convert(...)
-
@@ -6870,7 +7679,7 @@ client.workspace.delete_member_from_user_group(
-
-Adds a member of your workspace to the specified group. This endpoint may only be called by workspace administrators.
+Transcribe an audio or video file.
@@ -6890,9 +7699,8 @@ from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
-client.workspace.add_member_to_user_group(
- group_id="group_id",
- email="email",
+client.speech_to_text.convert(
+ model_id="model_id",
)
```
@@ -6909,7 +7717,7 @@ client.workspace.add_member_to_user_group(
-
-**group_id:** `str` — The ID of the target group.
+**model_id:** `str` — The ID of the model to use for transcription, currently only 'scribe_v1' is available.
@@ -6917,7 +7725,9 @@ client.workspace.add_member_to_user_group(
-
-**email:** `str` — The email of the target workspace member.
+**file:** `from __future__ import annotations
+
+core.File` — See core.File for more documentation
@@ -6925,69 +7735,31 @@ client.workspace.add_member_to_user_group(
-
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+**language_code:** `typing.Optional[str]` — An ISO-639-1 or ISO-639-3 language_code corresponding to the language of the audio file. Can sometimes improve transcription performance if known beforehand. Defaults to null, in this case the language is predicted automatically.
-
-
-
-
-
-
-
-
-client.workspace.invite_user(...)
-
--
-
-#### 📝 Description
-
-
--
-
-Sends an email invitation to join your workspace to the provided email. If the user doesn't have an account they will be prompted to create one. If the user accepts this invite they will be added as a user to your workspace and your subscription using one of your seats. This endpoint may only be called by workspace administrators. If the user is already in the workspace a 400 error will be returned.
-
-
+**tag_audio_events:** `typing.Optional[bool]` — Whether to tag audio events like (laughter), (footsteps), etc. in the transcription.
+
-#### 🔌 Usage
-
-
--
-
-
-```python
-from elevenlabs import ElevenLabs
-
-client = ElevenLabs(
- api_key="YOUR_API_KEY",
-)
-client.workspace.invite_user(
- email="email",
-)
-
-```
-
-
+**num_speakers:** `typing.Optional[int]` — The maximum amount of speakers talking in the uploaded file. Can help with predicting who speaks when. The maximum amount of speakers that can be predicted is 32. Defaults to null, in this case the amount of speakers is set to the maximum value the model supports.
+
-#### ⚙️ Parameters
-
-
-
--
-
-**email:** `str` — The email of the customer
+**timestamps_granularity:** `typing.Optional[SpeechToTextConvertRequestTimestampsGranularity]` — The granularity of the timestamps in the transcription. 'word' provides word-level timestamps and 'character' provides character-level timestamps per word.
@@ -6995,7 +7767,7 @@ client.workspace.invite_user(
-
-**group_ids:** `typing.Optional[typing.Sequence[str]]` — The group ids of the user
+**diarize:** `typing.Optional[bool]` — Whether to annotate which speaker is currently talking in the uploaded file. Enabling this will limit the maximum duration of your inputs to 8 minutes.
@@ -7015,7 +7787,8 @@ client.workspace.invite_user(
-client.workspace.invite_multiple_users(...)
+## ConversationalAi
+client.conversational_ai.get_signed_url(...)
-
@@ -7027,7 +7800,7 @@ client.workspace.invite_user(
-
-Sends email invitations to join your workspace to the provided emails. Requires all email addresses to be part of a verified domain. If the users don't have an account they will be prompted to create one. If the users accept these invites they will be added as users to your workspace and your subscription using one of your seats. This endpoint may only be called by workspace administrators.
+Get a signed url to start a conversation with an agent with an agent that requires authorization
@@ -7047,8 +7820,8 @@ from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
-client.workspace.invite_multiple_users(
- emails=["emails"],
+client.conversational_ai.get_signed_url(
+ agent_id="21m00Tcm4TlvDq8ikWAM",
)
```
@@ -7065,15 +7838,7 @@ client.workspace.invite_multiple_users(
-
-**emails:** `typing.Sequence[str]` — The email of the customer
-
-
-
-
-
--
-
-**group_ids:** `typing.Optional[typing.Sequence[str]]` — The group ids of the user
+**agent_id:** `str` — The id of the agent you're taking the action on.
@@ -7093,7 +7858,7 @@ client.workspace.invite_multiple_users(
-client.workspace.delete_existing_invitation(...)
+client.conversational_ai.create_agent(...)
-
@@ -7105,7 +7870,7 @@ client.workspace.invite_multiple_users(
-
-Invalidates an existing email invitation. The invitation will still show up in the inbox it has been delivered to, but activating it to join the workspace won't work. This endpoint may only be called by workspace administrators.
+Create an agent from a config object
@@ -7120,13 +7885,13 @@ Invalidates an existing email invitation. The invitation will still show up in t
-
```python
-from elevenlabs import ElevenLabs
+from elevenlabs import ConversationalConfig, ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
-client.workspace.delete_existing_invitation(
- email="email",
+client.conversational_ai.create_agent(
+ conversation_config=ConversationalConfig(),
)
```
@@ -7143,7 +7908,31 @@ client.workspace.delete_existing_invitation(
-
-**email:** `str` — The email of the customer
+**conversation_config:** `ConversationalConfig` — Conversation configuration for an agent
+
+
+
+
+
+-
+
+**use_tool_ids:** `typing.Optional[bool]` — Use tool ids instead of tools specs from request payload.
+
+
+
+
+
+-
+
+**platform_settings:** `typing.Optional[AgentPlatformSettingsRequestModel]` — Platform settings for the agent are all settings that aren't related to the conversation orchestration and content.
+
+
+
+
+
+-
+
+**name:** `typing.Optional[str]` — A name to make the agent easier to find
@@ -7163,7 +7952,7 @@ client.workspace.delete_existing_invitation(
-client.workspace.update_member(...)
+client.conversational_ai.get_agent(...)
-
@@ -7175,7 +7964,7 @@ client.workspace.delete_existing_invitation(
-
-Updates attributes of a workspace member. Apart from the email identifier, all parameters will remain unchanged unless specified. This endpoint may only be called by workspace administrators.
+Retrieve config for an agent
@@ -7195,8 +7984,8 @@ from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
-client.workspace.update_member(
- email="email",
+client.conversational_ai.get_agent(
+ agent_id="21m00Tcm4TlvDq8ikWAM",
)
```
@@ -7213,23 +8002,7 @@ client.workspace.update_member(
-
-**email:** `str` — Email of the target user.
-
-
-
-
-
--
-
-**is_locked:** `typing.Optional[bool]` — Whether to lock or unlock the user account.
-
-
-
-
-
--
-
-**workspace_role:** `typing.Optional[BodyUpdateMemberV1WorkspaceMembersPostWorkspaceRole]` — Role dictating permissions in the workspace.
+**agent_id:** `str` — The id of an agent. This is returned on agent creation.
@@ -7249,8 +8022,7 @@ client.workspace.update_member(
-## SpeechToText
-client.speech_to_text.convert(...)
+client.conversational_ai.delete_agent(...)
-
@@ -7262,37 +8034,13 @@ client.workspace.update_member(
-
-Transcribe an audio or video file.
-
-
-
-
-
-#### 🔌 Usage
-
-
--
-
-
--
-
-```python
-from elevenlabs import ElevenLabs
-
-client = ElevenLabs(
- api_key="YOUR_API_KEY",
-)
-client.speech_to_text.convert(
- model_id="model_id",
-)
-
-```
+Delete an agent
-#### ⚙️ Parameters
+#### 🔌 Usage
-
@@ -7300,41 +8048,31 @@ client.speech_to_text.convert(
-
-**model_id:** `str` — The ID of the model to use for transcription, currently only 'scribe_v1' is available.
-
-
-
-
-
--
+```python
+from elevenlabs import ElevenLabs
-**file:** `from __future__ import annotations
+client = ElevenLabs(
+ api_key="YOUR_API_KEY",
+)
+client.conversational_ai.delete_agent(
+ agent_id="21m00Tcm4TlvDq8ikWAM",
+)
-typing.Optional[core.File]` — See core.File for more documentation
-
+```
-
-
--
-
-**language_code:** `typing.Optional[str]` — An ISO-639-1 or ISO-639-3 language_code corresponding to the language of the audio file. Can sometimes improve transcription performance if known beforehand. Defaults to null, in this case the language is predicted automatically.
-
+#### ⚙️ Parameters
+
-
-**tag_audio_events:** `typing.Optional[bool]` — Whether to tag audio events like (laughter), (footsteps), etc. in the transcription.
-
-
-
-
-
-**num_speakers:** `typing.Optional[int]` — The maximum amount of speakers talking in the uploaded file. Can help with predicting who speaks when. The maximum amount of speakers that can be predicted is 31. Defaults to null, in this case the amount of speakers is set to the maximum value the model supports.
+**agent_id:** `str` — The id of an agent. This is returned on agent creation.
@@ -7354,7 +8092,7 @@ typing.Optional[core.File]` — See core.File for more documentation
-client.speech_to_text.convert_as_stream(...)
+client.conversational_ai.update_agent(...)
-
@@ -7366,7 +8104,7 @@ typing.Optional[core.File]` — See core.File for more documentation
-
-Transcribe an audio or video file with streaming response. Returns chunks of transcription as they become available, with each chunk separated by double newlines (\n\n).
+Patches an Agent settings
@@ -7386,11 +8124,9 @@ from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
-response = client.speech_to_text.convert_as_stream(
- model_id="model_id",
+client.conversational_ai.update_agent(
+ agent_id="21m00Tcm4TlvDq8ikWAM",
)
-for chunk in response:
- yield chunk
```
@@ -7406,7 +8142,7 @@ for chunk in response:
-
-**model_id:** `str` — The ID of the model to use for transcription, currently only 'scribe_v1' is available.
+**agent_id:** `str` — The id of an agent. This is returned on agent creation.
@@ -7414,9 +8150,15 @@ for chunk in response:
-
-**file:** `from __future__ import annotations
+**use_tool_ids:** `typing.Optional[bool]` — Use tool ids instead of tools specs from request payload.
+
+
+
-typing.Optional[core.File]` — See core.File for more documentation
+
+-
+
+**conversation_config:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Conversation configuration for an agent
@@ -7424,7 +8166,7 @@ typing.Optional[core.File]` — See core.File for more documentation
-
-**language_code:** `typing.Optional[str]` — An ISO-639-1 or ISO-639-3 language_code corresponding to the language of the audio file. Can sometimes improve transcription performance if known beforehand. Defaults to null, in this case the language is predicted automatically.
+**platform_settings:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Platform settings for the agent are all settings that aren't related to the conversation orchestration and content.
@@ -7432,7 +8174,11 @@ typing.Optional[core.File]` — See core.File for more documentation
-
-**tag_audio_events:** `typing.Optional[bool]` — Whether to tag audio events like (laughter), (footsteps), etc. in the transcription.
+**secrets:** `typing.Optional[
+ typing.Sequence[
+ BodyPatchesAnAgentSettingsV1ConvaiAgentsAgentIdPatchSecretsItem
+ ]
+]` — A list of secrets for the agent. Can be used to add new secrets or update and delete the existing ones
@@ -7440,7 +8186,7 @@ typing.Optional[core.File]` — See core.File for more documentation
-
-**num_speakers:** `typing.Optional[int]` — The maximum amount of speakers talking in the uploaded file. Can help with predicting who speaks when. The maximum amount of speakers that can be predicted is 31. Defaults to null, in this case the amount of speakers is set to the maximum value the model supports.
+**name:** `typing.Optional[str]` — A name to make the agent easier to find
@@ -7460,8 +8206,7 @@ typing.Optional[core.File]` — See core.File for more documentation
-## ConversationalAi
-client.conversational_ai.get_signed_url(...)
+client.conversational_ai.get_agent_widget(...)
-
@@ -7473,7 +8218,7 @@ typing.Optional[core.File]` — See core.File for more documentation
-
-Get a signed url to start a conversation with an agent with an agent that requires authorization
+Retrieve the widget configuration for an agent
@@ -7493,7 +8238,7 @@ from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
-client.conversational_ai.get_signed_url(
+client.conversational_ai.get_agent_widget(
agent_id="21m00Tcm4TlvDq8ikWAM",
)
@@ -7511,7 +8256,15 @@ client.conversational_ai.get_signed_url(
-
-**agent_id:** `str` — The id of the agent you're taking the action on.
+**agent_id:** `str` — The id of an agent. This is returned on agent creation.
+
+
+
+
+
+-
+
+**conversation_signature:** `typing.Optional[str]` — An expiring token that enables a conversation to start. These can be generated for an agent using the /v1/convai/conversation/get_signed_url endpoint
@@ -7531,7 +8284,7 @@ client.conversational_ai.get_signed_url(
-client.conversational_ai.create_agent(...)
+client.conversational_ai.get_agent_link(...)
-
@@ -7543,7 +8296,7 @@ client.conversational_ai.get_signed_url(
-
-Create an agent from a config object
+Get the current link used to share the agent with others
@@ -7558,13 +8311,13 @@ Create an agent from a config object
-
```python
-from elevenlabs import ConversationalConfig, ElevenLabs
+from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
-client.conversational_ai.create_agent(
- conversation_config=ConversationalConfig(),
+client.conversational_ai.get_agent_link(
+ agent_id="21m00Tcm4TlvDq8ikWAM",
)
```
@@ -7581,31 +8334,7 @@ client.conversational_ai.create_agent(
-
-**conversation_config:** `ConversationalConfig` — Conversation configuration for an agent
-
-
-
-
-
--
-
-**use_tool_ids:** `typing.Optional[bool]` — Use tool ids instead of tools specs from request payload.
-
-
-
-
-
--
-
-**platform_settings:** `typing.Optional[AgentPlatformSettings]` — Platform settings for the agent are all settings that aren't related to the conversation orchestration and content.
-
-
-
-
-
--
-
-**name:** `typing.Optional[str]` — A name to make the agent easier to find
+**agent_id:** `str` — The id of an agent. This is returned on agent creation.
@@ -7625,7 +8354,7 @@ client.conversational_ai.create_agent(
-client.conversational_ai.get_agent(...)
+client.conversational_ai.post_agent_avatar(...)
-
@@ -7637,7 +8366,7 @@ client.conversational_ai.create_agent(
-
-Retrieve config for an agent
+Sets the avatar for an agent displayed in the widget
@@ -7657,7 +8386,7 @@ from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
-client.conversational_ai.get_agent(
+client.conversational_ai.post_agent_avatar(
agent_id="21m00Tcm4TlvDq8ikWAM",
)
@@ -7683,6 +8412,16 @@ client.conversational_ai.get_agent(
-
+**avatar_file:** `from __future__ import annotations
+
+core.File` — See core.File for more documentation
+
+
+
+
+
+-
+
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
@@ -7695,7 +8434,7 @@ client.conversational_ai.get_agent(
-client.conversational_ai.delete_agent(...)
+client.conversational_ai.add_agent_secret(...)
-
@@ -7707,7 +8446,7 @@ client.conversational_ai.get_agent(
-
-Delete an agent
+Uploads a file or reference a webpage for the agent to use as part of it's knowledge base
@@ -7727,8 +8466,10 @@ from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
-client.conversational_ai.delete_agent(
+client.conversational_ai.add_agent_secret(
agent_id="21m00Tcm4TlvDq8ikWAM",
+ name="MY API KEY",
+ secret_value="sk_api_12354abc",
)
```
@@ -7753,6 +8494,22 @@ client.conversational_ai.delete_agent(
-
+**name:** `str` — A name to help identify a particular agent secret
+
+
+
+
+
+-
+
+**secret_value:** `str` — A value to be encrypted and used by the agent
+
+
+
+
+
+-
+
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
@@ -7765,7 +8522,7 @@ client.conversational_ai.delete_agent(
-client.conversational_ai.update_agent(...)
+client.conversational_ai.get_agents(...)
-
@@ -7777,7 +8534,7 @@ client.conversational_ai.delete_agent(
-
-Patches an Agent settings
+Returns a page of your agents and their metadata.
@@ -7797,9 +8554,7 @@ from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
-client.conversational_ai.update_agent(
- agent_id="21m00Tcm4TlvDq8ikWAM",
-)
+client.conversational_ai.get_agents()
```
@@ -7815,31 +8570,7 @@ client.conversational_ai.update_agent(
-
-**agent_id:** `str` — The id of an agent. This is returned on agent creation.
-
-
-
-
-
--
-
-**use_tool_ids:** `typing.Optional[bool]` — Use tool ids instead of tools specs from request payload.
-
-
-
-
-
--
-
-**conversation_config:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Conversation configuration for an agent
-
-
-
-
-
--
-
-**platform_settings:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Platform settings for the agent are all settings that aren't related to the conversation orchestration and content.
+**cursor:** `typing.Optional[str]` — Used for fetching next page. Cursor is returned in the response.
@@ -7847,11 +8578,7 @@ client.conversational_ai.update_agent(
-
-**secrets:** `typing.Optional[
- typing.Sequence[
- BodyPatchesAnAgentSettingsV1ConvaiAgentsAgentIdPatchSecretsItem
- ]
-]` — A list of secrets for the agent. Can be used to add new secrets or update and delete the existing ones
+**page_size:** `typing.Optional[int]` — How many Agents to return at maximum. Can not exceed 100, defaults to 30.
@@ -7859,7 +8586,7 @@ client.conversational_ai.update_agent(
-
-**name:** `typing.Optional[str]` — A name to make the agent easier to find
+**search:** `typing.Optional[str]` — Search by agents name.
@@ -7879,7 +8606,7 @@ client.conversational_ai.update_agent(
-client.conversational_ai.get_agent_widget(...)
+client.conversational_ai.get_conversations(...)
-
@@ -7891,7 +8618,7 @@ client.conversational_ai.update_agent(
-
-Retrieve the widget configuration for an agent
+Get all conversations of agents that user owns. With option to restrict to a specific agent.
@@ -7911,7 +8638,7 @@ from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
-client.conversational_ai.get_agent_widget(
+client.conversational_ai.get_conversations(
agent_id="21m00Tcm4TlvDq8ikWAM",
)
@@ -7929,7 +8656,7 @@ client.conversational_ai.get_agent_widget(
-
-**agent_id:** `str` — The id of an agent. This is returned on agent creation.
+**cursor:** `typing.Optional[str]` — Used for fetching next page. Cursor is returned in the response.
@@ -7937,7 +8664,23 @@ client.conversational_ai.get_agent_widget(
-
-**conversation_signature:** `typing.Optional[str]` — An expiring token that enables a conversation to start. These can be generated for an agent using the /v1/convai/conversation/get_signed_url endpoint
+**agent_id:** `typing.Optional[str]` — The id of the agent you're taking the action on.
+
+
+
+
+
+-
+
+**call_successful:** `typing.Optional[EvaluationSuccessResult]` — The result of the success evaluation
+
+
+
+
+
+-
+
+**page_size:** `typing.Optional[int]` — How many conversations to return at maximum. Can not exceed 100, defaults to 30.
@@ -7957,7 +8700,7 @@ client.conversational_ai.get_agent_widget(
-client.conversational_ai.get_agent_link(...)
+client.conversational_ai.get_conversation(...)
-
@@ -7969,7 +8712,7 @@ client.conversational_ai.get_agent_widget(
-
-Get the current link used to share the agent with others
+Get the details of a particular conversation
@@ -7989,8 +8732,8 @@ from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
-client.conversational_ai.get_agent_link(
- agent_id="21m00Tcm4TlvDq8ikWAM",
+client.conversational_ai.get_conversation(
+ conversation_id="21m00Tcm4TlvDq8ikWAM",
)
```
@@ -8007,7 +8750,7 @@ client.conversational_ai.get_agent_link(
-
-**agent_id:** `str` — The id of an agent. This is returned on agent creation.
+**conversation_id:** `str` — The id of the conversation you're taking the action on.
@@ -8027,7 +8770,7 @@ client.conversational_ai.get_agent_link(
-client.conversational_ai.post_agent_avatar(...)
+client.conversational_ai.delete_conversation(...)
-
@@ -8039,7 +8782,7 @@ client.conversational_ai.get_agent_link(
-
-Sets the avatar for an agent displayed in the widget
+Delete a particular conversation
@@ -8059,8 +8802,8 @@ from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
-client.conversational_ai.post_agent_avatar(
- agent_id="21m00Tcm4TlvDq8ikWAM",
+client.conversational_ai.delete_conversation(
+ conversation_id="21m00Tcm4TlvDq8ikWAM",
)
```
@@ -8077,17 +8820,7 @@ client.conversational_ai.post_agent_avatar(
-
-**agent_id:** `str` — The id of an agent. This is returned on agent creation.
-
-
-
-
-
--
-
-**avatar_file:** `from __future__ import annotations
-
-core.File` — See core.File for more documentation
+**conversation_id:** `str` — The id of the conversation you're taking the action on.
@@ -8107,7 +8840,7 @@ core.File` — See core.File for more documentation
-client.conversational_ai.add_agent_secret(...)
+client.conversational_ai.get_conversation_audio(...)
-
@@ -8119,7 +8852,7 @@ core.File` — See core.File for more documentation
-
-Uploads a file or reference a webpage for the agent to use as part of it's knowledge base
+Get the audio recording of a particular conversation
@@ -8139,10 +8872,8 @@ from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
-client.conversational_ai.add_agent_secret(
- agent_id="21m00Tcm4TlvDq8ikWAM",
- name="name",
- secret_value="secret_value",
+client.conversational_ai.get_conversation_audio(
+ conversation_id="21m00Tcm4TlvDq8ikWAM",
)
```
@@ -8159,23 +8890,7 @@ client.conversational_ai.add_agent_secret(
-
-**agent_id:** `str` — The id of an agent. This is returned on agent creation.
-
-
-
-
-
--
-
-**name:** `str` — A name to help identify a particular agent secret
-
-
-
-
-
--
-
-**secret_value:** `str` — A value to be encrypted and used by the agent
+**conversation_id:** `str` — The id of the conversation you're taking the action on.
@@ -8195,7 +8910,7 @@ client.conversational_ai.add_agent_secret(
-client.conversational_ai.get_agents(...)
+client.conversational_ai.post_conversation_feedback(...)
-
@@ -8207,7 +8922,7 @@ client.conversational_ai.add_agent_secret(
-
-Returns a page of your agents and their metadata.
+Send the feedback for the given conversation
@@ -8227,7 +8942,10 @@ from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
-client.conversational_ai.get_agents()
+client.conversational_ai.post_conversation_feedback(
+ conversation_id="21m00Tcm4TlvDq8ikWAM",
+ feedback="like",
+)
```
@@ -8243,15 +8961,7 @@ client.conversational_ai.get_agents()
-
-**cursor:** `typing.Optional[str]` — Used for fetching next page. Cursor is returned in the response.
-
-
-
-
-
--
-
-**page_size:** `typing.Optional[int]` — How many Agents to return at maximum. Can not exceed 100, defaults to 30.
+**conversation_id:** `str` — The id of the conversation you're taking the action on.
@@ -8259,7 +8969,7 @@ client.conversational_ai.get_agents()
-
-**search:** `typing.Optional[str]` — Search by agents name.
+**feedback:** `UserFeedbackScore` — Either 'like' or 'dislike' to indicate the feedback for the conversation.
@@ -8279,7 +8989,7 @@ client.conversational_ai.get_agents()
-client.conversational_ai.get_conversations(...)
+client.conversational_ai.create_phone_number(...)
-
@@ -8291,7 +9001,7 @@ client.conversational_ai.get_agents()
-
-Get all conversations of agents that user owns. With option to restrict to a specific agent.
+Import Phone Number from Twilio configuration
@@ -8311,8 +9021,11 @@ from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
-client.conversational_ai.get_conversations(
- agent_id="21m00Tcm4TlvDq8ikWAM",
+client.conversational_ai.create_phone_number(
+ phone_number="phone_number",
+ label="label",
+ sid="sid",
+ token="token",
)
```
@@ -8329,7 +9042,7 @@ client.conversational_ai.get_conversations(
-
-**cursor:** `typing.Optional[str]` — Used for fetching next page. Cursor is returned in the response.
+**phone_number:** `str` — Phone number
@@ -8337,7 +9050,7 @@ client.conversational_ai.get_conversations(
-
-**agent_id:** `typing.Optional[str]` — The id of the agent you're taking the action on.
+**label:** `str` — Label for the phone number
@@ -8345,7 +9058,7 @@ client.conversational_ai.get_conversations(
-
-**call_successful:** `typing.Optional[EvaluationSuccessResult]` — The result of the success evaluation
+**sid:** `str` — Twilio Account SID
@@ -8353,7 +9066,7 @@ client.conversational_ai.get_conversations(
-
-**page_size:** `typing.Optional[int]` — How many conversations to return at maximum. Can not exceed 100, defaults to 30.
+**token:** `str` — Twilio Token
@@ -8373,7 +9086,7 @@ client.conversational_ai.get_conversations(
-client.conversational_ai.get_conversation(...)
+client.conversational_ai.get_phone_number(...)
-
@@ -8385,7 +9098,7 @@ client.conversational_ai.get_conversations(
-
-Get the details of a particular conversation
+Retrieve Phone Number details by ID
@@ -8405,8 +9118,8 @@ from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
-client.conversational_ai.get_conversation(
- conversation_id="21m00Tcm4TlvDq8ikWAM",
+client.conversational_ai.get_phone_number(
+ phone_number_id="TeaqRRdTcIfIu2i7BYfT",
)
```
@@ -8423,7 +9136,7 @@ client.conversational_ai.get_conversation(
-
-**conversation_id:** `str` — The id of the conversation you're taking the action on.
+**phone_number_id:** `str` — The id of an agent. This is returned on agent creation.
@@ -8443,7 +9156,7 @@ client.conversational_ai.get_conversation(
-client.conversational_ai.delete_conversation(...)
+client.conversational_ai.delete_phone_number(...)
-
@@ -8455,7 +9168,7 @@ client.conversational_ai.get_conversation(
-
-Delete a particular conversation
+Delete Phone Number by ID
@@ -8475,8 +9188,8 @@ from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
-client.conversational_ai.delete_conversation(
- conversation_id="21m00Tcm4TlvDq8ikWAM",
+client.conversational_ai.delete_phone_number(
+ phone_number_id="TeaqRRdTcIfIu2i7BYfT",
)
```
@@ -8493,7 +9206,7 @@ client.conversational_ai.delete_conversation(
-
-**conversation_id:** `str` — The id of the conversation you're taking the action on.
+**phone_number_id:** `str` — The id of an agent. This is returned on agent creation.
@@ -8513,7 +9226,7 @@ client.conversational_ai.delete_conversation(
-client.conversational_ai.get_conversation_audio(...)
+client.conversational_ai.update_phone_number(...)
-
@@ -8525,7 +9238,7 @@ client.conversational_ai.delete_conversation(
-
-Get the audio recording of a particular conversation
+Update Phone Number details by ID
@@ -8545,8 +9258,8 @@ from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
-client.conversational_ai.get_conversation_audio(
- conversation_id="21m00Tcm4TlvDq8ikWAM",
+client.conversational_ai.update_phone_number(
+ phone_number_id="TeaqRRdTcIfIu2i7BYfT",
)
```
@@ -8563,7 +9276,15 @@ client.conversational_ai.get_conversation_audio(
-
-**conversation_id:** `str` — The id of the conversation you're taking the action on.
+**phone_number_id:** `str` — The id of an agent. This is returned on agent creation.
+
+
+
+
+
+-
+
+**agent_id:** `typing.Optional[str]`
@@ -8583,7 +9304,7 @@ client.conversational_ai.get_conversation_audio(
-client.conversational_ai.post_conversation_feedback(...)
+client.conversational_ai.get_phone_numbers()
-
@@ -8595,7 +9316,7 @@ client.conversational_ai.get_conversation_audio(
-
-Send the feedback for the given conversation
+Retrieve all Phone Numbers
@@ -8615,10 +9336,7 @@ from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
-client.conversational_ai.post_conversation_feedback(
- conversation_id="21m00Tcm4TlvDq8ikWAM",
- feedback="like",
-)
+client.conversational_ai.get_phone_numbers()
```
@@ -8634,22 +9352,6 @@ client.conversational_ai.post_conversation_feedback(
-
-**conversation_id:** `str` — The id of the conversation you're taking the action on.
-
-
-
-
-
--
-
-**feedback:** `UserFeedbackScore` — Either 'like' or 'dislike' to indicate the feedback for the conversation.
-
-
-
-
-
--
-
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
@@ -8662,7 +9364,7 @@ client.conversational_ai.post_conversation_feedback(
-client.conversational_ai.create_phone_number(...)
+client.conversational_ai.get_knowledge_base_list(...)
-
@@ -8674,7 +9376,7 @@ client.conversational_ai.post_conversation_feedback(
-
-Import Phone Number from Twilio configuration
+Get a list of available knowledge base documents
@@ -8694,12 +9396,7 @@ from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
-client.conversational_ai.create_phone_number(
- phone_number="phone_number",
- label="label",
- sid="sid",
- token="token",
-)
+client.conversational_ai.get_knowledge_base_list()
```
@@ -8715,7 +9412,7 @@ client.conversational_ai.create_phone_number(
-
-**phone_number:** `str` — Phone number
+**cursor:** `typing.Optional[str]` — Used for fetching next page. Cursor is returned in the response.
@@ -8723,7 +9420,7 @@ client.conversational_ai.create_phone_number(
-
-**label:** `str` — Label for the phone number
+**page_size:** `typing.Optional[int]` — How many documents to return at maximum. Can not exceed 100, defaults to 30.
@@ -8731,7 +9428,7 @@ client.conversational_ai.create_phone_number(
-
-**sid:** `str` — Twilio Account SID
+**search:** `typing.Optional[str]` — If specified, the endpoint returns only such knowledge base documents whose names start with this string.
@@ -8739,7 +9436,15 @@ client.conversational_ai.create_phone_number(
-
-**token:** `str` — Twilio Token
+**show_only_owned_documents:** `typing.Optional[bool]` — If set to true, the endpoint will return only documents owned by you (and not shared from somebody else).
+
+
+
+
+
+-
+
+**use_typesense:** `typing.Optional[bool]` — If set to true, the endpoint will use typesense DB to search for the documents).
@@ -8759,7 +9464,7 @@ client.conversational_ai.create_phone_number(
-client.conversational_ai.get_phone_number(...)
+client.conversational_ai.add_to_knowledge_base(...)
-
@@ -8771,7 +9476,7 @@ client.conversational_ai.create_phone_number(
-
-Retrieve Phone Number details by ID
+Uploads a file or reference a webpage to use as part of the shared knowledge base
@@ -8791,9 +9496,7 @@ from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
-client.conversational_ai.get_phone_number(
- phone_number_id="TeaqRRdTcIfIu2i7BYfT",
-)
+client.conversational_ai.add_to_knowledge_base()
```
@@ -8809,77 +9512,17 @@ client.conversational_ai.get_phone_number(
-
-**phone_number_id:** `str` — The id of an agent. This is returned on agent creation.
-
-
-
-
-
--
-
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+**url:** `typing.Optional[str]` — URL to a page of documentation that the agent will have access to in order to interact with users.
-
-
-
-
-
-
-
-
-client.conversational_ai.delete_phone_number(...)
-
--
-
-#### 📝 Description
-
-
--
-
-
--
-
-Delete Phone Number by ID
-
-
-
-
-
-#### 🔌 Usage
-
-
--
-
-
--
-
-```python
-from elevenlabs import ElevenLabs
-
-client = ElevenLabs(
- api_key="YOUR_API_KEY",
-)
-client.conversational_ai.delete_phone_number(
- phone_number_id="TeaqRRdTcIfIu2i7BYfT",
-)
-
-```
-
-
-
-
-
-#### ⚙️ Parameters
-
-
--
+**file:** `from __future__ import annotations
-**phone_number_id:** `str` — The id of an agent. This is returned on agent creation.
+typing.Optional[core.File]` — See core.File for more documentation
@@ -8899,7 +9542,7 @@ client.conversational_ai.delete_phone_number(
-client.conversational_ai.update_phone_number(...)
+client.conversational_ai.get_knowledge_base_document_by_id(...)
-
@@ -8911,7 +9554,7 @@ client.conversational_ai.delete_phone_number(
-
-Update Phone Number details by ID
+Get details about a specific documentation making up the agent's knowledge base
@@ -8931,8 +9574,8 @@ from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
-client.conversational_ai.update_phone_number(
- phone_number_id="TeaqRRdTcIfIu2i7BYfT",
+client.conversational_ai.get_knowledge_base_document_by_id(
+ documentation_id="21m00Tcm4TlvDq8ikWAM",
)
```
@@ -8949,15 +9592,7 @@ client.conversational_ai.update_phone_number(
-
-**phone_number_id:** `str` — The id of an agent. This is returned on agent creation.
-
-
-
-
-
--
-
-**agent_id:** `typing.Optional[str]`
+**documentation_id:** `str` — The id of a document from the knowledge base. This is returned on document addition.
@@ -8977,7 +9612,7 @@ client.conversational_ai.update_phone_number(
-client.conversational_ai.get_phone_numbers()
+client.conversational_ai.delete_knowledge_base_document(...)
-
@@ -8989,7 +9624,7 @@ client.conversational_ai.update_phone_number(
-
-Retrieve all Phone Numbers
+Delete a document from the knowledge base
@@ -9009,7 +9644,9 @@ from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
-client.conversational_ai.get_phone_numbers()
+client.conversational_ai.delete_knowledge_base_document(
+ documentation_id="21m00Tcm4TlvDq8ikWAM",
+)
```
@@ -9025,6 +9662,14 @@ client.conversational_ai.get_phone_numbers()
-
+**documentation_id:** `str` — The id of a document from the knowledge base. This is returned on document addition.
+
+
+
+
+
+-
+
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
@@ -9037,7 +9682,7 @@ client.conversational_ai.get_phone_numbers()
-client.conversational_ai.get_knowledge_base_list(...)
+client.conversational_ai.get_dependent_agents(...)
-
@@ -9049,7 +9694,7 @@ client.conversational_ai.get_phone_numbers()
-
-Get a list of available knowledge base documents
+Get a list of agents depending on this knowledge base document
@@ -9069,7 +9714,9 @@ from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
-client.conversational_ai.get_knowledge_base_list()
+client.conversational_ai.get_dependent_agents(
+ documentation_id="21m00Tcm4TlvDq8ikWAM",
+)
```
@@ -9085,6 +9732,14 @@ client.conversational_ai.get_knowledge_base_list()
-
+**documentation_id:** `str` — The id of a document from the knowledge base. This is returned on document addition.
+
+
+
+
+
+-
+
**cursor:** `typing.Optional[str]` — Used for fetching next page. Cursor is returned in the response.
@@ -9113,7 +9768,7 @@ client.conversational_ai.get_knowledge_base_list()
-client.conversational_ai.add_to_knowledge_base(...)
+client.conversational_ai.get_tools()
-
@@ -9125,7 +9780,7 @@ client.conversational_ai.get_knowledge_base_list()
-
-Uploads a file or reference a webpage to use as part of the shared knowledge base
+Get all available tools available in the workspace.
@@ -9145,7 +9800,7 @@ from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
-client.conversational_ai.add_to_knowledge_base()
+client.conversational_ai.get_tools()
```
@@ -9161,24 +9816,6 @@ client.conversational_ai.add_to_knowledge_base()
-
-**url:** `typing.Optional[str]` — URL to a page of documentation that the agent will have access to in order to interact with users.
-
-
-
-
-
--
-
-**file:** `from __future__ import annotations
-
-typing.Optional[core.File]` — See core.File for more documentation
-
-
-
-
-
--
-
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
@@ -9191,7 +9828,7 @@ typing.Optional[core.File]` — See core.File for more documentation
-client.conversational_ai.get_knowledge_base_document_by_id(...)
+client.conversational_ai.add_tool(...)
-
@@ -9203,7 +9840,7 @@ typing.Optional[core.File]` — See core.File for more documentation
-
-Get details about a specific documentation making up the agent's knowledge base
+Add a new tool to the available tools in the workspace.
@@ -9218,13 +9855,26 @@ Get details about a specific documentation making up the agent's knowledge base
-
```python
-from elevenlabs import ElevenLabs
+from elevenlabs import (
+ ElevenLabs,
+ ToolRequestModel,
+ ToolRequestModelToolConfig_Webhook,
+ WebhookToolApiSchemaConfig,
+)
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
-client.conversational_ai.get_knowledge_base_document_by_id(
- documentation_id="21m00Tcm4TlvDq8ikWAM",
+client.conversational_ai.add_tool(
+ request=ToolRequestModel(
+ tool_config=ToolRequestModelToolConfig_Webhook(
+ name="name",
+ description="description",
+ api_schema=WebhookToolApiSchemaConfig(
+ url="url",
+ ),
+ ),
+ ),
)
```
@@ -9241,7 +9891,7 @@ client.conversational_ai.get_knowledge_base_document_by_id(
-
-**documentation_id:** `str` — The id of a document from the knowledge base. This is returned on document addition.
+**request:** `ToolRequestModel`
@@ -9261,7 +9911,7 @@ client.conversational_ai.get_knowledge_base_document_by_id(
-client.conversational_ai.delete_knowledge_base_document(...)
+client.conversational_ai.get_tool(...)
-
@@ -9273,7 +9923,7 @@ client.conversational_ai.get_knowledge_base_document_by_id(
-
-Delete a document from the knowledge base
+Get tool that is available in the workspace.
@@ -9293,8 +9943,8 @@ from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
-client.conversational_ai.delete_knowledge_base_document(
- documentation_id="21m00Tcm4TlvDq8ikWAM",
+client.conversational_ai.get_tool(
+ tool_id="tool_id",
)
```
@@ -9311,7 +9961,7 @@ client.conversational_ai.delete_knowledge_base_document(
-
-**documentation_id:** `str` — The id of a document from the knowledge base. This is returned on document addition.
+**tool_id:** `str` — ID of the requested tool.
@@ -9331,7 +9981,7 @@ client.conversational_ai.delete_knowledge_base_document(
-client.conversational_ai.get_dependent_agents(...)
+client.conversational_ai.remove_tool(...)
-
@@ -9343,7 +9993,7 @@ client.conversational_ai.delete_knowledge_base_document(
-
-Get a list of agents depending on this knowledge base document
+Delete tool from the workspace.
@@ -9363,8 +10013,8 @@ from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
-client.conversational_ai.get_dependent_agents(
- documentation_id="21m00Tcm4TlvDq8ikWAM",
+client.conversational_ai.remove_tool(
+ tool_id="tool_id",
)
```
@@ -9381,23 +10031,7 @@ client.conversational_ai.get_dependent_agents(
-
-**documentation_id:** `str` — The id of a document from the knowledge base. This is returned on document addition.
-
-
-
-
-
--
-
-**cursor:** `typing.Optional[str]` — Used for fetching next page. Cursor is returned in the response.
-
-
-
-
-
--
-
-**page_size:** `typing.Optional[int]` — How many documents to return at maximum. Can not exceed 100, defaults to 30.
+**tool_id:** `str` — ID of the requested tool.
@@ -9417,7 +10051,7 @@ client.conversational_ai.get_dependent_agents(
-client.conversational_ai.get_tools()
+client.conversational_ai.update_tool(...)
-
@@ -9429,7 +10063,7 @@ client.conversational_ai.get_dependent_agents(
-
-Get all available tools available in the workspace.
+Update tool that is available in the workspace.
@@ -9444,12 +10078,28 @@ Get all available tools available in the workspace.
-
```python
-from elevenlabs import ElevenLabs
+from elevenlabs import (
+ ElevenLabs,
+ ToolRequestModel,
+ ToolRequestModelToolConfig_Webhook,
+ WebhookToolApiSchemaConfig,
+)
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
-client.conversational_ai.get_tools()
+client.conversational_ai.update_tool(
+ tool_id="tool_id",
+ request=ToolRequestModel(
+ tool_config=ToolRequestModelToolConfig_Webhook(
+ name="name",
+ description="description",
+ api_schema=WebhookToolApiSchemaConfig(
+ url="url",
+ ),
+ ),
+ ),
+)
```
@@ -9465,6 +10115,22 @@ client.conversational_ai.get_tools()
-
+**tool_id:** `str` — ID of the requested tool.
+
+
+
+
+
+-
+
+**request:** `ToolRequestModel`
+
+
+
+
+
+-
+
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
@@ -9477,7 +10143,7 @@ client.conversational_ai.get_tools()
-client.conversational_ai.add_tool(...)
+client.conversational_ai.get_settings()
-
@@ -9489,7 +10155,7 @@ client.conversational_ai.get_tools()
-
-Add a new tool to the available tools in the workspace.
+Retrieve Convai settings for the workspace
@@ -9504,27 +10170,12 @@ Add a new tool to the available tools in the workspace.
-
```python
-from elevenlabs import (
- ElevenLabs,
- ToolRequestModel,
- ToolRequestModelToolConfig_Webhook,
- WebhookToolApiSchemaConfig,
-)
+from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
-client.conversational_ai.add_tool(
- request=ToolRequestModel(
- tool_config=ToolRequestModelToolConfig_Webhook(
- name="name",
- description="description",
- api_schema=WebhookToolApiSchemaConfig(
- url="url",
- ),
- ),
- ),
-)
+client.conversational_ai.get_settings()
```
@@ -9540,14 +10191,6 @@ client.conversational_ai.add_tool(
-
-**request:** `ToolRequestModel`
-
-
-
-
-
--
-
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
@@ -9560,7 +10203,7 @@ client.conversational_ai.add_tool(
-client.conversational_ai.get_tool(...)
+client.conversational_ai.update_settings(...)
-
@@ -9572,7 +10215,7 @@ client.conversational_ai.add_tool(
-
-Get tool that is available in the workspace.
+Update Convai settings for the workspace
@@ -9588,12 +10231,20 @@ Get tool that is available in the workspace.
```python
from elevenlabs import ElevenLabs
+from elevenlabs.conversational_ai import (
+ PatchConvaiSettingsRequestSecretsItem_New,
+)
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
-client.conversational_ai.get_tool(
- tool_id="tool_id",
+client.conversational_ai.update_settings(
+ secrets=[
+ PatchConvaiSettingsRequestSecretsItem_New(
+ name="name",
+ value="value",
+ )
+ ],
)
```
@@ -9610,7 +10261,23 @@ client.conversational_ai.get_tool(
-
-**tool_id:** `str` — ID of the requested tool.
+**secrets:** `typing.Sequence[PatchConvaiSettingsRequestSecretsItem]`
+
+
+
+
+
+-
+
+**conversation_initiation_client_data_webhook:** `typing.Optional[ConversationInitiationClientDataWebhook]`
+
+
+
+
+
+-
+
+**webhooks:** `typing.Optional[ConvAiWebhooks]`
@@ -9630,7 +10297,7 @@ client.conversational_ai.get_tool(
-client.conversational_ai.remove_tool(...)
+client.conversational_ai.get_secrets()
-
@@ -9642,7 +10309,7 @@ client.conversational_ai.get_tool(
-
-Delete tool from the workspace.
+Get all secrets for the workspace
@@ -9662,9 +10329,7 @@ from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
-client.conversational_ai.remove_tool(
- tool_id="tool_id",
-)
+client.conversational_ai.get_secrets()
```
@@ -9680,14 +10345,6 @@ client.conversational_ai.remove_tool(
-
-**tool_id:** `str` — ID of the requested tool.
-
-
-
-
-
--
-
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
@@ -9700,7 +10357,7 @@ client.conversational_ai.remove_tool(
-client.conversational_ai.update_tool(...)
+client.conversational_ai.create_secret(...)
-
@@ -9712,7 +10369,7 @@ client.conversational_ai.remove_tool(
-
-Update tool that is available in the workspace.
+Create a new secret for the workspace
@@ -9727,27 +10384,14 @@ Update tool that is available in the workspace.
-
```python
-from elevenlabs import (
- ElevenLabs,
- ToolRequestModel,
- ToolRequestModelToolConfig_Webhook,
- WebhookToolApiSchemaConfig,
-)
+from elevenlabs import ElevenLabs
client = ElevenLabs(
api_key="YOUR_API_KEY",
)
-client.conversational_ai.update_tool(
- tool_id="tool_id",
- request=ToolRequestModel(
- tool_config=ToolRequestModelToolConfig_Webhook(
- name="name",
- description="description",
- api_schema=WebhookToolApiSchemaConfig(
- url="url",
- ),
- ),
- ),
+client.conversational_ai.create_secret(
+ name="name",
+ value="value",
)
```
@@ -9764,7 +10408,7 @@ client.conversational_ai.update_tool(
-
-**tool_id:** `str` — ID of the requested tool.
+**name:** `str`
@@ -9772,7 +10416,7 @@ client.conversational_ai.update_tool(
-
-**request:** `ToolRequestModel`
+**value:** `str`
@@ -10091,14 +10735,6 @@ ultra lossless - ultra quality output format, 705.6kbps with 44.1kHz sample rate
-
-**quality_check_on:** `typing.Optional[bool]` — [Depracated] Whether to run quality check on the generated audio and regenerate if needed. Applies to individual block conversion.
-
-
-
-
-
--
-
**apply_text_normalization:** `typing.Optional[ProjectsAddRequestApplyTextNormalization]`
@@ -10246,9 +10882,9 @@ client = ElevenLabs(
)
client.studio.projects.update_metadata(
project_id="21m00Tcm4TlvDq8ikWAM",
- name="name",
- default_title_voice_id="default_title_voice_id",
- default_paragraph_voice_id="default_paragraph_voice_id",
+ name="Project 1",
+ default_title_voice_id="21m00Tcm4TlvDq8ikWAM",
+ default_paragraph_voice_id="21m00Tcm4TlvDq8ikWAM",
)
```
@@ -10329,14 +10965,6 @@ client.studio.projects.update_metadata(
-
-**quality_check_on:** `typing.Optional[bool]` — [Depracated] Whether to run quality check on the generated audio and regenerate if needed. Applies to individual block conversion.
-
-
-
-
-
--
-
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
@@ -10893,6 +11521,14 @@ client.studio.projects.update_pronunciation_dictionaries(
-
+**invalidate_affected_text:** `typing.Optional[bool]` — This will automatically mark text in this project for reconversion when the new dictionary applies or the old one no longer does.
+
+
+
+
+
+-
+
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
@@ -11010,7 +11646,7 @@ client = ElevenLabs(
)
client.studio.chapters.create(
project_id="21m00Tcm4TlvDq8ikWAM",
- name="name",
+ name="Chapter 1",
)
```
diff --git a/src/elevenlabs/__init__.py b/src/elevenlabs/__init__.py
index 63a79510..5d7545f3 100644
--- a/src/elevenlabs/__init__.py
+++ b/src/elevenlabs/__init__.py
@@ -17,9 +17,9 @@
AgentConfigOverride,
AgentConfigOverrideConfig,
AgentMetadataResponseModel,
- AgentPlatformSettings,
+ AgentPlatformSettingsRequestModel,
+ AgentPlatformSettingsResponseModel,
AgentSummaryResponseModel,
- AgentSummaryResponseModelAccessLevel,
AllowlistItem,
ArrayJsonSchemaProperty,
ArrayJsonSchemaPropertyItems,
@@ -29,6 +29,8 @@
AsrQuality,
AudioNativeCreateProjectResponseModel,
AudioNativeEditContentResponseModel,
+ AudioNativeProjectSettingsResponseModel,
+ AudioWithTimestampsResponseModel,
AuthSettings,
AuthorizationMethod,
BanReasonType,
@@ -52,11 +54,21 @@
ChapterStatisticsResponse,
ChapterWithContentResponseModel,
ChapterWithContentResponseModelState,
+ CharacterAlignmentResponseModel,
ClientEvent,
ClientToolConfig,
ConvAiNewSecretConfig,
ConvAiSecretLocator,
ConvAiStoredSecretConfig,
+ ConvAiStoredSecretDependencies,
+ ConvAiStoredSecretDependenciesAgentsItem,
+ ConvAiStoredSecretDependenciesAgentsItem_Available,
+ ConvAiStoredSecretDependenciesAgentsItem_Unknown,
+ ConvAiStoredSecretDependenciesToolsItem,
+ ConvAiStoredSecretDependenciesToolsItem_Available,
+ ConvAiStoredSecretDependenciesToolsItem_Unknown,
+ ConvAiWebhooks,
+ ConvAiWorkspaceStoredSecretConfig,
ConversationChargingCommonModel,
ConversationConfig,
ConversationConfigClientOverride,
@@ -73,6 +85,8 @@
ConversationInitiationClientData,
ConversationInitiationClientDataConfig,
ConversationInitiationClientDataDynamicVariablesValue,
+ ConversationInitiationClientDataWebhook,
+ ConversationInitiationClientDataWebhookRequestHeadersValue,
ConversationSignedUrlResponseModel,
ConversationSummaryResponseModel,
ConversationSummaryResponseModelStatus,
@@ -84,9 +98,13 @@
Currency,
CustomLlm,
DataCollectionResultCommonModel,
+ DeleteSampleResponseModel,
DependentAvailableAgentIdentifier,
DependentAvailableAgentIdentifierAccessLevel,
+ DependentAvailableToolIdentifier,
+ DependentAvailableToolIdentifierAccessLevel,
DependentUnknownAgentIdentifier,
+ DependentUnknownToolIdentifier,
DoDubbingResponse,
DubbingMediaMetadata,
DubbingMetadataResponse,
@@ -108,7 +126,9 @@
GetAgentLinkResponseModel,
GetAgentResponseModel,
GetAgentsPageResponseModel,
+ GetAudioNativeProjectSettingsResponseModel,
GetChaptersResponse,
+ GetConvaiSettingsResponseModel,
GetConversationResponseModel,
GetConversationResponseModelStatus,
GetConversationsPageResponseModel,
@@ -133,6 +153,7 @@
GetPronunciationDictionaryMetadataResponse,
GetSpeechHistoryResponse,
GetVoicesResponse,
+ GetWorkspaceSecretsResponseModel,
HistoryAlignmentResponseModel,
HistoryAlignmentsResponseModel,
HistoryItem,
@@ -161,6 +182,7 @@
ObjectJsonSchemaPropertyPropertiesValue,
OrbAvatar,
OutputFormat,
+ PaginatedListedReviewTaskInstanceModel,
PhoneNumberAgentInfo,
PodcastBulletinMode,
PodcastBulletinModeData,
@@ -170,6 +192,7 @@
PodcastTextSource,
PodcastUrlSource,
PostAgentAvatarResponseModel,
+ PostWorkspaceSecretResponseModel,
PrivacyConfig,
ProfilePageResponseModel,
ProjectCreationMetaResponseModel,
@@ -204,28 +227,42 @@
PronunciationDictionaryVersionResponseModel,
PydanticPronunciationDictionaryVersionLocator,
QueryParamsJsonSchema,
+ QuoteRequestModel,
+ QuoteResponseModel,
ReaderResourceResponseModel,
ReaderResourceResponseModelResourceType,
RecordingResponse,
RemovePronunciationDictionaryRulesResponseModel,
+ ResourceAccessInfo,
+ ResourceAccessInfoRole,
+ ReviewState,
ReviewStatus,
- Safety,
+ ReviewTaskInstanceResponseModel,
+ SafetyCommonModel,
SafetyEvaluation,
+ SafetyResponseModel,
SafetyRule,
+ SecretDependencyType,
SpeechHistoryItemResponse,
SpeechHistoryItemResponseModelSource,
SpeechHistoryItemResponseModelVoiceCategory,
+ SpeechToTextCharacterResponseModel,
SpeechToTextChunkResponseModel,
- SpeechToTextStreamResponseModel,
SpeechToTextWordResponseModel,
SpeechToTextWordResponseModelType,
+ StreamingAudioChunkWithTimestampsResponseModel,
Subscription,
SubscriptionResponse,
SubscriptionResponseModelBillingPeriod,
SubscriptionResponseModelCharacterRefreshPeriod,
SubscriptionResponseModelCurrency,
SubscriptionStatus,
+ SubscriptionUsageResponseModel,
SystemToolConfig,
+ TagKind,
+ TagModel,
+ TaskInstanceEventKind,
+ TaskInstanceEventResponseModel,
TelephonyProvider,
TextToSpeechAsStreamRequest,
ToolRequestModel,
@@ -258,6 +295,7 @@
ValidationError,
ValidationErrorLocItem,
VerificationAttemptResponse,
+ VerifiedVoiceLanguageResponseModel,
Voice,
VoiceGenerationParameterOptionResponse,
VoiceGenerationParameterResponse,
@@ -286,10 +324,11 @@
WidgetConfigResponseModelAvatar_Image,
WidgetConfigResponseModelAvatar_Orb,
WidgetConfigResponseModelAvatar_Url,
+ WidgetExpandable,
WidgetFeedbackMode,
WorkspaceGroupByNameResponseModel,
)
-from .errors import BadRequestError, UnprocessableEntityError
+from .errors import UnprocessableEntityError
from . import (
audio_isolation,
audio_native,
@@ -317,6 +356,9 @@
BodyPatchesAnAgentSettingsV1ConvaiAgentsAgentIdPatchSecretsItem,
BodyPatchesAnAgentSettingsV1ConvaiAgentsAgentIdPatchSecretsItem_New,
BodyPatchesAnAgentSettingsV1ConvaiAgentsAgentIdPatchSecretsItem_Stored,
+ PatchConvaiSettingsRequestSecretsItem,
+ PatchConvaiSettingsRequestSecretsItem_New,
+ PatchConvaiSettingsRequestSecretsItem_Stored,
)
from .dubbing import DubbingGetTranscriptForDubRequestFormatType
from .environment import ElevenLabsEnvironment
@@ -342,6 +384,7 @@
PronunciationDictionaryRule_Alias,
PronunciationDictionaryRule_Phoneme,
)
+from .speech_to_text import SpeechToTextConvertRequestTimestampsGranularity
from .studio import (
BodyCreatePodcastV1StudioPodcastsPostDurationScale,
BodyCreatePodcastV1StudioPodcastsPostMode,
@@ -358,9 +401,6 @@
BodyTextToSpeechStreamingWithTimestampsV1TextToSpeechVoiceIdStreamWithTimestampsPostApplyTextNormalization,
BodyTextToSpeechV1TextToSpeechVoiceIdPostApplyTextNormalization,
BodyTextToSpeechWithTimestampsV1TextToSpeechVoiceIdWithTimestampsPostApplyTextNormalization,
- TextToSpeechStreamWithTimestampsResponse,
- TextToSpeechStreamWithTimestampsResponseAlignment,
- TextToSpeechStreamWithTimestampsResponseNormalizedAlignment,
)
from .text_to_voice import TextToVoiceCreatePreviewsRequestOutputFormat
from .version import __version__
@@ -386,9 +426,9 @@
"AgentConfigOverride",
"AgentConfigOverrideConfig",
"AgentMetadataResponseModel",
- "AgentPlatformSettings",
+ "AgentPlatformSettingsRequestModel",
+ "AgentPlatformSettingsResponseModel",
"AgentSummaryResponseModel",
- "AgentSummaryResponseModelAccessLevel",
"AllowlistItem",
"ArrayJsonSchemaProperty",
"ArrayJsonSchemaPropertyItems",
@@ -399,9 +439,10 @@
"AsyncElevenLabs",
"AudioNativeCreateProjectResponseModel",
"AudioNativeEditContentResponseModel",
+ "AudioNativeProjectSettingsResponseModel",
+ "AudioWithTimestampsResponseModel",
"AuthSettings",
"AuthorizationMethod",
- "BadRequestError",
"BanReasonType",
"BodyAddToKnowledgeBaseV1ConvaiAddToKnowledgeBasePost",
"BodyAddToKnowledgeBaseV1ConvaiAgentsAgentIdAddToKnowledgeBasePost",
@@ -449,11 +490,21 @@
"ChapterStatisticsResponse",
"ChapterWithContentResponseModel",
"ChapterWithContentResponseModelState",
+ "CharacterAlignmentResponseModel",
"ClientEvent",
"ClientToolConfig",
"ConvAiNewSecretConfig",
"ConvAiSecretLocator",
"ConvAiStoredSecretConfig",
+ "ConvAiStoredSecretDependencies",
+ "ConvAiStoredSecretDependenciesAgentsItem",
+ "ConvAiStoredSecretDependenciesAgentsItem_Available",
+ "ConvAiStoredSecretDependenciesAgentsItem_Unknown",
+ "ConvAiStoredSecretDependenciesToolsItem",
+ "ConvAiStoredSecretDependenciesToolsItem_Available",
+ "ConvAiStoredSecretDependenciesToolsItem_Unknown",
+ "ConvAiWebhooks",
+ "ConvAiWorkspaceStoredSecretConfig",
"ConversationChargingCommonModel",
"ConversationConfig",
"ConversationConfigClientOverride",
@@ -470,6 +521,8 @@
"ConversationInitiationClientData",
"ConversationInitiationClientDataConfig",
"ConversationInitiationClientDataDynamicVariablesValue",
+ "ConversationInitiationClientDataWebhook",
+ "ConversationInitiationClientDataWebhookRequestHeadersValue",
"ConversationSignedUrlResponseModel",
"ConversationSummaryResponseModel",
"ConversationSummaryResponseModelStatus",
@@ -481,9 +534,13 @@
"Currency",
"CustomLlm",
"DataCollectionResultCommonModel",
+ "DeleteSampleResponseModel",
"DependentAvailableAgentIdentifier",
"DependentAvailableAgentIdentifierAccessLevel",
+ "DependentAvailableToolIdentifier",
+ "DependentAvailableToolIdentifierAccessLevel",
"DependentUnknownAgentIdentifier",
+ "DependentUnknownToolIdentifier",
"DoDubbingResponse",
"DubbingGetTranscriptForDubRequestFormatType",
"DubbingMediaMetadata",
@@ -508,7 +565,9 @@
"GetAgentLinkResponseModel",
"GetAgentResponseModel",
"GetAgentsPageResponseModel",
+ "GetAudioNativeProjectSettingsResponseModel",
"GetChaptersResponse",
+ "GetConvaiSettingsResponseModel",
"GetConversationResponseModel",
"GetConversationResponseModelStatus",
"GetConversationsPageResponseModel",
@@ -533,6 +592,7 @@
"GetPronunciationDictionaryMetadataResponse",
"GetSpeechHistoryResponse",
"GetVoicesResponse",
+ "GetWorkspaceSecretsResponseModel",
"HistoryAlignmentResponseModel",
"HistoryAlignmentsResponseModel",
"HistoryGetAllRequestSource",
@@ -562,6 +622,10 @@
"ObjectJsonSchemaPropertyPropertiesValue",
"OrbAvatar",
"OutputFormat",
+ "PaginatedListedReviewTaskInstanceModel",
+ "PatchConvaiSettingsRequestSecretsItem",
+ "PatchConvaiSettingsRequestSecretsItem_New",
+ "PatchConvaiSettingsRequestSecretsItem_Stored",
"PhoneNumberAgentInfo",
"PodcastBulletinMode",
"PodcastBulletinModeData",
@@ -571,6 +635,7 @@
"PodcastTextSource",
"PodcastUrlSource",
"PostAgentAvatarResponseModel",
+ "PostWorkspaceSecretResponseModel",
"PrivacyConfig",
"ProfilePageResponseModel",
"ProjectCreationMetaResponseModel",
@@ -609,33 +674,45 @@
"PronunciationDictionaryVersionResponseModel",
"PydanticPronunciationDictionaryVersionLocator",
"QueryParamsJsonSchema",
+ "QuoteRequestModel",
+ "QuoteResponseModel",
"ReaderResourceResponseModel",
"ReaderResourceResponseModelResourceType",
"RecordingResponse",
"RemovePronunciationDictionaryRulesResponseModel",
+ "ResourceAccessInfo",
+ "ResourceAccessInfoRole",
+ "ReviewState",
"ReviewStatus",
- "Safety",
+ "ReviewTaskInstanceResponseModel",
+ "SafetyCommonModel",
"SafetyEvaluation",
+ "SafetyResponseModel",
"SafetyRule",
+ "SecretDependencyType",
"SpeechHistoryItemResponse",
"SpeechHistoryItemResponseModelSource",
"SpeechHistoryItemResponseModelVoiceCategory",
+ "SpeechToTextCharacterResponseModel",
"SpeechToTextChunkResponseModel",
- "SpeechToTextStreamResponseModel",
+ "SpeechToTextConvertRequestTimestampsGranularity",
"SpeechToTextWordResponseModel",
"SpeechToTextWordResponseModelType",
+ "StreamingAudioChunkWithTimestampsResponseModel",
"Subscription",
"SubscriptionResponse",
"SubscriptionResponseModelBillingPeriod",
"SubscriptionResponseModelCharacterRefreshPeriod",
"SubscriptionResponseModelCurrency",
"SubscriptionStatus",
+ "SubscriptionUsageResponseModel",
"SystemToolConfig",
+ "TagKind",
+ "TagModel",
+ "TaskInstanceEventKind",
+ "TaskInstanceEventResponseModel",
"TelephonyProvider",
"TextToSpeechAsStreamRequest",
- "TextToSpeechStreamWithTimestampsResponse",
- "TextToSpeechStreamWithTimestampsResponseAlignment",
- "TextToSpeechStreamWithTimestampsResponseNormalizedAlignment",
"TextToVoiceCreatePreviewsRequestOutputFormat",
"ToolRequestModel",
"ToolRequestModelToolConfig",
@@ -668,6 +745,7 @@
"ValidationError",
"ValidationErrorLocItem",
"VerificationAttemptResponse",
+ "VerifiedVoiceLanguageResponseModel",
"Voice",
"VoiceGenerationParameterOptionResponse",
"VoiceGenerationParameterResponse",
@@ -696,6 +774,7 @@
"WidgetConfigResponseModelAvatar_Image",
"WidgetConfigResponseModelAvatar_Orb",
"WidgetConfigResponseModelAvatar_Url",
+ "WidgetExpandable",
"WidgetFeedbackMode",
"WorkspaceGroupByNameResponseModel",
"__version__",
diff --git a/src/elevenlabs/audio_native/client.py b/src/elevenlabs/audio_native/client.py
index 531215d8..807f683e 100644
--- a/src/elevenlabs/audio_native/client.py
+++ b/src/elevenlabs/audio_native/client.py
@@ -10,8 +10,9 @@
from ..types.http_validation_error import HttpValidationError
from json.decoder import JSONDecodeError
from ..core.api_error import ApiError
-from ..types.audio_native_edit_content_response_model import AudioNativeEditContentResponseModel
+from ..types.get_audio_native_project_settings_response_model import GetAudioNativeProjectSettingsResponseModel
from ..core.jsonable_encoder import jsonable_encoder
+from ..types.audio_native_edit_content_response_model import AudioNativeEditContentResponseModel
from ..core.client_wrapper import AsyncClientWrapper
# this is used as the default value for optional parameters
@@ -145,6 +146,65 @@ def create(
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
+ def get_settings(
+ self, project_id: str, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> GetAudioNativeProjectSettingsResponseModel:
+ """
+ Get player settings for the specific project.
+
+ Parameters
+ ----------
+ project_id : str
+ The ID of the Studio project.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ GetAudioNativeProjectSettingsResponseModel
+ Successful Response
+
+ Examples
+ --------
+ from elevenlabs import ElevenLabs
+
+ client = ElevenLabs(
+ api_key="YOUR_API_KEY",
+ )
+ client.audio_native.get_settings(
+ project_id="21m00Tcm4TlvDq8ikWAM",
+ )
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ f"v1/audio-native/{jsonable_encoder(project_id)}/settings",
+ method="GET",
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(
+ GetAudioNativeProjectSettingsResponseModel,
+ construct_type(
+ type_=GetAudioNativeProjectSettingsResponseModel, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
def update_content(
self,
project_id: str,
@@ -363,6 +423,73 @@ async def main() -> None:
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
+ async def get_settings(
+ self, project_id: str, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> GetAudioNativeProjectSettingsResponseModel:
+ """
+ Get player settings for the specific project.
+
+ Parameters
+ ----------
+ project_id : str
+ The ID of the Studio project.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ GetAudioNativeProjectSettingsResponseModel
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from elevenlabs import AsyncElevenLabs
+
+ client = AsyncElevenLabs(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.audio_native.get_settings(
+ project_id="21m00Tcm4TlvDq8ikWAM",
+ )
+
+
+ asyncio.run(main())
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ f"v1/audio-native/{jsonable_encoder(project_id)}/settings",
+ method="GET",
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(
+ GetAudioNativeProjectSettingsResponseModel,
+ construct_type(
+ type_=GetAudioNativeProjectSettingsResponseModel, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
async def update_content(
self,
project_id: str,
diff --git a/src/elevenlabs/base_client.py b/src/elevenlabs/base_client.py
index 14263765..7b7a469b 100644
--- a/src/elevenlabs/base_client.py
+++ b/src/elevenlabs/base_client.py
@@ -25,6 +25,18 @@
from .workspace.client import WorkspaceClient
from .speech_to_text.client import SpeechToTextClient
from .conversational_ai.client import ConversationalAiClient
+from .core.request_options import RequestOptions
+from .core.jsonable_encoder import jsonable_encoder
+from .core.unchecked_base_model import construct_type
+from .errors.unprocessable_entity_error import UnprocessableEntityError
+from .types.http_validation_error import HttpValidationError
+from json.decoder import JSONDecodeError
+from .core.api_error import ApiError
+from .types.tag_model import TagModel
+from .core.serialization import convert_and_respect_annotation_metadata
+from .types.paginated_listed_review_task_instance_model import PaginatedListedReviewTaskInstanceModel
+from .types.quote_request_model import QuoteRequestModel
+from .types.quote_response_model import QuoteResponseModel
from .core.client_wrapper import AsyncClientWrapper
from .history.client import AsyncHistoryClient
from .text_to_sound_effects.client import AsyncTextToSoundEffectsClient
@@ -47,6 +59,9 @@
from .speech_to_text.client import AsyncSpeechToTextClient
from .conversational_ai.client import AsyncConversationalAiClient
+# this is used as the default value for optional parameters
+OMIT = typing.cast(typing.Any, ...)
+
class BaseElevenLabs:
"""
@@ -127,6 +142,354 @@ def __init__(
self.speech_to_text = SpeechToTextClient(client_wrapper=self._client_wrapper)
self.conversational_ai = ConversationalAiClient(client_wrapper=self._client_wrapper)
+ def claim_a_task_v_1_speech_to_text_reviews_tasks_task_id_claim_post(
+ self, task_id: str, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> typing.Optional[typing.Any]:
+ """
+ Parameters
+ ----------
+ task_id : str
+ The ID task to claim.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ typing.Optional[typing.Any]
+ Successful Response
+
+ Examples
+ --------
+ from elevenlabs import ElevenLabs
+
+ client = ElevenLabs(
+ api_key="YOUR_API_KEY",
+ )
+ client.claim_a_task_v_1_speech_to_text_reviews_tasks_task_id_claim_post(
+ task_id="task_id",
+ )
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ f"v1/speech-to-text/reviews/tasks/{jsonable_encoder(task_id)}/claim",
+ method="POST",
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(
+ typing.Optional[typing.Any],
+ construct_type(
+ type_=typing.Optional[typing.Any], # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def submit_a_completed_task_v_1_speech_to_text_reviews_producers_user_id_tasks_task_id_submit_put(
+ self, user_id: str, task_id: str, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> typing.Optional[typing.Any]:
+ """
+ Parameters
+ ----------
+ user_id : str
+
+ task_id : str
+ The ID task review to claim.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ typing.Optional[typing.Any]
+ Successful Response
+
+ Examples
+ --------
+ from elevenlabs import ElevenLabs
+
+ client = ElevenLabs(
+ api_key="YOUR_API_KEY",
+ )
+ client.submit_a_completed_task_v_1_speech_to_text_reviews_producers_user_id_tasks_task_id_submit_put(
+ user_id="user_id",
+ task_id="task_id",
+ )
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ f"v1/speech-to-text/reviews/producers/{jsonable_encoder(user_id)}/tasks/{jsonable_encoder(task_id)}/submit",
+ method="PUT",
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(
+ typing.Optional[typing.Any],
+ construct_type(
+ type_=typing.Optional[typing.Any], # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def list_unclaimed_reviews_v_1_speech_to_text_reviews_tasks_post(
+ self,
+ *,
+ tags: typing.Sequence[typing.Sequence[TagModel]],
+ page_size: typing.Optional[int] = None,
+ cursor: typing.Optional[str] = None,
+ unclaimed_only: typing.Optional[bool] = OMIT,
+ include_instances: typing.Optional[bool] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> typing.Optional[typing.Any]:
+ """
+ Parameters
+ ----------
+ tags : typing.Sequence[typing.Sequence[TagModel]]
+
+ page_size : typing.Optional[int]
+ The number of tasks to return per page.
+
+ cursor : typing.Optional[str]
+ Cursor for pagination, using the cursor from the previous page.
+
+ unclaimed_only : typing.Optional[bool]
+
+ include_instances : typing.Optional[bool]
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ typing.Optional[typing.Any]
+ Successful Response
+
+ Examples
+ --------
+ from elevenlabs import ElevenLabs, TagModel
+
+ client = ElevenLabs(
+ api_key="YOUR_API_KEY",
+ )
+ client.list_unclaimed_reviews_v_1_speech_to_text_reviews_tasks_post(
+ tags=[
+ [
+ TagModel(
+ kind="lang",
+ value="value",
+ )
+ ]
+ ],
+ )
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "v1/speech-to-text/reviews/tasks",
+ method="POST",
+ params={
+ "page_size": page_size,
+ "cursor": cursor,
+ },
+ json={
+ "tags": convert_and_respect_annotation_metadata(
+ object_=tags, annotation=typing.Sequence[typing.Sequence[TagModel]], direction="write"
+ ),
+ "unclaimed_only": unclaimed_only,
+ "include_instances": include_instances,
+ },
+ headers={
+ "content-type": "application/json",
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(
+ typing.Optional[typing.Any],
+ construct_type(
+ type_=typing.Optional[typing.Any], # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def list_tasks_instances_for_a_user_v_1_speech_to_text_reviews_producers_user_id_tasks_get(
+ self,
+ user_id: str,
+ *,
+ page_size: typing.Optional[int] = None,
+ cursor: typing.Optional[str] = None,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> PaginatedListedReviewTaskInstanceModel:
+ """
+ Parameters
+ ----------
+ user_id : str
+
+ page_size : typing.Optional[int]
+ The number of tasks to return per page.
+
+ cursor : typing.Optional[str]
+ Cursor for pagination, using the cursor from the previous page.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ PaginatedListedReviewTaskInstanceModel
+ Successful Response
+
+ Examples
+ --------
+ from elevenlabs import ElevenLabs
+
+ client = ElevenLabs(
+ api_key="YOUR_API_KEY",
+ )
+ client.list_tasks_instances_for_a_user_v_1_speech_to_text_reviews_producers_user_id_tasks_get(
+ user_id="user_id",
+ )
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ f"v1/speech-to-text/reviews/producers/{jsonable_encoder(user_id)}/tasks",
+ method="GET",
+ params={
+ "page_size": page_size,
+ "cursor": cursor,
+ },
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(
+ PaginatedListedReviewTaskInstanceModel,
+ construct_type(
+ type_=PaginatedListedReviewTaskInstanceModel, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def compute_a_quote_for_a_asr_transcription_review_task_v_1_speech_to_text_reviews_get_quote_post(
+ self, *, request: QuoteRequestModel, request_options: typing.Optional[RequestOptions] = None
+ ) -> QuoteResponseModel:
+ """
+ Parameters
+ ----------
+ request : QuoteRequestModel
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ QuoteResponseModel
+ Successful Response
+
+ Examples
+ --------
+ from elevenlabs import ElevenLabs, QuoteRequestModel
+
+ client = ElevenLabs(
+ api_key="YOUR_API_KEY",
+ )
+ client.compute_a_quote_for_a_asr_transcription_review_task_v_1_speech_to_text_reviews_get_quote_post(
+ request=QuoteRequestModel(
+ content_hash="content_hash",
+ duration_s=1.1,
+ speaker_count=1,
+ language="language",
+ ),
+ )
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "v1/speech-to-text/reviews/get-quote",
+ method="POST",
+ json=convert_and_respect_annotation_metadata(
+ object_=request, annotation=QuoteRequestModel, direction="write"
+ ),
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(
+ QuoteResponseModel,
+ construct_type(
+ type_=QuoteResponseModel, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
class AsyncBaseElevenLabs:
"""
@@ -207,6 +570,394 @@ def __init__(
self.speech_to_text = AsyncSpeechToTextClient(client_wrapper=self._client_wrapper)
self.conversational_ai = AsyncConversationalAiClient(client_wrapper=self._client_wrapper)
+ async def claim_a_task_v_1_speech_to_text_reviews_tasks_task_id_claim_post(
+ self, task_id: str, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> typing.Optional[typing.Any]:
+ """
+ Parameters
+ ----------
+ task_id : str
+ The ID task to claim.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ typing.Optional[typing.Any]
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from elevenlabs import AsyncElevenLabs
+
+ client = AsyncElevenLabs(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.claim_a_task_v_1_speech_to_text_reviews_tasks_task_id_claim_post(
+ task_id="task_id",
+ )
+
+
+ asyncio.run(main())
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ f"v1/speech-to-text/reviews/tasks/{jsonable_encoder(task_id)}/claim",
+ method="POST",
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(
+ typing.Optional[typing.Any],
+ construct_type(
+ type_=typing.Optional[typing.Any], # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def submit_a_completed_task_v_1_speech_to_text_reviews_producers_user_id_tasks_task_id_submit_put(
+ self, user_id: str, task_id: str, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> typing.Optional[typing.Any]:
+ """
+ Parameters
+ ----------
+ user_id : str
+
+ task_id : str
+ The ID task review to claim.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ typing.Optional[typing.Any]
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from elevenlabs import AsyncElevenLabs
+
+ client = AsyncElevenLabs(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.submit_a_completed_task_v_1_speech_to_text_reviews_producers_user_id_tasks_task_id_submit_put(
+ user_id="user_id",
+ task_id="task_id",
+ )
+
+
+ asyncio.run(main())
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ f"v1/speech-to-text/reviews/producers/{jsonable_encoder(user_id)}/tasks/{jsonable_encoder(task_id)}/submit",
+ method="PUT",
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(
+ typing.Optional[typing.Any],
+ construct_type(
+ type_=typing.Optional[typing.Any], # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def list_unclaimed_reviews_v_1_speech_to_text_reviews_tasks_post(
+ self,
+ *,
+ tags: typing.Sequence[typing.Sequence[TagModel]],
+ page_size: typing.Optional[int] = None,
+ cursor: typing.Optional[str] = None,
+ unclaimed_only: typing.Optional[bool] = OMIT,
+ include_instances: typing.Optional[bool] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> typing.Optional[typing.Any]:
+ """
+ Parameters
+ ----------
+ tags : typing.Sequence[typing.Sequence[TagModel]]
+
+ page_size : typing.Optional[int]
+ The number of tasks to return per page.
+
+ cursor : typing.Optional[str]
+ Cursor for pagination, using the cursor from the previous page.
+
+ unclaimed_only : typing.Optional[bool]
+
+ include_instances : typing.Optional[bool]
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ typing.Optional[typing.Any]
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from elevenlabs import AsyncElevenLabs, TagModel
+
+ client = AsyncElevenLabs(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.list_unclaimed_reviews_v_1_speech_to_text_reviews_tasks_post(
+ tags=[
+ [
+ TagModel(
+ kind="lang",
+ value="value",
+ )
+ ]
+ ],
+ )
+
+
+ asyncio.run(main())
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "v1/speech-to-text/reviews/tasks",
+ method="POST",
+ params={
+ "page_size": page_size,
+ "cursor": cursor,
+ },
+ json={
+ "tags": convert_and_respect_annotation_metadata(
+ object_=tags, annotation=typing.Sequence[typing.Sequence[TagModel]], direction="write"
+ ),
+ "unclaimed_only": unclaimed_only,
+ "include_instances": include_instances,
+ },
+ headers={
+ "content-type": "application/json",
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(
+ typing.Optional[typing.Any],
+ construct_type(
+ type_=typing.Optional[typing.Any], # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def list_tasks_instances_for_a_user_v_1_speech_to_text_reviews_producers_user_id_tasks_get(
+ self,
+ user_id: str,
+ *,
+ page_size: typing.Optional[int] = None,
+ cursor: typing.Optional[str] = None,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> PaginatedListedReviewTaskInstanceModel:
+ """
+ Parameters
+ ----------
+ user_id : str
+
+ page_size : typing.Optional[int]
+ The number of tasks to return per page.
+
+ cursor : typing.Optional[str]
+ Cursor for pagination, using the cursor from the previous page.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ PaginatedListedReviewTaskInstanceModel
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from elevenlabs import AsyncElevenLabs
+
+ client = AsyncElevenLabs(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.list_tasks_instances_for_a_user_v_1_speech_to_text_reviews_producers_user_id_tasks_get(
+ user_id="user_id",
+ )
+
+
+ asyncio.run(main())
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ f"v1/speech-to-text/reviews/producers/{jsonable_encoder(user_id)}/tasks",
+ method="GET",
+ params={
+ "page_size": page_size,
+ "cursor": cursor,
+ },
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(
+ PaginatedListedReviewTaskInstanceModel,
+ construct_type(
+ type_=PaginatedListedReviewTaskInstanceModel, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def compute_a_quote_for_a_asr_transcription_review_task_v_1_speech_to_text_reviews_get_quote_post(
+ self, *, request: QuoteRequestModel, request_options: typing.Optional[RequestOptions] = None
+ ) -> QuoteResponseModel:
+ """
+ Parameters
+ ----------
+ request : QuoteRequestModel
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ QuoteResponseModel
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from elevenlabs import AsyncElevenLabs, QuoteRequestModel
+
+ client = AsyncElevenLabs(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.compute_a_quote_for_a_asr_transcription_review_task_v_1_speech_to_text_reviews_get_quote_post(
+ request=QuoteRequestModel(
+ content_hash="content_hash",
+ duration_s=1.1,
+ speaker_count=1,
+ language="language",
+ ),
+ )
+
+
+ asyncio.run(main())
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "v1/speech-to-text/reviews/get-quote",
+ method="POST",
+ json=convert_and_respect_annotation_metadata(
+ object_=request, annotation=QuoteRequestModel, direction="write"
+ ),
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(
+ QuoteResponseModel,
+ construct_type(
+ type_=QuoteResponseModel, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
def _get_base_url(*, base_url: typing.Optional[str] = None, environment: ElevenLabsEnvironment) -> str:
if base_url is not None:
diff --git a/src/elevenlabs/conversational_ai/__init__.py b/src/elevenlabs/conversational_ai/__init__.py
index a05e4b59..3f8438dc 100644
--- a/src/elevenlabs/conversational_ai/__init__.py
+++ b/src/elevenlabs/conversational_ai/__init__.py
@@ -4,10 +4,16 @@
BodyPatchesAnAgentSettingsV1ConvaiAgentsAgentIdPatchSecretsItem,
BodyPatchesAnAgentSettingsV1ConvaiAgentsAgentIdPatchSecretsItem_New,
BodyPatchesAnAgentSettingsV1ConvaiAgentsAgentIdPatchSecretsItem_Stored,
+ PatchConvaiSettingsRequestSecretsItem,
+ PatchConvaiSettingsRequestSecretsItem_New,
+ PatchConvaiSettingsRequestSecretsItem_Stored,
)
__all__ = [
"BodyPatchesAnAgentSettingsV1ConvaiAgentsAgentIdPatchSecretsItem",
"BodyPatchesAnAgentSettingsV1ConvaiAgentsAgentIdPatchSecretsItem_New",
"BodyPatchesAnAgentSettingsV1ConvaiAgentsAgentIdPatchSecretsItem_Stored",
+ "PatchConvaiSettingsRequestSecretsItem",
+ "PatchConvaiSettingsRequestSecretsItem_New",
+ "PatchConvaiSettingsRequestSecretsItem_Stored",
]
diff --git a/src/elevenlabs/conversational_ai/client.py b/src/elevenlabs/conversational_ai/client.py
index 6c6b8491..9b12641f 100644
--- a/src/elevenlabs/conversational_ai/client.py
+++ b/src/elevenlabs/conversational_ai/client.py
@@ -10,7 +10,7 @@
from json.decoder import JSONDecodeError
from ..core.api_error import ApiError
from ..types.conversational_config import ConversationalConfig
-from ..types.agent_platform_settings import AgentPlatformSettings
+from ..types.agent_platform_settings_request_model import AgentPlatformSettingsRequestModel
from ..types.create_agent_response_model import CreateAgentResponseModel
from ..core.serialization import convert_and_respect_annotation_metadata
from ..types.get_agent_response_model import GetAgentResponseModel
@@ -37,6 +37,12 @@
from ..types.tools_response_model import ToolsResponseModel
from ..types.tool_request_model import ToolRequestModel
from ..types.tool_response_model import ToolResponseModel
+from ..types.get_convai_settings_response_model import GetConvaiSettingsResponseModel
+from .types.patch_convai_settings_request_secrets_item import PatchConvaiSettingsRequestSecretsItem
+from ..types.conversation_initiation_client_data_webhook import ConversationInitiationClientDataWebhook
+from ..types.conv_ai_webhooks import ConvAiWebhooks
+from ..types.get_workspace_secrets_response_model import GetWorkspaceSecretsResponseModel
+from ..types.post_workspace_secret_response_model import PostWorkspaceSecretResponseModel
from ..core.client_wrapper import AsyncClientWrapper
# this is used as the default value for optional parameters
@@ -114,7 +120,7 @@ def create_agent(
*,
conversation_config: ConversationalConfig,
use_tool_ids: typing.Optional[bool] = None,
- platform_settings: typing.Optional[AgentPlatformSettings] = OMIT,
+ platform_settings: typing.Optional[AgentPlatformSettingsRequestModel] = OMIT,
name: typing.Optional[str] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
) -> CreateAgentResponseModel:
@@ -129,7 +135,7 @@ def create_agent(
use_tool_ids : typing.Optional[bool]
Use tool ids instead of tools specs from request payload.
- platform_settings : typing.Optional[AgentPlatformSettings]
+ platform_settings : typing.Optional[AgentPlatformSettingsRequestModel]
Platform settings for the agent are all settings that aren't related to the conversation orchestration and content.
name : typing.Optional[str]
@@ -165,7 +171,7 @@ def create_agent(
object_=conversation_config, annotation=ConversationalConfig, direction="write"
),
"platform_settings": convert_and_respect_annotation_metadata(
- object_=platform_settings, annotation=AgentPlatformSettings, direction="write"
+ object_=platform_settings, annotation=AgentPlatformSettingsRequestModel, direction="write"
),
"name": name,
},
@@ -647,8 +653,8 @@ def add_agent_secret(
)
client.conversational_ai.add_agent_secret(
agent_id="21m00Tcm4TlvDq8ikWAM",
- name="name",
- secret_value="secret_value",
+ name="MY API KEY",
+ secret_value="sk_api_12354abc",
)
"""
_response = self._client_wrapper.httpx_client.request(
@@ -1422,6 +1428,9 @@ def get_knowledge_base_list(
*,
cursor: typing.Optional[str] = None,
page_size: typing.Optional[int] = None,
+ search: typing.Optional[str] = None,
+ show_only_owned_documents: typing.Optional[bool] = None,
+ use_typesense: typing.Optional[bool] = None,
request_options: typing.Optional[RequestOptions] = None,
) -> GetKnowledgeBaseListResponseModel:
"""
@@ -1435,6 +1444,15 @@ def get_knowledge_base_list(
page_size : typing.Optional[int]
How many documents to return at maximum. Can not exceed 100, defaults to 30.
+ search : typing.Optional[str]
+ If specified, the endpoint returns only such knowledge base documents whose names start with this string.
+
+ show_only_owned_documents : typing.Optional[bool]
+ If set to true, the endpoint will return only documents owned by you (and not shared from somebody else).
+
+ use_typesense : typing.Optional[bool]
+ If set to true, the endpoint will use typesense DB to search for the documents).
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
@@ -1458,6 +1476,9 @@ def get_knowledge_base_list(
params={
"cursor": cursor,
"page_size": page_size,
+ "search": search,
+ "show_only_owned_documents": show_only_owned_documents,
+ "use_typesense": use_typesense,
},
request_options=request_options,
)
@@ -2070,6 +2091,278 @@ def update_tool(
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
+ def get_settings(
+ self, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> GetConvaiSettingsResponseModel:
+ """
+ Retrieve Convai settings for the workspace
+
+ Parameters
+ ----------
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ GetConvaiSettingsResponseModel
+ Successful Response
+
+ Examples
+ --------
+ from elevenlabs import ElevenLabs
+
+ client = ElevenLabs(
+ api_key="YOUR_API_KEY",
+ )
+ client.conversational_ai.get_settings()
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "v1/convai/settings",
+ method="GET",
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(
+ GetConvaiSettingsResponseModel,
+ construct_type(
+ type_=GetConvaiSettingsResponseModel, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def update_settings(
+ self,
+ *,
+ secrets: typing.Sequence[PatchConvaiSettingsRequestSecretsItem],
+ conversation_initiation_client_data_webhook: typing.Optional[ConversationInitiationClientDataWebhook] = OMIT,
+ webhooks: typing.Optional[ConvAiWebhooks] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> GetConvaiSettingsResponseModel:
+ """
+ Update Convai settings for the workspace
+
+ Parameters
+ ----------
+ secrets : typing.Sequence[PatchConvaiSettingsRequestSecretsItem]
+
+ conversation_initiation_client_data_webhook : typing.Optional[ConversationInitiationClientDataWebhook]
+
+ webhooks : typing.Optional[ConvAiWebhooks]
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ GetConvaiSettingsResponseModel
+ Successful Response
+
+ Examples
+ --------
+ from elevenlabs import ElevenLabs
+ from elevenlabs.conversational_ai import (
+ PatchConvaiSettingsRequestSecretsItem_New,
+ )
+
+ client = ElevenLabs(
+ api_key="YOUR_API_KEY",
+ )
+ client.conversational_ai.update_settings(
+ secrets=[
+ PatchConvaiSettingsRequestSecretsItem_New(
+ name="name",
+ value="value",
+ )
+ ],
+ )
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "v1/convai/settings",
+ method="PATCH",
+ json={
+ "conversation_initiation_client_data_webhook": convert_and_respect_annotation_metadata(
+ object_=conversation_initiation_client_data_webhook,
+ annotation=ConversationInitiationClientDataWebhook,
+ direction="write",
+ ),
+ "webhooks": convert_and_respect_annotation_metadata(
+ object_=webhooks, annotation=ConvAiWebhooks, direction="write"
+ ),
+ "secrets": convert_and_respect_annotation_metadata(
+ object_=secrets,
+ annotation=typing.Sequence[PatchConvaiSettingsRequestSecretsItem],
+ direction="write",
+ ),
+ },
+ headers={
+ "content-type": "application/json",
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(
+ GetConvaiSettingsResponseModel,
+ construct_type(
+ type_=GetConvaiSettingsResponseModel, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def get_secrets(
+ self, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> GetWorkspaceSecretsResponseModel:
+ """
+ Get all secrets for the workspace
+
+ Parameters
+ ----------
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ GetWorkspaceSecretsResponseModel
+ Successful Response
+
+ Examples
+ --------
+ from elevenlabs import ElevenLabs
+
+ client = ElevenLabs(
+ api_key="YOUR_API_KEY",
+ )
+ client.conversational_ai.get_secrets()
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "v1/convai/secrets",
+ method="GET",
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(
+ GetWorkspaceSecretsResponseModel,
+ construct_type(
+ type_=GetWorkspaceSecretsResponseModel, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def create_secret(
+ self, *, name: str, value: str, request_options: typing.Optional[RequestOptions] = None
+ ) -> PostWorkspaceSecretResponseModel:
+ """
+ Create a new secret for the workspace
+
+ Parameters
+ ----------
+ name : str
+
+ value : str
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ PostWorkspaceSecretResponseModel
+ Successful Response
+
+ Examples
+ --------
+ from elevenlabs import ElevenLabs
+
+ client = ElevenLabs(
+ api_key="YOUR_API_KEY",
+ )
+ client.conversational_ai.create_secret(
+ name="name",
+ value="value",
+ )
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "v1/convai/secrets",
+ method="POST",
+ json={
+ "name": name,
+ "value": value,
+ "type": "new",
+ },
+ headers={
+ "content-type": "application/json",
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(
+ PostWorkspaceSecretResponseModel,
+ construct_type(
+ type_=PostWorkspaceSecretResponseModel, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
class AsyncConversationalAiClient:
def __init__(self, *, client_wrapper: AsyncClientWrapper):
@@ -2150,7 +2443,7 @@ async def create_agent(
*,
conversation_config: ConversationalConfig,
use_tool_ids: typing.Optional[bool] = None,
- platform_settings: typing.Optional[AgentPlatformSettings] = OMIT,
+ platform_settings: typing.Optional[AgentPlatformSettingsRequestModel] = OMIT,
name: typing.Optional[str] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
) -> CreateAgentResponseModel:
@@ -2165,7 +2458,7 @@ async def create_agent(
use_tool_ids : typing.Optional[bool]
Use tool ids instead of tools specs from request payload.
- platform_settings : typing.Optional[AgentPlatformSettings]
+ platform_settings : typing.Optional[AgentPlatformSettingsRequestModel]
Platform settings for the agent are all settings that aren't related to the conversation orchestration and content.
name : typing.Optional[str]
@@ -2209,7 +2502,7 @@ async def main() -> None:
object_=conversation_config, annotation=ConversationalConfig, direction="write"
),
"platform_settings": convert_and_respect_annotation_metadata(
- object_=platform_settings, annotation=AgentPlatformSettings, direction="write"
+ object_=platform_settings, annotation=AgentPlatformSettingsRequestModel, direction="write"
),
"name": name,
},
@@ -2744,8 +3037,8 @@ async def add_agent_secret(
async def main() -> None:
await client.conversational_ai.add_agent_secret(
agent_id="21m00Tcm4TlvDq8ikWAM",
- name="name",
- secret_value="secret_value",
+ name="MY API KEY",
+ secret_value="sk_api_12354abc",
)
@@ -3610,6 +3903,9 @@ async def get_knowledge_base_list(
*,
cursor: typing.Optional[str] = None,
page_size: typing.Optional[int] = None,
+ search: typing.Optional[str] = None,
+ show_only_owned_documents: typing.Optional[bool] = None,
+ use_typesense: typing.Optional[bool] = None,
request_options: typing.Optional[RequestOptions] = None,
) -> GetKnowledgeBaseListResponseModel:
"""
@@ -3623,6 +3919,15 @@ async def get_knowledge_base_list(
page_size : typing.Optional[int]
How many documents to return at maximum. Can not exceed 100, defaults to 30.
+ search : typing.Optional[str]
+ If specified, the endpoint returns only such knowledge base documents whose names start with this string.
+
+ show_only_owned_documents : typing.Optional[bool]
+ If set to true, the endpoint will return only documents owned by you (and not shared from somebody else).
+
+ use_typesense : typing.Optional[bool]
+ If set to true, the endpoint will use typesense DB to search for the documents).
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
@@ -3654,6 +3959,9 @@ async def main() -> None:
params={
"cursor": cursor,
"page_size": page_size,
+ "search": search,
+ "show_only_owned_documents": show_only_owned_documents,
+ "use_typesense": use_typesense,
},
request_options=request_options,
)
@@ -4339,3 +4647,307 @@ async def main() -> None:
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def get_settings(
+ self, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> GetConvaiSettingsResponseModel:
+ """
+ Retrieve Convai settings for the workspace
+
+ Parameters
+ ----------
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ GetConvaiSettingsResponseModel
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from elevenlabs import AsyncElevenLabs
+
+ client = AsyncElevenLabs(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.conversational_ai.get_settings()
+
+
+ asyncio.run(main())
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "v1/convai/settings",
+ method="GET",
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(
+ GetConvaiSettingsResponseModel,
+ construct_type(
+ type_=GetConvaiSettingsResponseModel, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def update_settings(
+ self,
+ *,
+ secrets: typing.Sequence[PatchConvaiSettingsRequestSecretsItem],
+ conversation_initiation_client_data_webhook: typing.Optional[ConversationInitiationClientDataWebhook] = OMIT,
+ webhooks: typing.Optional[ConvAiWebhooks] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> GetConvaiSettingsResponseModel:
+ """
+ Update Convai settings for the workspace
+
+ Parameters
+ ----------
+ secrets : typing.Sequence[PatchConvaiSettingsRequestSecretsItem]
+
+ conversation_initiation_client_data_webhook : typing.Optional[ConversationInitiationClientDataWebhook]
+
+ webhooks : typing.Optional[ConvAiWebhooks]
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ GetConvaiSettingsResponseModel
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from elevenlabs import AsyncElevenLabs
+ from elevenlabs.conversational_ai import (
+ PatchConvaiSettingsRequestSecretsItem_New,
+ )
+
+ client = AsyncElevenLabs(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.conversational_ai.update_settings(
+ secrets=[
+ PatchConvaiSettingsRequestSecretsItem_New(
+ name="name",
+ value="value",
+ )
+ ],
+ )
+
+
+ asyncio.run(main())
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "v1/convai/settings",
+ method="PATCH",
+ json={
+ "conversation_initiation_client_data_webhook": convert_and_respect_annotation_metadata(
+ object_=conversation_initiation_client_data_webhook,
+ annotation=ConversationInitiationClientDataWebhook,
+ direction="write",
+ ),
+ "webhooks": convert_and_respect_annotation_metadata(
+ object_=webhooks, annotation=ConvAiWebhooks, direction="write"
+ ),
+ "secrets": convert_and_respect_annotation_metadata(
+ object_=secrets,
+ annotation=typing.Sequence[PatchConvaiSettingsRequestSecretsItem],
+ direction="write",
+ ),
+ },
+ headers={
+ "content-type": "application/json",
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(
+ GetConvaiSettingsResponseModel,
+ construct_type(
+ type_=GetConvaiSettingsResponseModel, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def get_secrets(
+ self, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> GetWorkspaceSecretsResponseModel:
+ """
+ Get all secrets for the workspace
+
+ Parameters
+ ----------
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ GetWorkspaceSecretsResponseModel
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from elevenlabs import AsyncElevenLabs
+
+ client = AsyncElevenLabs(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.conversational_ai.get_secrets()
+
+
+ asyncio.run(main())
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "v1/convai/secrets",
+ method="GET",
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(
+ GetWorkspaceSecretsResponseModel,
+ construct_type(
+ type_=GetWorkspaceSecretsResponseModel, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def create_secret(
+ self, *, name: str, value: str, request_options: typing.Optional[RequestOptions] = None
+ ) -> PostWorkspaceSecretResponseModel:
+ """
+ Create a new secret for the workspace
+
+ Parameters
+ ----------
+ name : str
+
+ value : str
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ PostWorkspaceSecretResponseModel
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from elevenlabs import AsyncElevenLabs
+
+ client = AsyncElevenLabs(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.conversational_ai.create_secret(
+ name="name",
+ value="value",
+ )
+
+
+ asyncio.run(main())
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "v1/convai/secrets",
+ method="POST",
+ json={
+ "name": name,
+ "value": value,
+ "type": "new",
+ },
+ headers={
+ "content-type": "application/json",
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(
+ PostWorkspaceSecretResponseModel,
+ construct_type(
+ type_=PostWorkspaceSecretResponseModel, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
diff --git a/src/elevenlabs/conversational_ai/types/__init__.py b/src/elevenlabs/conversational_ai/types/__init__.py
index 3d467b3a..48f22f3a 100644
--- a/src/elevenlabs/conversational_ai/types/__init__.py
+++ b/src/elevenlabs/conversational_ai/types/__init__.py
@@ -5,9 +5,17 @@
BodyPatchesAnAgentSettingsV1ConvaiAgentsAgentIdPatchSecretsItem_New,
BodyPatchesAnAgentSettingsV1ConvaiAgentsAgentIdPatchSecretsItem_Stored,
)
+from .patch_convai_settings_request_secrets_item import (
+ PatchConvaiSettingsRequestSecretsItem,
+ PatchConvaiSettingsRequestSecretsItem_New,
+ PatchConvaiSettingsRequestSecretsItem_Stored,
+)
__all__ = [
"BodyPatchesAnAgentSettingsV1ConvaiAgentsAgentIdPatchSecretsItem",
"BodyPatchesAnAgentSettingsV1ConvaiAgentsAgentIdPatchSecretsItem_New",
"BodyPatchesAnAgentSettingsV1ConvaiAgentsAgentIdPatchSecretsItem_Stored",
+ "PatchConvaiSettingsRequestSecretsItem",
+ "PatchConvaiSettingsRequestSecretsItem_New",
+ "PatchConvaiSettingsRequestSecretsItem_Stored",
]
diff --git a/src/elevenlabs/conversational_ai/types/patch_convai_settings_request_secrets_item.py b/src/elevenlabs/conversational_ai/types/patch_convai_settings_request_secrets_item.py
new file mode 100644
index 00000000..e5465b26
--- /dev/null
+++ b/src/elevenlabs/conversational_ai/types/patch_convai_settings_request_secrets_item.py
@@ -0,0 +1,45 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from __future__ import annotations
+from ...core.unchecked_base_model import UncheckedBaseModel
+import typing
+from ...core.pydantic_utilities import IS_PYDANTIC_V2
+import pydantic
+import typing_extensions
+from ...core.unchecked_base_model import UnionMetadata
+
+
+class PatchConvaiSettingsRequestSecretsItem_New(UncheckedBaseModel):
+ type: typing.Literal["new"] = "new"
+ name: str
+ value: str
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
+
+
+class PatchConvaiSettingsRequestSecretsItem_Stored(UncheckedBaseModel):
+ type: typing.Literal["stored"] = "stored"
+ secret_id: str
+ name: str
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
+
+
+PatchConvaiSettingsRequestSecretsItem = typing_extensions.Annotated[
+ typing.Union[PatchConvaiSettingsRequestSecretsItem_New, PatchConvaiSettingsRequestSecretsItem_Stored],
+ UnionMetadata(discriminant="type"),
+]
diff --git a/src/elevenlabs/core/client_wrapper.py b/src/elevenlabs/core/client_wrapper.py
index 2efd1a97..f879b9f0 100644
--- a/src/elevenlabs/core/client_wrapper.py
+++ b/src/elevenlabs/core/client_wrapper.py
@@ -16,7 +16,7 @@ def get_headers(self) -> typing.Dict[str, str]:
headers: typing.Dict[str, str] = {
"X-Fern-Language": "Python",
"X-Fern-SDK-Name": "elevenlabs",
- "X-Fern-SDK-Version": "1.51.0",
+ "X-Fern-SDK-Version": "1.52.0",
}
if self._api_key is not None:
headers["xi-api-key"] = self._api_key
diff --git a/src/elevenlabs/errors/__init__.py b/src/elevenlabs/errors/__init__.py
index aa4eaab3..cb64e066 100644
--- a/src/elevenlabs/errors/__init__.py
+++ b/src/elevenlabs/errors/__init__.py
@@ -1,6 +1,5 @@
# This file was auto-generated by Fern from our API Definition.
-from .bad_request_error import BadRequestError
from .unprocessable_entity_error import UnprocessableEntityError
-__all__ = ["BadRequestError", "UnprocessableEntityError"]
+__all__ = ["UnprocessableEntityError"]
diff --git a/src/elevenlabs/errors/bad_request_error.py b/src/elevenlabs/errors/bad_request_error.py
deleted file mode 100644
index 9c13c61f..00000000
--- a/src/elevenlabs/errors/bad_request_error.py
+++ /dev/null
@@ -1,9 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-from ..core.api_error import ApiError
-import typing
-
-
-class BadRequestError(ApiError):
- def __init__(self, body: typing.Optional[typing.Any]):
- super().__init__(status_code=400, body=body)
diff --git a/src/elevenlabs/projects/client.py b/src/elevenlabs/projects/client.py
index 43e1dea0..21a7f4b6 100644
--- a/src/elevenlabs/projects/client.py
+++ b/src/elevenlabs/projects/client.py
@@ -127,7 +127,7 @@ def create_podcast(
api_key="YOUR_API_KEY",
)
client.projects.create_podcast(
- model_id="model_id",
+ model_id="21m00Tcm4TlvDq8ikWAM",
mode=BodyCreatePodcastV1ProjectsPodcastCreatePostMode_Conversation(
conversation=PodcastConversationModeData(
host_voice_id="host_voice_id",
@@ -262,7 +262,6 @@ def add_project(
volume_normalization: typing.Optional[bool] = OMIT,
pronunciation_dictionary_locators: typing.Optional[typing.List[str]] = OMIT,
fiction: typing.Optional[AddProjectV1ProjectsAddPostRequestFiction] = OMIT,
- quality_check_on: typing.Optional[bool] = OMIT,
apply_text_normalization: typing.Optional[AddProjectV1ProjectsAddPostRequestApplyTextNormalization] = OMIT,
auto_convert: typing.Optional[bool] = OMIT,
auto_assign_voices: typing.Optional[bool] = OMIT,
@@ -341,9 +340,6 @@ def add_project(
fiction : typing.Optional[AddProjectV1ProjectsAddPostRequestFiction]
An optional specification of whether the content of this Studio project is fiction.
- quality_check_on : typing.Optional[bool]
- [Depracated] Whether to run quality check on the generated audio and regenerate if needed. Applies to individual block conversion.
-
apply_text_normalization : typing.Optional[AddProjectV1ProjectsAddPostRequestApplyTextNormalization]
This parameter controls text normalization with four modes: 'auto', 'on', 'apply_english' and 'off'.
@@ -404,7 +400,6 @@ def add_project(
"volume_normalization": volume_normalization,
"pronunciation_dictionary_locators": pronunciation_dictionary_locators,
"fiction": fiction,
- "quality_check_on": quality_check_on,
"apply_text_normalization": apply_text_normalization,
"auto_convert": auto_convert,
"auto_assign_voices": auto_assign_voices,
@@ -509,7 +504,6 @@ def edit_basic_project_info(
author: typing.Optional[str] = OMIT,
isbn_number: typing.Optional[str] = OMIT,
volume_normalization: typing.Optional[bool] = OMIT,
- quality_check_on: typing.Optional[bool] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
) -> EditProjectResponseModel:
"""
@@ -541,9 +535,6 @@ def edit_basic_project_info(
volume_normalization : typing.Optional[bool]
When the Studio project is downloaded, should the returned audio have postprocessing in order to make it compliant with audiobook normalized volume requirements
- quality_check_on : typing.Optional[bool]
- [Depracated] Whether to run quality check on the generated audio and regenerate if needed. Applies to individual block conversion.
-
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
@@ -561,9 +552,9 @@ def edit_basic_project_info(
)
client.projects.edit_basic_project_info(
project_id="21m00Tcm4TlvDq8ikWAM",
- name="name",
- default_title_voice_id="default_title_voice_id",
- default_paragraph_voice_id="default_paragraph_voice_id",
+ name="Project 1",
+ default_title_voice_id="21m00Tcm4TlvDq8ikWAM",
+ default_paragraph_voice_id="21m00Tcm4TlvDq8ikWAM",
)
"""
_response = self._client_wrapper.httpx_client.request(
@@ -577,7 +568,6 @@ def edit_basic_project_info(
"author": author,
"isbn_number": isbn_number,
"volume_normalization": volume_normalization,
- "quality_check_on": quality_check_on,
},
headers={
"content-type": "application/json",
@@ -1297,7 +1287,7 @@ def add_chapter_to_a_project(
)
client.projects.add_chapter_to_a_project(
project_id="21m00Tcm4TlvDq8ikWAM",
- name="name",
+ name="Chapter 1",
)
"""
_response = self._client_wrapper.httpx_client.request(
@@ -1544,6 +1534,7 @@ def update_pronunciation_dictionaries(
project_id: str,
*,
pronunciation_dictionary_locators: typing.Sequence[PronunciationDictionaryVersionLocator],
+ invalidate_affected_text: typing.Optional[bool] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
) -> typing.Optional[typing.Any]:
"""
@@ -1557,6 +1548,9 @@ def update_pronunciation_dictionaries(
pronunciation_dictionary_locators : typing.Sequence[PronunciationDictionaryVersionLocator]
A list of pronunciation dictionary locators (pronunciation_dictionary_id, version_id) encoded as a list of JSON strings for pronunciation dictionaries to be applied to the text. A list of json encoded strings is required as adding projects may occur through formData as opposed to jsonBody. To specify multiple dictionaries use multiple --form lines in your curl, such as --form 'pronunciation_dictionary_locators="{\"pronunciation_dictionary_id\":\"Vmd4Zor6fplcA7WrINey\",\"version_id\":\"hRPaxjlTdR7wFMhV4w0b\"}"' --form 'pronunciation_dictionary_locators="{\"pronunciation_dictionary_id\":\"JzWtcGQMJ6bnlWwyMo7e\",\"version_id\":\"lbmwxiLu4q6txYxgdZqn\"}"'. Note that multiple dictionaries are not currently supported by our UI which will only show the first.
+ invalidate_affected_text : typing.Optional[bool]
+ This will automatically mark text in this project for reconversion when the new dictionary applies or the old one no longer does.
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
@@ -1591,6 +1585,7 @@ def update_pronunciation_dictionaries(
annotation=typing.Sequence[PronunciationDictionaryVersionLocator],
direction="write",
),
+ "invalidate_affected_text": invalidate_affected_text,
},
headers={
"content-type": "application/json",
@@ -1706,7 +1701,7 @@ async def create_podcast(
async def main() -> None:
await client.projects.create_podcast(
- model_id="model_id",
+ model_id="21m00Tcm4TlvDq8ikWAM",
mode=BodyCreatePodcastV1ProjectsPodcastCreatePostMode_Conversation(
conversation=PodcastConversationModeData(
host_voice_id="host_voice_id",
@@ -1852,7 +1847,6 @@ async def add_project(
volume_normalization: typing.Optional[bool] = OMIT,
pronunciation_dictionary_locators: typing.Optional[typing.List[str]] = OMIT,
fiction: typing.Optional[AddProjectV1ProjectsAddPostRequestFiction] = OMIT,
- quality_check_on: typing.Optional[bool] = OMIT,
apply_text_normalization: typing.Optional[AddProjectV1ProjectsAddPostRequestApplyTextNormalization] = OMIT,
auto_convert: typing.Optional[bool] = OMIT,
auto_assign_voices: typing.Optional[bool] = OMIT,
@@ -1931,9 +1925,6 @@ async def add_project(
fiction : typing.Optional[AddProjectV1ProjectsAddPostRequestFiction]
An optional specification of whether the content of this Studio project is fiction.
- quality_check_on : typing.Optional[bool]
- [Depracated] Whether to run quality check on the generated audio and regenerate if needed. Applies to individual block conversion.
-
apply_text_normalization : typing.Optional[AddProjectV1ProjectsAddPostRequestApplyTextNormalization]
This parameter controls text normalization with four modes: 'auto', 'on', 'apply_english' and 'off'.
@@ -2002,7 +1993,6 @@ async def main() -> None:
"volume_normalization": volume_normalization,
"pronunciation_dictionary_locators": pronunciation_dictionary_locators,
"fiction": fiction,
- "quality_check_on": quality_check_on,
"apply_text_normalization": apply_text_normalization,
"auto_convert": auto_convert,
"auto_assign_voices": auto_assign_voices,
@@ -2115,7 +2105,6 @@ async def edit_basic_project_info(
author: typing.Optional[str] = OMIT,
isbn_number: typing.Optional[str] = OMIT,
volume_normalization: typing.Optional[bool] = OMIT,
- quality_check_on: typing.Optional[bool] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
) -> EditProjectResponseModel:
"""
@@ -2147,9 +2136,6 @@ async def edit_basic_project_info(
volume_normalization : typing.Optional[bool]
When the Studio project is downloaded, should the returned audio have postprocessing in order to make it compliant with audiobook normalized volume requirements
- quality_check_on : typing.Optional[bool]
- [Depracated] Whether to run quality check on the generated audio and regenerate if needed. Applies to individual block conversion.
-
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
@@ -2172,9 +2158,9 @@ async def edit_basic_project_info(
async def main() -> None:
await client.projects.edit_basic_project_info(
project_id="21m00Tcm4TlvDq8ikWAM",
- name="name",
- default_title_voice_id="default_title_voice_id",
- default_paragraph_voice_id="default_paragraph_voice_id",
+ name="Project 1",
+ default_title_voice_id="21m00Tcm4TlvDq8ikWAM",
+ default_paragraph_voice_id="21m00Tcm4TlvDq8ikWAM",
)
@@ -2191,7 +2177,6 @@ async def main() -> None:
"author": author,
"isbn_number": isbn_number,
"volume_normalization": volume_normalization,
- "quality_check_on": quality_check_on,
},
headers={
"content-type": "application/json",
@@ -2988,7 +2973,7 @@ async def add_chapter_to_a_project(
async def main() -> None:
await client.projects.add_chapter_to_a_project(
project_id="21m00Tcm4TlvDq8ikWAM",
- name="name",
+ name="Chapter 1",
)
@@ -3262,6 +3247,7 @@ async def update_pronunciation_dictionaries(
project_id: str,
*,
pronunciation_dictionary_locators: typing.Sequence[PronunciationDictionaryVersionLocator],
+ invalidate_affected_text: typing.Optional[bool] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
) -> typing.Optional[typing.Any]:
"""
@@ -3275,6 +3261,9 @@ async def update_pronunciation_dictionaries(
pronunciation_dictionary_locators : typing.Sequence[PronunciationDictionaryVersionLocator]
A list of pronunciation dictionary locators (pronunciation_dictionary_id, version_id) encoded as a list of JSON strings for pronunciation dictionaries to be applied to the text. A list of json encoded strings is required as adding projects may occur through formData as opposed to jsonBody. To specify multiple dictionaries use multiple --form lines in your curl, such as --form 'pronunciation_dictionary_locators="{\"pronunciation_dictionary_id\":\"Vmd4Zor6fplcA7WrINey\",\"version_id\":\"hRPaxjlTdR7wFMhV4w0b\"}"' --form 'pronunciation_dictionary_locators="{\"pronunciation_dictionary_id\":\"JzWtcGQMJ6bnlWwyMo7e\",\"version_id\":\"lbmwxiLu4q6txYxgdZqn\"}"'. Note that multiple dictionaries are not currently supported by our UI which will only show the first.
+ invalidate_affected_text : typing.Optional[bool]
+ This will automatically mark text in this project for reconversion when the new dictionary applies or the old one no longer does.
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
@@ -3317,6 +3306,7 @@ async def main() -> None:
annotation=typing.Sequence[PronunciationDictionaryVersionLocator],
direction="write",
),
+ "invalidate_affected_text": invalidate_affected_text,
},
headers={
"content-type": "application/json",
diff --git a/src/elevenlabs/samples/client.py b/src/elevenlabs/samples/client.py
index 96b8df90..5be73d99 100644
--- a/src/elevenlabs/samples/client.py
+++ b/src/elevenlabs/samples/client.py
@@ -3,6 +3,7 @@
from ..core.client_wrapper import SyncClientWrapper
import typing
from ..core.request_options import RequestOptions
+from ..types.delete_sample_response_model import DeleteSampleResponseModel
from ..core.jsonable_encoder import jsonable_encoder
from ..core.unchecked_base_model import construct_type
from ..errors.unprocessable_entity_error import UnprocessableEntityError
@@ -18,7 +19,7 @@ def __init__(self, *, client_wrapper: SyncClientWrapper):
def delete(
self, voice_id: str, sample_id: str, *, request_options: typing.Optional[RequestOptions] = None
- ) -> typing.Optional[typing.Any]:
+ ) -> DeleteSampleResponseModel:
"""
Removes a sample by its ID.
@@ -35,7 +36,7 @@ def delete(
Returns
-------
- typing.Optional[typing.Any]
+ DeleteSampleResponseModel
Successful Response
Examples
@@ -58,9 +59,9 @@ def delete(
try:
if 200 <= _response.status_code < 300:
return typing.cast(
- typing.Optional[typing.Any],
+ DeleteSampleResponseModel,
construct_type(
- type_=typing.Optional[typing.Any], # type: ignore
+ type_=DeleteSampleResponseModel, # type: ignore
object_=_response.json(),
),
)
@@ -147,7 +148,7 @@ def __init__(self, *, client_wrapper: AsyncClientWrapper):
async def delete(
self, voice_id: str, sample_id: str, *, request_options: typing.Optional[RequestOptions] = None
- ) -> typing.Optional[typing.Any]:
+ ) -> DeleteSampleResponseModel:
"""
Removes a sample by its ID.
@@ -164,7 +165,7 @@ async def delete(
Returns
-------
- typing.Optional[typing.Any]
+ DeleteSampleResponseModel
Successful Response
Examples
@@ -195,9 +196,9 @@ async def main() -> None:
try:
if 200 <= _response.status_code < 300:
return typing.cast(
- typing.Optional[typing.Any],
+ DeleteSampleResponseModel,
construct_type(
- type_=typing.Optional[typing.Any], # type: ignore
+ type_=DeleteSampleResponseModel, # type: ignore
object_=_response.json(),
),
)
diff --git a/src/elevenlabs/speech_to_speech/client.py b/src/elevenlabs/speech_to_speech/client.py
index ed4366a9..207f087f 100644
--- a/src/elevenlabs/speech_to_speech/client.py
+++ b/src/elevenlabs/speech_to_speech/client.py
@@ -36,7 +36,7 @@ def convert(
request_options: typing.Optional[RequestOptions] = None,
) -> typing.Iterator[bytes]:
"""
- Create speech by combining the content and emotion of the uploaded audio with a voice of your choice.
+ Transform audio from one voice to another. Maintain full control over emotion, timing and delivery.
Parameters
----------
@@ -80,7 +80,7 @@ def convert(
Yields
------
typing.Iterator[bytes]
- Successful Response
+ The generated audio file
Examples
--------
@@ -152,7 +152,7 @@ def convert_as_stream(
request_options: typing.Optional[RequestOptions] = None,
) -> typing.Iterator[bytes]:
"""
- Create speech by combining the content and emotion of the uploaded audio with a voice of your choice and returns an audio stream.
+ Stream audio from one voice to another. Maintain full control over emotion, timing and delivery.
Parameters
----------
@@ -196,7 +196,7 @@ def convert_as_stream(
Yields
------
typing.Iterator[bytes]
- Successful Response
+ Streaming audio data
Examples
--------
@@ -273,7 +273,7 @@ async def convert(
request_options: typing.Optional[RequestOptions] = None,
) -> typing.AsyncIterator[bytes]:
"""
- Create speech by combining the content and emotion of the uploaded audio with a voice of your choice.
+ Transform audio from one voice to another. Maintain full control over emotion, timing and delivery.
Parameters
----------
@@ -317,7 +317,7 @@ async def convert(
Yields
------
typing.AsyncIterator[bytes]
- Successful Response
+ The generated audio file
Examples
--------
@@ -397,7 +397,7 @@ async def convert_as_stream(
request_options: typing.Optional[RequestOptions] = None,
) -> typing.AsyncIterator[bytes]:
"""
- Create speech by combining the content and emotion of the uploaded audio with a voice of your choice and returns an audio stream.
+ Stream audio from one voice to another. Maintain full control over emotion, timing and delivery.
Parameters
----------
@@ -441,7 +441,7 @@ async def convert_as_stream(
Yields
------
typing.AsyncIterator[bytes]
- Successful Response
+ Streaming audio data
Examples
--------
diff --git a/src/elevenlabs/speech_to_text/__init__.py b/src/elevenlabs/speech_to_text/__init__.py
index f3ea2659..8439be26 100644
--- a/src/elevenlabs/speech_to_text/__init__.py
+++ b/src/elevenlabs/speech_to_text/__init__.py
@@ -1,2 +1,5 @@
# This file was auto-generated by Fern from our API Definition.
+from .types import SpeechToTextConvertRequestTimestampsGranularity
+
+__all__ = ["SpeechToTextConvertRequestTimestampsGranularity"]
diff --git a/src/elevenlabs/speech_to_text/client.py b/src/elevenlabs/speech_to_text/client.py
index aac393df..674f31d1 100644
--- a/src/elevenlabs/speech_to_text/client.py
+++ b/src/elevenlabs/speech_to_text/client.py
@@ -3,6 +3,7 @@
import typing
from ..core.client_wrapper import SyncClientWrapper
from .. import core
+from .types.speech_to_text_convert_request_timestamps_granularity import SpeechToTextConvertRequestTimestampsGranularity
from ..core.request_options import RequestOptions
from ..types.speech_to_text_chunk_response_model import SpeechToTextChunkResponseModel
from ..core.unchecked_base_model import construct_type
@@ -10,9 +11,6 @@
from ..types.http_validation_error import HttpValidationError
from json.decoder import JSONDecodeError
from ..core.api_error import ApiError
-from ..types.speech_to_text_stream_response_model import SpeechToTextStreamResponseModel
-import json
-from ..errors.bad_request_error import BadRequestError
from ..core.client_wrapper import AsyncClientWrapper
# this is used as the default value for optional parameters
@@ -27,10 +25,12 @@ def convert(
self,
*,
model_id: str,
- file: typing.Optional[core.File] = OMIT,
+ file: core.File,
language_code: typing.Optional[str] = OMIT,
tag_audio_events: typing.Optional[bool] = OMIT,
num_speakers: typing.Optional[int] = OMIT,
+ timestamps_granularity: typing.Optional[SpeechToTextConvertRequestTimestampsGranularity] = OMIT,
+ diarize: typing.Optional[bool] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
) -> SpeechToTextChunkResponseModel:
"""
@@ -41,7 +41,7 @@ def convert(
model_id : str
The ID of the model to use for transcription, currently only 'scribe_v1' is available.
- file : typing.Optional[core.File]
+ file : core.File
See core.File for more documentation
language_code : typing.Optional[str]
@@ -51,7 +51,13 @@ def convert(
Whether to tag audio events like (laughter), (footsteps), etc. in the transcription.
num_speakers : typing.Optional[int]
- The maximum amount of speakers talking in the uploaded file. Can help with predicting who speaks when. The maximum amount of speakers that can be predicted is 31. Defaults to null, in this case the amount of speakers is set to the maximum value the model supports.
+ The maximum amount of speakers talking in the uploaded file. Can help with predicting who speaks when. The maximum amount of speakers that can be predicted is 32. Defaults to null, in this case the amount of speakers is set to the maximum value the model supports.
+
+ timestamps_granularity : typing.Optional[SpeechToTextConvertRequestTimestampsGranularity]
+ The granularity of the timestamps in the transcription. 'word' provides word-level timestamps and 'character' provides character-level timestamps per word.
+
+ diarize : typing.Optional[bool]
+ Whether to annotate which speaker is currently talking in the uploaded file. Enabling this will limit the maximum duration of your inputs to 8 minutes.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
@@ -80,6 +86,8 @@ def convert(
"language_code": language_code,
"tag_audio_events": tag_audio_events,
"num_speakers": num_speakers,
+ "timestamps_granularity": timestamps_granularity,
+ "diarize": diarize,
},
files={
"file": file,
@@ -111,114 +119,6 @@ def convert(
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
- def convert_as_stream(
- self,
- *,
- model_id: str,
- file: typing.Optional[core.File] = OMIT,
- language_code: typing.Optional[str] = OMIT,
- tag_audio_events: typing.Optional[bool] = OMIT,
- num_speakers: typing.Optional[int] = OMIT,
- request_options: typing.Optional[RequestOptions] = None,
- ) -> typing.Iterator[SpeechToTextStreamResponseModel]:
- """
- Transcribe an audio or video file with streaming response. Returns chunks of transcription as they become available, with each chunk separated by double newlines (\n\n).
-
- Parameters
- ----------
- model_id : str
- The ID of the model to use for transcription, currently only 'scribe_v1' is available.
-
- file : typing.Optional[core.File]
- See core.File for more documentation
-
- language_code : typing.Optional[str]
- An ISO-639-1 or ISO-639-3 language_code corresponding to the language of the audio file. Can sometimes improve transcription performance if known beforehand. Defaults to null, in this case the language is predicted automatically.
-
- tag_audio_events : typing.Optional[bool]
- Whether to tag audio events like (laughter), (footsteps), etc. in the transcription.
-
- num_speakers : typing.Optional[int]
- The maximum amount of speakers talking in the uploaded file. Can help with predicting who speaks when. The maximum amount of speakers that can be predicted is 31. Defaults to null, in this case the amount of speakers is set to the maximum value the model supports.
-
- request_options : typing.Optional[RequestOptions]
- Request-specific configuration.
-
- Yields
- ------
- typing.Iterator[SpeechToTextStreamResponseModel]
- Stream of transcription chunks
-
- Examples
- --------
- from elevenlabs import ElevenLabs
-
- client = ElevenLabs(
- api_key="YOUR_API_KEY",
- )
- response = client.speech_to_text.convert_as_stream(
- model_id="model_id",
- )
- for chunk in response:
- yield chunk
- """
- with self._client_wrapper.httpx_client.stream(
- "v1/speech-to-text/stream",
- method="POST",
- data={
- "model_id": model_id,
- "language_code": language_code,
- "tag_audio_events": tag_audio_events,
- "num_speakers": num_speakers,
- },
- files={
- "file": file,
- },
- request_options=request_options,
- omit=OMIT,
- ) as _response:
- try:
- if 200 <= _response.status_code < 300:
- for _text in _response.iter_lines():
- try:
- if len(_text) == 0:
- continue
- yield typing.cast(
- SpeechToTextStreamResponseModel,
- construct_type(
- type_=SpeechToTextStreamResponseModel, # type: ignore
- object_=json.loads(_text),
- ),
- )
- except:
- pass
- return
- _response.read()
- if _response.status_code == 400:
- raise BadRequestError(
- typing.cast(
- typing.Optional[typing.Any],
- construct_type(
- type_=typing.Optional[typing.Any], # type: ignore
- object_=_response.json(),
- ),
- )
- )
- if _response.status_code == 422:
- raise UnprocessableEntityError(
- typing.cast(
- HttpValidationError,
- construct_type(
- type_=HttpValidationError, # type: ignore
- object_=_response.json(),
- ),
- )
- )
- _response_json = _response.json()
- except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
-
class AsyncSpeechToTextClient:
def __init__(self, *, client_wrapper: AsyncClientWrapper):
@@ -228,10 +128,12 @@ async def convert(
self,
*,
model_id: str,
- file: typing.Optional[core.File] = OMIT,
+ file: core.File,
language_code: typing.Optional[str] = OMIT,
tag_audio_events: typing.Optional[bool] = OMIT,
num_speakers: typing.Optional[int] = OMIT,
+ timestamps_granularity: typing.Optional[SpeechToTextConvertRequestTimestampsGranularity] = OMIT,
+ diarize: typing.Optional[bool] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
) -> SpeechToTextChunkResponseModel:
"""
@@ -242,7 +144,7 @@ async def convert(
model_id : str
The ID of the model to use for transcription, currently only 'scribe_v1' is available.
- file : typing.Optional[core.File]
+ file : core.File
See core.File for more documentation
language_code : typing.Optional[str]
@@ -252,7 +154,13 @@ async def convert(
Whether to tag audio events like (laughter), (footsteps), etc. in the transcription.
num_speakers : typing.Optional[int]
- The maximum amount of speakers talking in the uploaded file. Can help with predicting who speaks when. The maximum amount of speakers that can be predicted is 31. Defaults to null, in this case the amount of speakers is set to the maximum value the model supports.
+ The maximum amount of speakers talking in the uploaded file. Can help with predicting who speaks when. The maximum amount of speakers that can be predicted is 32. Defaults to null, in this case the amount of speakers is set to the maximum value the model supports.
+
+ timestamps_granularity : typing.Optional[SpeechToTextConvertRequestTimestampsGranularity]
+ The granularity of the timestamps in the transcription. 'word' provides word-level timestamps and 'character' provides character-level timestamps per word.
+
+ diarize : typing.Optional[bool]
+ Whether to annotate which speaker is currently talking in the uploaded file. Enabling this will limit the maximum duration of your inputs to 8 minutes.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
@@ -289,6 +197,8 @@ async def main() -> None:
"language_code": language_code,
"tag_audio_events": tag_audio_events,
"num_speakers": num_speakers,
+ "timestamps_granularity": timestamps_granularity,
+ "diarize": diarize,
},
files={
"file": file,
@@ -319,119 +229,3 @@ async def main() -> None:
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
-
- async def convert_as_stream(
- self,
- *,
- model_id: str,
- file: typing.Optional[core.File] = OMIT,
- language_code: typing.Optional[str] = OMIT,
- tag_audio_events: typing.Optional[bool] = OMIT,
- num_speakers: typing.Optional[int] = OMIT,
- request_options: typing.Optional[RequestOptions] = None,
- ) -> typing.AsyncIterator[SpeechToTextStreamResponseModel]:
- """
- Transcribe an audio or video file with streaming response. Returns chunks of transcription as they become available, with each chunk separated by double newlines (\n\n).
-
- Parameters
- ----------
- model_id : str
- The ID of the model to use for transcription, currently only 'scribe_v1' is available.
-
- file : typing.Optional[core.File]
- See core.File for more documentation
-
- language_code : typing.Optional[str]
- An ISO-639-1 or ISO-639-3 language_code corresponding to the language of the audio file. Can sometimes improve transcription performance if known beforehand. Defaults to null, in this case the language is predicted automatically.
-
- tag_audio_events : typing.Optional[bool]
- Whether to tag audio events like (laughter), (footsteps), etc. in the transcription.
-
- num_speakers : typing.Optional[int]
- The maximum amount of speakers talking in the uploaded file. Can help with predicting who speaks when. The maximum amount of speakers that can be predicted is 31. Defaults to null, in this case the amount of speakers is set to the maximum value the model supports.
-
- request_options : typing.Optional[RequestOptions]
- Request-specific configuration.
-
- Yields
- ------
- typing.AsyncIterator[SpeechToTextStreamResponseModel]
- Stream of transcription chunks
-
- Examples
- --------
- import asyncio
-
- from elevenlabs import AsyncElevenLabs
-
- client = AsyncElevenLabs(
- api_key="YOUR_API_KEY",
- )
-
-
- async def main() -> None:
- response = await client.speech_to_text.convert_as_stream(
- model_id="model_id",
- )
- async for chunk in response:
- yield chunk
-
-
- asyncio.run(main())
- """
- async with self._client_wrapper.httpx_client.stream(
- "v1/speech-to-text/stream",
- method="POST",
- data={
- "model_id": model_id,
- "language_code": language_code,
- "tag_audio_events": tag_audio_events,
- "num_speakers": num_speakers,
- },
- files={
- "file": file,
- },
- request_options=request_options,
- omit=OMIT,
- ) as _response:
- try:
- if 200 <= _response.status_code < 300:
- async for _text in _response.aiter_lines():
- try:
- if len(_text) == 0:
- continue
- yield typing.cast(
- SpeechToTextStreamResponseModel,
- construct_type(
- type_=SpeechToTextStreamResponseModel, # type: ignore
- object_=json.loads(_text),
- ),
- )
- except:
- pass
- return
- await _response.aread()
- if _response.status_code == 400:
- raise BadRequestError(
- typing.cast(
- typing.Optional[typing.Any],
- construct_type(
- type_=typing.Optional[typing.Any], # type: ignore
- object_=_response.json(),
- ),
- )
- )
- if _response.status_code == 422:
- raise UnprocessableEntityError(
- typing.cast(
- HttpValidationError,
- construct_type(
- type_=HttpValidationError, # type: ignore
- object_=_response.json(),
- ),
- )
- )
- _response_json = _response.json()
- except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
diff --git a/src/elevenlabs/speech_to_text/types/__init__.py b/src/elevenlabs/speech_to_text/types/__init__.py
new file mode 100644
index 00000000..56a43da3
--- /dev/null
+++ b/src/elevenlabs/speech_to_text/types/__init__.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from .speech_to_text_convert_request_timestamps_granularity import SpeechToTextConvertRequestTimestampsGranularity
+
+__all__ = ["SpeechToTextConvertRequestTimestampsGranularity"]
diff --git a/src/elevenlabs/speech_to_text/types/speech_to_text_convert_request_timestamps_granularity.py b/src/elevenlabs/speech_to_text/types/speech_to_text_convert_request_timestamps_granularity.py
new file mode 100644
index 00000000..18d12c43
--- /dev/null
+++ b/src/elevenlabs/speech_to_text/types/speech_to_text_convert_request_timestamps_granularity.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+SpeechToTextConvertRequestTimestampsGranularity = typing.Union[typing.Literal["word", "character"], typing.Any]
diff --git a/src/elevenlabs/studio/chapters/client.py b/src/elevenlabs/studio/chapters/client.py
index 6b52ba0a..14c0e512 100644
--- a/src/elevenlabs/studio/chapters/client.py
+++ b/src/elevenlabs/studio/chapters/client.py
@@ -124,7 +124,7 @@ def create(
)
client.studio.chapters.create(
project_id="21m00Tcm4TlvDq8ikWAM",
- name="name",
+ name="Chapter 1",
)
"""
_response = self._client_wrapper.httpx_client.request(
@@ -693,7 +693,7 @@ async def create(
async def main() -> None:
await client.studio.chapters.create(
project_id="21m00Tcm4TlvDq8ikWAM",
- name="name",
+ name="Chapter 1",
)
diff --git a/src/elevenlabs/studio/client.py b/src/elevenlabs/studio/client.py
index 7594aa62..7086fbe6 100644
--- a/src/elevenlabs/studio/client.py
+++ b/src/elevenlabs/studio/client.py
@@ -108,7 +108,7 @@ def create_podcast(
api_key="YOUR_API_KEY",
)
client.studio.create_podcast(
- model_id="model_id",
+ model_id="21m00Tcm4TlvDq8ikWAM",
mode=BodyCreatePodcastV1StudioPodcastsPostMode_Conversation(
conversation=PodcastConversationModeData(
host_voice_id="host_voice_id",
@@ -253,7 +253,7 @@ async def create_podcast(
async def main() -> None:
await client.studio.create_podcast(
- model_id="model_id",
+ model_id="21m00Tcm4TlvDq8ikWAM",
mode=BodyCreatePodcastV1StudioPodcastsPostMode_Conversation(
conversation=PodcastConversationModeData(
host_voice_id="host_voice_id",
diff --git a/src/elevenlabs/studio/projects/client.py b/src/elevenlabs/studio/projects/client.py
index 07ad6e77..b0a4d936 100644
--- a/src/elevenlabs/studio/projects/client.py
+++ b/src/elevenlabs/studio/projects/client.py
@@ -107,7 +107,6 @@ def add(
pronunciation_dictionary_locators: typing.Optional[typing.List[str]] = OMIT,
callback_url: typing.Optional[str] = OMIT,
fiction: typing.Optional[ProjectsAddRequestFiction] = OMIT,
- quality_check_on: typing.Optional[bool] = OMIT,
apply_text_normalization: typing.Optional[ProjectsAddRequestApplyTextNormalization] = OMIT,
auto_convert: typing.Optional[bool] = OMIT,
auto_assign_voices: typing.Optional[bool] = OMIT,
@@ -189,9 +188,6 @@ def add(
fiction : typing.Optional[ProjectsAddRequestFiction]
An optional specification of whether the content of this Studio project is fiction.
- quality_check_on : typing.Optional[bool]
- [Depracated] Whether to run quality check on the generated audio and regenerate if needed. Applies to individual block conversion.
-
apply_text_normalization : typing.Optional[ProjectsAddRequestApplyTextNormalization]
This parameter controls text normalization with four modes: 'auto', 'on', 'apply_english' and 'off'.
@@ -253,7 +249,6 @@ def add(
"pronunciation_dictionary_locators": pronunciation_dictionary_locators,
"callback_url": callback_url,
"fiction": fiction,
- "quality_check_on": quality_check_on,
"apply_text_normalization": apply_text_normalization,
"auto_convert": auto_convert,
"auto_assign_voices": auto_assign_voices,
@@ -358,7 +353,6 @@ def update_metadata(
author: typing.Optional[str] = OMIT,
isbn_number: typing.Optional[str] = OMIT,
volume_normalization: typing.Optional[bool] = OMIT,
- quality_check_on: typing.Optional[bool] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
) -> EditProjectResponseModel:
"""
@@ -390,9 +384,6 @@ def update_metadata(
volume_normalization : typing.Optional[bool]
When the Studio project is downloaded, should the returned audio have postprocessing in order to make it compliant with audiobook normalized volume requirements
- quality_check_on : typing.Optional[bool]
- [Depracated] Whether to run quality check on the generated audio and regenerate if needed. Applies to individual block conversion.
-
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
@@ -410,9 +401,9 @@ def update_metadata(
)
client.studio.projects.update_metadata(
project_id="21m00Tcm4TlvDq8ikWAM",
- name="name",
- default_title_voice_id="default_title_voice_id",
- default_paragraph_voice_id="default_paragraph_voice_id",
+ name="Project 1",
+ default_title_voice_id="21m00Tcm4TlvDq8ikWAM",
+ default_paragraph_voice_id="21m00Tcm4TlvDq8ikWAM",
)
"""
_response = self._client_wrapper.httpx_client.request(
@@ -426,7 +417,6 @@ def update_metadata(
"author": author,
"isbn_number": isbn_number,
"volume_normalization": volume_normalization,
- "quality_check_on": quality_check_on,
},
headers={
"content-type": "application/json",
@@ -849,6 +839,7 @@ def update_pronunciation_dictionaries(
project_id: str,
*,
pronunciation_dictionary_locators: typing.Sequence[PronunciationDictionaryVersionLocator],
+ invalidate_affected_text: typing.Optional[bool] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
) -> typing.Optional[typing.Any]:
"""
@@ -862,6 +853,9 @@ def update_pronunciation_dictionaries(
pronunciation_dictionary_locators : typing.Sequence[PronunciationDictionaryVersionLocator]
A list of pronunciation dictionary locators (pronunciation_dictionary_id, version_id) encoded as a list of JSON strings for pronunciation dictionaries to be applied to the text. A list of json encoded strings is required as adding projects may occur through formData as opposed to jsonBody. To specify multiple dictionaries use multiple --form lines in your curl, such as --form 'pronunciation_dictionary_locators="{\"pronunciation_dictionary_id\":\"Vmd4Zor6fplcA7WrINey\",\"version_id\":\"hRPaxjlTdR7wFMhV4w0b\"}"' --form 'pronunciation_dictionary_locators="{\"pronunciation_dictionary_id\":\"JzWtcGQMJ6bnlWwyMo7e\",\"version_id\":\"lbmwxiLu4q6txYxgdZqn\"}"'. Note that multiple dictionaries are not currently supported by our UI which will only show the first.
+ invalidate_affected_text : typing.Optional[bool]
+ This will automatically mark text in this project for reconversion when the new dictionary applies or the old one no longer does.
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
@@ -896,6 +890,7 @@ def update_pronunciation_dictionaries(
annotation=typing.Sequence[PronunciationDictionaryVersionLocator],
direction="write",
),
+ "invalidate_affected_text": invalidate_affected_text,
},
headers={
"content-type": "application/json",
@@ -1017,7 +1012,6 @@ async def add(
pronunciation_dictionary_locators: typing.Optional[typing.List[str]] = OMIT,
callback_url: typing.Optional[str] = OMIT,
fiction: typing.Optional[ProjectsAddRequestFiction] = OMIT,
- quality_check_on: typing.Optional[bool] = OMIT,
apply_text_normalization: typing.Optional[ProjectsAddRequestApplyTextNormalization] = OMIT,
auto_convert: typing.Optional[bool] = OMIT,
auto_assign_voices: typing.Optional[bool] = OMIT,
@@ -1099,9 +1093,6 @@ async def add(
fiction : typing.Optional[ProjectsAddRequestFiction]
An optional specification of whether the content of this Studio project is fiction.
- quality_check_on : typing.Optional[bool]
- [Depracated] Whether to run quality check on the generated audio and regenerate if needed. Applies to individual block conversion.
-
apply_text_normalization : typing.Optional[ProjectsAddRequestApplyTextNormalization]
This parameter controls text normalization with four modes: 'auto', 'on', 'apply_english' and 'off'.
@@ -1171,7 +1162,6 @@ async def main() -> None:
"pronunciation_dictionary_locators": pronunciation_dictionary_locators,
"callback_url": callback_url,
"fiction": fiction,
- "quality_check_on": quality_check_on,
"apply_text_normalization": apply_text_normalization,
"auto_convert": auto_convert,
"auto_assign_voices": auto_assign_voices,
@@ -1284,7 +1274,6 @@ async def update_metadata(
author: typing.Optional[str] = OMIT,
isbn_number: typing.Optional[str] = OMIT,
volume_normalization: typing.Optional[bool] = OMIT,
- quality_check_on: typing.Optional[bool] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
) -> EditProjectResponseModel:
"""
@@ -1316,9 +1305,6 @@ async def update_metadata(
volume_normalization : typing.Optional[bool]
When the Studio project is downloaded, should the returned audio have postprocessing in order to make it compliant with audiobook normalized volume requirements
- quality_check_on : typing.Optional[bool]
- [Depracated] Whether to run quality check on the generated audio and regenerate if needed. Applies to individual block conversion.
-
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
@@ -1341,9 +1327,9 @@ async def update_metadata(
async def main() -> None:
await client.studio.projects.update_metadata(
project_id="21m00Tcm4TlvDq8ikWAM",
- name="name",
- default_title_voice_id="default_title_voice_id",
- default_paragraph_voice_id="default_paragraph_voice_id",
+ name="Project 1",
+ default_title_voice_id="21m00Tcm4TlvDq8ikWAM",
+ default_paragraph_voice_id="21m00Tcm4TlvDq8ikWAM",
)
@@ -1360,7 +1346,6 @@ async def main() -> None:
"author": author,
"isbn_number": isbn_number,
"volume_normalization": volume_normalization,
- "quality_check_on": quality_check_on,
},
headers={
"content-type": "application/json",
@@ -1831,6 +1816,7 @@ async def update_pronunciation_dictionaries(
project_id: str,
*,
pronunciation_dictionary_locators: typing.Sequence[PronunciationDictionaryVersionLocator],
+ invalidate_affected_text: typing.Optional[bool] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
) -> typing.Optional[typing.Any]:
"""
@@ -1844,6 +1830,9 @@ async def update_pronunciation_dictionaries(
pronunciation_dictionary_locators : typing.Sequence[PronunciationDictionaryVersionLocator]
A list of pronunciation dictionary locators (pronunciation_dictionary_id, version_id) encoded as a list of JSON strings for pronunciation dictionaries to be applied to the text. A list of json encoded strings is required as adding projects may occur through formData as opposed to jsonBody. To specify multiple dictionaries use multiple --form lines in your curl, such as --form 'pronunciation_dictionary_locators="{\"pronunciation_dictionary_id\":\"Vmd4Zor6fplcA7WrINey\",\"version_id\":\"hRPaxjlTdR7wFMhV4w0b\"}"' --form 'pronunciation_dictionary_locators="{\"pronunciation_dictionary_id\":\"JzWtcGQMJ6bnlWwyMo7e\",\"version_id\":\"lbmwxiLu4q6txYxgdZqn\"}"'. Note that multiple dictionaries are not currently supported by our UI which will only show the first.
+ invalidate_affected_text : typing.Optional[bool]
+ This will automatically mark text in this project for reconversion when the new dictionary applies or the old one no longer does.
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
@@ -1886,6 +1875,7 @@ async def main() -> None:
annotation=typing.Sequence[PronunciationDictionaryVersionLocator],
direction="write",
),
+ "invalidate_affected_text": invalidate_affected_text,
},
headers={
"content-type": "application/json",
diff --git a/src/elevenlabs/text_to_speech/__init__.py b/src/elevenlabs/text_to_speech/__init__.py
index 2ec7be88..3fadc417 100644
--- a/src/elevenlabs/text_to_speech/__init__.py
+++ b/src/elevenlabs/text_to_speech/__init__.py
@@ -5,9 +5,6 @@
BodyTextToSpeechStreamingWithTimestampsV1TextToSpeechVoiceIdStreamWithTimestampsPostApplyTextNormalization,
BodyTextToSpeechV1TextToSpeechVoiceIdPostApplyTextNormalization,
BodyTextToSpeechWithTimestampsV1TextToSpeechVoiceIdWithTimestampsPostApplyTextNormalization,
- TextToSpeechStreamWithTimestampsResponse,
- TextToSpeechStreamWithTimestampsResponseAlignment,
- TextToSpeechStreamWithTimestampsResponseNormalizedAlignment,
)
__all__ = [
@@ -15,7 +12,4 @@
"BodyTextToSpeechStreamingWithTimestampsV1TextToSpeechVoiceIdStreamWithTimestampsPostApplyTextNormalization",
"BodyTextToSpeechV1TextToSpeechVoiceIdPostApplyTextNormalization",
"BodyTextToSpeechWithTimestampsV1TextToSpeechVoiceIdWithTimestampsPostApplyTextNormalization",
- "TextToSpeechStreamWithTimestampsResponse",
- "TextToSpeechStreamWithTimestampsResponseAlignment",
- "TextToSpeechStreamWithTimestampsResponseNormalizedAlignment",
]
diff --git a/src/elevenlabs/text_to_speech/client.py b/src/elevenlabs/text_to_speech/client.py
index b99902cb..78116583 100644
--- a/src/elevenlabs/text_to_speech/client.py
+++ b/src/elevenlabs/text_to_speech/client.py
@@ -19,13 +19,14 @@
from .types.body_text_to_speech_with_timestamps_v_1_text_to_speech_voice_id_with_timestamps_post_apply_text_normalization import (
BodyTextToSpeechWithTimestampsV1TextToSpeechVoiceIdWithTimestampsPostApplyTextNormalization,
)
+from ..types.audio_with_timestamps_response_model import AudioWithTimestampsResponseModel
from .types.body_text_to_speech_streaming_v_1_text_to_speech_voice_id_stream_post_apply_text_normalization import (
BodyTextToSpeechStreamingV1TextToSpeechVoiceIdStreamPostApplyTextNormalization,
)
from .types.body_text_to_speech_streaming_with_timestamps_v_1_text_to_speech_voice_id_stream_with_timestamps_post_apply_text_normalization import (
BodyTextToSpeechStreamingWithTimestampsV1TextToSpeechVoiceIdStreamWithTimestampsPostApplyTextNormalization,
)
-from .types.text_to_speech_stream_with_timestamps_response import TextToSpeechStreamWithTimestampsResponse
+from ..types.streaming_audio_chunk_with_timestamps_response_model import StreamingAudioChunkWithTimestampsResponseModel
import json
from ..core.client_wrapper import AsyncClientWrapper
@@ -105,16 +106,16 @@ def convert(
If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed. Must be integer between 0 and 4294967295.
previous_text : typing.Optional[str]
- The text that came before the text of the current request. Can be used to improve the flow of prosody when concatenating together multiple generations or to influence the prosody in the current generation.
+ The text that came before the text of the current request. Can be used to improve the speech's continuity when concatenating together multiple generations or to influence the speech's continuity in the current generation.
next_text : typing.Optional[str]
- The text that comes after the text of the current request. Can be used to improve the flow of prosody when concatenating together multiple generations or to influence the prosody in the current generation.
+ The text that comes after the text of the current request. Can be used to improve the speech's continuity when concatenating together multiple generations or to influence the speech's continuity in the current generation.
previous_request_ids : typing.Optional[typing.Sequence[str]]
- A list of request_id of the samples that were generated before this generation. Can be used to improve the flow of prosody when splitting up a large task into multiple requests. The results will be best when the same model is used across the generations. In case both previous_text and previous_request_ids is send, previous_text will be ignored. A maximum of 3 request_ids can be send.
+ A list of request_id of the samples that were generated before this generation. Can be used to improve the speech's continuity when splitting up a large task into multiple requests. The results will be best when the same model is used across the generations. In case both previous_text and previous_request_ids is send, previous_text will be ignored. A maximum of 3 request_ids can be send.
next_request_ids : typing.Optional[typing.Sequence[str]]
- A list of request_id of the samples that were generated before this generation. Can be used to improve the flow of prosody when splitting up a large task into multiple requests. The results will be best when the same model is used across the generations. In case both next_text and next_request_ids is send, next_text will be ignored. A maximum of 3 request_ids can be send.
+ A list of request_id of the samples that come after this generation. next_request_ids is especially useful for maintaining the speech's continuity when regenerating a sample that has had some audio quality issues. For example, if you have generated 3 speech clips, and you want to improve clip 2, passing the request id of clip 3 as a next_request_id (and that of clip 1 as a previous_request_id) will help maintain natural flow in the combined speech. The results will be best when the same model is used across the generations. In case both next_text and next_request_ids is send, next_text will be ignored. A maximum of 3 request_ids can be send.
use_pvc_as_ivc : typing.Optional[bool]
If true, we won't use PVC version of the voice for the generation but the IVC version. This is a temporary workaround for higher latency in PVC versions.
@@ -128,7 +129,7 @@ def convert(
Yields
------
typing.Iterator[bytes]
- Successful Response
+ The generated audio file
Examples
--------
@@ -224,9 +225,9 @@ def convert_with_timestamps(
BodyTextToSpeechWithTimestampsV1TextToSpeechVoiceIdWithTimestampsPostApplyTextNormalization
] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
- ) -> typing.Optional[typing.Any]:
+ ) -> AudioWithTimestampsResponseModel:
"""
- Converts text into speech using a voice of your choice and returns JSON containing audio as a base64 encoded string together with information on when which character was spoken.
+ Generate speech from text with precise character-level timing information for audio-text synchronization.
Parameters
----------
@@ -268,16 +269,16 @@ def convert_with_timestamps(
If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed. Must be integer between 0 and 4294967295.
previous_text : typing.Optional[str]
- The text that came before the text of the current request. Can be used to improve the flow of prosody when concatenating together multiple generations or to influence the prosody in the current generation.
+ The text that came before the text of the current request. Can be used to improve the speech's continuity when concatenating together multiple generations or to influence the speech's continuity in the current generation.
next_text : typing.Optional[str]
- The text that comes after the text of the current request. Can be used to improve the flow of prosody when concatenating together multiple generations or to influence the prosody in the current generation.
+ The text that comes after the text of the current request. Can be used to improve the speech's continuity when concatenating together multiple generations or to influence the speech's continuity in the current generation.
previous_request_ids : typing.Optional[typing.Sequence[str]]
- A list of request_id of the samples that were generated before this generation. Can be used to improve the flow of prosody when splitting up a large task into multiple requests. The results will be best when the same model is used across the generations. In case both previous_text and previous_request_ids is send, previous_text will be ignored. A maximum of 3 request_ids can be send.
+ A list of request_id of the samples that were generated before this generation. Can be used to improve the speech's continuity when splitting up a large task into multiple requests. The results will be best when the same model is used across the generations. In case both previous_text and previous_request_ids is send, previous_text will be ignored. A maximum of 3 request_ids can be send.
next_request_ids : typing.Optional[typing.Sequence[str]]
- A list of request_id of the samples that were generated before this generation. Can be used to improve the flow of prosody when splitting up a large task into multiple requests. The results will be best when the same model is used across the generations. In case both next_text and next_request_ids is send, next_text will be ignored. A maximum of 3 request_ids can be send.
+ A list of request_id of the samples that come after this generation. next_request_ids is especially useful for maintaining the speech's continuity when regenerating a sample that has had some audio quality issues. For example, if you have generated 3 speech clips, and you want to improve clip 2, passing the request id of clip 3 as a next_request_id (and that of clip 1 as a previous_request_id) will help maintain natural flow in the combined speech. The results will be best when the same model is used across the generations. In case both next_text and next_request_ids is send, next_text will be ignored. A maximum of 3 request_ids can be send.
use_pvc_as_ivc : typing.Optional[bool]
If true, we won't use PVC version of the voice for the generation but the IVC version. This is a temporary workaround for higher latency in PVC versions.
@@ -290,7 +291,7 @@ def convert_with_timestamps(
Returns
-------
- typing.Optional[typing.Any]
+ AudioWithTimestampsResponseModel
Successful Response
Examples
@@ -301,10 +302,8 @@ def convert_with_timestamps(
api_key="YOUR_API_KEY",
)
client.text_to_speech.convert_with_timestamps(
- voice_id="JBFqnCBsd6RMkjVDRZzb",
- output_format="mp3_44100_128",
- text="The first move is what sets everything in motion.",
- model_id="eleven_multilingual_v2",
+ voice_id="21m00Tcm4TlvDq8ikWAM",
+ text="This is a test for the API of ElevenLabs.",
)
"""
_response = self._client_wrapper.httpx_client.request(
@@ -344,9 +343,9 @@ def convert_with_timestamps(
try:
if 200 <= _response.status_code < 300:
return typing.cast(
- typing.Optional[typing.Any],
+ AudioWithTimestampsResponseModel,
construct_type(
- type_=typing.Optional[typing.Any], # type: ignore
+ type_=AudioWithTimestampsResponseModel, # type: ignore
object_=_response.json(),
),
)
@@ -433,16 +432,16 @@ def convert_as_stream(
If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed. Must be integer between 0 and 4294967295.
previous_text : typing.Optional[str]
- The text that came before the text of the current request. Can be used to improve the flow of prosody when concatenating together multiple generations or to influence the prosody in the current generation.
+ The text that came before the text of the current request. Can be used to improve the speech's continuity when concatenating together multiple generations or to influence the speech's continuity in the current generation.
next_text : typing.Optional[str]
- The text that comes after the text of the current request. Can be used to improve the flow of prosody when concatenating together multiple generations or to influence the prosody in the current generation.
+ The text that comes after the text of the current request. Can be used to improve the speech's continuity when concatenating together multiple generations or to influence the speech's continuity in the current generation.
previous_request_ids : typing.Optional[typing.Sequence[str]]
- A list of request_id of the samples that were generated before this generation. Can be used to improve the flow of prosody when splitting up a large task into multiple requests. The results will be best when the same model is used across the generations. In case both previous_text and previous_request_ids is send, previous_text will be ignored. A maximum of 3 request_ids can be send.
+ A list of request_id of the samples that were generated before this generation. Can be used to improve the speech's continuity when splitting up a large task into multiple requests. The results will be best when the same model is used across the generations. In case both previous_text and previous_request_ids is send, previous_text will be ignored. A maximum of 3 request_ids can be send.
next_request_ids : typing.Optional[typing.Sequence[str]]
- A list of request_id of the samples that were generated before this generation. Can be used to improve the flow of prosody when splitting up a large task into multiple requests. The results will be best when the same model is used across the generations. In case both next_text and next_request_ids is send, next_text will be ignored. A maximum of 3 request_ids can be send.
+ A list of request_id of the samples that come after this generation. next_request_ids is especially useful for maintaining the speech's continuity when regenerating a sample that has had some audio quality issues. For example, if you have generated 3 speech clips, and you want to improve clip 2, passing the request id of clip 3 as a next_request_id (and that of clip 1 as a previous_request_id) will help maintain natural flow in the combined speech. The results will be best when the same model is used across the generations. In case both next_text and next_request_ids is send, next_text will be ignored. A maximum of 3 request_ids can be send.
use_pvc_as_ivc : typing.Optional[bool]
If true, we won't use PVC version of the voice for the generation but the IVC version. This is a temporary workaround for higher latency in PVC versions.
@@ -456,7 +455,7 @@ def convert_as_stream(
Yields
------
typing.Iterator[bytes]
- Successful Response
+ Streaming audio data
Examples
--------
@@ -552,7 +551,7 @@ def stream_with_timestamps(
BodyTextToSpeechStreamingWithTimestampsV1TextToSpeechVoiceIdStreamWithTimestampsPostApplyTextNormalization
] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
- ) -> typing.Iterator[TextToSpeechStreamWithTimestampsResponse]:
+ ) -> typing.Iterator[StreamingAudioChunkWithTimestampsResponseModel]:
"""
Converts text into speech using a voice of your choice and returns a stream of JSONs containing audio as a base64 encoded string together with information on when which character was spoken.
@@ -596,16 +595,16 @@ def stream_with_timestamps(
If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed. Must be integer between 0 and 4294967295.
previous_text : typing.Optional[str]
- The text that came before the text of the current request. Can be used to improve the flow of prosody when concatenating together multiple generations or to influence the prosody in the current generation.
+ The text that came before the text of the current request. Can be used to improve the speech's continuity when concatenating together multiple generations or to influence the speech's continuity in the current generation.
next_text : typing.Optional[str]
- The text that comes after the text of the current request. Can be used to improve the flow of prosody when concatenating together multiple generations or to influence the prosody in the current generation.
+ The text that comes after the text of the current request. Can be used to improve the speech's continuity when concatenating together multiple generations or to influence the speech's continuity in the current generation.
previous_request_ids : typing.Optional[typing.Sequence[str]]
- A list of request_id of the samples that were generated before this generation. Can be used to improve the flow of prosody when splitting up a large task into multiple requests. The results will be best when the same model is used across the generations. In case both previous_text and previous_request_ids is send, previous_text will be ignored. A maximum of 3 request_ids can be send.
+ A list of request_id of the samples that were generated before this generation. Can be used to improve the speech's continuity when splitting up a large task into multiple requests. The results will be best when the same model is used across the generations. In case both previous_text and previous_request_ids is send, previous_text will be ignored. A maximum of 3 request_ids can be send.
next_request_ids : typing.Optional[typing.Sequence[str]]
- A list of request_id of the samples that were generated before this generation. Can be used to improve the flow of prosody when splitting up a large task into multiple requests. The results will be best when the same model is used across the generations. In case both next_text and next_request_ids is send, next_text will be ignored. A maximum of 3 request_ids can be send.
+ A list of request_id of the samples that come after this generation. next_request_ids is especially useful for maintaining the speech's continuity when regenerating a sample that has had some audio quality issues. For example, if you have generated 3 speech clips, and you want to improve clip 2, passing the request id of clip 3 as a next_request_id (and that of clip 1 as a previous_request_id) will help maintain natural flow in the combined speech. The results will be best when the same model is used across the generations. In case both next_text and next_request_ids is send, next_text will be ignored. A maximum of 3 request_ids can be send.
use_pvc_as_ivc : typing.Optional[bool]
If true, we won't use PVC version of the voice for the generation but the IVC version. This is a temporary workaround for higher latency in PVC versions.
@@ -618,7 +617,7 @@ def stream_with_timestamps(
Yields
------
- typing.Iterator[TextToSpeechStreamWithTimestampsResponse]
+ typing.Iterator[StreamingAudioChunkWithTimestampsResponseModel]
Stream of JSON objects containing audio chunks and character timing information
Examples
@@ -678,9 +677,9 @@ def stream_with_timestamps(
if len(_text) == 0:
continue
yield typing.cast(
- TextToSpeechStreamWithTimestampsResponse,
+ StreamingAudioChunkWithTimestampsResponseModel,
construct_type(
- type_=TextToSpeechStreamWithTimestampsResponse, # type: ignore
+ type_=StreamingAudioChunkWithTimestampsResponseModel, # type: ignore
object_=json.loads(_text),
),
)
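
The new next_request_ids wording describes a concrete regeneration workflow that is easy to miss in prose. A minimal sketch against the synchronous client, assuming `convert` yields audio bytes as its async counterpart below does; the two request-id variables are hypothetical placeholders, since capturing request ids is not shown in this diff:

```python
from elevenlabs import ElevenLabs

client = ElevenLabs(api_key="YOUR_API_KEY")

# Hypothetical placeholders: the request ids of the clips surrounding the
# one being regenerated, captured however your integration records them.
clip_1_request_id = "..."
clip_3_request_id = "..."

# Regenerate clip 2 while keeping continuity with its neighbours.
audio = b"".join(
    client.text_to_speech.convert(
        voice_id="21m00Tcm4TlvDq8ikWAM",
        text="A better take on the middle clip.",
        previous_request_ids=[clip_1_request_id],  # at most 3 ids
        next_request_ids=[clip_3_request_id],      # at most 3 ids
    )
)
```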
@@ -776,16 +775,16 @@ async def convert(
If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed. Must be integer between 0 and 4294967295.
previous_text : typing.Optional[str]
- The text that came before the text of the current request. Can be used to improve the flow of prosody when concatenating together multiple generations or to influence the prosody in the current generation.
+ The text that came before the text of the current request. Can be used to improve the speech's continuity when concatenating multiple generations, or to influence the continuity of the current generation.
next_text : typing.Optional[str]
- The text that comes after the text of the current request. Can be used to improve the flow of prosody when concatenating together multiple generations or to influence the prosody in the current generation.
+ The text that comes after the text of the current request. Can be used to improve the speech's continuity when concatenating multiple generations, or to influence the continuity of the current generation.
previous_request_ids : typing.Optional[typing.Sequence[str]]
- A list of request_id of the samples that were generated before this generation. Can be used to improve the flow of prosody when splitting up a large task into multiple requests. The results will be best when the same model is used across the generations. In case both previous_text and previous_request_ids is send, previous_text will be ignored. A maximum of 3 request_ids can be send.
+ A list of request_ids of the samples that were generated before this generation. Can be used to improve the speech's continuity when splitting up a large task into multiple requests. The results will be best when the same model is used across the generations. If both previous_text and previous_request_ids are sent, previous_text will be ignored. A maximum of 3 request_ids can be sent.
next_request_ids : typing.Optional[typing.Sequence[str]]
- A list of request_id of the samples that were generated before this generation. Can be used to improve the flow of prosody when splitting up a large task into multiple requests. The results will be best when the same model is used across the generations. In case both next_text and next_request_ids is send, next_text will be ignored. A maximum of 3 request_ids can be send.
+ A list of request_ids of the samples that come after this generation. next_request_ids is especially useful for maintaining the speech's continuity when regenerating a sample that had audio quality issues. For example, if you have generated 3 speech clips and want to improve clip 2, passing the request id of clip 3 as a next_request_id (and that of clip 1 as a previous_request_id) will help maintain natural flow in the combined speech. The results will be best when the same model is used across the generations. If both next_text and next_request_ids are sent, next_text will be ignored. A maximum of 3 request_ids can be sent.
use_pvc_as_ivc : typing.Optional[bool]
If true, we won't use PVC version of the voice for the generation but the IVC version. This is a temporary workaround for higher latency in PVC versions.
@@ -799,7 +798,7 @@ async def convert(
Yields
------
typing.AsyncIterator[bytes]
- Successful Response
+ The generated audio file
Examples
--------
@@ -903,9 +902,9 @@ async def convert_with_timestamps(
BodyTextToSpeechWithTimestampsV1TextToSpeechVoiceIdWithTimestampsPostApplyTextNormalization
] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
- ) -> typing.Optional[typing.Any]:
+ ) -> AudioWithTimestampsResponseModel:
"""
- Converts text into speech using a voice of your choice and returns JSON containing audio as a base64 encoded string together with information on when which character was spoken.
+ Generate speech from text with precise character-level timing information for audio-text synchronization.
Parameters
----------
@@ -947,16 +946,16 @@ async def convert_with_timestamps(
If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed. Must be integer between 0 and 4294967295.
previous_text : typing.Optional[str]
- The text that came before the text of the current request. Can be used to improve the flow of prosody when concatenating together multiple generations or to influence the prosody in the current generation.
+ The text that came before the text of the current request. Can be used to improve the speech's continuity when concatenating multiple generations, or to influence the continuity of the current generation.
next_text : typing.Optional[str]
- The text that comes after the text of the current request. Can be used to improve the flow of prosody when concatenating together multiple generations or to influence the prosody in the current generation.
+ The text that comes after the text of the current request. Can be used to improve the speech's continuity when concatenating multiple generations, or to influence the continuity of the current generation.
previous_request_ids : typing.Optional[typing.Sequence[str]]
- A list of request_id of the samples that were generated before this generation. Can be used to improve the flow of prosody when splitting up a large task into multiple requests. The results will be best when the same model is used across the generations. In case both previous_text and previous_request_ids is send, previous_text will be ignored. A maximum of 3 request_ids can be send.
+ A list of request_ids of the samples that were generated before this generation. Can be used to improve the speech's continuity when splitting up a large task into multiple requests. The results will be best when the same model is used across the generations. If both previous_text and previous_request_ids are sent, previous_text will be ignored. A maximum of 3 request_ids can be sent.
next_request_ids : typing.Optional[typing.Sequence[str]]
- A list of request_id of the samples that were generated before this generation. Can be used to improve the flow of prosody when splitting up a large task into multiple requests. The results will be best when the same model is used across the generations. In case both next_text and next_request_ids is send, next_text will be ignored. A maximum of 3 request_ids can be send.
+ A list of request_ids of the samples that come after this generation. next_request_ids is especially useful for maintaining the speech's continuity when regenerating a sample that had audio quality issues. For example, if you have generated 3 speech clips and want to improve clip 2, passing the request id of clip 3 as a next_request_id (and that of clip 1 as a previous_request_id) will help maintain natural flow in the combined speech. The results will be best when the same model is used across the generations. If both next_text and next_request_ids are sent, next_text will be ignored. A maximum of 3 request_ids can be sent.
use_pvc_as_ivc : typing.Optional[bool]
If true, we won't use PVC version of the voice for the generation but the IVC version. This is a temporary workaround for higher latency in PVC versions.
@@ -969,7 +968,7 @@ async def convert_with_timestamps(
Returns
-------
- typing.Optional[typing.Any]
+ AudioWithTimestampsResponseModel
Successful Response
Examples
@@ -985,10 +984,8 @@ async def convert_with_timestamps(
async def main() -> None:
await client.text_to_speech.convert_with_timestamps(
- voice_id="JBFqnCBsd6RMkjVDRZzb",
- output_format="mp3_44100_128",
- text="The first move is what sets everything in motion.",
- model_id="eleven_multilingual_v2",
+ voice_id="21m00Tcm4TlvDq8ikWAM",
+ text="This is a test for the API of ElevenLabs.",
)
@@ -1031,9 +1028,9 @@ async def main() -> None:
try:
if 200 <= _response.status_code < 300:
return typing.cast(
- typing.Optional[typing.Any],
+ AudioWithTimestampsResponseModel,
construct_type(
- type_=typing.Optional[typing.Any], # type: ignore
+ type_=AudioWithTimestampsResponseModel, # type: ignore
object_=_response.json(),
),
)
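
With the return type narrowed from `typing.Optional[typing.Any]` to `AudioWithTimestampsResponseModel`, callers get typed access to the audio and alignment data. A sketch mirroring the updated example above, using the fields the new model defines later in this diff:

```python
import asyncio
import base64

from elevenlabs import AsyncElevenLabs

client = AsyncElevenLabs(api_key="YOUR_API_KEY")


async def main() -> None:
    result = await client.text_to_speech.convert_with_timestamps(
        voice_id="21m00Tcm4TlvDq8ikWAM",
        text="This is a test for the API of ElevenLabs.",
    )
    # audio_base_64 / alignment per AudioWithTimestampsResponseModel below.
    with open("output.mp3", "wb") as f:
        f.write(base64.b64decode(result.audio_base_64))
    if result.alignment is not None:
        for ch, start, end in zip(
            result.alignment.characters,
            result.alignment.character_start_times_seconds,
            result.alignment.character_end_times_seconds,
        ):
            print(f"{ch!r}: {start:.2f}s-{end:.2f}s")


asyncio.run(main())
```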
@@ -1120,16 +1117,16 @@ async def convert_as_stream(
If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed. Must be integer between 0 and 4294967295.
previous_text : typing.Optional[str]
- The text that came before the text of the current request. Can be used to improve the flow of prosody when concatenating together multiple generations or to influence the prosody in the current generation.
+ The text that came before the text of the current request. Can be used to improve the speech's continuity when concatenating multiple generations, or to influence the continuity of the current generation.
next_text : typing.Optional[str]
- The text that comes after the text of the current request. Can be used to improve the flow of prosody when concatenating together multiple generations or to influence the prosody in the current generation.
+ The text that comes after the text of the current request. Can be used to improve the speech's continuity when concatenating multiple generations, or to influence the continuity of the current generation.
previous_request_ids : typing.Optional[typing.Sequence[str]]
- A list of request_id of the samples that were generated before this generation. Can be used to improve the flow of prosody when splitting up a large task into multiple requests. The results will be best when the same model is used across the generations. In case both previous_text and previous_request_ids is send, previous_text will be ignored. A maximum of 3 request_ids can be send.
+ A list of request_ids of the samples that were generated before this generation. Can be used to improve the speech's continuity when splitting up a large task into multiple requests. The results will be best when the same model is used across the generations. If both previous_text and previous_request_ids are sent, previous_text will be ignored. A maximum of 3 request_ids can be sent.
next_request_ids : typing.Optional[typing.Sequence[str]]
- A list of request_id of the samples that were generated before this generation. Can be used to improve the flow of prosody when splitting up a large task into multiple requests. The results will be best when the same model is used across the generations. In case both next_text and next_request_ids is send, next_text will be ignored. A maximum of 3 request_ids can be send.
+ A list of request_ids of the samples that come after this generation. next_request_ids is especially useful for maintaining the speech's continuity when regenerating a sample that had audio quality issues. For example, if you have generated 3 speech clips and want to improve clip 2, passing the request id of clip 3 as a next_request_id (and that of clip 1 as a previous_request_id) will help maintain natural flow in the combined speech. The results will be best when the same model is used across the generations. If both next_text and next_request_ids are sent, next_text will be ignored. A maximum of 3 request_ids can be sent.
use_pvc_as_ivc : typing.Optional[bool]
If true, we won't use PVC version of the voice for the generation but the IVC version. This is a temporary workaround for higher latency in PVC versions.
@@ -1143,7 +1140,7 @@ async def convert_as_stream(
Yields
------
typing.AsyncIterator[bytes]
- Successful Response
+ Streaming audio data
Examples
--------
@@ -1247,7 +1244,7 @@ async def stream_with_timestamps(
BodyTextToSpeechStreamingWithTimestampsV1TextToSpeechVoiceIdStreamWithTimestampsPostApplyTextNormalization
] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
- ) -> typing.AsyncIterator[TextToSpeechStreamWithTimestampsResponse]:
+ ) -> typing.AsyncIterator[StreamingAudioChunkWithTimestampsResponseModel]:
"""
Converts text into speech using a voice of your choice and returns a stream of JSONs containing audio as a base64 encoded string together with information on when which character was spoken.
@@ -1291,16 +1288,16 @@ async def stream_with_timestamps(
If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed. Must be integer between 0 and 4294967295.
previous_text : typing.Optional[str]
- The text that came before the text of the current request. Can be used to improve the flow of prosody when concatenating together multiple generations or to influence the prosody in the current generation.
+ The text that came before the text of the current request. Can be used to improve the speech's continuity when concatenating multiple generations, or to influence the continuity of the current generation.
next_text : typing.Optional[str]
- The text that comes after the text of the current request. Can be used to improve the flow of prosody when concatenating together multiple generations or to influence the prosody in the current generation.
+ The text that comes after the text of the current request. Can be used to improve the speech's continuity when concatenating multiple generations, or to influence the continuity of the current generation.
previous_request_ids : typing.Optional[typing.Sequence[str]]
- A list of request_id of the samples that were generated before this generation. Can be used to improve the flow of prosody when splitting up a large task into multiple requests. The results will be best when the same model is used across the generations. In case both previous_text and previous_request_ids is send, previous_text will be ignored. A maximum of 3 request_ids can be send.
+ A list of request_ids of the samples that were generated before this generation. Can be used to improve the speech's continuity when splitting up a large task into multiple requests. The results will be best when the same model is used across the generations. If both previous_text and previous_request_ids are sent, previous_text will be ignored. A maximum of 3 request_ids can be sent.
next_request_ids : typing.Optional[typing.Sequence[str]]
- A list of request_id of the samples that were generated before this generation. Can be used to improve the flow of prosody when splitting up a large task into multiple requests. The results will be best when the same model is used across the generations. In case both next_text and next_request_ids is send, next_text will be ignored. A maximum of 3 request_ids can be send.
+ A list of request_ids of the samples that come after this generation. next_request_ids is especially useful for maintaining the speech's continuity when regenerating a sample that had audio quality issues. For example, if you have generated 3 speech clips and want to improve clip 2, passing the request id of clip 3 as a next_request_id (and that of clip 1 as a previous_request_id) will help maintain natural flow in the combined speech. The results will be best when the same model is used across the generations. If both next_text and next_request_ids are sent, next_text will be ignored. A maximum of 3 request_ids can be sent.
use_pvc_as_ivc : typing.Optional[bool]
If true, we won't use PVC version of the voice for the generation but the IVC version. This is a temporary workaround for higher latency in PVC versions.
@@ -1313,7 +1310,7 @@ async def stream_with_timestamps(
Yields
------
- typing.AsyncIterator[TextToSpeechStreamWithTimestampsResponse]
+ typing.AsyncIterator[StreamingAudioChunkWithTimestampsResponseModel]
Stream of JSON objects containing audio chunks and character timing information
Examples
@@ -1381,9 +1378,9 @@ async def main() -> None:
if len(_text) == 0:
continue
yield typing.cast(
- TextToSpeechStreamWithTimestampsResponse,
+ StreamingAudioChunkWithTimestampsResponseModel,
construct_type(
- type_=TextToSpeechStreamWithTimestampsResponse, # type: ignore
+ type_=StreamingAudioChunkWithTimestampsResponseModel, # type: ignore
object_=json.loads(_text),
),
)
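
Consuming the renamed streaming model looks the same as before the rename. A sketch with the async client, assuming `StreamingAudioChunkWithTimestampsResponseModel` keeps the optional `audio_base_64`/`alignment` shape of the deleted `TextToSpeechStreamWithTimestampsResponse` (the new model's definition is not shown in this diff):

```python
import asyncio
import base64

from elevenlabs import AsyncElevenLabs


async def main() -> None:
    client = AsyncElevenLabs(api_key="YOUR_API_KEY")
    audio = bytearray()
    async for chunk in client.text_to_speech.stream_with_timestamps(
        voice_id="21m00Tcm4TlvDq8ikWAM",
        text="This is a test for the API of ElevenLabs.",
    ):
        # audio_base_64 is assumed optional, as on the replaced model.
        if chunk.audio_base_64:
            audio.extend(base64.b64decode(chunk.audio_base_64))
    with open("output.mp3", "wb") as f:
        f.write(bytes(audio))


asyncio.run(main())
```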
diff --git a/src/elevenlabs/text_to_speech/types/__init__.py b/src/elevenlabs/text_to_speech/types/__init__.py
index 527c7242..b05354ee 100644
--- a/src/elevenlabs/text_to_speech/types/__init__.py
+++ b/src/elevenlabs/text_to_speech/types/__init__.py
@@ -12,18 +12,10 @@
from .body_text_to_speech_with_timestamps_v_1_text_to_speech_voice_id_with_timestamps_post_apply_text_normalization import (
BodyTextToSpeechWithTimestampsV1TextToSpeechVoiceIdWithTimestampsPostApplyTextNormalization,
)
-from .text_to_speech_stream_with_timestamps_response import TextToSpeechStreamWithTimestampsResponse
-from .text_to_speech_stream_with_timestamps_response_alignment import TextToSpeechStreamWithTimestampsResponseAlignment
-from .text_to_speech_stream_with_timestamps_response_normalized_alignment import (
- TextToSpeechStreamWithTimestampsResponseNormalizedAlignment,
-)
__all__ = [
"BodyTextToSpeechStreamingV1TextToSpeechVoiceIdStreamPostApplyTextNormalization",
"BodyTextToSpeechStreamingWithTimestampsV1TextToSpeechVoiceIdStreamWithTimestampsPostApplyTextNormalization",
"BodyTextToSpeechV1TextToSpeechVoiceIdPostApplyTextNormalization",
"BodyTextToSpeechWithTimestampsV1TextToSpeechVoiceIdWithTimestampsPostApplyTextNormalization",
- "TextToSpeechStreamWithTimestampsResponse",
- "TextToSpeechStreamWithTimestampsResponseAlignment",
- "TextToSpeechStreamWithTimestampsResponseNormalizedAlignment",
]
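
Because the old per-endpoint types are deleted outright rather than aliased, imports from `elevenlabs.text_to_speech.types` will now fail. A minimal migration sketch, assuming the shared model re-exported from `elevenlabs.types` (see the export list later in this diff) is the intended replacement:

```python
# 1.51.0
# from elevenlabs.text_to_speech.types import TextToSpeechStreamWithTimestampsResponse

# 1.52.0
from elevenlabs.types import StreamingAudioChunkWithTimestampsResponseModel
```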
diff --git a/src/elevenlabs/text_to_speech/types/text_to_speech_stream_with_timestamps_response.py b/src/elevenlabs/text_to_speech/types/text_to_speech_stream_with_timestamps_response.py
deleted file mode 100644
index 07c25399..00000000
--- a/src/elevenlabs/text_to_speech/types/text_to_speech_stream_with_timestamps_response.py
+++ /dev/null
@@ -1,33 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-from ...core.unchecked_base_model import UncheckedBaseModel
-import typing_extensions
-import typing
-from ...core.serialization import FieldMetadata
-import pydantic
-from .text_to_speech_stream_with_timestamps_response_alignment import TextToSpeechStreamWithTimestampsResponseAlignment
-from .text_to_speech_stream_with_timestamps_response_normalized_alignment import (
- TextToSpeechStreamWithTimestampsResponseNormalizedAlignment,
-)
-from ...core.pydantic_utilities import IS_PYDANTIC_V2
-
-
-class TextToSpeechStreamWithTimestampsResponse(UncheckedBaseModel):
- audio_base_64: typing_extensions.Annotated[typing.Optional[str], FieldMetadata(alias="audio_base64")] = (
- pydantic.Field(default=None)
- )
- """
- Base64 encoded audio chunk
- """
-
- alignment: typing.Optional[TextToSpeechStreamWithTimestampsResponseAlignment] = None
- normalized_alignment: typing.Optional[TextToSpeechStreamWithTimestampsResponseNormalizedAlignment] = None
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
-
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/text_to_speech/types/text_to_speech_stream_with_timestamps_response_alignment.py b/src/elevenlabs/text_to_speech/types/text_to_speech_stream_with_timestamps_response_alignment.py
deleted file mode 100644
index f8230552..00000000
--- a/src/elevenlabs/text_to_speech/types/text_to_speech_stream_with_timestamps_response_alignment.py
+++ /dev/null
@@ -1,32 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-from ...core.unchecked_base_model import UncheckedBaseModel
-import typing
-import pydantic
-from ...core.pydantic_utilities import IS_PYDANTIC_V2
-
-
-class TextToSpeechStreamWithTimestampsResponseAlignment(UncheckedBaseModel):
- characters: typing.Optional[typing.List[str]] = pydantic.Field(default=None)
- """
- Array of individual characters from the input text
- """
-
- character_start_times_seconds: typing.Optional[typing.List[float]] = pydantic.Field(default=None)
- """
- Array of start times (in seconds) for each character
- """
-
- character_end_times_seconds: typing.Optional[typing.List[float]] = pydantic.Field(default=None)
- """
- Array of end times (in seconds) for each character
- """
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
-
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/text_to_speech/types/text_to_speech_stream_with_timestamps_response_normalized_alignment.py b/src/elevenlabs/text_to_speech/types/text_to_speech_stream_with_timestamps_response_normalized_alignment.py
deleted file mode 100644
index 2982e649..00000000
--- a/src/elevenlabs/text_to_speech/types/text_to_speech_stream_with_timestamps_response_normalized_alignment.py
+++ /dev/null
@@ -1,32 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-from ...core.unchecked_base_model import UncheckedBaseModel
-import typing
-import pydantic
-from ...core.pydantic_utilities import IS_PYDANTIC_V2
-
-
-class TextToSpeechStreamWithTimestampsResponseNormalizedAlignment(UncheckedBaseModel):
- characters: typing.Optional[typing.List[str]] = pydantic.Field(default=None)
- """
- Array of individual characters from the normalized text
- """
-
- character_start_times_seconds: typing.Optional[typing.List[float]] = pydantic.Field(default=None)
- """
- Array of start times (in seconds) for each normalized character
- """
-
- character_end_times_seconds: typing.Optional[typing.List[float]] = pydantic.Field(default=None)
- """
- Array of end times (in seconds) for each normalized character
- """
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
-
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/__init__.py b/src/elevenlabs/types/__init__.py
index fd51e3cc..818d153c 100644
--- a/src/elevenlabs/types/__init__.py
+++ b/src/elevenlabs/types/__init__.py
@@ -16,9 +16,9 @@
from .agent_config_override import AgentConfigOverride
from .agent_config_override_config import AgentConfigOverrideConfig
from .agent_metadata_response_model import AgentMetadataResponseModel
-from .agent_platform_settings import AgentPlatformSettings
+from .agent_platform_settings_request_model import AgentPlatformSettingsRequestModel
+from .agent_platform_settings_response_model import AgentPlatformSettingsResponseModel
from .agent_summary_response_model import AgentSummaryResponseModel
-from .agent_summary_response_model_access_level import AgentSummaryResponseModelAccessLevel
from .allowlist_item import AllowlistItem
from .array_json_schema_property import ArrayJsonSchemaProperty
from .array_json_schema_property_items import ArrayJsonSchemaPropertyItems
@@ -28,6 +28,8 @@
from .asr_quality import AsrQuality
from .audio_native_create_project_response_model import AudioNativeCreateProjectResponseModel
from .audio_native_edit_content_response_model import AudioNativeEditContentResponseModel
+from .audio_native_project_settings_response_model import AudioNativeProjectSettingsResponseModel
+from .audio_with_timestamps_response_model import AudioWithTimestampsResponseModel
from .auth_settings import AuthSettings
from .authorization_method import AuthorizationMethod
from .ban_reason_type import BanReasonType
@@ -57,11 +59,25 @@
from .chapter_statistics_response import ChapterStatisticsResponse
from .chapter_with_content_response_model import ChapterWithContentResponseModel
from .chapter_with_content_response_model_state import ChapterWithContentResponseModelState
+from .character_alignment_response_model import CharacterAlignmentResponseModel
from .client_event import ClientEvent
from .client_tool_config import ClientToolConfig
from .conv_ai_new_secret_config import ConvAiNewSecretConfig
from .conv_ai_secret_locator import ConvAiSecretLocator
from .conv_ai_stored_secret_config import ConvAiStoredSecretConfig
+from .conv_ai_stored_secret_dependencies import ConvAiStoredSecretDependencies
+from .conv_ai_stored_secret_dependencies_agents_item import (
+ ConvAiStoredSecretDependenciesAgentsItem,
+ ConvAiStoredSecretDependenciesAgentsItem_Available,
+ ConvAiStoredSecretDependenciesAgentsItem_Unknown,
+)
+from .conv_ai_stored_secret_dependencies_tools_item import (
+ ConvAiStoredSecretDependenciesToolsItem,
+ ConvAiStoredSecretDependenciesToolsItem_Available,
+ ConvAiStoredSecretDependenciesToolsItem_Unknown,
+)
+from .conv_ai_webhooks import ConvAiWebhooks
+from .conv_ai_workspace_stored_secret_config import ConvAiWorkspaceStoredSecretConfig
from .conversation_charging_common_model import ConversationChargingCommonModel
from .conversation_config import ConversationConfig
from .conversation_config_client_override import ConversationConfigClientOverride
@@ -82,6 +98,10 @@
from .conversation_initiation_client_data_dynamic_variables_value import (
ConversationInitiationClientDataDynamicVariablesValue,
)
+from .conversation_initiation_client_data_webhook import ConversationInitiationClientDataWebhook
+from .conversation_initiation_client_data_webhook_request_headers_value import (
+ ConversationInitiationClientDataWebhookRequestHeadersValue,
+)
from .conversation_signed_url_response_model import ConversationSignedUrlResponseModel
from .conversation_summary_response_model import ConversationSummaryResponseModel
from .conversation_summary_response_model_status import ConversationSummaryResponseModelStatus
@@ -93,9 +113,13 @@
from .currency import Currency
from .custom_llm import CustomLlm
from .data_collection_result_common_model import DataCollectionResultCommonModel
+from .delete_sample_response_model import DeleteSampleResponseModel
from .dependent_available_agent_identifier import DependentAvailableAgentIdentifier
from .dependent_available_agent_identifier_access_level import DependentAvailableAgentIdentifierAccessLevel
+from .dependent_available_tool_identifier import DependentAvailableToolIdentifier
+from .dependent_available_tool_identifier_access_level import DependentAvailableToolIdentifierAccessLevel
from .dependent_unknown_agent_identifier import DependentUnknownAgentIdentifier
+from .dependent_unknown_tool_identifier import DependentUnknownToolIdentifier
from .do_dubbing_response import DoDubbingResponse
from .dubbing_media_metadata import DubbingMediaMetadata
from .dubbing_metadata_response import DubbingMetadataResponse
@@ -121,7 +145,9 @@
from .get_agent_link_response_model import GetAgentLinkResponseModel
from .get_agent_response_model import GetAgentResponseModel
from .get_agents_page_response_model import GetAgentsPageResponseModel
+from .get_audio_native_project_settings_response_model import GetAudioNativeProjectSettingsResponseModel
from .get_chapters_response import GetChaptersResponse
+from .get_convai_settings_response_model import GetConvaiSettingsResponseModel
from .get_conversation_response_model import GetConversationResponseModel
from .get_conversation_response_model_status import GetConversationResponseModelStatus
from .get_conversations_page_response_model import GetConversationsPageResponseModel
@@ -150,6 +176,7 @@
from .get_pronunciation_dictionary_metadata_response import GetPronunciationDictionaryMetadataResponse
from .get_speech_history_response import GetSpeechHistoryResponse
from .get_voices_response import GetVoicesResponse
+from .get_workspace_secrets_response_model import GetWorkspaceSecretsResponseModel
from .history_alignment_response_model import HistoryAlignmentResponseModel
from .history_alignments_response_model import HistoryAlignmentsResponseModel
from .history_item import HistoryItem
@@ -178,6 +205,7 @@
from .object_json_schema_property_properties_value import ObjectJsonSchemaPropertyPropertiesValue
from .orb_avatar import OrbAvatar
from .output_format import OutputFormat
+from .paginated_listed_review_task_instance_model import PaginatedListedReviewTaskInstanceModel
from .phone_number_agent_info import PhoneNumberAgentInfo
from .podcast_bulletin_mode import PodcastBulletinMode
from .podcast_bulletin_mode_data import PodcastBulletinModeData
@@ -187,6 +215,7 @@
from .podcast_text_source import PodcastTextSource
from .podcast_url_source import PodcastUrlSource
from .post_agent_avatar_response_model import PostAgentAvatarResponseModel
+from .post_workspace_secret_response_model import PostWorkspaceSecretResponseModel
from .privacy_config import PrivacyConfig
from .profile_page_response_model import ProfilePageResponseModel
from .project_creation_meta_response_model import ProjectCreationMetaResponseModel
@@ -223,28 +252,42 @@
from .pronunciation_dictionary_version_response_model import PronunciationDictionaryVersionResponseModel
from .pydantic_pronunciation_dictionary_version_locator import PydanticPronunciationDictionaryVersionLocator
from .query_params_json_schema import QueryParamsJsonSchema
+from .quote_request_model import QuoteRequestModel
+from .quote_response_model import QuoteResponseModel
from .reader_resource_response_model import ReaderResourceResponseModel
from .reader_resource_response_model_resource_type import ReaderResourceResponseModelResourceType
from .recording_response import RecordingResponse
from .remove_pronunciation_dictionary_rules_response_model import RemovePronunciationDictionaryRulesResponseModel
+from .resource_access_info import ResourceAccessInfo
+from .resource_access_info_role import ResourceAccessInfoRole
+from .review_state import ReviewState
from .review_status import ReviewStatus
-from .safety import Safety
+from .review_task_instance_response_model import ReviewTaskInstanceResponseModel
+from .safety_common_model import SafetyCommonModel
from .safety_evaluation import SafetyEvaluation
+from .safety_response_model import SafetyResponseModel
from .safety_rule import SafetyRule
+from .secret_dependency_type import SecretDependencyType
from .speech_history_item_response import SpeechHistoryItemResponse
from .speech_history_item_response_model_source import SpeechHistoryItemResponseModelSource
from .speech_history_item_response_model_voice_category import SpeechHistoryItemResponseModelVoiceCategory
+from .speech_to_text_character_response_model import SpeechToTextCharacterResponseModel
from .speech_to_text_chunk_response_model import SpeechToTextChunkResponseModel
-from .speech_to_text_stream_response_model import SpeechToTextStreamResponseModel
from .speech_to_text_word_response_model import SpeechToTextWordResponseModel
from .speech_to_text_word_response_model_type import SpeechToTextWordResponseModelType
+from .streaming_audio_chunk_with_timestamps_response_model import StreamingAudioChunkWithTimestampsResponseModel
from .subscription import Subscription
from .subscription_response import SubscriptionResponse
from .subscription_response_model_billing_period import SubscriptionResponseModelBillingPeriod
from .subscription_response_model_character_refresh_period import SubscriptionResponseModelCharacterRefreshPeriod
from .subscription_response_model_currency import SubscriptionResponseModelCurrency
from .subscription_status import SubscriptionStatus
+from .subscription_usage_response_model import SubscriptionUsageResponseModel
from .system_tool_config import SystemToolConfig
+from .tag_kind import TagKind
+from .tag_model import TagModel
+from .task_instance_event_kind import TaskInstanceEventKind
+from .task_instance_event_response_model import TaskInstanceEventResponseModel
from .telephony_provider import TelephonyProvider
from .text_to_speech_as_stream_request import TextToSpeechAsStreamRequest
from .tool_request_model import ToolRequestModel
@@ -283,6 +326,7 @@
from .validation_error import ValidationError
from .validation_error_loc_item import ValidationErrorLocItem
from .verification_attempt_response import VerificationAttemptResponse
+from .verified_voice_language_response_model import VerifiedVoiceLanguageResponseModel
from .voice import Voice
from .voice_generation_parameter_option_response import VoiceGenerationParameterOptionResponse
from .voice_generation_parameter_response import VoiceGenerationParameterResponse
@@ -315,6 +359,7 @@
WidgetConfigResponseModelAvatar_Orb,
WidgetConfigResponseModelAvatar_Url,
)
+from .widget_expandable import WidgetExpandable
from .widget_feedback_mode import WidgetFeedbackMode
from .workspace_group_by_name_response_model import WorkspaceGroupByNameResponseModel
@@ -335,9 +380,9 @@
"AgentConfigOverride",
"AgentConfigOverrideConfig",
"AgentMetadataResponseModel",
- "AgentPlatformSettings",
+ "AgentPlatformSettingsRequestModel",
+ "AgentPlatformSettingsResponseModel",
"AgentSummaryResponseModel",
- "AgentSummaryResponseModelAccessLevel",
"AllowlistItem",
"ArrayJsonSchemaProperty",
"ArrayJsonSchemaPropertyItems",
@@ -347,6 +392,8 @@
"AsrQuality",
"AudioNativeCreateProjectResponseModel",
"AudioNativeEditContentResponseModel",
+ "AudioNativeProjectSettingsResponseModel",
+ "AudioWithTimestampsResponseModel",
"AuthSettings",
"AuthorizationMethod",
"BanReasonType",
@@ -370,11 +417,21 @@
"ChapterStatisticsResponse",
"ChapterWithContentResponseModel",
"ChapterWithContentResponseModelState",
+ "CharacterAlignmentResponseModel",
"ClientEvent",
"ClientToolConfig",
"ConvAiNewSecretConfig",
"ConvAiSecretLocator",
"ConvAiStoredSecretConfig",
+ "ConvAiStoredSecretDependencies",
+ "ConvAiStoredSecretDependenciesAgentsItem",
+ "ConvAiStoredSecretDependenciesAgentsItem_Available",
+ "ConvAiStoredSecretDependenciesAgentsItem_Unknown",
+ "ConvAiStoredSecretDependenciesToolsItem",
+ "ConvAiStoredSecretDependenciesToolsItem_Available",
+ "ConvAiStoredSecretDependenciesToolsItem_Unknown",
+ "ConvAiWebhooks",
+ "ConvAiWorkspaceStoredSecretConfig",
"ConversationChargingCommonModel",
"ConversationConfig",
"ConversationConfigClientOverride",
@@ -391,6 +448,8 @@
"ConversationInitiationClientData",
"ConversationInitiationClientDataConfig",
"ConversationInitiationClientDataDynamicVariablesValue",
+ "ConversationInitiationClientDataWebhook",
+ "ConversationInitiationClientDataWebhookRequestHeadersValue",
"ConversationSignedUrlResponseModel",
"ConversationSummaryResponseModel",
"ConversationSummaryResponseModelStatus",
@@ -402,9 +461,13 @@
"Currency",
"CustomLlm",
"DataCollectionResultCommonModel",
+ "DeleteSampleResponseModel",
"DependentAvailableAgentIdentifier",
"DependentAvailableAgentIdentifierAccessLevel",
+ "DependentAvailableToolIdentifier",
+ "DependentAvailableToolIdentifierAccessLevel",
"DependentUnknownAgentIdentifier",
+ "DependentUnknownToolIdentifier",
"DoDubbingResponse",
"DubbingMediaMetadata",
"DubbingMetadataResponse",
@@ -426,7 +489,9 @@
"GetAgentLinkResponseModel",
"GetAgentResponseModel",
"GetAgentsPageResponseModel",
+ "GetAudioNativeProjectSettingsResponseModel",
"GetChaptersResponse",
+ "GetConvaiSettingsResponseModel",
"GetConversationResponseModel",
"GetConversationResponseModelStatus",
"GetConversationsPageResponseModel",
@@ -451,6 +516,7 @@
"GetPronunciationDictionaryMetadataResponse",
"GetSpeechHistoryResponse",
"GetVoicesResponse",
+ "GetWorkspaceSecretsResponseModel",
"HistoryAlignmentResponseModel",
"HistoryAlignmentsResponseModel",
"HistoryItem",
@@ -479,6 +545,7 @@
"ObjectJsonSchemaPropertyPropertiesValue",
"OrbAvatar",
"OutputFormat",
+ "PaginatedListedReviewTaskInstanceModel",
"PhoneNumberAgentInfo",
"PodcastBulletinMode",
"PodcastBulletinModeData",
@@ -488,6 +555,7 @@
"PodcastTextSource",
"PodcastUrlSource",
"PostAgentAvatarResponseModel",
+ "PostWorkspaceSecretResponseModel",
"PrivacyConfig",
"ProfilePageResponseModel",
"ProjectCreationMetaResponseModel",
@@ -522,28 +590,42 @@
"PronunciationDictionaryVersionResponseModel",
"PydanticPronunciationDictionaryVersionLocator",
"QueryParamsJsonSchema",
+ "QuoteRequestModel",
+ "QuoteResponseModel",
"ReaderResourceResponseModel",
"ReaderResourceResponseModelResourceType",
"RecordingResponse",
"RemovePronunciationDictionaryRulesResponseModel",
+ "ResourceAccessInfo",
+ "ResourceAccessInfoRole",
+ "ReviewState",
"ReviewStatus",
- "Safety",
+ "ReviewTaskInstanceResponseModel",
+ "SafetyCommonModel",
"SafetyEvaluation",
+ "SafetyResponseModel",
"SafetyRule",
+ "SecretDependencyType",
"SpeechHistoryItemResponse",
"SpeechHistoryItemResponseModelSource",
"SpeechHistoryItemResponseModelVoiceCategory",
+ "SpeechToTextCharacterResponseModel",
"SpeechToTextChunkResponseModel",
- "SpeechToTextStreamResponseModel",
"SpeechToTextWordResponseModel",
"SpeechToTextWordResponseModelType",
+ "StreamingAudioChunkWithTimestampsResponseModel",
"Subscription",
"SubscriptionResponse",
"SubscriptionResponseModelBillingPeriod",
"SubscriptionResponseModelCharacterRefreshPeriod",
"SubscriptionResponseModelCurrency",
"SubscriptionStatus",
+ "SubscriptionUsageResponseModel",
"SystemToolConfig",
+ "TagKind",
+ "TagModel",
+ "TaskInstanceEventKind",
+ "TaskInstanceEventResponseModel",
"TelephonyProvider",
"TextToSpeechAsStreamRequest",
"ToolRequestModel",
@@ -576,6 +658,7 @@
"ValidationError",
"ValidationErrorLocItem",
"VerificationAttemptResponse",
+ "VerifiedVoiceLanguageResponseModel",
"Voice",
"VoiceGenerationParameterOptionResponse",
"VoiceGenerationParameterResponse",
@@ -604,6 +687,7 @@
"WidgetConfigResponseModelAvatar_Image",
"WidgetConfigResponseModelAvatar_Orb",
"WidgetConfigResponseModelAvatar_Url",
+ "WidgetExpandable",
"WidgetFeedbackMode",
"WorkspaceGroupByNameResponseModel",
]
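
Several renames in this export list are breaking for direct imports. A before/after sketch for two of them, based only on the names above (the split models themselves appear further down in this diff):

```python
# 1.51.0
# from elevenlabs.types import AgentPlatformSettings, Safety

# 1.52.0: platform settings and safety now have distinct request- and
# response-side models.
from elevenlabs.types import (
    AgentPlatformSettingsRequestModel,
    AgentPlatformSettingsResponseModel,
    SafetyCommonModel,    # used on the request-side settings
    SafetyResponseModel,  # used on the response-side settings
)
```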
diff --git a/src/elevenlabs/types/agent_platform_settings.py b/src/elevenlabs/types/agent_platform_settings_request_model.py
similarity index 89%
rename from src/elevenlabs/types/agent_platform_settings.py
rename to src/elevenlabs/types/agent_platform_settings_request_model.py
index a7f89b14..cbf41e45 100644
--- a/src/elevenlabs/types/agent_platform_settings.py
+++ b/src/elevenlabs/types/agent_platform_settings_request_model.py
@@ -9,13 +9,13 @@
from .conversation_initiation_client_data_config import ConversationInitiationClientDataConfig
from .agent_call_limits import AgentCallLimits
from .agent_ban import AgentBan
-from .safety import Safety
from .privacy_config import PrivacyConfig
+from .safety_common_model import SafetyCommonModel
from ..core.pydantic_utilities import IS_PYDANTIC_V2
import pydantic
-class AgentPlatformSettings(UncheckedBaseModel):
+class AgentPlatformSettingsRequestModel(UncheckedBaseModel):
auth: typing.Optional[AuthSettings] = None
evaluation: typing.Optional[EvaluationSettings] = None
widget: typing.Optional[WidgetConfig] = None
@@ -23,8 +23,8 @@ class AgentPlatformSettings(UncheckedBaseModel):
overrides: typing.Optional[ConversationInitiationClientDataConfig] = None
call_limits: typing.Optional[AgentCallLimits] = None
ban: typing.Optional[AgentBan] = None
- safety: typing.Optional[Safety] = None
privacy: typing.Optional[PrivacyConfig] = None
+ safety: typing.Optional[SafetyCommonModel] = None
if IS_PYDANTIC_V2:
model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
diff --git a/src/elevenlabs/types/agent_platform_settings_response_model.py b/src/elevenlabs/types/agent_platform_settings_response_model.py
new file mode 100644
index 00000000..bf11cd89
--- /dev/null
+++ b/src/elevenlabs/types/agent_platform_settings_response_model.py
@@ -0,0 +1,36 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+import typing
+from .auth_settings import AuthSettings
+from .evaluation_settings import EvaluationSettings
+from .widget_config import WidgetConfig
+from .literal_json_schema_property import LiteralJsonSchemaProperty
+from .conversation_initiation_client_data_config import ConversationInitiationClientDataConfig
+from .agent_call_limits import AgentCallLimits
+from .agent_ban import AgentBan
+from .privacy_config import PrivacyConfig
+from .safety_response_model import SafetyResponseModel
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import pydantic
+
+
+class AgentPlatformSettingsResponseModel(UncheckedBaseModel):
+ auth: typing.Optional[AuthSettings] = None
+ evaluation: typing.Optional[EvaluationSettings] = None
+ widget: typing.Optional[WidgetConfig] = None
+ data_collection: typing.Optional[typing.Dict[str, LiteralJsonSchemaProperty]] = None
+ overrides: typing.Optional[ConversationInitiationClientDataConfig] = None
+ call_limits: typing.Optional[AgentCallLimits] = None
+ ban: typing.Optional[AgentBan] = None
+ privacy: typing.Optional[PrivacyConfig] = None
+ safety: typing.Optional[SafetyResponseModel] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/agent_summary_response_model.py b/src/elevenlabs/types/agent_summary_response_model.py
index 9e19c6c1..fc9a631c 100644
--- a/src/elevenlabs/types/agent_summary_response_model.py
+++ b/src/elevenlabs/types/agent_summary_response_model.py
@@ -1,7 +1,7 @@
# This file was auto-generated by Fern from our API Definition.
from ..core.unchecked_base_model import UncheckedBaseModel
-from .agent_summary_response_model_access_level import AgentSummaryResponseModelAccessLevel
+from .resource_access_info import ResourceAccessInfo
from ..core.pydantic_utilities import IS_PYDANTIC_V2
import typing
import pydantic
@@ -11,7 +11,7 @@ class AgentSummaryResponseModel(UncheckedBaseModel):
agent_id: str
name: str
created_at_unix_secs: int
- access_level: AgentSummaryResponseModelAccessLevel
+ access_info: ResourceAccessInfo
if IS_PYDANTIC_V2:
model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
diff --git a/src/elevenlabs/types/agent_summary_response_model_access_level.py b/src/elevenlabs/types/agent_summary_response_model_access_level.py
deleted file mode 100644
index f555e924..00000000
--- a/src/elevenlabs/types/agent_summary_response_model_access_level.py
+++ /dev/null
@@ -1,5 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-AgentSummaryResponseModelAccessLevel = typing.Union[typing.Literal["admin", "editor", "viewer"], typing.Any]
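
Callers reading `access_level` off agent summaries will break: the field is replaced by a structured `access_info`. A hedged sketch — `ResourceAccessInfo` is not defined in this section, but the export list adds `ResourceAccessInfoRole` alongside it, so a `role` field is assumed here:

```python
from elevenlabs.types import AgentSummaryResponseModel


def describe(agent: AgentSummaryResponseModel) -> str:
    # 1.51.0: agent.access_level was a bare "admin" | "editor" | "viewer" literal.
    # 1.52.0: access_info is a ResourceAccessInfo; `role` is an assumed field.
    return f"{agent.name}: {agent.access_info.role}"
```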
diff --git a/src/elevenlabs/types/audio_native_project_settings_response_model.py b/src/elevenlabs/types/audio_native_project_settings_response_model.py
new file mode 100644
index 00000000..0b6c5742
--- /dev/null
+++ b/src/elevenlabs/types/audio_native_project_settings_response_model.py
@@ -0,0 +1,27 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import typing
+import pydantic
+
+
+class AudioNativeProjectSettingsResponseModel(UncheckedBaseModel):
+ title: str
+ image: str
+ author: str
+ small: bool
+ text_color: str
+ background_color: str
+ sessionization: int
+ audio_path: str
+ audio_url: str
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/audio_with_timestamps_response_model.py b/src/elevenlabs/types/audio_with_timestamps_response_model.py
new file mode 100644
index 00000000..beac8471
--- /dev/null
+++ b/src/elevenlabs/types/audio_with_timestamps_response_model.py
@@ -0,0 +1,35 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+import typing_extensions
+from ..core.serialization import FieldMetadata
+import pydantic
+import typing
+from .character_alignment_response_model import CharacterAlignmentResponseModel
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+
+
+class AudioWithTimestampsResponseModel(UncheckedBaseModel):
+ audio_base_64: typing_extensions.Annotated[str, FieldMetadata(alias="audio_base64")] = pydantic.Field()
+ """
+ Base64 encoded audio data
+ """
+
+ alignment: typing.Optional[CharacterAlignmentResponseModel] = pydantic.Field(default=None)
+ """
+ Timestamp information for each character in the original text
+ """
+
+ normalized_alignment: typing.Optional[CharacterAlignmentResponseModel] = pydantic.Field(default=None)
+ """
+ Timestamp information for each character in the normalized text
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/character_alignment_response_model.py b/src/elevenlabs/types/character_alignment_response_model.py
new file mode 100644
index 00000000..e493b6c5
--- /dev/null
+++ b/src/elevenlabs/types/character_alignment_response_model.py
@@ -0,0 +1,21 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+import typing
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import pydantic
+
+
+class CharacterAlignmentResponseModel(UncheckedBaseModel):
+ characters: typing.List[str]
+ character_start_times_seconds: typing.List[float]
+ character_end_times_seconds: typing.List[float]
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
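
The three arrays are index-aligned: `characters[i]` spans `character_start_times_seconds[i]` to `character_end_times_seconds[i]`. A small worked example with made-up timings:

```python
from elevenlabs.types import CharacterAlignmentResponseModel

alignment = CharacterAlignmentResponseModel(
    characters=["H", "i", "!"],
    character_start_times_seconds=[0.0, 0.12, 0.25],
    character_end_times_seconds=[0.12, 0.25, 0.40],
)

# Per-character durations fall out of pairing starts with ends.
for ch, start, end in zip(
    alignment.characters,
    alignment.character_start_times_seconds,
    alignment.character_end_times_seconds,
):
    print(f"{ch!r}: {end - start:.2f}s")
```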
diff --git a/src/elevenlabs/types/conv_ai_stored_secret_dependencies.py b/src/elevenlabs/types/conv_ai_stored_secret_dependencies.py
new file mode 100644
index 00000000..500d6af1
--- /dev/null
+++ b/src/elevenlabs/types/conv_ai_stored_secret_dependencies.py
@@ -0,0 +1,24 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+import typing
+from .conv_ai_stored_secret_dependencies_tools_item import ConvAiStoredSecretDependenciesToolsItem
+from .conv_ai_stored_secret_dependencies_agents_item import ConvAiStoredSecretDependenciesAgentsItem
+from .secret_dependency_type import SecretDependencyType
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import pydantic
+
+
+class ConvAiStoredSecretDependencies(UncheckedBaseModel):
+ tools: typing.List[ConvAiStoredSecretDependenciesToolsItem]
+ agents: typing.List[ConvAiStoredSecretDependenciesAgentsItem]
+ others: typing.List[SecretDependencyType]
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/conv_ai_stored_secret_dependencies_agents_item.py b/src/elevenlabs/types/conv_ai_stored_secret_dependencies_agents_item.py
new file mode 100644
index 00000000..136a828b
--- /dev/null
+++ b/src/elevenlabs/types/conv_ai_stored_secret_dependencies_agents_item.py
@@ -0,0 +1,46 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from __future__ import annotations
+from ..core.unchecked_base_model import UncheckedBaseModel
+import typing
+from .dependent_available_agent_identifier_access_level import DependentAvailableAgentIdentifierAccessLevel
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import pydantic
+import typing_extensions
+from ..core.unchecked_base_model import UnionMetadata
+
+
+class ConvAiStoredSecretDependenciesAgentsItem_Available(UncheckedBaseModel):
+ type: typing.Literal["available"] = "available"
+ id: str
+ name: str
+ created_at_unix_secs: int
+ access_level: DependentAvailableAgentIdentifierAccessLevel
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
+
+
+class ConvAiStoredSecretDependenciesAgentsItem_Unknown(UncheckedBaseModel):
+ type: typing.Literal["unknown"] = "unknown"
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
+
+
+ConvAiStoredSecretDependenciesAgentsItem = typing_extensions.Annotated[
+ typing.Union[ConvAiStoredSecretDependenciesAgentsItem_Available, ConvAiStoredSecretDependenciesAgentsItem_Unknown],
+ UnionMetadata(discriminant="type"),
+]
diff --git a/src/elevenlabs/types/conv_ai_stored_secret_dependencies_tools_item.py b/src/elevenlabs/types/conv_ai_stored_secret_dependencies_tools_item.py
new file mode 100644
index 00000000..6c802f72
--- /dev/null
+++ b/src/elevenlabs/types/conv_ai_stored_secret_dependencies_tools_item.py
@@ -0,0 +1,46 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from __future__ import annotations
+from ..core.unchecked_base_model import UncheckedBaseModel
+import typing
+from .dependent_available_tool_identifier_access_level import DependentAvailableToolIdentifierAccessLevel
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import pydantic
+import typing_extensions
+from ..core.unchecked_base_model import UnionMetadata
+
+
+class ConvAiStoredSecretDependenciesToolsItem_Available(UncheckedBaseModel):
+ type: typing.Literal["available"] = "available"
+ id: str
+ name: str
+ created_at_unix_secs: int
+ access_level: DependentAvailableToolIdentifierAccessLevel
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
+
+
+class ConvAiStoredSecretDependenciesToolsItem_Unknown(UncheckedBaseModel):
+ type: typing.Literal["unknown"] = "unknown"
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
+
+
+ConvAiStoredSecretDependenciesToolsItem = typing_extensions.Annotated[
+ typing.Union[ConvAiStoredSecretDependenciesToolsItem_Available, ConvAiStoredSecretDependenciesToolsItem_Unknown],
+ UnionMetadata(discriminant="type"),
+]
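
Both dependency unions are discriminated on the literal `type` field, so the natural way to consume them is `isinstance` narrowing against the variant classes before touching variant-specific fields. A sketch for the tools side:

```python
from elevenlabs.types import (
    ConvAiStoredSecretDependenciesToolsItem,
    ConvAiStoredSecretDependenciesToolsItem_Available,
)


def tool_label(item: ConvAiStoredSecretDependenciesToolsItem) -> str:
    if isinstance(item, ConvAiStoredSecretDependenciesToolsItem_Available):
        return f"{item.name} ({item.access_level})"
    return "unknown tool"  # ConvAiStoredSecretDependenciesToolsItem_Unknown
```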
diff --git a/src/elevenlabs/types/conv_ai_webhooks.py b/src/elevenlabs/types/conv_ai_webhooks.py
new file mode 100644
index 00000000..27839bfc
--- /dev/null
+++ b/src/elevenlabs/types/conv_ai_webhooks.py
@@ -0,0 +1,19 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+import typing
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import pydantic
+
+
+class ConvAiWebhooks(UncheckedBaseModel):
+ post_call_webhook_id: typing.Optional[str] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/conv_ai_workspace_stored_secret_config.py b/src/elevenlabs/types/conv_ai_workspace_stored_secret_config.py
new file mode 100644
index 00000000..04edf768
--- /dev/null
+++ b/src/elevenlabs/types/conv_ai_workspace_stored_secret_config.py
@@ -0,0 +1,23 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+import typing
+from .conv_ai_stored_secret_dependencies import ConvAiStoredSecretDependencies
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import pydantic
+
+
+class ConvAiWorkspaceStoredSecretConfig(UncheckedBaseModel):
+ type: typing.Literal["stored"] = "stored"
+ secret_id: str
+ name: str
+ used_by: ConvAiStoredSecretDependencies
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/conversation_initiation_client_data_config.py b/src/elevenlabs/types/conversation_initiation_client_data_config.py
index a92f1e28..97fd54b4 100644
--- a/src/elevenlabs/types/conversation_initiation_client_data_config.py
+++ b/src/elevenlabs/types/conversation_initiation_client_data_config.py
@@ -10,6 +10,7 @@
class ConversationInitiationClientDataConfig(UncheckedBaseModel):
conversation_config_override: typing.Optional[ConversationConfigClientOverrideConfig] = None
custom_llm_extra_body: typing.Optional[bool] = None
+ enable_conversation_initiation_client_data_from_webhook: typing.Optional[bool] = None
if IS_PYDANTIC_V2:
model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
diff --git a/src/elevenlabs/types/conversation_initiation_client_data_webhook.py b/src/elevenlabs/types/conversation_initiation_client_data_webhook.py
new file mode 100644
index 00000000..a1967790
--- /dev/null
+++ b/src/elevenlabs/types/conversation_initiation_client_data_webhook.py
@@ -0,0 +1,23 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+import typing
+from .conversation_initiation_client_data_webhook_request_headers_value import (
+ ConversationInitiationClientDataWebhookRequestHeadersValue,
+)
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import pydantic
+
+
+class ConversationInitiationClientDataWebhook(UncheckedBaseModel):
+ url: str
+ request_headers: typing.Dict[str, ConversationInitiationClientDataWebhookRequestHeadersValue]
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/conversation_initiation_client_data_webhook_request_headers_value.py b/src/elevenlabs/types/conversation_initiation_client_data_webhook_request_headers_value.py
new file mode 100644
index 00000000..aaa3a8ac
--- /dev/null
+++ b/src/elevenlabs/types/conversation_initiation_client_data_webhook_request_headers_value.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from .conv_ai_secret_locator import ConvAiSecretLocator
+
+ConversationInitiationClientDataWebhookRequestHeadersValue = typing.Union[str, ConvAiSecretLocator]
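
Header values may be either a literal string or a reference to a stored secret. A hedged construction sketch — `ConvAiSecretLocator`'s fields are not shown in this diff, so the `secret_id` keyword below is an assumption:

```python
from elevenlabs.types import (
    ConvAiSecretLocator,
    ConversationInitiationClientDataWebhook,
)

webhook = ConversationInitiationClientDataWebhook(
    url="https://example.com/conversation-init",
    request_headers={
        "X-Env": "production",                                  # plain string value
        "Authorization": ConvAiSecretLocator(secret_id="..."),  # assumed field name
    },
)
```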
diff --git a/src/elevenlabs/types/delete_sample_response_model.py b/src/elevenlabs/types/delete_sample_response_model.py
new file mode 100644
index 00000000..07e5b359
--- /dev/null
+++ b/src/elevenlabs/types/delete_sample_response_model.py
@@ -0,0 +1,22 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+import pydantic
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import typing
+
+
+class DeleteSampleResponseModel(UncheckedBaseModel):
+ status: str = pydantic.Field()
+ """
+    The status of the sample deletion request. If the request was successful, the status will be 'ok'. Otherwise, an error message with a 500 status code will be returned.
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/dependent_available_tool_identifier.py b/src/elevenlabs/types/dependent_available_tool_identifier.py
new file mode 100644
index 00000000..5db19958
--- /dev/null
+++ b/src/elevenlabs/types/dependent_available_tool_identifier.py
@@ -0,0 +1,23 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+from .dependent_available_tool_identifier_access_level import DependentAvailableToolIdentifierAccessLevel
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import typing
+import pydantic
+
+
+class DependentAvailableToolIdentifier(UncheckedBaseModel):
+ id: str
+ name: str
+ created_at_unix_secs: int
+ access_level: DependentAvailableToolIdentifierAccessLevel
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/dependent_available_tool_identifier_access_level.py b/src/elevenlabs/types/dependent_available_tool_identifier_access_level.py
new file mode 100644
index 00000000..0b22e39f
--- /dev/null
+++ b/src/elevenlabs/types/dependent_available_tool_identifier_access_level.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+DependentAvailableToolIdentifierAccessLevel = typing.Union[typing.Literal["admin", "editor", "viewer"], typing.Any]
diff --git a/src/elevenlabs/types/dependent_unknown_tool_identifier.py b/src/elevenlabs/types/dependent_unknown_tool_identifier.py
new file mode 100644
index 00000000..0bb875c7
--- /dev/null
+++ b/src/elevenlabs/types/dependent_unknown_tool_identifier.py
@@ -0,0 +1,22 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import typing
+import pydantic
+
+
+class DependentUnknownToolIdentifier(UncheckedBaseModel):
+ """
+ A model that represents an tool dependent on a knowledge base/tools
+ to which the user has no direct access.
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/get_agent_response_model.py b/src/elevenlabs/types/get_agent_response_model.py
index b23ea801..7c1ab6a9 100644
--- a/src/elevenlabs/types/get_agent_response_model.py
+++ b/src/elevenlabs/types/get_agent_response_model.py
@@ -7,8 +7,9 @@
from .conversational_config import ConversationalConfig
from .agent_metadata_response_model import AgentMetadataResponseModel
import typing
-from .agent_platform_settings import AgentPlatformSettings
+from .agent_platform_settings_response_model import AgentPlatformSettingsResponseModel
from .conv_ai_stored_secret_config import ConvAiStoredSecretConfig
+from .get_phone_number_response_model import GetPhoneNumberResponseModel
from ..core.pydantic_utilities import IS_PYDANTIC_V2
import pydantic
from ..core.pydantic_utilities import update_forward_refs
@@ -19,8 +20,9 @@ class GetAgentResponseModel(UncheckedBaseModel):
name: str
conversation_config: ConversationalConfig
metadata: AgentMetadataResponseModel
- platform_settings: typing.Optional[AgentPlatformSettings] = None
+ platform_settings: typing.Optional[AgentPlatformSettingsResponseModel] = None
secrets: typing.List[ConvAiStoredSecretConfig]
+ phone_numbers: typing.Optional[typing.List[GetPhoneNumberResponseModel]] = None
if IS_PYDANTIC_V2:
model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
diff --git a/src/elevenlabs/types/get_audio_native_project_settings_response_model.py b/src/elevenlabs/types/get_audio_native_project_settings_response_model.py
new file mode 100644
index 00000000..57be4dbd
--- /dev/null
+++ b/src/elevenlabs/types/get_audio_native_project_settings_response_model.py
@@ -0,0 +1,22 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+from .audio_native_project_settings_response_model import AudioNativeProjectSettingsResponseModel
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import typing
+import pydantic
+
+
+class GetAudioNativeProjectSettingsResponseModel(UncheckedBaseModel):
+ enabled: bool
+ snapshot_id: str
+ settings: AudioNativeProjectSettingsResponseModel
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/get_convai_settings_response_model.py b/src/elevenlabs/types/get_convai_settings_response_model.py
new file mode 100644
index 00000000..8810c5b2
--- /dev/null
+++ b/src/elevenlabs/types/get_convai_settings_response_model.py
@@ -0,0 +1,24 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+import typing
+from .conversation_initiation_client_data_webhook import ConversationInitiationClientDataWebhook
+from .conv_ai_webhooks import ConvAiWebhooks
+from .conv_ai_workspace_stored_secret_config import ConvAiWorkspaceStoredSecretConfig
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import pydantic
+
+
+class GetConvaiSettingsResponseModel(UncheckedBaseModel):
+ conversation_initiation_client_data_webhook: typing.Optional[ConversationInitiationClientDataWebhook] = None
+ webhooks: typing.Optional[ConvAiWebhooks] = None
+ secrets: typing.List[ConvAiWorkspaceStoredSecretConfig]
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/get_workspace_secrets_response_model.py b/src/elevenlabs/types/get_workspace_secrets_response_model.py
new file mode 100644
index 00000000..bf8d43ea
--- /dev/null
+++ b/src/elevenlabs/types/get_workspace_secrets_response_model.py
@@ -0,0 +1,20 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+import typing
+from .conv_ai_workspace_stored_secret_config import ConvAiWorkspaceStoredSecretConfig
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import pydantic
+
+
+class GetWorkspaceSecretsResponseModel(UncheckedBaseModel):
+ secrets: typing.List[ConvAiWorkspaceStoredSecretConfig]
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/invoice.py b/src/elevenlabs/types/invoice.py
index c2e6675b..2a9c9a10 100644
--- a/src/elevenlabs/types/invoice.py
+++ b/src/elevenlabs/types/invoice.py
@@ -1,14 +1,21 @@
# This file was auto-generated by Fern from our API Definition.
from ..core.unchecked_base_model import UncheckedBaseModel
+import pydantic
from ..core.pydantic_utilities import IS_PYDANTIC_V2
import typing
-import pydantic
class Invoice(UncheckedBaseModel):
- amount_due_cents: int
- next_payment_attempt_unix: int
+ amount_due_cents: int = pydantic.Field()
+ """
+ The amount due in cents.
+ """
+
+ next_payment_attempt_unix: int = pydantic.Field()
+ """
+ The Unix timestamp of the next payment attempt.
+ """
if IS_PYDANTIC_V2:
model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
diff --git a/src/elevenlabs/types/language_response.py b/src/elevenlabs/types/language_response.py
index deac6247..ae2648b9 100644
--- a/src/elevenlabs/types/language_response.py
+++ b/src/elevenlabs/types/language_response.py
@@ -1,14 +1,21 @@
# This file was auto-generated by Fern from our API Definition.
from ..core.unchecked_base_model import UncheckedBaseModel
+import pydantic
from ..core.pydantic_utilities import IS_PYDANTIC_V2
import typing
-import pydantic
class LanguageResponse(UncheckedBaseModel):
- language_id: str
- name: str
+ language_id: str = pydantic.Field()
+ """
+ The unique identifier of the language.
+ """
+
+ name: str = pydantic.Field()
+ """
+ The name of the language.
+ """
if IS_PYDANTIC_V2:
model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
diff --git a/src/elevenlabs/types/library_voice_response.py b/src/elevenlabs/types/library_voice_response.py
index 84de7efb..1cbfed94 100644
--- a/src/elevenlabs/types/library_voice_response.py
+++ b/src/elevenlabs/types/library_voice_response.py
@@ -5,6 +5,7 @@
import typing_extensions
from ..core.serialization import FieldMetadata
import typing
+from .verified_voice_language_response_model import VerifiedVoiceLanguageResponseModel
from ..core.pydantic_utilities import IS_PYDANTIC_V2
import pydantic
@@ -33,12 +34,14 @@ class LibraryVoiceResponse(UncheckedBaseModel):
free_users_allowed: bool
live_moderation_enabled: bool
featured: bool
+ verified_languages: typing.Optional[typing.List[VerifiedVoiceLanguageResponseModel]] = None
notice_period: typing.Optional[int] = None
instagram_username: typing.Optional[str] = None
twitter_username: typing.Optional[str] = None
youtube_username: typing.Optional[str] = None
tiktok_username: typing.Optional[str] = None
image_url: typing.Optional[str] = None
+ is_added_by_user: typing.Optional[bool] = None
if IS_PYDANTIC_V2:
model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
diff --git a/src/elevenlabs/types/model.py b/src/elevenlabs/types/model.py
index 74fb3815..41fbb0df 100644
--- a/src/elevenlabs/types/model.py
+++ b/src/elevenlabs/types/model.py
@@ -1,32 +1,99 @@
# This file was auto-generated by Fern from our API Definition.
from ..core.unchecked_base_model import UncheckedBaseModel
+import pydantic
import typing
from .language_response import LanguageResponse
from .model_rates_response_model import ModelRatesResponseModel
from .model_response_model_concurrency_group import ModelResponseModelConcurrencyGroup
from ..core.pydantic_utilities import IS_PYDANTIC_V2
-import pydantic
class Model(UncheckedBaseModel):
- model_id: str
- name: typing.Optional[str] = None
- can_be_finetuned: typing.Optional[bool] = None
- can_do_text_to_speech: typing.Optional[bool] = None
- can_do_voice_conversion: typing.Optional[bool] = None
- can_use_style: typing.Optional[bool] = None
- can_use_speaker_boost: typing.Optional[bool] = None
- serves_pro_voices: typing.Optional[bool] = None
- token_cost_factor: typing.Optional[float] = None
- description: typing.Optional[str] = None
- requires_alpha_access: typing.Optional[bool] = None
- max_characters_request_free_user: typing.Optional[int] = None
- max_characters_request_subscribed_user: typing.Optional[int] = None
- maximum_text_length_per_request: typing.Optional[int] = None
- languages: typing.Optional[typing.List[LanguageResponse]] = None
- model_rates: typing.Optional[ModelRatesResponseModel] = None
- concurrency_group: typing.Optional[ModelResponseModelConcurrencyGroup] = None
+ model_id: str = pydantic.Field()
+ """
+ The unique identifier of the model.
+ """
+
+ name: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ The name of the model.
+ """
+
+ can_be_finetuned: typing.Optional[bool] = pydantic.Field(default=None)
+ """
+ Whether the model can be finetuned.
+ """
+
+ can_do_text_to_speech: typing.Optional[bool] = pydantic.Field(default=None)
+ """
+ Whether the model can do text-to-speech.
+ """
+
+ can_do_voice_conversion: typing.Optional[bool] = pydantic.Field(default=None)
+ """
+ Whether the model can do voice conversion.
+ """
+
+ can_use_style: typing.Optional[bool] = pydantic.Field(default=None)
+ """
+ Whether the model can use style.
+ """
+
+ can_use_speaker_boost: typing.Optional[bool] = pydantic.Field(default=None)
+ """
+ Whether the model can use speaker boost.
+ """
+
+ serves_pro_voices: typing.Optional[bool] = pydantic.Field(default=None)
+ """
+ Whether the model serves pro voices.
+ """
+
+ token_cost_factor: typing.Optional[float] = pydantic.Field(default=None)
+ """
+ The cost factor for the model.
+ """
+
+ description: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ The description of the model.
+ """
+
+ requires_alpha_access: typing.Optional[bool] = pydantic.Field(default=None)
+ """
+ Whether the model requires alpha access.
+ """
+
+ max_characters_request_free_user: typing.Optional[int] = pydantic.Field(default=None)
+ """
+ The maximum number of characters that can be requested by a free user.
+ """
+
+ max_characters_request_subscribed_user: typing.Optional[int] = pydantic.Field(default=None)
+ """
+ The maximum number of characters that can be requested by a subscribed user.
+ """
+
+ maximum_text_length_per_request: typing.Optional[int] = pydantic.Field(default=None)
+ """
+ The maximum length of text that can be requested for this model.
+ """
+
+ languages: typing.Optional[typing.List[LanguageResponse]] = pydantic.Field(default=None)
+ """
+ The languages supported by the model.
+ """
+
+ model_rates: typing.Optional[ModelRatesResponseModel] = pydantic.Field(default=None)
+ """
+ The rates for the model.
+ """
+
+ concurrency_group: typing.Optional[ModelResponseModelConcurrencyGroup] = pydantic.Field(default=None)
+ """
+ The concurrency group for the model.
+ """
if IS_PYDANTIC_V2:
model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
diff --git a/src/elevenlabs/types/model_rates_response_model.py b/src/elevenlabs/types/model_rates_response_model.py
index a695d91b..fe0403f0 100644
--- a/src/elevenlabs/types/model_rates_response_model.py
+++ b/src/elevenlabs/types/model_rates_response_model.py
@@ -1,13 +1,16 @@
# This file was auto-generated by Fern from our API Definition.
from ..core.unchecked_base_model import UncheckedBaseModel
+import pydantic
from ..core.pydantic_utilities import IS_PYDANTIC_V2
import typing
-import pydantic
class ModelRatesResponseModel(UncheckedBaseModel):
- character_cost_multiplier: float
+ character_cost_multiplier: float = pydantic.Field()
+ """
+ The cost multiplier for characters.
+ """
if IS_PYDANTIC_V2:
model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
diff --git a/src/elevenlabs/types/moderation_status_response_model.py b/src/elevenlabs/types/moderation_status_response_model.py
index 288b3910..a5820019 100644
--- a/src/elevenlabs/types/moderation_status_response_model.py
+++ b/src/elevenlabs/types/moderation_status_response_model.py
@@ -1,23 +1,58 @@
# This file was auto-generated by Fern from our API Definition.
from ..core.unchecked_base_model import UncheckedBaseModel
+import pydantic
+import typing
from .moderation_status_response_model_safety_status import ModerationStatusResponseModelSafetyStatus
from .moderation_status_response_model_warning_status import ModerationStatusResponseModelWarningStatus
from ..core.pydantic_utilities import IS_PYDANTIC_V2
-import typing
-import pydantic
class ModerationStatusResponseModel(UncheckedBaseModel):
- is_in_probation: bool
- enterprise_check_nogo_voice: bool
- enterprise_check_block_nogo_voice: bool
- never_live_moderate: bool
- nogo_voice_similar_voice_upload_count: int
- enterprise_background_moderation_enabled: bool
- safety_status: ModerationStatusResponseModelSafetyStatus
- warning_status: ModerationStatusResponseModelWarningStatus
- on_watchlist: bool
+ is_in_probation: bool = pydantic.Field()
+ """
+ Whether the user is in probation.
+ """
+
+ enterprise_check_nogo_voice: bool = pydantic.Field()
+ """
+ Whether the user's enterprise check nogo voice is enabled.
+ """
+
+ enterprise_check_block_nogo_voice: bool = pydantic.Field()
+ """
+ Whether the user's enterprise check block nogo voice is enabled.
+ """
+
+ never_live_moderate: bool = pydantic.Field()
+ """
+ Whether the user's never live moderate is enabled.
+ """
+
+ nogo_voice_similar_voice_upload_count: int = pydantic.Field()
+ """
+ The number of similar voice uploads that have been blocked.
+ """
+
+ enterprise_background_moderation_enabled: bool = pydantic.Field()
+ """
+ Whether the user's enterprise background moderation is enabled.
+ """
+
+ safety_status: typing.Optional[ModerationStatusResponseModelSafetyStatus] = pydantic.Field(default=None)
+ """
+ The safety status of the user.
+ """
+
+ warning_status: typing.Optional[ModerationStatusResponseModelWarningStatus] = pydantic.Field(default=None)
+ """
+ The warning status of the user.
+ """
+
+ on_watchlist: bool = pydantic.Field()
+ """
+ Whether the user is on the watchlist.
+ """
if IS_PYDANTIC_V2:
model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
diff --git a/src/elevenlabs/types/paginated_listed_review_task_instance_model.py b/src/elevenlabs/types/paginated_listed_review_task_instance_model.py
new file mode 100644
index 00000000..0586fbb1
--- /dev/null
+++ b/src/elevenlabs/types/paginated_listed_review_task_instance_model.py
@@ -0,0 +1,21 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+import typing
+from .review_task_instance_response_model import ReviewTaskInstanceResponseModel
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import pydantic
+
+
+class PaginatedListedReviewTaskInstanceModel(UncheckedBaseModel):
+ review_tasks: typing.List[ReviewTaskInstanceResponseModel]
+ cursor: typing.Optional[str] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/post_workspace_secret_response_model.py b/src/elevenlabs/types/post_workspace_secret_response_model.py
new file mode 100644
index 00000000..abae6225
--- /dev/null
+++ b/src/elevenlabs/types/post_workspace_secret_response_model.py
@@ -0,0 +1,21 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+import typing
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import pydantic
+
+
+class PostWorkspaceSecretResponseModel(UncheckedBaseModel):
+ type: typing.Literal["stored"] = "stored"
+ secret_id: str
+ name: str
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/quote_request_model.py b/src/elevenlabs/types/quote_request_model.py
new file mode 100644
index 00000000..43dff3b0
--- /dev/null
+++ b/src/elevenlabs/types/quote_request_model.py
@@ -0,0 +1,22 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import typing
+import pydantic
+
+
+class QuoteRequestModel(UncheckedBaseModel):
+ content_hash: str
+ duration_s: float
+ speaker_count: int
+ language: str
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/quote_response_model.py b/src/elevenlabs/types/quote_response_model.py
new file mode 100644
index 00000000..c0f749b3
--- /dev/null
+++ b/src/elevenlabs/types/quote_response_model.py
@@ -0,0 +1,24 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+import datetime as dt
+from .quote_request_model import QuoteRequestModel
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import typing
+import pydantic
+
+
+class QuoteResponseModel(UncheckedBaseModel):
+ quote_usd: float
+ valid_until: dt.datetime
+ request: QuoteRequestModel
+ quote_token: str
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/resource_access_info.py b/src/elevenlabs/types/resource_access_info.py
new file mode 100644
index 00000000..1bf2f6db
--- /dev/null
+++ b/src/elevenlabs/types/resource_access_info.py
@@ -0,0 +1,23 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+from .resource_access_info_role import ResourceAccessInfoRole
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import typing
+import pydantic
+
+
+class ResourceAccessInfo(UncheckedBaseModel):
+ is_creator: bool
+ creator_name: str
+ creator_email: str
+ role: ResourceAccessInfoRole
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/resource_access_info_role.py b/src/elevenlabs/types/resource_access_info_role.py
new file mode 100644
index 00000000..1281fe4a
--- /dev/null
+++ b/src/elevenlabs/types/resource_access_info_role.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+ResourceAccessInfoRole = typing.Union[typing.Literal["admin", "editor", "viewer"], typing.Any]
diff --git a/src/elevenlabs/types/review_state.py b/src/elevenlabs/types/review_state.py
new file mode 100644
index 00000000..721cb591
--- /dev/null
+++ b/src/elevenlabs/types/review_state.py
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+ReviewState = typing.Union[
+ typing.Literal["unclaimed", "claimed", "submitted", "done", "rejected", "in_progress"], typing.Any
+]
diff --git a/src/elevenlabs/types/review_task_instance_response_model.py b/src/elevenlabs/types/review_task_instance_response_model.py
new file mode 100644
index 00000000..0bea3547
--- /dev/null
+++ b/src/elevenlabs/types/review_task_instance_response_model.py
@@ -0,0 +1,25 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+from .review_state import ReviewState
+import typing
+from .task_instance_event_response_model import TaskInstanceEventResponseModel
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import pydantic
+
+
+class ReviewTaskInstanceResponseModel(UncheckedBaseModel):
+ id: str
+ owner_id: str
+ description_id: str
+ state: ReviewState
+ events: typing.List[TaskInstanceEventResponseModel]
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/safety.py b/src/elevenlabs/types/safety_common_model.py
similarity index 94%
rename from src/elevenlabs/types/safety.py
rename to src/elevenlabs/types/safety_common_model.py
index d5980e39..3cb0287b 100644
--- a/src/elevenlabs/types/safety.py
+++ b/src/elevenlabs/types/safety_common_model.py
@@ -7,7 +7,7 @@
import pydantic
-class Safety(UncheckedBaseModel):
+class SafetyCommonModel(UncheckedBaseModel):
"""
Safety object that has the information of safety evaluations based on used voice.
"""
diff --git a/src/elevenlabs/types/safety_response_model.py b/src/elevenlabs/types/safety_response_model.py
new file mode 100644
index 00000000..ca09058e
--- /dev/null
+++ b/src/elevenlabs/types/safety_response_model.py
@@ -0,0 +1,20 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+import typing
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import pydantic
+
+
+class SafetyResponseModel(UncheckedBaseModel):
+ is_blocked_ivc: typing.Optional[bool] = None
+ is_blocked_non_ivc: typing.Optional[bool] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/secret_dependency_type.py b/src/elevenlabs/types/secret_dependency_type.py
new file mode 100644
index 00000000..1a2f9f49
--- /dev/null
+++ b/src/elevenlabs/types/secret_dependency_type.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+SecretDependencyType = typing.Literal["conversation_initiation_webhook"]
diff --git a/src/elevenlabs/types/speech_to_text_character_response_model.py b/src/elevenlabs/types/speech_to_text_character_response_model.py
new file mode 100644
index 00000000..c3caec22
--- /dev/null
+++ b/src/elevenlabs/types/speech_to_text_character_response_model.py
@@ -0,0 +1,32 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+import pydantic
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import typing
+
+
+class SpeechToTextCharacterResponseModel(UncheckedBaseModel):
+ text: str = pydantic.Field()
+ """
+ The character that was transcribed.
+ """
+
+ start: float = pydantic.Field()
+ """
+ The start time of the character in seconds.
+ """
+
+ end: float = pydantic.Field()
+ """
+ The end time of the character in seconds.
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/speech_to_text_stream_response_model.py b/src/elevenlabs/types/speech_to_text_stream_response_model.py
deleted file mode 100644
index ba69b8b1..00000000
--- a/src/elevenlabs/types/speech_to_text_stream_response_model.py
+++ /dev/null
@@ -1,49 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-from ..core.unchecked_base_model import UncheckedBaseModel
-import pydantic
-import typing
-from .speech_to_text_word_response_model import SpeechToTextWordResponseModel
-from ..core.pydantic_utilities import IS_PYDANTIC_V2
-
-
-class SpeechToTextStreamResponseModel(UncheckedBaseModel):
- """
-
- The streaming endpoint returns a sequence of these chunks, each separated by double newlines.
- Example stream (formatted for readability):
-
- {"language_code": "en", "language_probability": 0.0, "text": "This is an example", "words": [...]}\n\n
- {"language_code": "en", "language_probability": 0.0, "text": "of a streamed transcription", "words": [...]}\n\n
- {"language_code": "en", "language_probability": 1.0, "text": "with each chunk separated by double newlines.", "words": [...]}
-
- """
-
- language_code: str = pydantic.Field()
- """
- The detected language code (e.g. 'eng' for English).
- """
-
- language_probability: float = pydantic.Field()
- """
- The confidence score of the language detection (0 to 1).
- """
-
- text: str = pydantic.Field()
- """
- The raw text of the transcription.
- """
-
- words: typing.List[SpeechToTextWordResponseModel] = pydantic.Field()
- """
- List of words with their timing information.
- """
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
-
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/speech_to_text_word_response_model.py b/src/elevenlabs/types/speech_to_text_word_response_model.py
index 0aff35f2..5de12fcc 100644
--- a/src/elevenlabs/types/speech_to_text_word_response_model.py
+++ b/src/elevenlabs/types/speech_to_text_word_response_model.py
@@ -4,6 +4,7 @@
import pydantic
import typing
from .speech_to_text_word_response_model_type import SpeechToTextWordResponseModelType
+from .speech_to_text_character_response_model import SpeechToTextCharacterResponseModel
from ..core.pydantic_utilities import IS_PYDANTIC_V2
@@ -37,6 +38,11 @@ class SpeechToTextWordResponseModel(UncheckedBaseModel):
Unique identifier for the speaker of this word.
"""
+ characters: typing.Optional[typing.List[SpeechToTextCharacterResponseModel]] = pydantic.Field(default=None)
+ """
+ The characters that make up the word and their timing information.
+ """
+
if IS_PYDANTIC_V2:
model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
else:
diff --git a/src/elevenlabs/types/streaming_audio_chunk_with_timestamps_response_model.py b/src/elevenlabs/types/streaming_audio_chunk_with_timestamps_response_model.py
new file mode 100644
index 00000000..3c285ebe
--- /dev/null
+++ b/src/elevenlabs/types/streaming_audio_chunk_with_timestamps_response_model.py
@@ -0,0 +1,35 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+import typing_extensions
+from ..core.serialization import FieldMetadata
+import pydantic
+import typing
+from .character_alignment_response_model import CharacterAlignmentResponseModel
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+
+
+class StreamingAudioChunkWithTimestampsResponseModel(UncheckedBaseModel):
+ audio_base_64: typing_extensions.Annotated[str, FieldMetadata(alias="audio_base64")] = pydantic.Field()
+ """
+ Base64 encoded audio data
+ """
+
+ alignment: typing.Optional[CharacterAlignmentResponseModel] = pydantic.Field(default=None)
+ """
+ Timestamp information for each character in the original text
+ """
+
+ normalized_alignment: typing.Optional[CharacterAlignmentResponseModel] = pydantic.Field(default=None)
+ """
+ Timestamp information for each character in the normalized text
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/subscription.py b/src/elevenlabs/types/subscription.py
index 40ed3f0f..275bef66 100644
--- a/src/elevenlabs/types/subscription.py
+++ b/src/elevenlabs/types/subscription.py
@@ -1,6 +1,7 @@
# This file was auto-generated by Fern from our API Definition.
from ..core.unchecked_base_model import UncheckedBaseModel
+import pydantic
import typing
from .extended_subscription_response_model_currency import ExtendedSubscriptionResponseModelCurrency
from .subscription_status import SubscriptionStatus
@@ -10,30 +11,110 @@
)
from .invoice import Invoice
from ..core.pydantic_utilities import IS_PYDANTIC_V2
-import pydantic
class Subscription(UncheckedBaseModel):
- tier: str
- character_count: int
- character_limit: int
- can_extend_character_limit: bool
- allowed_to_extend_character_limit: bool
- next_character_count_reset_unix: int
- voice_slots_used: typing.Optional[int] = None
- voice_limit: int
- max_voice_add_edits: typing.Optional[int] = None
- voice_add_edit_counter: typing.Optional[int] = None
- professional_voice_limit: int
- can_extend_voice_limit: bool
- can_use_instant_voice_cloning: bool
- can_use_professional_voice_cloning: bool
- currency: typing.Optional[ExtendedSubscriptionResponseModelCurrency] = None
- status: typing.Optional[SubscriptionStatus] = None
- billing_period: typing.Optional[ExtendedSubscriptionResponseModelBillingPeriod] = None
- character_refresh_period: typing.Optional[ExtendedSubscriptionResponseModelCharacterRefreshPeriod] = None
- next_invoice: typing.Optional[Invoice] = None
- has_open_invoices: typing.Optional[bool] = None
+ tier: str = pydantic.Field()
+ """
+ The tier of the user's subscription.
+ """
+
+ character_count: int = pydantic.Field()
+ """
+ The number of characters used by the user.
+ """
+
+ character_limit: int = pydantic.Field()
+ """
+ The maximum number of characters allowed in the current billing period.
+ """
+
+ can_extend_character_limit: bool = pydantic.Field()
+ """
+ Whether the user can extend their character limit.
+ """
+
+ allowed_to_extend_character_limit: bool = pydantic.Field()
+ """
+ Whether the user is allowed to extend their character limit.
+ """
+
+ next_character_count_reset_unix: int = pydantic.Field()
+ """
+ The Unix timestamp of the next character count reset.
+ """
+
+ voice_slots_used: typing.Optional[int] = pydantic.Field(default=None)
+ """
+ The number of voice slots used by the user.
+ """
+
+ voice_limit: int = pydantic.Field()
+ """
+ The maximum number of voice slots allowed for the user.
+ """
+
+ max_voice_add_edits: typing.Optional[int] = pydantic.Field(default=None)
+ """
+ The maximum number of voice add/edits allowed for the user.
+ """
+
+ voice_add_edit_counter: typing.Optional[int] = pydantic.Field(default=None)
+ """
+ The number of voice add/edits used by the user.
+ """
+
+ professional_voice_limit: int = pydantic.Field()
+ """
+ The maximum number of professional voices allowed for the user.
+ """
+
+ can_extend_voice_limit: bool = pydantic.Field()
+ """
+ Whether the user can extend their voice limit.
+ """
+
+ can_use_instant_voice_cloning: bool = pydantic.Field()
+ """
+ Whether the user can use instant voice cloning.
+ """
+
+ can_use_professional_voice_cloning: bool = pydantic.Field()
+ """
+ Whether the user can use professional voice cloning.
+ """
+
+ currency: typing.Optional[ExtendedSubscriptionResponseModelCurrency] = pydantic.Field(default=None)
+ """
+ The currency of the user's subscription.
+ """
+
+ status: typing.Optional[SubscriptionStatus] = pydantic.Field(default=None)
+ """
+ The status of the user's subscription.
+ """
+
+ billing_period: typing.Optional[ExtendedSubscriptionResponseModelBillingPeriod] = pydantic.Field(default=None)
+ """
+ The billing period of the user's subscription.
+ """
+
+ character_refresh_period: typing.Optional[ExtendedSubscriptionResponseModelCharacterRefreshPeriod] = pydantic.Field(
+ default=None
+ )
+ """
+ The character refresh period of the user's subscription.
+ """
+
+ next_invoice: typing.Optional[Invoice] = pydantic.Field(default=None)
+ """
+ The next invoice for the user.
+ """
+
+ has_open_invoices: typing.Optional[bool] = pydantic.Field(default=None)
+ """
+ Whether the user has open invoices.
+ """
if IS_PYDANTIC_V2:
model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
diff --git a/src/elevenlabs/types/subscription_response.py b/src/elevenlabs/types/subscription_response.py
index 20a8f7e4..1d2b059f 100644
--- a/src/elevenlabs/types/subscription_response.py
+++ b/src/elevenlabs/types/subscription_response.py
@@ -1,34 +1,107 @@
# This file was auto-generated by Fern from our API Definition.
from ..core.unchecked_base_model import UncheckedBaseModel
+import pydantic
+import typing
from .subscription_response_model_currency import SubscriptionResponseModelCurrency
from .subscription_status import SubscriptionStatus
from .subscription_response_model_billing_period import SubscriptionResponseModelBillingPeriod
from .subscription_response_model_character_refresh_period import SubscriptionResponseModelCharacterRefreshPeriod
from ..core.pydantic_utilities import IS_PYDANTIC_V2
-import typing
-import pydantic
class SubscriptionResponse(UncheckedBaseModel):
- tier: str
- character_count: int
- character_limit: int
- can_extend_character_limit: bool
- allowed_to_extend_character_limit: bool
- next_character_count_reset_unix: int
- voice_slots_used: int
- voice_limit: int
- max_voice_add_edits: int
- voice_add_edit_counter: int
- professional_voice_limit: int
- can_extend_voice_limit: bool
- can_use_instant_voice_cloning: bool
- can_use_professional_voice_cloning: bool
- currency: SubscriptionResponseModelCurrency
- status: SubscriptionStatus
- billing_period: SubscriptionResponseModelBillingPeriod
- character_refresh_period: SubscriptionResponseModelCharacterRefreshPeriod
+ tier: str = pydantic.Field()
+ """
+ The tier of the user's subscription.
+ """
+
+ character_count: int = pydantic.Field()
+ """
+ The number of characters used by the user.
+ """
+
+ character_limit: int = pydantic.Field()
+ """
+ The maximum number of characters allowed in the current billing period.
+ """
+
+ can_extend_character_limit: bool = pydantic.Field()
+ """
+ Whether the user can extend their character limit.
+ """
+
+ allowed_to_extend_character_limit: bool = pydantic.Field()
+ """
+ Whether the user is allowed to extend their character limit.
+ """
+
+ next_character_count_reset_unix: typing.Optional[int] = pydantic.Field(default=None)
+ """
+ The Unix timestamp of the next character count reset.
+ """
+
+ voice_slots_used: int = pydantic.Field()
+ """
+ The number of voice slots used by the user.
+ """
+
+ voice_limit: int = pydantic.Field()
+ """
+ The maximum number of voice slots allowed for the user.
+ """
+
+ max_voice_add_edits: typing.Optional[int] = pydantic.Field(default=None)
+ """
+ The maximum number of voice add/edits allowed for the user.
+ """
+
+ voice_add_edit_counter: int = pydantic.Field()
+ """
+ The number of voice add/edits used by the user.
+ """
+
+ professional_voice_limit: int = pydantic.Field()
+ """
+ The maximum number of professional voices allowed for the user.
+ """
+
+ can_extend_voice_limit: bool = pydantic.Field()
+ """
+ Whether the user can extend their voice limit.
+ """
+
+ can_use_instant_voice_cloning: bool = pydantic.Field()
+ """
+ Whether the user can use instant voice cloning.
+ """
+
+ can_use_professional_voice_cloning: bool = pydantic.Field()
+ """
+ Whether the user can use professional voice cloning.
+ """
+
+ currency: typing.Optional[SubscriptionResponseModelCurrency] = pydantic.Field(default=None)
+ """
+ The currency of the user's subscription.
+ """
+
+ status: SubscriptionStatus = pydantic.Field()
+ """
+ The status of the user's subscription.
+ """
+
+ billing_period: typing.Optional[SubscriptionResponseModelBillingPeriod] = pydantic.Field(default=None)
+ """
+ The billing period of the user's subscription.
+ """
+
+ character_refresh_period: typing.Optional[SubscriptionResponseModelCharacterRefreshPeriod] = pydantic.Field(
+ default=None
+ )
+ """
+ The character refresh period of the user's subscription.
+ """
if IS_PYDANTIC_V2:
model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
diff --git a/src/elevenlabs/types/subscription_usage_response_model.py b/src/elevenlabs/types/subscription_usage_response_model.py
new file mode 100644
index 00000000..0ebf0aa4
--- /dev/null
+++ b/src/elevenlabs/types/subscription_usage_response_model.py
@@ -0,0 +1,57 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+import pydantic
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import typing
+
+
+class SubscriptionUsageResponseModel(UncheckedBaseModel):
+ rollover_credits_quota: int = pydantic.Field()
+ """
+ The rollover credits quota.
+ """
+
+ subscription_cycle_credits_quota: int = pydantic.Field()
+ """
+ The subscription cycle credits quota.
+ """
+
+ manually_gifted_credits_quota: int = pydantic.Field()
+ """
+ The manually gifted credits quota.
+ """
+
+ rollover_credits_used: int = pydantic.Field()
+ """
+ The rollover credits used.
+ """
+
+ subscription_cycle_credits_used: int = pydantic.Field()
+ """
+ The subscription cycle credits used.
+ """
+
+ manually_gifted_credits_used: int = pydantic.Field()
+ """
+ The manually gifted credits used.
+ """
+
+ paid_usage_based_credits_used: int = pydantic.Field()
+ """
+ The paid usage based credits used.
+ """
+
+ actual_reported_credits: int = pydantic.Field()
+ """
+ The actual reported credits.
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/tag_kind.py b/src/elevenlabs/types/tag_kind.py
new file mode 100644
index 00000000..f5c47a0a
--- /dev/null
+++ b/src/elevenlabs/types/tag_kind.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+TagKind = typing.Union[typing.Literal["lang", "job_type"], typing.Any]
diff --git a/src/elevenlabs/types/tag_model.py b/src/elevenlabs/types/tag_model.py
new file mode 100644
index 00000000..283fd3f4
--- /dev/null
+++ b/src/elevenlabs/types/tag_model.py
@@ -0,0 +1,21 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+from .tag_kind import TagKind
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import typing
+import pydantic
+
+
+class TagModel(UncheckedBaseModel):
+ kind: TagKind
+ value: str
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/task_instance_event_kind.py b/src/elevenlabs/types/task_instance_event_kind.py
new file mode 100644
index 00000000..17c4a8a6
--- /dev/null
+++ b/src/elevenlabs/types/task_instance_event_kind.py
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+TaskInstanceEventKind = typing.Union[
+ typing.Literal["claim", "assign", "reject", "submit", "approve", "done"], typing.Any
+]
diff --git a/src/elevenlabs/types/task_instance_event_response_model.py b/src/elevenlabs/types/task_instance_event_response_model.py
new file mode 100644
index 00000000..c000f54f
--- /dev/null
+++ b/src/elevenlabs/types/task_instance_event_response_model.py
@@ -0,0 +1,23 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+import datetime as dt
+from .task_instance_event_kind import TaskInstanceEventKind
+import typing
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import pydantic
+
+
+class TaskInstanceEventResponseModel(UncheckedBaseModel):
+ timestamp: dt.datetime
+ kind: TaskInstanceEventKind
+ meta: typing.Optional[typing.Dict[str, str]] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/user.py b/src/elevenlabs/types/user.py
index 84d8f54f..18c03a4d 100644
--- a/src/elevenlabs/types/user.py
+++ b/src/elevenlabs/types/user.py
@@ -1,26 +1,73 @@
# This file was auto-generated by Fern from our API Definition.
from ..core.unchecked_base_model import UncheckedBaseModel
+import pydantic
from .subscription_response import SubscriptionResponse
import typing
from ..core.pydantic_utilities import IS_PYDANTIC_V2
-import pydantic
class User(UncheckedBaseModel):
- user_id: str
- subscription: SubscriptionResponse
- subscription_extras: typing.Optional[typing.Any] = None
- is_new_user: bool
- xi_api_key: str
- can_use_delayed_payment_methods: bool
- is_onboarding_completed: bool
- is_onboarding_checklist_completed: bool
- first_name: typing.Optional[str] = None
- is_api_key_hashed: typing.Optional[bool] = None
- xi_api_key_preview: typing.Optional[str] = None
- referral_link_code: typing.Optional[str] = None
- partnerstack_partner_default_link: typing.Optional[str] = None
+ user_id: str = pydantic.Field()
+ """
+ The unique identifier of the user.
+ """
+
+ subscription: SubscriptionResponse = pydantic.Field()
+ """
+ Details of the user's subscription.
+ """
+
+ subscription_extras: typing.Optional[typing.Optional[typing.Any]] = None
+ is_new_user: bool = pydantic.Field()
+ """
+ Whether the user is new.
+ """
+
+ xi_api_key: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ The API key of the user.
+ """
+
+ can_use_delayed_payment_methods: bool = pydantic.Field()
+ """
+ Whether the user can use delayed payment methods.
+ """
+
+ is_onboarding_completed: bool = pydantic.Field()
+ """
+ Whether the user's onboarding is completed.
+ """
+
+ is_onboarding_checklist_completed: bool = pydantic.Field()
+ """
+ Whether the user's onboarding checklist is completed.
+ """
+
+ first_name: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ First name of the user.
+ """
+
+ is_api_key_hashed: typing.Optional[bool] = pydantic.Field(default=None)
+ """
+ Whether the user's API key is hashed.
+ """
+
+ xi_api_key_preview: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ The preview of the user's API key.
+ """
+
+ referral_link_code: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ The referral link code of the user.
+ """
+
+ partnerstack_partner_default_link: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ The Partnerstack partner default link of the user.
+ """
if IS_PYDANTIC_V2:
model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
diff --git a/src/elevenlabs/types/verified_voice_language_response_model.py b/src/elevenlabs/types/verified_voice_language_response_model.py
new file mode 100644
index 00000000..97151b4f
--- /dev/null
+++ b/src/elevenlabs/types/verified_voice_language_response_model.py
@@ -0,0 +1,21 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+import typing
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import pydantic
+
+
+class VerifiedVoiceLanguageResponseModel(UncheckedBaseModel):
+ language: str
+ model_id: str
+ accent: typing.Optional[str] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/voice.py b/src/elevenlabs/types/voice.py
index 08ee9932..2c068214 100644
--- a/src/elevenlabs/types/voice.py
+++ b/src/elevenlabs/types/voice.py
@@ -7,6 +7,7 @@
from .fine_tuning_response import FineTuningResponse
from .voice_settings import VoiceSettings
from .voice_sharing_response import VoiceSharingResponse
+from .verified_voice_language_response_model import VerifiedVoiceLanguageResponseModel
from .voice_response_model_safety_control import VoiceResponseModelSafetyControl
from .voice_verification_response import VoiceVerificationResponse
from ..core.pydantic_utilities import IS_PYDANTIC_V2
@@ -26,6 +27,7 @@ class Voice(UncheckedBaseModel):
settings: typing.Optional[VoiceSettings] = None
sharing: typing.Optional[VoiceSharingResponse] = None
high_quality_base_model_ids: typing.Optional[typing.List[str]] = None
+ verified_languages: typing.Optional[typing.List[VerifiedVoiceLanguageResponseModel]] = None
safety_control: typing.Optional[VoiceResponseModelSafetyControl] = None
voice_verification: typing.Optional[VoiceVerificationResponse] = None
permission_on_resource: typing.Optional[str] = None
diff --git a/src/elevenlabs/types/voice_settings.py b/src/elevenlabs/types/voice_settings.py
index 6b6ffd18..d44fc78d 100644
--- a/src/elevenlabs/types/voice_settings.py
+++ b/src/elevenlabs/types/voice_settings.py
@@ -11,6 +11,7 @@ class VoiceSettings(UncheckedBaseModel):
similarity_boost: typing.Optional[float] = None
style: typing.Optional[float] = None
use_speaker_boost: typing.Optional[bool] = None
+ speed: typing.Optional[float] = None
if IS_PYDANTIC_V2:
model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
diff --git a/src/elevenlabs/types/widget_config.py b/src/elevenlabs/types/widget_config.py
index c5bcc0d3..85a52738 100644
--- a/src/elevenlabs/types/widget_config.py
+++ b/src/elevenlabs/types/widget_config.py
@@ -3,14 +3,16 @@
from ..core.unchecked_base_model import UncheckedBaseModel
import typing
from .embed_variant import EmbedVariant
+from .widget_expandable import WidgetExpandable
from .widget_config_avatar import WidgetConfigAvatar
from .widget_feedback_mode import WidgetFeedbackMode
-from ..core.pydantic_utilities import IS_PYDANTIC_V2
import pydantic
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
class WidgetConfig(UncheckedBaseModel):
variant: typing.Optional[EmbedVariant] = None
+ expandable: typing.Optional[WidgetExpandable] = None
avatar: typing.Optional[WidgetConfigAvatar] = None
feedback_mode: typing.Optional[WidgetFeedbackMode] = None
bg_color: typing.Optional[str] = None
@@ -31,6 +33,12 @@ class WidgetConfig(UncheckedBaseModel):
terms_text: typing.Optional[str] = None
terms_html: typing.Optional[str] = None
terms_key: typing.Optional[str] = None
+ show_avatar_when_collapsed: typing.Optional[bool] = None
+ disable_banner: typing.Optional[bool] = pydantic.Field(default=None)
+ """
+ Whether to disable the ConvAI widget banner
+ """
+
language_selector: typing.Optional[bool] = None
custom_avatar_path: typing.Optional[str] = None
diff --git a/src/elevenlabs/types/widget_config_response_model.py b/src/elevenlabs/types/widget_config_response_model.py
index fd30d596..a12838ff 100644
--- a/src/elevenlabs/types/widget_config_response_model.py
+++ b/src/elevenlabs/types/widget_config_response_model.py
@@ -3,14 +3,16 @@
from ..core.unchecked_base_model import UncheckedBaseModel
import typing
from .embed_variant import EmbedVariant
+from .widget_expandable import WidgetExpandable
from .widget_config_response_model_avatar import WidgetConfigResponseModelAvatar
from .widget_feedback_mode import WidgetFeedbackMode
-from ..core.pydantic_utilities import IS_PYDANTIC_V2
import pydantic
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
class WidgetConfigResponseModel(UncheckedBaseModel):
variant: typing.Optional[EmbedVariant] = None
+ expandable: typing.Optional[WidgetExpandable] = None
avatar: typing.Optional[WidgetConfigResponseModelAvatar] = None
feedback_mode: typing.Optional[WidgetFeedbackMode] = None
bg_color: typing.Optional[str] = None
@@ -31,6 +33,12 @@ class WidgetConfigResponseModel(UncheckedBaseModel):
terms_text: typing.Optional[str] = None
terms_html: typing.Optional[str] = None
terms_key: typing.Optional[str] = None
+ show_avatar_when_collapsed: typing.Optional[bool] = None
+ disable_banner: typing.Optional[bool] = pydantic.Field(default=None)
+ """
+ Whether to disable the ConvAI widget banner
+ """
+
language: str
supported_language_overrides: typing.Optional[typing.List[str]] = None
diff --git a/src/elevenlabs/types/widget_expandable.py b/src/elevenlabs/types/widget_expandable.py
new file mode 100644
index 00000000..028ac36a
--- /dev/null
+++ b/src/elevenlabs/types/widget_expandable.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+WidgetExpandable = typing.Union[typing.Literal["never", "mobile", "desktop", "always"], typing.Any]
diff --git a/src/elevenlabs/voices/client.py b/src/elevenlabs/voices/client.py
index ba99d3bb..9467096f 100644
--- a/src/elevenlabs/voices/client.py
+++ b/src/elevenlabs/voices/client.py
@@ -656,6 +656,7 @@ def get_shared(
use_cases: typing.Optional[typing.Union[str, typing.Sequence[str]]] = None,
descriptives: typing.Optional[typing.Union[str, typing.Sequence[str]]] = None,
featured: typing.Optional[bool] = None,
+ min_notice_period_days: typing.Optional[int] = None,
reader_app_enabled: typing.Optional[bool] = None,
owner_id: typing.Optional[str] = None,
sort: typing.Optional[str] = None,
@@ -697,6 +698,9 @@ def get_shared(
featured : typing.Optional[bool]
Filter featured voices
+ min_notice_period_days : typing.Optional[int]
+ Filter voices with a minimum notice period of the given number of days.
+
reader_app_enabled : typing.Optional[bool]
Filter voices that are enabled for the reader app
@@ -743,6 +747,7 @@ def get_shared(
"use_cases": use_cases,
"descriptives": descriptives,
"featured": featured,
+ "min_notice_period_days": min_notice_period_days,
"reader_app_enabled": reader_app_enabled,
"owner_id": owner_id,
"sort": sort,
@@ -1616,6 +1621,7 @@ async def get_shared(
use_cases: typing.Optional[typing.Union[str, typing.Sequence[str]]] = None,
descriptives: typing.Optional[typing.Union[str, typing.Sequence[str]]] = None,
featured: typing.Optional[bool] = None,
+ min_notice_period_days: typing.Optional[int] = None,
reader_app_enabled: typing.Optional[bool] = None,
owner_id: typing.Optional[str] = None,
sort: typing.Optional[str] = None,
@@ -1657,6 +1663,9 @@ async def get_shared(
featured : typing.Optional[bool]
Filter featured voices
+ min_notice_period_days : typing.Optional[int]
+ Filter voices with a minimum notice period of the given number of days.
+
reader_app_enabled : typing.Optional[bool]
Filter voices that are enabled for the reader app
@@ -1711,6 +1720,7 @@ async def main() -> None:
"use_cases": use_cases,
"descriptives": descriptives,
"featured": featured,
+ "min_notice_period_days": min_notice_period_days,
"reader_app_enabled": reader_app_enabled,
"owner_id": owner_id,
"sort": sort,
diff --git a/src/elevenlabs/workspace/client.py b/src/elevenlabs/workspace/client.py
index a72b3b11..a39bc845 100644
--- a/src/elevenlabs/workspace/client.py
+++ b/src/elevenlabs/workspace/client.py
@@ -259,7 +259,7 @@ def invite_user(
api_key="YOUR_API_KEY",
)
client.workspace.invite_user(
- email="email",
+ email="john.doe@testmail.com",
)
"""
_response = self._client_wrapper.httpx_client.request(
@@ -400,7 +400,7 @@ def delete_existing_invitation(
api_key="YOUR_API_KEY",
)
client.workspace.delete_existing_invitation(
- email="email",
+ email="john.doe@testmail.com",
)
"""
_response = self._client_wrapper.httpx_client.request(
@@ -788,7 +788,7 @@ async def invite_user(
async def main() -> None:
await client.workspace.invite_user(
- email="email",
+ email="john.doe@testmail.com",
)
@@ -945,7 +945,7 @@ async def delete_existing_invitation(
async def main() -> None:
await client.workspace.delete_existing_invitation(
- email="email",
+ email="john.doe@testmail.com",
)
diff --git a/tests/test_tts.py b/tests/test_tts.py
index 0baf8312..42828d90 100644
--- a/tests/test_tts.py
+++ b/tests/test_tts.py
@@ -96,11 +96,17 @@ def test_tts_convert_with_timestamps() -> None:
text=DEFAULT_TEXT, voice_id=DEFAULT_VOICE, model_id=DEFAULT_MODEL
)
- assert "alignment" in result # type: ignore
- assert "characters" in result["alignment"] # type: ignore
+ # Check that the alignment data exists and has the expected structure
+ assert hasattr(result, 'alignment')
+ assert hasattr(result, 'audio_base_64')
+
+ # Verify alignment contains timing data
+ assert result.alignment is not None
+ assert len(result.alignment.character_start_times_seconds) > 0
+ assert len(result.alignment.character_end_times_seconds) > 0
if not IN_GITHUB:
- audio_bytes = base64.b64decode(result["audio_base64"]) # type: ignore
+ audio_bytes = base64.b64decode(result.audio_base_64)
play(audio_bytes)