
Commit 041792c

chore: sync sdk code with DeepLearning repo (#116)
1 parent 1aeb966 commit 041792c

File tree

3 files changed, 58 insertions(+), 5 deletions(-)


assemblyai/__version__.py

Lines changed: 1 addition & 1 deletion
@@ -1 +1 @@
-__version__ = "0.39.1"
+__version__ = "0.40.0"

assemblyai/types.py

Lines changed: 27 additions & 4 deletions
@@ -19,13 +19,13 @@
 try:
     # pydantic v2 import
-    from pydantic import UUID4, BaseModel, ConfigDict, Field
+    from pydantic import UUID4, BaseModel, ConfigDict, Field, field_validator
     from pydantic_settings import BaseSettings, SettingsConfigDict

     pydantic_v2 = True
 except ImportError:
     # pydantic v1 import
-    from pydantic.v1 import UUID4, BaseModel, BaseSettings, ConfigDict, Field
+    from pydantic.v1 import UUID4, BaseModel, BaseSettings, ConfigDict, Field, validator

     pydantic_v2 = False

@@ -1468,6 +1468,19 @@ class Word(BaseModel):
     speaker: Optional[str] = None
     channel: Optional[str] = None

+    # This is a workaround to address an issue where sentiment_analysis_results
+    # may contain sentiments where `start` is null.
+    if pydantic_v2:
+
+        @field_validator("start", mode="before")
+        def set_start_default(cls, v):
+            return 0 if v is None else v
+    else:
+
+        @validator("start", pre=True)
+        def set_start_default(cls, v):
+            return 0 if v is None else v
+

 class UtteranceWord(Word):
     channel: Optional[str] = None
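For readers unfamiliar with the pattern, a minimal, self-contained sketch of the same idea follows. It is not the SDK's actual Word model (which carries more fields); it only shows why a pydantic v2 "before" validator fixes the null-start case: the validator runs before type validation, so a JSON null is replaced with 0 instead of failing the int check. On pydantic v1 installs, the hunk above does the same thing with @validator("start", pre=True).

    from typing import Optional

    from pydantic import BaseModel, field_validator


    class Word(BaseModel):
        text: str
        start: int
        end: int
        speaker: Optional[str] = None

        # Mirrors the workaround above: runs before int validation, so a
        # JSON null never reaches the type check and becomes 0 instead.
        @field_validator("start", mode="before")
        def set_start_default(cls, v):
            return 0 if v is None else v


    print(Word(text="hi", start=None, end=100).start)  # prints 0
    print(Word(text="hi", start=250, end=400).start)   # prints 250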
@@ -2031,19 +2044,29 @@ class LemurModel(str, Enum):
     LeMUR features different model modes that allow you to configure your request to suit your needs.
     """

+    claude3_7_sonnet_20250219 = "anthropic/claude-3-7-sonnet"
+    """
+    Claude 3.7 Sonnet is the most intelligent model to date, providing the highest level of intelligence and capability with toggleable extended thinking.
+    """
+
     claude3_5_sonnet = "anthropic/claude-3-5-sonnet"
     """
-    Claude 3.5 Sonnet is the most intelligent model to date, outperforming Claude 3 Opus on a wide range of evaluations, with the speed and cost of Claude 3 Sonnet.
+    Claude 3.5 Sonnet is the previous most intelligent model, providing a high level of intelligence and capability.
     """

     claude3_opus = "anthropic/claude-3-opus"
     """
     Claude 3 Opus is good at handling complex analysis, longer tasks with many steps, and higher-order math and coding tasks.
     """

+    claude3_5_haiku_20241022 = "anthropic/claude-3-5-haiku"
+    """
+    Claude 3.5 Haiku is the fastest model, providing intelligence at blazing speeds.
+    """
+
     claude3_haiku = "anthropic/claude-3-haiku"
     """
-    Claude 3 Haiku is the fastest model that can execute lightweight actions.
+    Claude 3 Haiku is the fastest and most compact model for near-instant responsiveness.
     """

     claude3_sonnet = "anthropic/claude-3-sonnet"
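The new enum members are the values callers pass when choosing a model for a LeMUR request. A hedged usage sketch follows, assuming the SDK's Transcriber.transcribe and transcript.lemur.task(prompt, final_model=...) entry points; the API key, audio URL, and prompt are placeholders, not values from this commit.

    import assemblyai as aai

    aai.settings.api_key = "YOUR_API_KEY"  # placeholder

    # Placeholder audio URL; any transcribable audio source would do.
    transcript = aai.Transcriber().transcribe("https://example.com/audio.mp3")

    # Select one of the newly added models for the LeMUR task.
    result = transcript.lemur.task(
        "Summarize the key points of this call.",
        final_model=aai.LemurModel.claude3_7_sonnet_20250219,
    )
    print(result.response)

claude3_5_haiku_20241022 is used the same way; only the final_model value changes.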

tests/unit/test_sentiment_analysis.py

Lines changed: 30 additions & 0 deletions
@@ -76,3 +76,33 @@ def test_sentiment_analysis_enabled(httpx_mock: HTTPXMock):
     assert (
         transcript_sentiment_result.speaker == response_sentiment_result["speaker"]
     )
+
+
+def test_sentiment_analysis_null_start(httpx_mock: HTTPXMock):
+    """
+    Tests that a null `start` value is converted to 0.
+    """
+    mock_response = {
+        "audio_url": "https://example/audio.mp3",
+        "status": "completed",
+        "sentiment_analysis_results": [
+            {
+                "text": "hi",
+                "start": None,
+                "end": 100,
+                "confidence": 0.99,
+                "sentiment": "POSITIVE",
+            }
+        ],
+    }
+    request_body, transcript = unit_test_utils.submit_mock_transcription_request(
+        httpx_mock,
+        mock_response=mock_response,
+        config=aai.TranscriptionConfig(sentiment_analysis=True),
+    )
+
+    for response_sentiment_result, transcript_sentiment_result in zip(
+        mock_response["sentiment_analysis_results"],
+        transcript.sentiment_analysis,
+    ):
+        assert transcript_sentiment_result.start == 0
