diff --git a/.fern/metadata.json b/.fern/metadata.json
new file mode 100644
index 00000000..b98313a7
--- /dev/null
+++ b/.fern/metadata.json
@@ -0,0 +1,15 @@
+{
+ "cliVersion": "3.1.0",
+ "generatorName": "fernapi/fern-python-sdk",
+ "generatorVersion": "4.42.0",
+ "generatorConfig": {
+ "client": {
+ "class_name": "BaseClient",
+ "filename": "base_client.py",
+ "exported_class_name": "DeepgramClient",
+ "exported_filename": "client.py"
+ },
+ "use_typeddict_requests": true,
+ "should_generate_websocket_clients": true
+ }
+}
\ No newline at end of file
diff --git a/poetry.lock b/poetry.lock
index 7b42b5ff..f0f92c43 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -38,13 +38,13 @@ trio = ["trio (>=0.26.1)"]
[[package]]
name = "certifi"
-version = "2025.10.5"
+version = "2026.1.4"
description = "Python package for providing Mozilla's CA Bundle."
optional = false
python-versions = ">=3.7"
files = [
- {file = "certifi-2025.10.5-py3-none-any.whl", hash = "sha256:0f212c2744a9bb6de0c56639a6f68afe01ecd92d91f14ae897c4fe7bbeeef0de"},
- {file = "certifi-2025.10.5.tar.gz", hash = "sha256:47c09d31ccf2acf0be3f701ea53595ee7e0b8fa08801c6624be771df09ae7b43"},
+ {file = "certifi-2026.1.4-py3-none-any.whl", hash = "sha256:9943707519e4add1115f44c2bc244f782c0249876bf51b6599fee1ffbedd685c"},
+ {file = "certifi-2026.1.4.tar.gz", hash = "sha256:ac726dd470482006e014ad384921ed6438c457018f4b3d204aea4281258b2120"},
]
[[package]]
@@ -60,13 +60,13 @@ files = [
[[package]]
name = "exceptiongroup"
-version = "1.3.0"
+version = "1.3.1"
description = "Backport of PEP 654 (exception groups)"
optional = false
python-versions = ">=3.7"
files = [
- {file = "exceptiongroup-1.3.0-py3-none-any.whl", hash = "sha256:4d111e6e0c13d0644cad6ddaa7ed0261a0b36971f6d23e7ec9b4b9097da78a10"},
- {file = "exceptiongroup-1.3.0.tar.gz", hash = "sha256:b241f5885f560bc56a59ee63ca4c6a8bfa46ae4ad651af316d4e81817bb9fd88"},
+ {file = "exceptiongroup-1.3.1-py3-none-any.whl", hash = "sha256:a7a39a3bd276781e98394987d3a5701d0c4edffb633bb7a5144577f82c773598"},
+ {file = "exceptiongroup-1.3.1.tar.gz", hash = "sha256:8b412432c6055b0b7d14c310000ae93352ed6754f70fa8f7c34141f91c4e3219"},
]
[package.dependencies]
@@ -75,6 +75,20 @@ typing-extensions = {version = ">=4.6.0", markers = "python_version < \"3.13\""}
[package.extras]
test = ["pytest (>=6)"]
+[[package]]
+name = "execnet"
+version = "2.1.2"
+description = "execnet: rapid multi-Python deployment"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "execnet-2.1.2-py3-none-any.whl", hash = "sha256:67fba928dd5a544b783f6056f449e5e3931a5c378b128bc18501f7ea79e296ec"},
+ {file = "execnet-2.1.2.tar.gz", hash = "sha256:63d83bfdd9a23e35b9c6a3261412324f964c2ec8dcd8d3c6916ee9373e0befcd"},
+]
+
+[package.extras]
+testing = ["hatch", "pre-commit", "pytest", "tox"]
+
[[package]]
name = "h11"
version = "0.16.0"
@@ -418,6 +432,26 @@ pytest = ">=7.0.0,<9"
docs = ["sphinx (>=5.3)", "sphinx-rtd-theme (>=1.0)"]
testing = ["coverage (>=6.2)", "hypothesis (>=5.7.1)"]
+[[package]]
+name = "pytest-xdist"
+version = "3.6.1"
+description = "pytest xdist plugin for distributed testing, most importantly across multiple CPUs"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "pytest_xdist-3.6.1-py3-none-any.whl", hash = "sha256:9ed4adfb68a016610848639bb7e02c9352d5d9f03d04809919e2dafc3be4cca7"},
+ {file = "pytest_xdist-3.6.1.tar.gz", hash = "sha256:ead156a4db231eec769737f57668ef58a2084a34b2e55c4a8fa20d861107300d"},
+]
+
+[package.dependencies]
+execnet = ">=2.1"
+pytest = ">=7.0.0"
+
+[package.extras]
+psutil = ["psutil (>=3.0)"]
+setproctitle = ["setproctitle"]
+testing = ["filelock"]
+
[[package]]
name = "python-dateutil"
version = "2.9.0.post0"
@@ -652,4 +686,4 @@ files = [
[metadata]
lock-version = "2.0"
python-versions = "^3.8"
-content-hash = "a531afb5127832a42cf10cb4a62d7f98f1a85f76739d6cfec3d1033e11764a01"
+content-hash = "997b22d5b3fdb1c542b2547ad8fbdc255dfb656b6c50d082140f482620f4e88a"
diff --git a/reference.md b/reference.md
index 54899f1e..8e7fcede 100644
--- a/reference.md
+++ b/reference.md
@@ -163,6 +163,41 @@ client = DeepgramClient(
api_key="YOUR_API_KEY",
)
client.listen.v1.media.transcribe_url(
+ callback="callback",
+ callback_method="POST",
+ extra="extra",
+ sentiment=True,
+ summarize="v2",
+ tag="tag",
+ topics=True,
+ custom_topic="custom_topic",
+ custom_topic_mode="extended",
+ intents=True,
+ custom_intent="custom_intent",
+ custom_intent_mode="extended",
+ detect_entities=True,
+ detect_language=True,
+ diarize=True,
+ dictation=True,
+ encoding="linear16",
+ filler_words=True,
+ keywords="keywords",
+ language="language",
+ measurements=True,
+ model="nova-3",
+ multichannel=True,
+ numerals=True,
+ paragraphs=True,
+ profanity_filter=True,
+ punctuate=True,
+ redact="redact",
+ replace="replace",
+ search="search",
+ smart_format=True,
+ utterances=True,
+ utt_split=1.1,
+ version="latest",
+ mip_opt_out=True,
url="https://dpgr.am/spacewalk.wav",
)
@@ -877,7 +912,9 @@ from deepgram import DeepgramClient
client = DeepgramClient(
api_key="YOUR_API_KEY",
)
-client.manage.v1.models.list()
+client.manage.v1.models.list(
+ include_outdated=True,
+)
```
@@ -1078,6 +1115,8 @@ client = DeepgramClient(
)
client.manage.v1.projects.get(
project_id="123456-7890-1234-5678-901234",
+ limit=1.1,
+ page=1.1,
)
```
@@ -1383,6 +1422,7 @@ client = DeepgramClient(
)
client.manage.v1.projects.keys.list(
project_id="123456-7890-1234-5678-901234",
+ status="active",
)
```
@@ -1849,6 +1889,7 @@ client = DeepgramClient(
)
client.manage.v1.projects.models.list(
project_id="123456-7890-1234-5678-901234",
+ include_outdated=True,
)
```
@@ -2000,6 +2041,8 @@ Generates a list of requests for a specific project
```python
+import datetime
+
from deepgram import DeepgramClient
client = DeepgramClient(
@@ -2007,8 +2050,20 @@ client = DeepgramClient(
)
client.manage.v1.projects.requests.list(
project_id="123456-7890-1234-5678-901234",
+ start=datetime.datetime.fromisoformat(
+ "2024-01-15 09:30:00+00:00",
+ ),
+ end=datetime.datetime.fromisoformat(
+ "2024-01-15 09:30:00+00:00",
+ ),
+ limit=1.1,
+ page=1.1,
accessor="12345678-1234-1234-1234-123456789012",
request_id="12345678-1234-1234-1234-123456789012",
+ deployment="hosted",
+ endpoint="listen",
+ method="sync",
+ status="succeeded",
)
```
@@ -2239,10 +2294,50 @@ client = DeepgramClient(
)
client.manage.v1.projects.usage.get(
project_id="123456-7890-1234-5678-901234",
+ start="start",
+ end="end",
accessor="12345678-1234-1234-1234-123456789012",
+ alternatives=True,
+ callback_method=True,
+ callback=True,
+ channels=True,
+ custom_intent_mode=True,
+ custom_intent=True,
+ custom_topic_mode=True,
+ custom_topic=True,
+ deployment="hosted",
+ detect_entities=True,
+ detect_language=True,
+ diarize=True,
+ dictation=True,
+ encoding=True,
+ endpoint="listen",
+ extra=True,
+ filler_words=True,
+ intents=True,
+ keyterm=True,
+ keywords=True,
+ language=True,
+ measurements=True,
+ method="sync",
model="6f548761-c9c0-429a-9315-11a1d28499c8",
+ multichannel=True,
+ numerals=True,
+ paragraphs=True,
+ profanity_filter=True,
+ punctuate=True,
+ redact=True,
+ replace=True,
sample_rate=True,
+ search=True,
+ sentiment=True,
+ smart_format=True,
+ summarize=True,
tag="tag1",
+ topics=True,
+ utt_split=True,
+ utterances=True,
+ version=True,
)
```
@@ -2816,7 +2911,10 @@ client = DeepgramClient(
)
client.manage.v1.projects.billing.breakdown.list(
project_id="123456-7890-1234-5678-901234",
+ start="start",
+ end="end",
accessor="12345678-1234-1234-1234-123456789012",
+ deployment="hosted",
tag="tag1",
line_item="streaming::nova-3",
)
@@ -2951,6 +3049,8 @@ client = DeepgramClient(
)
client.manage.v1.projects.billing.fields.list(
project_id="123456-7890-1234-5678-901234",
+ start="start",
+ end="end",
)
```
@@ -3038,6 +3138,7 @@ client = DeepgramClient(
)
client.manage.v1.projects.billing.purchases.list(
project_id="123456-7890-1234-5678-901234",
+ limit=1.1,
)
```
@@ -3523,10 +3624,51 @@ client = DeepgramClient(
)
client.manage.v1.projects.usage.breakdown.get(
project_id="123456-7890-1234-5678-901234",
+ start="start",
+ end="end",
+ grouping="accessor",
accessor="12345678-1234-1234-1234-123456789012",
+ alternatives=True,
+ callback_method=True,
+ callback=True,
+ channels=True,
+ custom_intent_mode=True,
+ custom_intent=True,
+ custom_topic_mode=True,
+ custom_topic=True,
+ deployment="hosted",
+ detect_entities=True,
+ detect_language=True,
+ diarize=True,
+ dictation=True,
+ encoding=True,
+ endpoint="listen",
+ extra=True,
+ filler_words=True,
+ intents=True,
+ keyterm=True,
+ keywords=True,
+ language=True,
+ measurements=True,
+ method="sync",
model="6f548761-c9c0-429a-9315-11a1d28499c8",
+ multichannel=True,
+ numerals=True,
+ paragraphs=True,
+ profanity_filter=True,
+ punctuate=True,
+ redact=True,
+ replace=True,
sample_rate=True,
+ search=True,
+ sentiment=True,
+ smart_format=True,
+ summarize=True,
tag="tag1",
+ topics=True,
+ utt_split=True,
+ utterances=True,
+ version=True,
)
```
@@ -3958,6 +4100,8 @@ client = DeepgramClient(
)
client.manage.v1.projects.usage.fields.list(
project_id="123456-7890-1234-5678-901234",
+ start="start",
+ end="end",
)
```
@@ -4044,6 +4188,18 @@ client = DeepgramClient(
api_key="YOUR_API_KEY",
)
client.read.v1.text.analyze(
+ callback="callback",
+ callback_method="POST",
+ sentiment=True,
+ summarize="v2",
+ tag="tag",
+ topics=True,
+ custom_topic="custom_topic",
+ custom_topic_mode="extended",
+ intents=True,
+ custom_intent="custom_intent",
+ custom_intent_mode="extended",
+ language="language",
request={"url": "url"},
)
diff --git a/src/deepgram/__init__.py b/src/deepgram/__init__.py
index e4d9cf16..548f35cb 100644
--- a/src/deepgram/__init__.py
+++ b/src/deepgram/__init__.py
@@ -81,6 +81,7 @@
ListenV1Callback,
ListenV1CallbackMethod,
ListenV1Channels,
+ ListenV1DetectEntities,
ListenV1Diarize,
ListenV1Dictation,
ListenV1Encoding,
@@ -109,6 +110,7 @@
ListenV1ResponseResultsChannels,
ListenV1ResponseResultsChannelsItem,
ListenV1ResponseResultsChannelsItemAlternativesItem,
+ ListenV1ResponseResultsChannelsItemAlternativesItemEntitiesItem,
ListenV1ResponseResultsChannelsItemAlternativesItemParagraphs,
ListenV1ResponseResultsChannelsItemAlternativesItemParagraphsParagraphsItem,
ListenV1ResponseResultsChannelsItemAlternativesItemParagraphsParagraphsItemSentencesItem,
@@ -257,6 +259,7 @@
ListenV1ResponseMetadataSummaryInfoParams,
ListenV1ResponseMetadataTopicsInfoParams,
ListenV1ResponseParams,
+ ListenV1ResponseResultsChannelsItemAlternativesItemEntitiesItemParams,
ListenV1ResponseResultsChannelsItemAlternativesItemParagraphsParagraphsItemParams,
ListenV1ResponseResultsChannelsItemAlternativesItemParagraphsParagraphsItemSentencesItemParams,
ListenV1ResponseResultsChannelsItemAlternativesItemParagraphsParams,
@@ -458,6 +461,7 @@
"ListenV1Callback": ".types",
"ListenV1CallbackMethod": ".types",
"ListenV1Channels": ".types",
+ "ListenV1DetectEntities": ".types",
"ListenV1Diarize": ".types",
"ListenV1Dictation": ".types",
"ListenV1Encoding": ".types",
@@ -492,6 +496,8 @@
"ListenV1ResponseResultsChannels": ".types",
"ListenV1ResponseResultsChannelsItem": ".types",
"ListenV1ResponseResultsChannelsItemAlternativesItem": ".types",
+ "ListenV1ResponseResultsChannelsItemAlternativesItemEntitiesItem": ".types",
+ "ListenV1ResponseResultsChannelsItemAlternativesItemEntitiesItemParams": ".requests",
"ListenV1ResponseResultsChannelsItemAlternativesItemParagraphs": ".types",
"ListenV1ResponseResultsChannelsItemAlternativesItemParagraphsParagraphsItem": ".types",
"ListenV1ResponseResultsChannelsItemAlternativesItemParagraphsParagraphsItemParams": ".requests",
@@ -794,6 +800,7 @@ def __dir__():
"ListenV1Callback",
"ListenV1CallbackMethod",
"ListenV1Channels",
+ "ListenV1DetectEntities",
"ListenV1Diarize",
"ListenV1Dictation",
"ListenV1Encoding",
@@ -828,6 +835,8 @@ def __dir__():
"ListenV1ResponseResultsChannels",
"ListenV1ResponseResultsChannelsItem",
"ListenV1ResponseResultsChannelsItemAlternativesItem",
+ "ListenV1ResponseResultsChannelsItemAlternativesItemEntitiesItem",
+ "ListenV1ResponseResultsChannelsItemAlternativesItemEntitiesItemParams",
"ListenV1ResponseResultsChannelsItemAlternativesItemParagraphs",
"ListenV1ResponseResultsChannelsItemAlternativesItemParagraphsParagraphsItem",
"ListenV1ResponseResultsChannelsItemAlternativesItemParagraphsParagraphsItemParams",
diff --git a/src/deepgram/agent/__init__.py b/src/deepgram/agent/__init__.py
index 148ad154..6ed13aa0 100644
--- a/src/deepgram/agent/__init__.py
+++ b/src/deepgram/agent/__init__.py
@@ -7,7 +7,461 @@
if typing.TYPE_CHECKING:
from . import v1
-_dynamic_imports: typing.Dict[str, str] = {"v1": ".v1"}
+ from .v1 import (
+ AgentV1AgentAudioDone,
+ AgentV1AgentAudioDoneParams,
+ AgentV1AgentStartedSpeaking,
+ AgentV1AgentStartedSpeakingParams,
+ AgentV1AgentThinking,
+ AgentV1AgentThinkingParams,
+ AgentV1ConversationText,
+ AgentV1ConversationTextParams,
+ AgentV1ConversationTextRole,
+ AgentV1Error,
+ AgentV1ErrorParams,
+ AgentV1FunctionCallRequest,
+ AgentV1FunctionCallRequestFunctionsItem,
+ AgentV1FunctionCallRequestFunctionsItemParams,
+ AgentV1FunctionCallRequestParams,
+ AgentV1InjectAgentMessage,
+ AgentV1InjectAgentMessageParams,
+ AgentV1InjectUserMessage,
+ AgentV1InjectUserMessageParams,
+ AgentV1InjectionRefused,
+ AgentV1InjectionRefusedParams,
+ AgentV1KeepAlive,
+ AgentV1KeepAliveParams,
+ AgentV1PromptUpdated,
+ AgentV1PromptUpdatedParams,
+ AgentV1ReceiveFunctionCallResponse,
+ AgentV1ReceiveFunctionCallResponseParams,
+ AgentV1SendFunctionCallResponse,
+ AgentV1SendFunctionCallResponseParams,
+ AgentV1Settings,
+ AgentV1SettingsAgent,
+ AgentV1SettingsAgentContext,
+ AgentV1SettingsAgentContextMessagesItem,
+ AgentV1SettingsAgentContextMessagesItemContent,
+ AgentV1SettingsAgentContextMessagesItemContentParams,
+ AgentV1SettingsAgentContextMessagesItemContentRole,
+ AgentV1SettingsAgentContextMessagesItemFunctionCalls,
+ AgentV1SettingsAgentContextMessagesItemFunctionCallsFunctionCallsItem,
+ AgentV1SettingsAgentContextMessagesItemFunctionCallsFunctionCallsItemParams,
+ AgentV1SettingsAgentContextMessagesItemFunctionCallsParams,
+ AgentV1SettingsAgentContextMessagesItemParams,
+ AgentV1SettingsAgentContextParams,
+ AgentV1SettingsAgentListen,
+ AgentV1SettingsAgentListenParams,
+ AgentV1SettingsAgentListenProvider,
+ AgentV1SettingsAgentListenProviderParams,
+ AgentV1SettingsAgentListenProviderV1,
+ AgentV1SettingsAgentListenProviderV1Params,
+ AgentV1SettingsAgentListenProviderV2,
+ AgentV1SettingsAgentListenProviderV2Params,
+ AgentV1SettingsAgentListenProvider_V1,
+ AgentV1SettingsAgentListenProvider_V1Params,
+ AgentV1SettingsAgentListenProvider_V2,
+ AgentV1SettingsAgentListenProvider_V2Params,
+ AgentV1SettingsAgentParams,
+ AgentV1SettingsAgentSpeak,
+ AgentV1SettingsAgentSpeakEndpoint,
+ AgentV1SettingsAgentSpeakEndpointEndpoint,
+ AgentV1SettingsAgentSpeakEndpointEndpointParams,
+ AgentV1SettingsAgentSpeakEndpointParams,
+ AgentV1SettingsAgentSpeakEndpointProvider,
+ AgentV1SettingsAgentSpeakEndpointProviderAwsPolly,
+ AgentV1SettingsAgentSpeakEndpointProviderAwsPollyCredentials,
+ AgentV1SettingsAgentSpeakEndpointProviderAwsPollyCredentialsParams,
+ AgentV1SettingsAgentSpeakEndpointProviderAwsPollyCredentialsType,
+ AgentV1SettingsAgentSpeakEndpointProviderAwsPollyEngine,
+ AgentV1SettingsAgentSpeakEndpointProviderAwsPollyParams,
+ AgentV1SettingsAgentSpeakEndpointProviderAwsPollyVoice,
+ AgentV1SettingsAgentSpeakEndpointProviderCartesiaModelId,
+ AgentV1SettingsAgentSpeakEndpointProviderCartesiaVoice,
+ AgentV1SettingsAgentSpeakEndpointProviderCartesiaVoiceParams,
+ AgentV1SettingsAgentSpeakEndpointProviderDeepgramModel,
+ AgentV1SettingsAgentSpeakEndpointProviderElevenLabsModelId,
+ AgentV1SettingsAgentSpeakEndpointProviderOpenAiModel,
+ AgentV1SettingsAgentSpeakEndpointProviderOpenAiVoice,
+ AgentV1SettingsAgentSpeakEndpointProviderParams,
+ AgentV1SettingsAgentSpeakEndpointProvider_AwsPolly,
+ AgentV1SettingsAgentSpeakEndpointProvider_AwsPollyParams,
+ AgentV1SettingsAgentSpeakEndpointProvider_Cartesia,
+ AgentV1SettingsAgentSpeakEndpointProvider_CartesiaParams,
+ AgentV1SettingsAgentSpeakEndpointProvider_Deepgram,
+ AgentV1SettingsAgentSpeakEndpointProvider_DeepgramParams,
+ AgentV1SettingsAgentSpeakEndpointProvider_ElevenLabs,
+ AgentV1SettingsAgentSpeakEndpointProvider_ElevenLabsParams,
+ AgentV1SettingsAgentSpeakEndpointProvider_OpenAi,
+ AgentV1SettingsAgentSpeakEndpointProvider_OpenAiParams,
+ AgentV1SettingsAgentSpeakOneItem,
+ AgentV1SettingsAgentSpeakOneItemEndpoint,
+ AgentV1SettingsAgentSpeakOneItemEndpointParams,
+ AgentV1SettingsAgentSpeakOneItemParams,
+ AgentV1SettingsAgentSpeakOneItemProvider,
+ AgentV1SettingsAgentSpeakOneItemProviderAwsPolly,
+ AgentV1SettingsAgentSpeakOneItemProviderAwsPollyCredentials,
+ AgentV1SettingsAgentSpeakOneItemProviderAwsPollyCredentialsParams,
+ AgentV1SettingsAgentSpeakOneItemProviderAwsPollyCredentialsType,
+ AgentV1SettingsAgentSpeakOneItemProviderAwsPollyEngine,
+ AgentV1SettingsAgentSpeakOneItemProviderAwsPollyParams,
+ AgentV1SettingsAgentSpeakOneItemProviderAwsPollyVoice,
+ AgentV1SettingsAgentSpeakOneItemProviderCartesiaModelId,
+ AgentV1SettingsAgentSpeakOneItemProviderCartesiaVoice,
+ AgentV1SettingsAgentSpeakOneItemProviderCartesiaVoiceParams,
+ AgentV1SettingsAgentSpeakOneItemProviderDeepgramModel,
+ AgentV1SettingsAgentSpeakOneItemProviderElevenLabsModelId,
+ AgentV1SettingsAgentSpeakOneItemProviderOpenAiModel,
+ AgentV1SettingsAgentSpeakOneItemProviderOpenAiVoice,
+ AgentV1SettingsAgentSpeakOneItemProviderParams,
+ AgentV1SettingsAgentSpeakOneItemProvider_AwsPolly,
+ AgentV1SettingsAgentSpeakOneItemProvider_AwsPollyParams,
+ AgentV1SettingsAgentSpeakOneItemProvider_Cartesia,
+ AgentV1SettingsAgentSpeakOneItemProvider_CartesiaParams,
+ AgentV1SettingsAgentSpeakOneItemProvider_Deepgram,
+ AgentV1SettingsAgentSpeakOneItemProvider_DeepgramParams,
+ AgentV1SettingsAgentSpeakOneItemProvider_ElevenLabs,
+ AgentV1SettingsAgentSpeakOneItemProvider_ElevenLabsParams,
+ AgentV1SettingsAgentSpeakOneItemProvider_OpenAi,
+ AgentV1SettingsAgentSpeakOneItemProvider_OpenAiParams,
+ AgentV1SettingsAgentSpeakParams,
+ AgentV1SettingsAgentThink,
+ AgentV1SettingsAgentThinkContextLength,
+ AgentV1SettingsAgentThinkContextLengthParams,
+ AgentV1SettingsAgentThinkEndpoint,
+ AgentV1SettingsAgentThinkEndpointParams,
+ AgentV1SettingsAgentThinkFunctionsItem,
+ AgentV1SettingsAgentThinkFunctionsItemEndpoint,
+ AgentV1SettingsAgentThinkFunctionsItemEndpointParams,
+ AgentV1SettingsAgentThinkFunctionsItemParams,
+ AgentV1SettingsAgentThinkParams,
+ AgentV1SettingsAgentThinkProvider,
+ AgentV1SettingsAgentThinkProviderAnthropicModel,
+ AgentV1SettingsAgentThinkProviderAwsBedrock,
+ AgentV1SettingsAgentThinkProviderAwsBedrockCredentials,
+ AgentV1SettingsAgentThinkProviderAwsBedrockCredentialsParams,
+ AgentV1SettingsAgentThinkProviderAwsBedrockCredentialsType,
+ AgentV1SettingsAgentThinkProviderAwsBedrockModel,
+ AgentV1SettingsAgentThinkProviderAwsBedrockParams,
+ AgentV1SettingsAgentThinkProviderGoogleModel,
+ AgentV1SettingsAgentThinkProviderOpenAiModel,
+ AgentV1SettingsAgentThinkProviderParams,
+ AgentV1SettingsAgentThinkProvider_Anthropic,
+ AgentV1SettingsAgentThinkProvider_AnthropicParams,
+ AgentV1SettingsAgentThinkProvider_AwsBedrock,
+ AgentV1SettingsAgentThinkProvider_AwsBedrockParams,
+ AgentV1SettingsAgentThinkProvider_Google,
+ AgentV1SettingsAgentThinkProvider_GoogleParams,
+ AgentV1SettingsAgentThinkProvider_Groq,
+ AgentV1SettingsAgentThinkProvider_GroqParams,
+ AgentV1SettingsAgentThinkProvider_OpenAi,
+ AgentV1SettingsAgentThinkProvider_OpenAiParams,
+ AgentV1SettingsApplied,
+ AgentV1SettingsAppliedParams,
+ AgentV1SettingsAudio,
+ AgentV1SettingsAudioInput,
+ AgentV1SettingsAudioInputEncoding,
+ AgentV1SettingsAudioInputParams,
+ AgentV1SettingsAudioOutput,
+ AgentV1SettingsAudioOutputEncoding,
+ AgentV1SettingsAudioOutputParams,
+ AgentV1SettingsAudioParams,
+ AgentV1SettingsFlags,
+ AgentV1SettingsFlagsParams,
+ AgentV1SettingsParams,
+ AgentV1SpeakUpdated,
+ AgentV1SpeakUpdatedParams,
+ AgentV1UpdatePrompt,
+ AgentV1UpdatePromptParams,
+ AgentV1UpdateSpeak,
+ AgentV1UpdateSpeakParams,
+ AgentV1UpdateSpeakSpeak,
+ AgentV1UpdateSpeakSpeakEndpoint,
+ AgentV1UpdateSpeakSpeakEndpointParams,
+ AgentV1UpdateSpeakSpeakParams,
+ AgentV1UpdateSpeakSpeakProvider,
+ AgentV1UpdateSpeakSpeakProviderAwsPolly,
+ AgentV1UpdateSpeakSpeakProviderAwsPollyCredentials,
+ AgentV1UpdateSpeakSpeakProviderAwsPollyCredentialsParams,
+ AgentV1UpdateSpeakSpeakProviderAwsPollyCredentialsType,
+ AgentV1UpdateSpeakSpeakProviderAwsPollyEngine,
+ AgentV1UpdateSpeakSpeakProviderAwsPollyParams,
+ AgentV1UpdateSpeakSpeakProviderAwsPollyVoice,
+ AgentV1UpdateSpeakSpeakProviderCartesia,
+ AgentV1UpdateSpeakSpeakProviderCartesiaModelId,
+ AgentV1UpdateSpeakSpeakProviderCartesiaParams,
+ AgentV1UpdateSpeakSpeakProviderCartesiaVoice,
+ AgentV1UpdateSpeakSpeakProviderCartesiaVoiceParams,
+ AgentV1UpdateSpeakSpeakProviderDeepgram,
+ AgentV1UpdateSpeakSpeakProviderDeepgramModel,
+ AgentV1UpdateSpeakSpeakProviderDeepgramParams,
+ AgentV1UpdateSpeakSpeakProviderElevenLabs,
+ AgentV1UpdateSpeakSpeakProviderElevenLabsModelId,
+ AgentV1UpdateSpeakSpeakProviderElevenLabsParams,
+ AgentV1UpdateSpeakSpeakProviderOpenAi,
+ AgentV1UpdateSpeakSpeakProviderOpenAiModel,
+ AgentV1UpdateSpeakSpeakProviderOpenAiParams,
+ AgentV1UpdateSpeakSpeakProviderOpenAiVoice,
+ AgentV1UpdateSpeakSpeakProviderParams,
+ AgentV1UpdateSpeakSpeakProvider_AwsPolly,
+ AgentV1UpdateSpeakSpeakProvider_AwsPollyParams,
+ AgentV1UpdateSpeakSpeakProvider_Cartesia,
+ AgentV1UpdateSpeakSpeakProvider_CartesiaParams,
+ AgentV1UpdateSpeakSpeakProvider_Deepgram,
+ AgentV1UpdateSpeakSpeakProvider_DeepgramParams,
+ AgentV1UpdateSpeakSpeakProvider_ElevenLabs,
+ AgentV1UpdateSpeakSpeakProvider_ElevenLabsParams,
+ AgentV1UpdateSpeakSpeakProvider_OpenAi,
+ AgentV1UpdateSpeakSpeakProvider_OpenAiParams,
+ AgentV1UserStartedSpeaking,
+ AgentV1UserStartedSpeakingParams,
+ AgentV1Warning,
+ AgentV1WarningParams,
+ AgentV1Welcome,
+ AgentV1WelcomeParams,
+ Anthropic,
+ AnthropicParams,
+ Cartesia,
+ CartesiaParams,
+ Deepgram,
+ DeepgramParams,
+ ElevenLabs,
+ ElevenLabsParams,
+ Google,
+ GoogleParams,
+ Groq,
+ GroqParams,
+ OpenAi,
+ OpenAiParams,
+ )
+_dynamic_imports: typing.Dict[str, str] = {
+ "AgentV1AgentAudioDone": ".v1",
+ "AgentV1AgentAudioDoneParams": ".v1",
+ "AgentV1AgentStartedSpeaking": ".v1",
+ "AgentV1AgentStartedSpeakingParams": ".v1",
+ "AgentV1AgentThinking": ".v1",
+ "AgentV1AgentThinkingParams": ".v1",
+ "AgentV1ConversationText": ".v1",
+ "AgentV1ConversationTextParams": ".v1",
+ "AgentV1ConversationTextRole": ".v1",
+ "AgentV1Error": ".v1",
+ "AgentV1ErrorParams": ".v1",
+ "AgentV1FunctionCallRequest": ".v1",
+ "AgentV1FunctionCallRequestFunctionsItem": ".v1",
+ "AgentV1FunctionCallRequestFunctionsItemParams": ".v1",
+ "AgentV1FunctionCallRequestParams": ".v1",
+ "AgentV1InjectAgentMessage": ".v1",
+ "AgentV1InjectAgentMessageParams": ".v1",
+ "AgentV1InjectUserMessage": ".v1",
+ "AgentV1InjectUserMessageParams": ".v1",
+ "AgentV1InjectionRefused": ".v1",
+ "AgentV1InjectionRefusedParams": ".v1",
+ "AgentV1KeepAlive": ".v1",
+ "AgentV1KeepAliveParams": ".v1",
+ "AgentV1PromptUpdated": ".v1",
+ "AgentV1PromptUpdatedParams": ".v1",
+ "AgentV1ReceiveFunctionCallResponse": ".v1",
+ "AgentV1ReceiveFunctionCallResponseParams": ".v1",
+ "AgentV1SendFunctionCallResponse": ".v1",
+ "AgentV1SendFunctionCallResponseParams": ".v1",
+ "AgentV1Settings": ".v1",
+ "AgentV1SettingsAgent": ".v1",
+ "AgentV1SettingsAgentContext": ".v1",
+ "AgentV1SettingsAgentContextMessagesItem": ".v1",
+ "AgentV1SettingsAgentContextMessagesItemContent": ".v1",
+ "AgentV1SettingsAgentContextMessagesItemContentParams": ".v1",
+ "AgentV1SettingsAgentContextMessagesItemContentRole": ".v1",
+ "AgentV1SettingsAgentContextMessagesItemFunctionCalls": ".v1",
+ "AgentV1SettingsAgentContextMessagesItemFunctionCallsFunctionCallsItem": ".v1",
+ "AgentV1SettingsAgentContextMessagesItemFunctionCallsFunctionCallsItemParams": ".v1",
+ "AgentV1SettingsAgentContextMessagesItemFunctionCallsParams": ".v1",
+ "AgentV1SettingsAgentContextMessagesItemParams": ".v1",
+ "AgentV1SettingsAgentContextParams": ".v1",
+ "AgentV1SettingsAgentListen": ".v1",
+ "AgentV1SettingsAgentListenParams": ".v1",
+ "AgentV1SettingsAgentListenProvider": ".v1",
+ "AgentV1SettingsAgentListenProviderParams": ".v1",
+ "AgentV1SettingsAgentListenProviderV1": ".v1",
+ "AgentV1SettingsAgentListenProviderV1Params": ".v1",
+ "AgentV1SettingsAgentListenProviderV2": ".v1",
+ "AgentV1SettingsAgentListenProviderV2Params": ".v1",
+ "AgentV1SettingsAgentListenProvider_V1": ".v1",
+ "AgentV1SettingsAgentListenProvider_V1Params": ".v1",
+ "AgentV1SettingsAgentListenProvider_V2": ".v1",
+ "AgentV1SettingsAgentListenProvider_V2Params": ".v1",
+ "AgentV1SettingsAgentParams": ".v1",
+ "AgentV1SettingsAgentSpeak": ".v1",
+ "AgentV1SettingsAgentSpeakEndpoint": ".v1",
+ "AgentV1SettingsAgentSpeakEndpointEndpoint": ".v1",
+ "AgentV1SettingsAgentSpeakEndpointEndpointParams": ".v1",
+ "AgentV1SettingsAgentSpeakEndpointParams": ".v1",
+ "AgentV1SettingsAgentSpeakEndpointProvider": ".v1",
+ "AgentV1SettingsAgentSpeakEndpointProviderAwsPolly": ".v1",
+ "AgentV1SettingsAgentSpeakEndpointProviderAwsPollyCredentials": ".v1",
+ "AgentV1SettingsAgentSpeakEndpointProviderAwsPollyCredentialsParams": ".v1",
+ "AgentV1SettingsAgentSpeakEndpointProviderAwsPollyCredentialsType": ".v1",
+ "AgentV1SettingsAgentSpeakEndpointProviderAwsPollyEngine": ".v1",
+ "AgentV1SettingsAgentSpeakEndpointProviderAwsPollyParams": ".v1",
+ "AgentV1SettingsAgentSpeakEndpointProviderAwsPollyVoice": ".v1",
+ "AgentV1SettingsAgentSpeakEndpointProviderCartesiaModelId": ".v1",
+ "AgentV1SettingsAgentSpeakEndpointProviderCartesiaVoice": ".v1",
+ "AgentV1SettingsAgentSpeakEndpointProviderCartesiaVoiceParams": ".v1",
+ "AgentV1SettingsAgentSpeakEndpointProviderDeepgramModel": ".v1",
+ "AgentV1SettingsAgentSpeakEndpointProviderElevenLabsModelId": ".v1",
+ "AgentV1SettingsAgentSpeakEndpointProviderOpenAiModel": ".v1",
+ "AgentV1SettingsAgentSpeakEndpointProviderOpenAiVoice": ".v1",
+ "AgentV1SettingsAgentSpeakEndpointProviderParams": ".v1",
+ "AgentV1SettingsAgentSpeakEndpointProvider_AwsPolly": ".v1",
+ "AgentV1SettingsAgentSpeakEndpointProvider_AwsPollyParams": ".v1",
+ "AgentV1SettingsAgentSpeakEndpointProvider_Cartesia": ".v1",
+ "AgentV1SettingsAgentSpeakEndpointProvider_CartesiaParams": ".v1",
+ "AgentV1SettingsAgentSpeakEndpointProvider_Deepgram": ".v1",
+ "AgentV1SettingsAgentSpeakEndpointProvider_DeepgramParams": ".v1",
+ "AgentV1SettingsAgentSpeakEndpointProvider_ElevenLabs": ".v1",
+ "AgentV1SettingsAgentSpeakEndpointProvider_ElevenLabsParams": ".v1",
+ "AgentV1SettingsAgentSpeakEndpointProvider_OpenAi": ".v1",
+ "AgentV1SettingsAgentSpeakEndpointProvider_OpenAiParams": ".v1",
+ "AgentV1SettingsAgentSpeakOneItem": ".v1",
+ "AgentV1SettingsAgentSpeakOneItemEndpoint": ".v1",
+ "AgentV1SettingsAgentSpeakOneItemEndpointParams": ".v1",
+ "AgentV1SettingsAgentSpeakOneItemParams": ".v1",
+ "AgentV1SettingsAgentSpeakOneItemProvider": ".v1",
+ "AgentV1SettingsAgentSpeakOneItemProviderAwsPolly": ".v1",
+ "AgentV1SettingsAgentSpeakOneItemProviderAwsPollyCredentials": ".v1",
+ "AgentV1SettingsAgentSpeakOneItemProviderAwsPollyCredentialsParams": ".v1",
+ "AgentV1SettingsAgentSpeakOneItemProviderAwsPollyCredentialsType": ".v1",
+ "AgentV1SettingsAgentSpeakOneItemProviderAwsPollyEngine": ".v1",
+ "AgentV1SettingsAgentSpeakOneItemProviderAwsPollyParams": ".v1",
+ "AgentV1SettingsAgentSpeakOneItemProviderAwsPollyVoice": ".v1",
+ "AgentV1SettingsAgentSpeakOneItemProviderCartesiaModelId": ".v1",
+ "AgentV1SettingsAgentSpeakOneItemProviderCartesiaVoice": ".v1",
+ "AgentV1SettingsAgentSpeakOneItemProviderCartesiaVoiceParams": ".v1",
+ "AgentV1SettingsAgentSpeakOneItemProviderDeepgramModel": ".v1",
+ "AgentV1SettingsAgentSpeakOneItemProviderElevenLabsModelId": ".v1",
+ "AgentV1SettingsAgentSpeakOneItemProviderOpenAiModel": ".v1",
+ "AgentV1SettingsAgentSpeakOneItemProviderOpenAiVoice": ".v1",
+ "AgentV1SettingsAgentSpeakOneItemProviderParams": ".v1",
+ "AgentV1SettingsAgentSpeakOneItemProvider_AwsPolly": ".v1",
+ "AgentV1SettingsAgentSpeakOneItemProvider_AwsPollyParams": ".v1",
+ "AgentV1SettingsAgentSpeakOneItemProvider_Cartesia": ".v1",
+ "AgentV1SettingsAgentSpeakOneItemProvider_CartesiaParams": ".v1",
+ "AgentV1SettingsAgentSpeakOneItemProvider_Deepgram": ".v1",
+ "AgentV1SettingsAgentSpeakOneItemProvider_DeepgramParams": ".v1",
+ "AgentV1SettingsAgentSpeakOneItemProvider_ElevenLabs": ".v1",
+ "AgentV1SettingsAgentSpeakOneItemProvider_ElevenLabsParams": ".v1",
+ "AgentV1SettingsAgentSpeakOneItemProvider_OpenAi": ".v1",
+ "AgentV1SettingsAgentSpeakOneItemProvider_OpenAiParams": ".v1",
+ "AgentV1SettingsAgentSpeakParams": ".v1",
+ "AgentV1SettingsAgentThink": ".v1",
+ "AgentV1SettingsAgentThinkContextLength": ".v1",
+ "AgentV1SettingsAgentThinkContextLengthParams": ".v1",
+ "AgentV1SettingsAgentThinkEndpoint": ".v1",
+ "AgentV1SettingsAgentThinkEndpointParams": ".v1",
+ "AgentV1SettingsAgentThinkFunctionsItem": ".v1",
+ "AgentV1SettingsAgentThinkFunctionsItemEndpoint": ".v1",
+ "AgentV1SettingsAgentThinkFunctionsItemEndpointParams": ".v1",
+ "AgentV1SettingsAgentThinkFunctionsItemParams": ".v1",
+ "AgentV1SettingsAgentThinkParams": ".v1",
+ "AgentV1SettingsAgentThinkProvider": ".v1",
+ "AgentV1SettingsAgentThinkProviderAnthropicModel": ".v1",
+ "AgentV1SettingsAgentThinkProviderAwsBedrock": ".v1",
+ "AgentV1SettingsAgentThinkProviderAwsBedrockCredentials": ".v1",
+ "AgentV1SettingsAgentThinkProviderAwsBedrockCredentialsParams": ".v1",
+ "AgentV1SettingsAgentThinkProviderAwsBedrockCredentialsType": ".v1",
+ "AgentV1SettingsAgentThinkProviderAwsBedrockModel": ".v1",
+ "AgentV1SettingsAgentThinkProviderAwsBedrockParams": ".v1",
+ "AgentV1SettingsAgentThinkProviderGoogleModel": ".v1",
+ "AgentV1SettingsAgentThinkProviderOpenAiModel": ".v1",
+ "AgentV1SettingsAgentThinkProviderParams": ".v1",
+ "AgentV1SettingsAgentThinkProvider_Anthropic": ".v1",
+ "AgentV1SettingsAgentThinkProvider_AnthropicParams": ".v1",
+ "AgentV1SettingsAgentThinkProvider_AwsBedrock": ".v1",
+ "AgentV1SettingsAgentThinkProvider_AwsBedrockParams": ".v1",
+ "AgentV1SettingsAgentThinkProvider_Google": ".v1",
+ "AgentV1SettingsAgentThinkProvider_GoogleParams": ".v1",
+ "AgentV1SettingsAgentThinkProvider_Groq": ".v1",
+ "AgentV1SettingsAgentThinkProvider_GroqParams": ".v1",
+ "AgentV1SettingsAgentThinkProvider_OpenAi": ".v1",
+ "AgentV1SettingsAgentThinkProvider_OpenAiParams": ".v1",
+ "AgentV1SettingsApplied": ".v1",
+ "AgentV1SettingsAppliedParams": ".v1",
+ "AgentV1SettingsAudio": ".v1",
+ "AgentV1SettingsAudioInput": ".v1",
+ "AgentV1SettingsAudioInputEncoding": ".v1",
+ "AgentV1SettingsAudioInputParams": ".v1",
+ "AgentV1SettingsAudioOutput": ".v1",
+ "AgentV1SettingsAudioOutputEncoding": ".v1",
+ "AgentV1SettingsAudioOutputParams": ".v1",
+ "AgentV1SettingsAudioParams": ".v1",
+ "AgentV1SettingsFlags": ".v1",
+ "AgentV1SettingsFlagsParams": ".v1",
+ "AgentV1SettingsParams": ".v1",
+ "AgentV1SpeakUpdated": ".v1",
+ "AgentV1SpeakUpdatedParams": ".v1",
+ "AgentV1UpdatePrompt": ".v1",
+ "AgentV1UpdatePromptParams": ".v1",
+ "AgentV1UpdateSpeak": ".v1",
+ "AgentV1UpdateSpeakParams": ".v1",
+ "AgentV1UpdateSpeakSpeak": ".v1",
+ "AgentV1UpdateSpeakSpeakEndpoint": ".v1",
+ "AgentV1UpdateSpeakSpeakEndpointParams": ".v1",
+ "AgentV1UpdateSpeakSpeakParams": ".v1",
+ "AgentV1UpdateSpeakSpeakProvider": ".v1",
+ "AgentV1UpdateSpeakSpeakProviderAwsPolly": ".v1",
+ "AgentV1UpdateSpeakSpeakProviderAwsPollyCredentials": ".v1",
+ "AgentV1UpdateSpeakSpeakProviderAwsPollyCredentialsParams": ".v1",
+ "AgentV1UpdateSpeakSpeakProviderAwsPollyCredentialsType": ".v1",
+ "AgentV1UpdateSpeakSpeakProviderAwsPollyEngine": ".v1",
+ "AgentV1UpdateSpeakSpeakProviderAwsPollyParams": ".v1",
+ "AgentV1UpdateSpeakSpeakProviderAwsPollyVoice": ".v1",
+ "AgentV1UpdateSpeakSpeakProviderCartesia": ".v1",
+ "AgentV1UpdateSpeakSpeakProviderCartesiaModelId": ".v1",
+ "AgentV1UpdateSpeakSpeakProviderCartesiaParams": ".v1",
+ "AgentV1UpdateSpeakSpeakProviderCartesiaVoice": ".v1",
+ "AgentV1UpdateSpeakSpeakProviderCartesiaVoiceParams": ".v1",
+ "AgentV1UpdateSpeakSpeakProviderDeepgram": ".v1",
+ "AgentV1UpdateSpeakSpeakProviderDeepgramModel": ".v1",
+ "AgentV1UpdateSpeakSpeakProviderDeepgramParams": ".v1",
+ "AgentV1UpdateSpeakSpeakProviderElevenLabs": ".v1",
+ "AgentV1UpdateSpeakSpeakProviderElevenLabsModelId": ".v1",
+ "AgentV1UpdateSpeakSpeakProviderElevenLabsParams": ".v1",
+ "AgentV1UpdateSpeakSpeakProviderOpenAi": ".v1",
+ "AgentV1UpdateSpeakSpeakProviderOpenAiModel": ".v1",
+ "AgentV1UpdateSpeakSpeakProviderOpenAiParams": ".v1",
+ "AgentV1UpdateSpeakSpeakProviderOpenAiVoice": ".v1",
+ "AgentV1UpdateSpeakSpeakProviderParams": ".v1",
+ "AgentV1UpdateSpeakSpeakProvider_AwsPolly": ".v1",
+ "AgentV1UpdateSpeakSpeakProvider_AwsPollyParams": ".v1",
+ "AgentV1UpdateSpeakSpeakProvider_Cartesia": ".v1",
+ "AgentV1UpdateSpeakSpeakProvider_CartesiaParams": ".v1",
+ "AgentV1UpdateSpeakSpeakProvider_Deepgram": ".v1",
+ "AgentV1UpdateSpeakSpeakProvider_DeepgramParams": ".v1",
+ "AgentV1UpdateSpeakSpeakProvider_ElevenLabs": ".v1",
+ "AgentV1UpdateSpeakSpeakProvider_ElevenLabsParams": ".v1",
+ "AgentV1UpdateSpeakSpeakProvider_OpenAi": ".v1",
+ "AgentV1UpdateSpeakSpeakProvider_OpenAiParams": ".v1",
+ "AgentV1UserStartedSpeaking": ".v1",
+ "AgentV1UserStartedSpeakingParams": ".v1",
+ "AgentV1Warning": ".v1",
+ "AgentV1WarningParams": ".v1",
+ "AgentV1Welcome": ".v1",
+ "AgentV1WelcomeParams": ".v1",
+ "Anthropic": ".v1",
+ "AnthropicParams": ".v1",
+ "Cartesia": ".v1",
+ "CartesiaParams": ".v1",
+ "Deepgram": ".v1",
+ "DeepgramParams": ".v1",
+ "ElevenLabs": ".v1",
+ "ElevenLabsParams": ".v1",
+ "Google": ".v1",
+ "GoogleParams": ".v1",
+ "Groq": ".v1",
+ "GroqParams": ".v1",
+ "OpenAi": ".v1",
+ "OpenAiParams": ".v1",
+ "v1": ".v1",
+}
def __getattr__(attr_name: str) -> typing.Any:
@@ -31,4 +485,231 @@ def __dir__():
return sorted(lazy_attrs)
-__all__ = ["v1"]
+__all__ = [
+ "AgentV1AgentAudioDone",
+ "AgentV1AgentAudioDoneParams",
+ "AgentV1AgentStartedSpeaking",
+ "AgentV1AgentStartedSpeakingParams",
+ "AgentV1AgentThinking",
+ "AgentV1AgentThinkingParams",
+ "AgentV1ConversationText",
+ "AgentV1ConversationTextParams",
+ "AgentV1ConversationTextRole",
+ "AgentV1Error",
+ "AgentV1ErrorParams",
+ "AgentV1FunctionCallRequest",
+ "AgentV1FunctionCallRequestFunctionsItem",
+ "AgentV1FunctionCallRequestFunctionsItemParams",
+ "AgentV1FunctionCallRequestParams",
+ "AgentV1InjectAgentMessage",
+ "AgentV1InjectAgentMessageParams",
+ "AgentV1InjectUserMessage",
+ "AgentV1InjectUserMessageParams",
+ "AgentV1InjectionRefused",
+ "AgentV1InjectionRefusedParams",
+ "AgentV1KeepAlive",
+ "AgentV1KeepAliveParams",
+ "AgentV1PromptUpdated",
+ "AgentV1PromptUpdatedParams",
+ "AgentV1ReceiveFunctionCallResponse",
+ "AgentV1ReceiveFunctionCallResponseParams",
+ "AgentV1SendFunctionCallResponse",
+ "AgentV1SendFunctionCallResponseParams",
+ "AgentV1Settings",
+ "AgentV1SettingsAgent",
+ "AgentV1SettingsAgentContext",
+ "AgentV1SettingsAgentContextMessagesItem",
+ "AgentV1SettingsAgentContextMessagesItemContent",
+ "AgentV1SettingsAgentContextMessagesItemContentParams",
+ "AgentV1SettingsAgentContextMessagesItemContentRole",
+ "AgentV1SettingsAgentContextMessagesItemFunctionCalls",
+ "AgentV1SettingsAgentContextMessagesItemFunctionCallsFunctionCallsItem",
+ "AgentV1SettingsAgentContextMessagesItemFunctionCallsFunctionCallsItemParams",
+ "AgentV1SettingsAgentContextMessagesItemFunctionCallsParams",
+ "AgentV1SettingsAgentContextMessagesItemParams",
+ "AgentV1SettingsAgentContextParams",
+ "AgentV1SettingsAgentListen",
+ "AgentV1SettingsAgentListenParams",
+ "AgentV1SettingsAgentListenProvider",
+ "AgentV1SettingsAgentListenProviderParams",
+ "AgentV1SettingsAgentListenProviderV1",
+ "AgentV1SettingsAgentListenProviderV1Params",
+ "AgentV1SettingsAgentListenProviderV2",
+ "AgentV1SettingsAgentListenProviderV2Params",
+ "AgentV1SettingsAgentListenProvider_V1",
+ "AgentV1SettingsAgentListenProvider_V1Params",
+ "AgentV1SettingsAgentListenProvider_V2",
+ "AgentV1SettingsAgentListenProvider_V2Params",
+ "AgentV1SettingsAgentParams",
+ "AgentV1SettingsAgentSpeak",
+ "AgentV1SettingsAgentSpeakEndpoint",
+ "AgentV1SettingsAgentSpeakEndpointEndpoint",
+ "AgentV1SettingsAgentSpeakEndpointEndpointParams",
+ "AgentV1SettingsAgentSpeakEndpointParams",
+ "AgentV1SettingsAgentSpeakEndpointProvider",
+ "AgentV1SettingsAgentSpeakEndpointProviderAwsPolly",
+ "AgentV1SettingsAgentSpeakEndpointProviderAwsPollyCredentials",
+ "AgentV1SettingsAgentSpeakEndpointProviderAwsPollyCredentialsParams",
+ "AgentV1SettingsAgentSpeakEndpointProviderAwsPollyCredentialsType",
+ "AgentV1SettingsAgentSpeakEndpointProviderAwsPollyEngine",
+ "AgentV1SettingsAgentSpeakEndpointProviderAwsPollyParams",
+ "AgentV1SettingsAgentSpeakEndpointProviderAwsPollyVoice",
+ "AgentV1SettingsAgentSpeakEndpointProviderCartesiaModelId",
+ "AgentV1SettingsAgentSpeakEndpointProviderCartesiaVoice",
+ "AgentV1SettingsAgentSpeakEndpointProviderCartesiaVoiceParams",
+ "AgentV1SettingsAgentSpeakEndpointProviderDeepgramModel",
+ "AgentV1SettingsAgentSpeakEndpointProviderElevenLabsModelId",
+ "AgentV1SettingsAgentSpeakEndpointProviderOpenAiModel",
+ "AgentV1SettingsAgentSpeakEndpointProviderOpenAiVoice",
+ "AgentV1SettingsAgentSpeakEndpointProviderParams",
+ "AgentV1SettingsAgentSpeakEndpointProvider_AwsPolly",
+ "AgentV1SettingsAgentSpeakEndpointProvider_AwsPollyParams",
+ "AgentV1SettingsAgentSpeakEndpointProvider_Cartesia",
+ "AgentV1SettingsAgentSpeakEndpointProvider_CartesiaParams",
+ "AgentV1SettingsAgentSpeakEndpointProvider_Deepgram",
+ "AgentV1SettingsAgentSpeakEndpointProvider_DeepgramParams",
+ "AgentV1SettingsAgentSpeakEndpointProvider_ElevenLabs",
+ "AgentV1SettingsAgentSpeakEndpointProvider_ElevenLabsParams",
+ "AgentV1SettingsAgentSpeakEndpointProvider_OpenAi",
+ "AgentV1SettingsAgentSpeakEndpointProvider_OpenAiParams",
+ "AgentV1SettingsAgentSpeakOneItem",
+ "AgentV1SettingsAgentSpeakOneItemEndpoint",
+ "AgentV1SettingsAgentSpeakOneItemEndpointParams",
+ "AgentV1SettingsAgentSpeakOneItemParams",
+ "AgentV1SettingsAgentSpeakOneItemProvider",
+ "AgentV1SettingsAgentSpeakOneItemProviderAwsPolly",
+ "AgentV1SettingsAgentSpeakOneItemProviderAwsPollyCredentials",
+ "AgentV1SettingsAgentSpeakOneItemProviderAwsPollyCredentialsParams",
+ "AgentV1SettingsAgentSpeakOneItemProviderAwsPollyCredentialsType",
+ "AgentV1SettingsAgentSpeakOneItemProviderAwsPollyEngine",
+ "AgentV1SettingsAgentSpeakOneItemProviderAwsPollyParams",
+ "AgentV1SettingsAgentSpeakOneItemProviderAwsPollyVoice",
+ "AgentV1SettingsAgentSpeakOneItemProviderCartesiaModelId",
+ "AgentV1SettingsAgentSpeakOneItemProviderCartesiaVoice",
+ "AgentV1SettingsAgentSpeakOneItemProviderCartesiaVoiceParams",
+ "AgentV1SettingsAgentSpeakOneItemProviderDeepgramModel",
+ "AgentV1SettingsAgentSpeakOneItemProviderElevenLabsModelId",
+ "AgentV1SettingsAgentSpeakOneItemProviderOpenAiModel",
+ "AgentV1SettingsAgentSpeakOneItemProviderOpenAiVoice",
+ "AgentV1SettingsAgentSpeakOneItemProviderParams",
+ "AgentV1SettingsAgentSpeakOneItemProvider_AwsPolly",
+ "AgentV1SettingsAgentSpeakOneItemProvider_AwsPollyParams",
+ "AgentV1SettingsAgentSpeakOneItemProvider_Cartesia",
+ "AgentV1SettingsAgentSpeakOneItemProvider_CartesiaParams",
+ "AgentV1SettingsAgentSpeakOneItemProvider_Deepgram",
+ "AgentV1SettingsAgentSpeakOneItemProvider_DeepgramParams",
+ "AgentV1SettingsAgentSpeakOneItemProvider_ElevenLabs",
+ "AgentV1SettingsAgentSpeakOneItemProvider_ElevenLabsParams",
+ "AgentV1SettingsAgentSpeakOneItemProvider_OpenAi",
+ "AgentV1SettingsAgentSpeakOneItemProvider_OpenAiParams",
+ "AgentV1SettingsAgentSpeakParams",
+ "AgentV1SettingsAgentThink",
+ "AgentV1SettingsAgentThinkContextLength",
+ "AgentV1SettingsAgentThinkContextLengthParams",
+ "AgentV1SettingsAgentThinkEndpoint",
+ "AgentV1SettingsAgentThinkEndpointParams",
+ "AgentV1SettingsAgentThinkFunctionsItem",
+ "AgentV1SettingsAgentThinkFunctionsItemEndpoint",
+ "AgentV1SettingsAgentThinkFunctionsItemEndpointParams",
+ "AgentV1SettingsAgentThinkFunctionsItemParams",
+ "AgentV1SettingsAgentThinkParams",
+ "AgentV1SettingsAgentThinkProvider",
+ "AgentV1SettingsAgentThinkProviderAnthropicModel",
+ "AgentV1SettingsAgentThinkProviderAwsBedrock",
+ "AgentV1SettingsAgentThinkProviderAwsBedrockCredentials",
+ "AgentV1SettingsAgentThinkProviderAwsBedrockCredentialsParams",
+ "AgentV1SettingsAgentThinkProviderAwsBedrockCredentialsType",
+ "AgentV1SettingsAgentThinkProviderAwsBedrockModel",
+ "AgentV1SettingsAgentThinkProviderAwsBedrockParams",
+ "AgentV1SettingsAgentThinkProviderGoogleModel",
+ "AgentV1SettingsAgentThinkProviderOpenAiModel",
+ "AgentV1SettingsAgentThinkProviderParams",
+ "AgentV1SettingsAgentThinkProvider_Anthropic",
+ "AgentV1SettingsAgentThinkProvider_AnthropicParams",
+ "AgentV1SettingsAgentThinkProvider_AwsBedrock",
+ "AgentV1SettingsAgentThinkProvider_AwsBedrockParams",
+ "AgentV1SettingsAgentThinkProvider_Google",
+ "AgentV1SettingsAgentThinkProvider_GoogleParams",
+ "AgentV1SettingsAgentThinkProvider_Groq",
+ "AgentV1SettingsAgentThinkProvider_GroqParams",
+ "AgentV1SettingsAgentThinkProvider_OpenAi",
+ "AgentV1SettingsAgentThinkProvider_OpenAiParams",
+ "AgentV1SettingsApplied",
+ "AgentV1SettingsAppliedParams",
+ "AgentV1SettingsAudio",
+ "AgentV1SettingsAudioInput",
+ "AgentV1SettingsAudioInputEncoding",
+ "AgentV1SettingsAudioInputParams",
+ "AgentV1SettingsAudioOutput",
+ "AgentV1SettingsAudioOutputEncoding",
+ "AgentV1SettingsAudioOutputParams",
+ "AgentV1SettingsAudioParams",
+ "AgentV1SettingsFlags",
+ "AgentV1SettingsFlagsParams",
+ "AgentV1SettingsParams",
+ "AgentV1SpeakUpdated",
+ "AgentV1SpeakUpdatedParams",
+ "AgentV1UpdatePrompt",
+ "AgentV1UpdatePromptParams",
+ "AgentV1UpdateSpeak",
+ "AgentV1UpdateSpeakParams",
+ "AgentV1UpdateSpeakSpeak",
+ "AgentV1UpdateSpeakSpeakEndpoint",
+ "AgentV1UpdateSpeakSpeakEndpointParams",
+ "AgentV1UpdateSpeakSpeakParams",
+ "AgentV1UpdateSpeakSpeakProvider",
+ "AgentV1UpdateSpeakSpeakProviderAwsPolly",
+ "AgentV1UpdateSpeakSpeakProviderAwsPollyCredentials",
+ "AgentV1UpdateSpeakSpeakProviderAwsPollyCredentialsParams",
+ "AgentV1UpdateSpeakSpeakProviderAwsPollyCredentialsType",
+ "AgentV1UpdateSpeakSpeakProviderAwsPollyEngine",
+ "AgentV1UpdateSpeakSpeakProviderAwsPollyParams",
+ "AgentV1UpdateSpeakSpeakProviderAwsPollyVoice",
+ "AgentV1UpdateSpeakSpeakProviderCartesia",
+ "AgentV1UpdateSpeakSpeakProviderCartesiaModelId",
+ "AgentV1UpdateSpeakSpeakProviderCartesiaParams",
+ "AgentV1UpdateSpeakSpeakProviderCartesiaVoice",
+ "AgentV1UpdateSpeakSpeakProviderCartesiaVoiceParams",
+ "AgentV1UpdateSpeakSpeakProviderDeepgram",
+ "AgentV1UpdateSpeakSpeakProviderDeepgramModel",
+ "AgentV1UpdateSpeakSpeakProviderDeepgramParams",
+ "AgentV1UpdateSpeakSpeakProviderElevenLabs",
+ "AgentV1UpdateSpeakSpeakProviderElevenLabsModelId",
+ "AgentV1UpdateSpeakSpeakProviderElevenLabsParams",
+ "AgentV1UpdateSpeakSpeakProviderOpenAi",
+ "AgentV1UpdateSpeakSpeakProviderOpenAiModel",
+ "AgentV1UpdateSpeakSpeakProviderOpenAiParams",
+ "AgentV1UpdateSpeakSpeakProviderOpenAiVoice",
+ "AgentV1UpdateSpeakSpeakProviderParams",
+ "AgentV1UpdateSpeakSpeakProvider_AwsPolly",
+ "AgentV1UpdateSpeakSpeakProvider_AwsPollyParams",
+ "AgentV1UpdateSpeakSpeakProvider_Cartesia",
+ "AgentV1UpdateSpeakSpeakProvider_CartesiaParams",
+ "AgentV1UpdateSpeakSpeakProvider_Deepgram",
+ "AgentV1UpdateSpeakSpeakProvider_DeepgramParams",
+ "AgentV1UpdateSpeakSpeakProvider_ElevenLabs",
+ "AgentV1UpdateSpeakSpeakProvider_ElevenLabsParams",
+ "AgentV1UpdateSpeakSpeakProvider_OpenAi",
+ "AgentV1UpdateSpeakSpeakProvider_OpenAiParams",
+ "AgentV1UserStartedSpeaking",
+ "AgentV1UserStartedSpeakingParams",
+ "AgentV1Warning",
+ "AgentV1WarningParams",
+ "AgentV1Welcome",
+ "AgentV1WelcomeParams",
+ "Anthropic",
+ "AnthropicParams",
+ "Cartesia",
+ "CartesiaParams",
+ "Deepgram",
+ "DeepgramParams",
+ "ElevenLabs",
+ "ElevenLabsParams",
+ "Google",
+ "GoogleParams",
+ "Groq",
+ "GroqParams",
+ "OpenAi",
+ "OpenAiParams",
+ "v1",
+]
diff --git a/src/deepgram/agent/v1/__init__.py b/src/deepgram/agent/v1/__init__.py
index 31fcb147..2c4025de 100644
--- a/src/deepgram/agent/v1/__init__.py
+++ b/src/deepgram/agent/v1/__init__.py
@@ -6,8 +6,464 @@
from importlib import import_module
if typing.TYPE_CHECKING:
+ from .types import (
+ AgentV1AgentAudioDone,
+ AgentV1AgentStartedSpeaking,
+ AgentV1AgentThinking,
+ AgentV1ConversationText,
+ AgentV1ConversationTextRole,
+ AgentV1Error,
+ AgentV1FunctionCallRequest,
+ AgentV1FunctionCallRequestFunctionsItem,
+ AgentV1InjectAgentMessage,
+ AgentV1InjectUserMessage,
+ AgentV1InjectionRefused,
+ AgentV1KeepAlive,
+ AgentV1PromptUpdated,
+ AgentV1ReceiveFunctionCallResponse,
+ AgentV1SendFunctionCallResponse,
+ AgentV1Settings,
+ AgentV1SettingsAgent,
+ AgentV1SettingsAgentContext,
+ AgentV1SettingsAgentContextMessagesItem,
+ AgentV1SettingsAgentContextMessagesItemContent,
+ AgentV1SettingsAgentContextMessagesItemContentRole,
+ AgentV1SettingsAgentContextMessagesItemFunctionCalls,
+ AgentV1SettingsAgentContextMessagesItemFunctionCallsFunctionCallsItem,
+ AgentV1SettingsAgentListen,
+ AgentV1SettingsAgentListenProvider,
+ AgentV1SettingsAgentListenProviderV1,
+ AgentV1SettingsAgentListenProviderV2,
+ AgentV1SettingsAgentListenProvider_V1,
+ AgentV1SettingsAgentListenProvider_V2,
+ AgentV1SettingsAgentSpeak,
+ AgentV1SettingsAgentSpeakEndpoint,
+ AgentV1SettingsAgentSpeakEndpointEndpoint,
+ AgentV1SettingsAgentSpeakEndpointProvider,
+ AgentV1SettingsAgentSpeakEndpointProviderAwsPolly,
+ AgentV1SettingsAgentSpeakEndpointProviderAwsPollyCredentials,
+ AgentV1SettingsAgentSpeakEndpointProviderAwsPollyCredentialsType,
+ AgentV1SettingsAgentSpeakEndpointProviderAwsPollyEngine,
+ AgentV1SettingsAgentSpeakEndpointProviderAwsPollyVoice,
+ AgentV1SettingsAgentSpeakEndpointProviderCartesiaModelId,
+ AgentV1SettingsAgentSpeakEndpointProviderCartesiaVoice,
+ AgentV1SettingsAgentSpeakEndpointProviderDeepgramModel,
+ AgentV1SettingsAgentSpeakEndpointProviderElevenLabsModelId,
+ AgentV1SettingsAgentSpeakEndpointProviderOpenAiModel,
+ AgentV1SettingsAgentSpeakEndpointProviderOpenAiVoice,
+ AgentV1SettingsAgentSpeakEndpointProvider_AwsPolly,
+ AgentV1SettingsAgentSpeakEndpointProvider_Cartesia,
+ AgentV1SettingsAgentSpeakEndpointProvider_Deepgram,
+ AgentV1SettingsAgentSpeakEndpointProvider_ElevenLabs,
+ AgentV1SettingsAgentSpeakEndpointProvider_OpenAi,
+ AgentV1SettingsAgentSpeakOneItem,
+ AgentV1SettingsAgentSpeakOneItemEndpoint,
+ AgentV1SettingsAgentSpeakOneItemProvider,
+ AgentV1SettingsAgentSpeakOneItemProviderAwsPolly,
+ AgentV1SettingsAgentSpeakOneItemProviderAwsPollyCredentials,
+ AgentV1SettingsAgentSpeakOneItemProviderAwsPollyCredentialsType,
+ AgentV1SettingsAgentSpeakOneItemProviderAwsPollyEngine,
+ AgentV1SettingsAgentSpeakOneItemProviderAwsPollyVoice,
+ AgentV1SettingsAgentSpeakOneItemProviderCartesiaModelId,
+ AgentV1SettingsAgentSpeakOneItemProviderCartesiaVoice,
+ AgentV1SettingsAgentSpeakOneItemProviderDeepgramModel,
+ AgentV1SettingsAgentSpeakOneItemProviderElevenLabsModelId,
+ AgentV1SettingsAgentSpeakOneItemProviderOpenAiModel,
+ AgentV1SettingsAgentSpeakOneItemProviderOpenAiVoice,
+ AgentV1SettingsAgentSpeakOneItemProvider_AwsPolly,
+ AgentV1SettingsAgentSpeakOneItemProvider_Cartesia,
+ AgentV1SettingsAgentSpeakOneItemProvider_Deepgram,
+ AgentV1SettingsAgentSpeakOneItemProvider_ElevenLabs,
+ AgentV1SettingsAgentSpeakOneItemProvider_OpenAi,
+ AgentV1SettingsAgentThink,
+ AgentV1SettingsAgentThinkContextLength,
+ AgentV1SettingsAgentThinkEndpoint,
+ AgentV1SettingsAgentThinkFunctionsItem,
+ AgentV1SettingsAgentThinkFunctionsItemEndpoint,
+ AgentV1SettingsAgentThinkProvider,
+ AgentV1SettingsAgentThinkProviderAnthropicModel,
+ AgentV1SettingsAgentThinkProviderAwsBedrock,
+ AgentV1SettingsAgentThinkProviderAwsBedrockCredentials,
+ AgentV1SettingsAgentThinkProviderAwsBedrockCredentialsType,
+ AgentV1SettingsAgentThinkProviderAwsBedrockModel,
+ AgentV1SettingsAgentThinkProviderGoogleModel,
+ AgentV1SettingsAgentThinkProviderOpenAiModel,
+ AgentV1SettingsAgentThinkProvider_Anthropic,
+ AgentV1SettingsAgentThinkProvider_AwsBedrock,
+ AgentV1SettingsAgentThinkProvider_Google,
+ AgentV1SettingsAgentThinkProvider_Groq,
+ AgentV1SettingsAgentThinkProvider_OpenAi,
+ AgentV1SettingsApplied,
+ AgentV1SettingsAudio,
+ AgentV1SettingsAudioInput,
+ AgentV1SettingsAudioInputEncoding,
+ AgentV1SettingsAudioOutput,
+ AgentV1SettingsAudioOutputEncoding,
+ AgentV1SettingsFlags,
+ AgentV1SpeakUpdated,
+ AgentV1UpdatePrompt,
+ AgentV1UpdateSpeak,
+ AgentV1UpdateSpeakSpeak,
+ AgentV1UpdateSpeakSpeakEndpoint,
+ AgentV1UpdateSpeakSpeakProvider,
+ AgentV1UpdateSpeakSpeakProviderAwsPolly,
+ AgentV1UpdateSpeakSpeakProviderAwsPollyCredentials,
+ AgentV1UpdateSpeakSpeakProviderAwsPollyCredentialsType,
+ AgentV1UpdateSpeakSpeakProviderAwsPollyEngine,
+ AgentV1UpdateSpeakSpeakProviderAwsPollyVoice,
+ AgentV1UpdateSpeakSpeakProviderCartesia,
+ AgentV1UpdateSpeakSpeakProviderCartesiaModelId,
+ AgentV1UpdateSpeakSpeakProviderCartesiaVoice,
+ AgentV1UpdateSpeakSpeakProviderDeepgram,
+ AgentV1UpdateSpeakSpeakProviderDeepgramModel,
+ AgentV1UpdateSpeakSpeakProviderElevenLabs,
+ AgentV1UpdateSpeakSpeakProviderElevenLabsModelId,
+ AgentV1UpdateSpeakSpeakProviderOpenAi,
+ AgentV1UpdateSpeakSpeakProviderOpenAiModel,
+ AgentV1UpdateSpeakSpeakProviderOpenAiVoice,
+ AgentV1UpdateSpeakSpeakProvider_AwsPolly,
+ AgentV1UpdateSpeakSpeakProvider_Cartesia,
+ AgentV1UpdateSpeakSpeakProvider_Deepgram,
+ AgentV1UpdateSpeakSpeakProvider_ElevenLabs,
+ AgentV1UpdateSpeakSpeakProvider_OpenAi,
+ AgentV1UserStartedSpeaking,
+ AgentV1Warning,
+ AgentV1Welcome,
+ Anthropic,
+ Cartesia,
+ Deepgram,
+ ElevenLabs,
+ Google,
+ Groq,
+ OpenAi,
+ )
from . import settings
-_dynamic_imports: typing.Dict[str, str] = {"settings": ".settings"}
+ from .requests import (
+ AgentV1AgentAudioDoneParams,
+ AgentV1AgentStartedSpeakingParams,
+ AgentV1AgentThinkingParams,
+ AgentV1ConversationTextParams,
+ AgentV1ErrorParams,
+ AgentV1FunctionCallRequestFunctionsItemParams,
+ AgentV1FunctionCallRequestParams,
+ AgentV1InjectAgentMessageParams,
+ AgentV1InjectUserMessageParams,
+ AgentV1InjectionRefusedParams,
+ AgentV1KeepAliveParams,
+ AgentV1PromptUpdatedParams,
+ AgentV1ReceiveFunctionCallResponseParams,
+ AgentV1SendFunctionCallResponseParams,
+ AgentV1SettingsAgentContextMessagesItemContentParams,
+ AgentV1SettingsAgentContextMessagesItemFunctionCallsFunctionCallsItemParams,
+ AgentV1SettingsAgentContextMessagesItemFunctionCallsParams,
+ AgentV1SettingsAgentContextMessagesItemParams,
+ AgentV1SettingsAgentContextParams,
+ AgentV1SettingsAgentListenParams,
+ AgentV1SettingsAgentListenProviderParams,
+ AgentV1SettingsAgentListenProviderV1Params,
+ AgentV1SettingsAgentListenProviderV2Params,
+ AgentV1SettingsAgentListenProvider_V1Params,
+ AgentV1SettingsAgentListenProvider_V2Params,
+ AgentV1SettingsAgentParams,
+ AgentV1SettingsAgentSpeakEndpointEndpointParams,
+ AgentV1SettingsAgentSpeakEndpointParams,
+ AgentV1SettingsAgentSpeakEndpointProviderAwsPollyCredentialsParams,
+ AgentV1SettingsAgentSpeakEndpointProviderAwsPollyParams,
+ AgentV1SettingsAgentSpeakEndpointProviderCartesiaVoiceParams,
+ AgentV1SettingsAgentSpeakEndpointProviderParams,
+ AgentV1SettingsAgentSpeakEndpointProvider_AwsPollyParams,
+ AgentV1SettingsAgentSpeakEndpointProvider_CartesiaParams,
+ AgentV1SettingsAgentSpeakEndpointProvider_DeepgramParams,
+ AgentV1SettingsAgentSpeakEndpointProvider_ElevenLabsParams,
+ AgentV1SettingsAgentSpeakEndpointProvider_OpenAiParams,
+ AgentV1SettingsAgentSpeakOneItemEndpointParams,
+ AgentV1SettingsAgentSpeakOneItemParams,
+ AgentV1SettingsAgentSpeakOneItemProviderAwsPollyCredentialsParams,
+ AgentV1SettingsAgentSpeakOneItemProviderAwsPollyParams,
+ AgentV1SettingsAgentSpeakOneItemProviderCartesiaVoiceParams,
+ AgentV1SettingsAgentSpeakOneItemProviderParams,
+ AgentV1SettingsAgentSpeakOneItemProvider_AwsPollyParams,
+ AgentV1SettingsAgentSpeakOneItemProvider_CartesiaParams,
+ AgentV1SettingsAgentSpeakOneItemProvider_DeepgramParams,
+ AgentV1SettingsAgentSpeakOneItemProvider_ElevenLabsParams,
+ AgentV1SettingsAgentSpeakOneItemProvider_OpenAiParams,
+ AgentV1SettingsAgentSpeakParams,
+ AgentV1SettingsAgentThinkContextLengthParams,
+ AgentV1SettingsAgentThinkEndpointParams,
+ AgentV1SettingsAgentThinkFunctionsItemEndpointParams,
+ AgentV1SettingsAgentThinkFunctionsItemParams,
+ AgentV1SettingsAgentThinkParams,
+ AgentV1SettingsAgentThinkProviderAwsBedrockCredentialsParams,
+ AgentV1SettingsAgentThinkProviderAwsBedrockParams,
+ AgentV1SettingsAgentThinkProviderParams,
+ AgentV1SettingsAgentThinkProvider_AnthropicParams,
+ AgentV1SettingsAgentThinkProvider_AwsBedrockParams,
+ AgentV1SettingsAgentThinkProvider_GoogleParams,
+ AgentV1SettingsAgentThinkProvider_GroqParams,
+ AgentV1SettingsAgentThinkProvider_OpenAiParams,
+ AgentV1SettingsAppliedParams,
+ AgentV1SettingsAudioInputParams,
+ AgentV1SettingsAudioOutputParams,
+ AgentV1SettingsAudioParams,
+ AgentV1SettingsFlagsParams,
+ AgentV1SettingsParams,
+ AgentV1SpeakUpdatedParams,
+ AgentV1UpdatePromptParams,
+ AgentV1UpdateSpeakParams,
+ AgentV1UpdateSpeakSpeakEndpointParams,
+ AgentV1UpdateSpeakSpeakParams,
+ AgentV1UpdateSpeakSpeakProviderAwsPollyCredentialsParams,
+ AgentV1UpdateSpeakSpeakProviderAwsPollyParams,
+ AgentV1UpdateSpeakSpeakProviderCartesiaParams,
+ AgentV1UpdateSpeakSpeakProviderCartesiaVoiceParams,
+ AgentV1UpdateSpeakSpeakProviderDeepgramParams,
+ AgentV1UpdateSpeakSpeakProviderElevenLabsParams,
+ AgentV1UpdateSpeakSpeakProviderOpenAiParams,
+ AgentV1UpdateSpeakSpeakProviderParams,
+ AgentV1UpdateSpeakSpeakProvider_AwsPollyParams,
+ AgentV1UpdateSpeakSpeakProvider_CartesiaParams,
+ AgentV1UpdateSpeakSpeakProvider_DeepgramParams,
+ AgentV1UpdateSpeakSpeakProvider_ElevenLabsParams,
+ AgentV1UpdateSpeakSpeakProvider_OpenAiParams,
+ AgentV1UserStartedSpeakingParams,
+ AgentV1WarningParams,
+ AgentV1WelcomeParams,
+ AnthropicParams,
+ CartesiaParams,
+ DeepgramParams,
+ ElevenLabsParams,
+ GoogleParams,
+ GroqParams,
+ OpenAiParams,
+ )
+_dynamic_imports: typing.Dict[str, str] = {
+ "AgentV1AgentAudioDone": ".types",
+ "AgentV1AgentAudioDoneParams": ".requests",
+ "AgentV1AgentStartedSpeaking": ".types",
+ "AgentV1AgentStartedSpeakingParams": ".requests",
+ "AgentV1AgentThinking": ".types",
+ "AgentV1AgentThinkingParams": ".requests",
+ "AgentV1ConversationText": ".types",
+ "AgentV1ConversationTextParams": ".requests",
+ "AgentV1ConversationTextRole": ".types",
+ "AgentV1Error": ".types",
+ "AgentV1ErrorParams": ".requests",
+ "AgentV1FunctionCallRequest": ".types",
+ "AgentV1FunctionCallRequestFunctionsItem": ".types",
+ "AgentV1FunctionCallRequestFunctionsItemParams": ".requests",
+ "AgentV1FunctionCallRequestParams": ".requests",
+ "AgentV1InjectAgentMessage": ".types",
+ "AgentV1InjectAgentMessageParams": ".requests",
+ "AgentV1InjectUserMessage": ".types",
+ "AgentV1InjectUserMessageParams": ".requests",
+ "AgentV1InjectionRefused": ".types",
+ "AgentV1InjectionRefusedParams": ".requests",
+ "AgentV1KeepAlive": ".types",
+ "AgentV1KeepAliveParams": ".requests",
+ "AgentV1PromptUpdated": ".types",
+ "AgentV1PromptUpdatedParams": ".requests",
+ "AgentV1ReceiveFunctionCallResponse": ".types",
+ "AgentV1ReceiveFunctionCallResponseParams": ".requests",
+ "AgentV1SendFunctionCallResponse": ".types",
+ "AgentV1SendFunctionCallResponseParams": ".requests",
+ "AgentV1Settings": ".types",
+ "AgentV1SettingsAgent": ".types",
+ "AgentV1SettingsAgentContext": ".types",
+ "AgentV1SettingsAgentContextMessagesItem": ".types",
+ "AgentV1SettingsAgentContextMessagesItemContent": ".types",
+ "AgentV1SettingsAgentContextMessagesItemContentParams": ".requests",
+ "AgentV1SettingsAgentContextMessagesItemContentRole": ".types",
+ "AgentV1SettingsAgentContextMessagesItemFunctionCalls": ".types",
+ "AgentV1SettingsAgentContextMessagesItemFunctionCallsFunctionCallsItem": ".types",
+ "AgentV1SettingsAgentContextMessagesItemFunctionCallsFunctionCallsItemParams": ".requests",
+ "AgentV1SettingsAgentContextMessagesItemFunctionCallsParams": ".requests",
+ "AgentV1SettingsAgentContextMessagesItemParams": ".requests",
+ "AgentV1SettingsAgentContextParams": ".requests",
+ "AgentV1SettingsAgentListen": ".types",
+ "AgentV1SettingsAgentListenParams": ".requests",
+ "AgentV1SettingsAgentListenProvider": ".types",
+ "AgentV1SettingsAgentListenProviderParams": ".requests",
+ "AgentV1SettingsAgentListenProviderV1": ".types",
+ "AgentV1SettingsAgentListenProviderV1Params": ".requests",
+ "AgentV1SettingsAgentListenProviderV2": ".types",
+ "AgentV1SettingsAgentListenProviderV2Params": ".requests",
+ "AgentV1SettingsAgentListenProvider_V1": ".types",
+ "AgentV1SettingsAgentListenProvider_V1Params": ".requests",
+ "AgentV1SettingsAgentListenProvider_V2": ".types",
+ "AgentV1SettingsAgentListenProvider_V2Params": ".requests",
+ "AgentV1SettingsAgentParams": ".requests",
+ "AgentV1SettingsAgentSpeak": ".types",
+ "AgentV1SettingsAgentSpeakEndpoint": ".types",
+ "AgentV1SettingsAgentSpeakEndpointEndpoint": ".types",
+ "AgentV1SettingsAgentSpeakEndpointEndpointParams": ".requests",
+ "AgentV1SettingsAgentSpeakEndpointParams": ".requests",
+ "AgentV1SettingsAgentSpeakEndpointProvider": ".types",
+ "AgentV1SettingsAgentSpeakEndpointProviderAwsPolly": ".types",
+ "AgentV1SettingsAgentSpeakEndpointProviderAwsPollyCredentials": ".types",
+ "AgentV1SettingsAgentSpeakEndpointProviderAwsPollyCredentialsParams": ".requests",
+ "AgentV1SettingsAgentSpeakEndpointProviderAwsPollyCredentialsType": ".types",
+ "AgentV1SettingsAgentSpeakEndpointProviderAwsPollyEngine": ".types",
+ "AgentV1SettingsAgentSpeakEndpointProviderAwsPollyParams": ".requests",
+ "AgentV1SettingsAgentSpeakEndpointProviderAwsPollyVoice": ".types",
+ "AgentV1SettingsAgentSpeakEndpointProviderCartesiaModelId": ".types",
+ "AgentV1SettingsAgentSpeakEndpointProviderCartesiaVoice": ".types",
+ "AgentV1SettingsAgentSpeakEndpointProviderCartesiaVoiceParams": ".requests",
+ "AgentV1SettingsAgentSpeakEndpointProviderDeepgramModel": ".types",
+ "AgentV1SettingsAgentSpeakEndpointProviderElevenLabsModelId": ".types",
+ "AgentV1SettingsAgentSpeakEndpointProviderOpenAiModel": ".types",
+ "AgentV1SettingsAgentSpeakEndpointProviderOpenAiVoice": ".types",
+ "AgentV1SettingsAgentSpeakEndpointProviderParams": ".requests",
+ "AgentV1SettingsAgentSpeakEndpointProvider_AwsPolly": ".types",
+ "AgentV1SettingsAgentSpeakEndpointProvider_AwsPollyParams": ".requests",
+ "AgentV1SettingsAgentSpeakEndpointProvider_Cartesia": ".types",
+ "AgentV1SettingsAgentSpeakEndpointProvider_CartesiaParams": ".requests",
+ "AgentV1SettingsAgentSpeakEndpointProvider_Deepgram": ".types",
+ "AgentV1SettingsAgentSpeakEndpointProvider_DeepgramParams": ".requests",
+ "AgentV1SettingsAgentSpeakEndpointProvider_ElevenLabs": ".types",
+ "AgentV1SettingsAgentSpeakEndpointProvider_ElevenLabsParams": ".requests",
+ "AgentV1SettingsAgentSpeakEndpointProvider_OpenAi": ".types",
+ "AgentV1SettingsAgentSpeakEndpointProvider_OpenAiParams": ".requests",
+ "AgentV1SettingsAgentSpeakOneItem": ".types",
+ "AgentV1SettingsAgentSpeakOneItemEndpoint": ".types",
+ "AgentV1SettingsAgentSpeakOneItemEndpointParams": ".requests",
+ "AgentV1SettingsAgentSpeakOneItemParams": ".requests",
+ "AgentV1SettingsAgentSpeakOneItemProvider": ".types",
+ "AgentV1SettingsAgentSpeakOneItemProviderAwsPolly": ".types",
+ "AgentV1SettingsAgentSpeakOneItemProviderAwsPollyCredentials": ".types",
+ "AgentV1SettingsAgentSpeakOneItemProviderAwsPollyCredentialsParams": ".requests",
+ "AgentV1SettingsAgentSpeakOneItemProviderAwsPollyCredentialsType": ".types",
+ "AgentV1SettingsAgentSpeakOneItemProviderAwsPollyEngine": ".types",
+ "AgentV1SettingsAgentSpeakOneItemProviderAwsPollyParams": ".requests",
+ "AgentV1SettingsAgentSpeakOneItemProviderAwsPollyVoice": ".types",
+ "AgentV1SettingsAgentSpeakOneItemProviderCartesiaModelId": ".types",
+ "AgentV1SettingsAgentSpeakOneItemProviderCartesiaVoice": ".types",
+ "AgentV1SettingsAgentSpeakOneItemProviderCartesiaVoiceParams": ".requests",
+ "AgentV1SettingsAgentSpeakOneItemProviderDeepgramModel": ".types",
+ "AgentV1SettingsAgentSpeakOneItemProviderElevenLabsModelId": ".types",
+ "AgentV1SettingsAgentSpeakOneItemProviderOpenAiModel": ".types",
+ "AgentV1SettingsAgentSpeakOneItemProviderOpenAiVoice": ".types",
+ "AgentV1SettingsAgentSpeakOneItemProviderParams": ".requests",
+ "AgentV1SettingsAgentSpeakOneItemProvider_AwsPolly": ".types",
+ "AgentV1SettingsAgentSpeakOneItemProvider_AwsPollyParams": ".requests",
+ "AgentV1SettingsAgentSpeakOneItemProvider_Cartesia": ".types",
+ "AgentV1SettingsAgentSpeakOneItemProvider_CartesiaParams": ".requests",
+ "AgentV1SettingsAgentSpeakOneItemProvider_Deepgram": ".types",
+ "AgentV1SettingsAgentSpeakOneItemProvider_DeepgramParams": ".requests",
+ "AgentV1SettingsAgentSpeakOneItemProvider_ElevenLabs": ".types",
+ "AgentV1SettingsAgentSpeakOneItemProvider_ElevenLabsParams": ".requests",
+ "AgentV1SettingsAgentSpeakOneItemProvider_OpenAi": ".types",
+ "AgentV1SettingsAgentSpeakOneItemProvider_OpenAiParams": ".requests",
+ "AgentV1SettingsAgentSpeakParams": ".requests",
+ "AgentV1SettingsAgentThink": ".types",
+ "AgentV1SettingsAgentThinkContextLength": ".types",
+ "AgentV1SettingsAgentThinkContextLengthParams": ".requests",
+ "AgentV1SettingsAgentThinkEndpoint": ".types",
+ "AgentV1SettingsAgentThinkEndpointParams": ".requests",
+ "AgentV1SettingsAgentThinkFunctionsItem": ".types",
+ "AgentV1SettingsAgentThinkFunctionsItemEndpoint": ".types",
+ "AgentV1SettingsAgentThinkFunctionsItemEndpointParams": ".requests",
+ "AgentV1SettingsAgentThinkFunctionsItemParams": ".requests",
+ "AgentV1SettingsAgentThinkParams": ".requests",
+ "AgentV1SettingsAgentThinkProvider": ".types",
+ "AgentV1SettingsAgentThinkProviderAnthropicModel": ".types",
+ "AgentV1SettingsAgentThinkProviderAwsBedrock": ".types",
+ "AgentV1SettingsAgentThinkProviderAwsBedrockCredentials": ".types",
+ "AgentV1SettingsAgentThinkProviderAwsBedrockCredentialsParams": ".requests",
+ "AgentV1SettingsAgentThinkProviderAwsBedrockCredentialsType": ".types",
+ "AgentV1SettingsAgentThinkProviderAwsBedrockModel": ".types",
+ "AgentV1SettingsAgentThinkProviderAwsBedrockParams": ".requests",
+ "AgentV1SettingsAgentThinkProviderGoogleModel": ".types",
+ "AgentV1SettingsAgentThinkProviderOpenAiModel": ".types",
+ "AgentV1SettingsAgentThinkProviderParams": ".requests",
+ "AgentV1SettingsAgentThinkProvider_Anthropic": ".types",
+ "AgentV1SettingsAgentThinkProvider_AnthropicParams": ".requests",
+ "AgentV1SettingsAgentThinkProvider_AwsBedrock": ".types",
+ "AgentV1SettingsAgentThinkProvider_AwsBedrockParams": ".requests",
+ "AgentV1SettingsAgentThinkProvider_Google": ".types",
+ "AgentV1SettingsAgentThinkProvider_GoogleParams": ".requests",
+ "AgentV1SettingsAgentThinkProvider_Groq": ".types",
+ "AgentV1SettingsAgentThinkProvider_GroqParams": ".requests",
+ "AgentV1SettingsAgentThinkProvider_OpenAi": ".types",
+ "AgentV1SettingsAgentThinkProvider_OpenAiParams": ".requests",
+ "AgentV1SettingsApplied": ".types",
+ "AgentV1SettingsAppliedParams": ".requests",
+ "AgentV1SettingsAudio": ".types",
+ "AgentV1SettingsAudioInput": ".types",
+ "AgentV1SettingsAudioInputEncoding": ".types",
+ "AgentV1SettingsAudioInputParams": ".requests",
+ "AgentV1SettingsAudioOutput": ".types",
+ "AgentV1SettingsAudioOutputEncoding": ".types",
+ "AgentV1SettingsAudioOutputParams": ".requests",
+ "AgentV1SettingsAudioParams": ".requests",
+ "AgentV1SettingsFlags": ".types",
+ "AgentV1SettingsFlagsParams": ".requests",
+ "AgentV1SettingsParams": ".requests",
+ "AgentV1SpeakUpdated": ".types",
+ "AgentV1SpeakUpdatedParams": ".requests",
+ "AgentV1UpdatePrompt": ".types",
+ "AgentV1UpdatePromptParams": ".requests",
+ "AgentV1UpdateSpeak": ".types",
+ "AgentV1UpdateSpeakParams": ".requests",
+ "AgentV1UpdateSpeakSpeak": ".types",
+ "AgentV1UpdateSpeakSpeakEndpoint": ".types",
+ "AgentV1UpdateSpeakSpeakEndpointParams": ".requests",
+ "AgentV1UpdateSpeakSpeakParams": ".requests",
+ "AgentV1UpdateSpeakSpeakProvider": ".types",
+ "AgentV1UpdateSpeakSpeakProviderAwsPolly": ".types",
+ "AgentV1UpdateSpeakSpeakProviderAwsPollyCredentials": ".types",
+ "AgentV1UpdateSpeakSpeakProviderAwsPollyCredentialsParams": ".requests",
+ "AgentV1UpdateSpeakSpeakProviderAwsPollyCredentialsType": ".types",
+ "AgentV1UpdateSpeakSpeakProviderAwsPollyEngine": ".types",
+ "AgentV1UpdateSpeakSpeakProviderAwsPollyParams": ".requests",
+ "AgentV1UpdateSpeakSpeakProviderAwsPollyVoice": ".types",
+ "AgentV1UpdateSpeakSpeakProviderCartesia": ".types",
+ "AgentV1UpdateSpeakSpeakProviderCartesiaModelId": ".types",
+ "AgentV1UpdateSpeakSpeakProviderCartesiaParams": ".requests",
+ "AgentV1UpdateSpeakSpeakProviderCartesiaVoice": ".types",
+ "AgentV1UpdateSpeakSpeakProviderCartesiaVoiceParams": ".requests",
+ "AgentV1UpdateSpeakSpeakProviderDeepgram": ".types",
+ "AgentV1UpdateSpeakSpeakProviderDeepgramModel": ".types",
+ "AgentV1UpdateSpeakSpeakProviderDeepgramParams": ".requests",
+ "AgentV1UpdateSpeakSpeakProviderElevenLabs": ".types",
+ "AgentV1UpdateSpeakSpeakProviderElevenLabsModelId": ".types",
+ "AgentV1UpdateSpeakSpeakProviderElevenLabsParams": ".requests",
+ "AgentV1UpdateSpeakSpeakProviderOpenAi": ".types",
+ "AgentV1UpdateSpeakSpeakProviderOpenAiModel": ".types",
+ "AgentV1UpdateSpeakSpeakProviderOpenAiParams": ".requests",
+ "AgentV1UpdateSpeakSpeakProviderOpenAiVoice": ".types",
+ "AgentV1UpdateSpeakSpeakProviderParams": ".requests",
+ "AgentV1UpdateSpeakSpeakProvider_AwsPolly": ".types",
+ "AgentV1UpdateSpeakSpeakProvider_AwsPollyParams": ".requests",
+ "AgentV1UpdateSpeakSpeakProvider_Cartesia": ".types",
+ "AgentV1UpdateSpeakSpeakProvider_CartesiaParams": ".requests",
+ "AgentV1UpdateSpeakSpeakProvider_Deepgram": ".types",
+ "AgentV1UpdateSpeakSpeakProvider_DeepgramParams": ".requests",
+ "AgentV1UpdateSpeakSpeakProvider_ElevenLabs": ".types",
+ "AgentV1UpdateSpeakSpeakProvider_ElevenLabsParams": ".requests",
+ "AgentV1UpdateSpeakSpeakProvider_OpenAi": ".types",
+ "AgentV1UpdateSpeakSpeakProvider_OpenAiParams": ".requests",
+ "AgentV1UserStartedSpeaking": ".types",
+ "AgentV1UserStartedSpeakingParams": ".requests",
+ "AgentV1Warning": ".types",
+ "AgentV1WarningParams": ".requests",
+ "AgentV1Welcome": ".types",
+ "AgentV1WelcomeParams": ".requests",
+ "Anthropic": ".types",
+ "AnthropicParams": ".requests",
+ "Cartesia": ".types",
+ "CartesiaParams": ".requests",
+ "Deepgram": ".types",
+ "DeepgramParams": ".requests",
+ "ElevenLabs": ".types",
+ "ElevenLabsParams": ".requests",
+ "Google": ".types",
+ "GoogleParams": ".requests",
+ "Groq": ".types",
+ "GroqParams": ".requests",
+ "OpenAi": ".types",
+ "OpenAiParams": ".requests",
+ "settings": ".settings",
+}
def __getattr__(attr_name: str) -> typing.Any:
@@ -31,4 +487,231 @@ def __dir__():
return sorted(lazy_attrs)
-__all__ = ["settings"]
+__all__ = [
+ "AgentV1AgentAudioDone",
+ "AgentV1AgentAudioDoneParams",
+ "AgentV1AgentStartedSpeaking",
+ "AgentV1AgentStartedSpeakingParams",
+ "AgentV1AgentThinking",
+ "AgentV1AgentThinkingParams",
+ "AgentV1ConversationText",
+ "AgentV1ConversationTextParams",
+ "AgentV1ConversationTextRole",
+ "AgentV1Error",
+ "AgentV1ErrorParams",
+ "AgentV1FunctionCallRequest",
+ "AgentV1FunctionCallRequestFunctionsItem",
+ "AgentV1FunctionCallRequestFunctionsItemParams",
+ "AgentV1FunctionCallRequestParams",
+ "AgentV1InjectAgentMessage",
+ "AgentV1InjectAgentMessageParams",
+ "AgentV1InjectUserMessage",
+ "AgentV1InjectUserMessageParams",
+ "AgentV1InjectionRefused",
+ "AgentV1InjectionRefusedParams",
+ "AgentV1KeepAlive",
+ "AgentV1KeepAliveParams",
+ "AgentV1PromptUpdated",
+ "AgentV1PromptUpdatedParams",
+ "AgentV1ReceiveFunctionCallResponse",
+ "AgentV1ReceiveFunctionCallResponseParams",
+ "AgentV1SendFunctionCallResponse",
+ "AgentV1SendFunctionCallResponseParams",
+ "AgentV1Settings",
+ "AgentV1SettingsAgent",
+ "AgentV1SettingsAgentContext",
+ "AgentV1SettingsAgentContextMessagesItem",
+ "AgentV1SettingsAgentContextMessagesItemContent",
+ "AgentV1SettingsAgentContextMessagesItemContentParams",
+ "AgentV1SettingsAgentContextMessagesItemContentRole",
+ "AgentV1SettingsAgentContextMessagesItemFunctionCalls",
+ "AgentV1SettingsAgentContextMessagesItemFunctionCallsFunctionCallsItem",
+ "AgentV1SettingsAgentContextMessagesItemFunctionCallsFunctionCallsItemParams",
+ "AgentV1SettingsAgentContextMessagesItemFunctionCallsParams",
+ "AgentV1SettingsAgentContextMessagesItemParams",
+ "AgentV1SettingsAgentContextParams",
+ "AgentV1SettingsAgentListen",
+ "AgentV1SettingsAgentListenParams",
+ "AgentV1SettingsAgentListenProvider",
+ "AgentV1SettingsAgentListenProviderParams",
+ "AgentV1SettingsAgentListenProviderV1",
+ "AgentV1SettingsAgentListenProviderV1Params",
+ "AgentV1SettingsAgentListenProviderV2",
+ "AgentV1SettingsAgentListenProviderV2Params",
+ "AgentV1SettingsAgentListenProvider_V1",
+ "AgentV1SettingsAgentListenProvider_V1Params",
+ "AgentV1SettingsAgentListenProvider_V2",
+ "AgentV1SettingsAgentListenProvider_V2Params",
+ "AgentV1SettingsAgentParams",
+ "AgentV1SettingsAgentSpeak",
+ "AgentV1SettingsAgentSpeakEndpoint",
+ "AgentV1SettingsAgentSpeakEndpointEndpoint",
+ "AgentV1SettingsAgentSpeakEndpointEndpointParams",
+ "AgentV1SettingsAgentSpeakEndpointParams",
+ "AgentV1SettingsAgentSpeakEndpointProvider",
+ "AgentV1SettingsAgentSpeakEndpointProviderAwsPolly",
+ "AgentV1SettingsAgentSpeakEndpointProviderAwsPollyCredentials",
+ "AgentV1SettingsAgentSpeakEndpointProviderAwsPollyCredentialsParams",
+ "AgentV1SettingsAgentSpeakEndpointProviderAwsPollyCredentialsType",
+ "AgentV1SettingsAgentSpeakEndpointProviderAwsPollyEngine",
+ "AgentV1SettingsAgentSpeakEndpointProviderAwsPollyParams",
+ "AgentV1SettingsAgentSpeakEndpointProviderAwsPollyVoice",
+ "AgentV1SettingsAgentSpeakEndpointProviderCartesiaModelId",
+ "AgentV1SettingsAgentSpeakEndpointProviderCartesiaVoice",
+ "AgentV1SettingsAgentSpeakEndpointProviderCartesiaVoiceParams",
+ "AgentV1SettingsAgentSpeakEndpointProviderDeepgramModel",
+ "AgentV1SettingsAgentSpeakEndpointProviderElevenLabsModelId",
+ "AgentV1SettingsAgentSpeakEndpointProviderOpenAiModel",
+ "AgentV1SettingsAgentSpeakEndpointProviderOpenAiVoice",
+ "AgentV1SettingsAgentSpeakEndpointProviderParams",
+ "AgentV1SettingsAgentSpeakEndpointProvider_AwsPolly",
+ "AgentV1SettingsAgentSpeakEndpointProvider_AwsPollyParams",
+ "AgentV1SettingsAgentSpeakEndpointProvider_Cartesia",
+ "AgentV1SettingsAgentSpeakEndpointProvider_CartesiaParams",
+ "AgentV1SettingsAgentSpeakEndpointProvider_Deepgram",
+ "AgentV1SettingsAgentSpeakEndpointProvider_DeepgramParams",
+ "AgentV1SettingsAgentSpeakEndpointProvider_ElevenLabs",
+ "AgentV1SettingsAgentSpeakEndpointProvider_ElevenLabsParams",
+ "AgentV1SettingsAgentSpeakEndpointProvider_OpenAi",
+ "AgentV1SettingsAgentSpeakEndpointProvider_OpenAiParams",
+ "AgentV1SettingsAgentSpeakOneItem",
+ "AgentV1SettingsAgentSpeakOneItemEndpoint",
+ "AgentV1SettingsAgentSpeakOneItemEndpointParams",
+ "AgentV1SettingsAgentSpeakOneItemParams",
+ "AgentV1SettingsAgentSpeakOneItemProvider",
+ "AgentV1SettingsAgentSpeakOneItemProviderAwsPolly",
+ "AgentV1SettingsAgentSpeakOneItemProviderAwsPollyCredentials",
+ "AgentV1SettingsAgentSpeakOneItemProviderAwsPollyCredentialsParams",
+ "AgentV1SettingsAgentSpeakOneItemProviderAwsPollyCredentialsType",
+ "AgentV1SettingsAgentSpeakOneItemProviderAwsPollyEngine",
+ "AgentV1SettingsAgentSpeakOneItemProviderAwsPollyParams",
+ "AgentV1SettingsAgentSpeakOneItemProviderAwsPollyVoice",
+ "AgentV1SettingsAgentSpeakOneItemProviderCartesiaModelId",
+ "AgentV1SettingsAgentSpeakOneItemProviderCartesiaVoice",
+ "AgentV1SettingsAgentSpeakOneItemProviderCartesiaVoiceParams",
+ "AgentV1SettingsAgentSpeakOneItemProviderDeepgramModel",
+ "AgentV1SettingsAgentSpeakOneItemProviderElevenLabsModelId",
+ "AgentV1SettingsAgentSpeakOneItemProviderOpenAiModel",
+ "AgentV1SettingsAgentSpeakOneItemProviderOpenAiVoice",
+ "AgentV1SettingsAgentSpeakOneItemProviderParams",
+ "AgentV1SettingsAgentSpeakOneItemProvider_AwsPolly",
+ "AgentV1SettingsAgentSpeakOneItemProvider_AwsPollyParams",
+ "AgentV1SettingsAgentSpeakOneItemProvider_Cartesia",
+ "AgentV1SettingsAgentSpeakOneItemProvider_CartesiaParams",
+ "AgentV1SettingsAgentSpeakOneItemProvider_Deepgram",
+ "AgentV1SettingsAgentSpeakOneItemProvider_DeepgramParams",
+ "AgentV1SettingsAgentSpeakOneItemProvider_ElevenLabs",
+ "AgentV1SettingsAgentSpeakOneItemProvider_ElevenLabsParams",
+ "AgentV1SettingsAgentSpeakOneItemProvider_OpenAi",
+ "AgentV1SettingsAgentSpeakOneItemProvider_OpenAiParams",
+ "AgentV1SettingsAgentSpeakParams",
+ "AgentV1SettingsAgentThink",
+ "AgentV1SettingsAgentThinkContextLength",
+ "AgentV1SettingsAgentThinkContextLengthParams",
+ "AgentV1SettingsAgentThinkEndpoint",
+ "AgentV1SettingsAgentThinkEndpointParams",
+ "AgentV1SettingsAgentThinkFunctionsItem",
+ "AgentV1SettingsAgentThinkFunctionsItemEndpoint",
+ "AgentV1SettingsAgentThinkFunctionsItemEndpointParams",
+ "AgentV1SettingsAgentThinkFunctionsItemParams",
+ "AgentV1SettingsAgentThinkParams",
+ "AgentV1SettingsAgentThinkProvider",
+ "AgentV1SettingsAgentThinkProviderAnthropicModel",
+ "AgentV1SettingsAgentThinkProviderAwsBedrock",
+ "AgentV1SettingsAgentThinkProviderAwsBedrockCredentials",
+ "AgentV1SettingsAgentThinkProviderAwsBedrockCredentialsParams",
+ "AgentV1SettingsAgentThinkProviderAwsBedrockCredentialsType",
+ "AgentV1SettingsAgentThinkProviderAwsBedrockModel",
+ "AgentV1SettingsAgentThinkProviderAwsBedrockParams",
+ "AgentV1SettingsAgentThinkProviderGoogleModel",
+ "AgentV1SettingsAgentThinkProviderOpenAiModel",
+ "AgentV1SettingsAgentThinkProviderParams",
+ "AgentV1SettingsAgentThinkProvider_Anthropic",
+ "AgentV1SettingsAgentThinkProvider_AnthropicParams",
+ "AgentV1SettingsAgentThinkProvider_AwsBedrock",
+ "AgentV1SettingsAgentThinkProvider_AwsBedrockParams",
+ "AgentV1SettingsAgentThinkProvider_Google",
+ "AgentV1SettingsAgentThinkProvider_GoogleParams",
+ "AgentV1SettingsAgentThinkProvider_Groq",
+ "AgentV1SettingsAgentThinkProvider_GroqParams",
+ "AgentV1SettingsAgentThinkProvider_OpenAi",
+ "AgentV1SettingsAgentThinkProvider_OpenAiParams",
+ "AgentV1SettingsApplied",
+ "AgentV1SettingsAppliedParams",
+ "AgentV1SettingsAudio",
+ "AgentV1SettingsAudioInput",
+ "AgentV1SettingsAudioInputEncoding",
+ "AgentV1SettingsAudioInputParams",
+ "AgentV1SettingsAudioOutput",
+ "AgentV1SettingsAudioOutputEncoding",
+ "AgentV1SettingsAudioOutputParams",
+ "AgentV1SettingsAudioParams",
+ "AgentV1SettingsFlags",
+ "AgentV1SettingsFlagsParams",
+ "AgentV1SettingsParams",
+ "AgentV1SpeakUpdated",
+ "AgentV1SpeakUpdatedParams",
+ "AgentV1UpdatePrompt",
+ "AgentV1UpdatePromptParams",
+ "AgentV1UpdateSpeak",
+ "AgentV1UpdateSpeakParams",
+ "AgentV1UpdateSpeakSpeak",
+ "AgentV1UpdateSpeakSpeakEndpoint",
+ "AgentV1UpdateSpeakSpeakEndpointParams",
+ "AgentV1UpdateSpeakSpeakParams",
+ "AgentV1UpdateSpeakSpeakProvider",
+ "AgentV1UpdateSpeakSpeakProviderAwsPolly",
+ "AgentV1UpdateSpeakSpeakProviderAwsPollyCredentials",
+ "AgentV1UpdateSpeakSpeakProviderAwsPollyCredentialsParams",
+ "AgentV1UpdateSpeakSpeakProviderAwsPollyCredentialsType",
+ "AgentV1UpdateSpeakSpeakProviderAwsPollyEngine",
+ "AgentV1UpdateSpeakSpeakProviderAwsPollyParams",
+ "AgentV1UpdateSpeakSpeakProviderAwsPollyVoice",
+ "AgentV1UpdateSpeakSpeakProviderCartesia",
+ "AgentV1UpdateSpeakSpeakProviderCartesiaModelId",
+ "AgentV1UpdateSpeakSpeakProviderCartesiaParams",
+ "AgentV1UpdateSpeakSpeakProviderCartesiaVoice",
+ "AgentV1UpdateSpeakSpeakProviderCartesiaVoiceParams",
+ "AgentV1UpdateSpeakSpeakProviderDeepgram",
+ "AgentV1UpdateSpeakSpeakProviderDeepgramModel",
+ "AgentV1UpdateSpeakSpeakProviderDeepgramParams",
+ "AgentV1UpdateSpeakSpeakProviderElevenLabs",
+ "AgentV1UpdateSpeakSpeakProviderElevenLabsModelId",
+ "AgentV1UpdateSpeakSpeakProviderElevenLabsParams",
+ "AgentV1UpdateSpeakSpeakProviderOpenAi",
+ "AgentV1UpdateSpeakSpeakProviderOpenAiModel",
+ "AgentV1UpdateSpeakSpeakProviderOpenAiParams",
+ "AgentV1UpdateSpeakSpeakProviderOpenAiVoice",
+ "AgentV1UpdateSpeakSpeakProviderParams",
+ "AgentV1UpdateSpeakSpeakProvider_AwsPolly",
+ "AgentV1UpdateSpeakSpeakProvider_AwsPollyParams",
+ "AgentV1UpdateSpeakSpeakProvider_Cartesia",
+ "AgentV1UpdateSpeakSpeakProvider_CartesiaParams",
+ "AgentV1UpdateSpeakSpeakProvider_Deepgram",
+ "AgentV1UpdateSpeakSpeakProvider_DeepgramParams",
+ "AgentV1UpdateSpeakSpeakProvider_ElevenLabs",
+ "AgentV1UpdateSpeakSpeakProvider_ElevenLabsParams",
+ "AgentV1UpdateSpeakSpeakProvider_OpenAi",
+ "AgentV1UpdateSpeakSpeakProvider_OpenAiParams",
+ "AgentV1UserStartedSpeaking",
+ "AgentV1UserStartedSpeakingParams",
+ "AgentV1Warning",
+ "AgentV1WarningParams",
+ "AgentV1Welcome",
+ "AgentV1WelcomeParams",
+ "Anthropic",
+ "AnthropicParams",
+ "Cartesia",
+ "CartesiaParams",
+ "Deepgram",
+ "DeepgramParams",
+ "ElevenLabs",
+ "ElevenLabsParams",
+ "Google",
+ "GoogleParams",
+ "Groq",
+ "GroqParams",
+ "OpenAi",
+ "OpenAiParams",
+ "settings",
+]
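
Each name in the table above resolves lazily: model classes come from `.types`, their TypedDict request counterparts from `.requests`, and the `settings` client from `.settings`. A short sketch of what that exposes to callers; the `deepgram.agent.v1` import path is an assumption inferred from the `src/deepgram/agent/v1/...` layout elsewhere in this patch:

    # Sketch only: import path inferred from the package layout in this patch.
    from deepgram.agent.v1 import AgentV1Settings, AgentV1SettingsParams

    # AgentV1Settings resolves through _dynamic_imports to the .types module,
    # while AgentV1SettingsParams resolves to the TypedDict under .requests.
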
diff --git a/src/deepgram/agent/v1/requests/__init__.py b/src/deepgram/agent/v1/requests/__init__.py
new file mode 100644
index 00000000..c978f689
--- /dev/null
+++ b/src/deepgram/agent/v1/requests/__init__.py
@@ -0,0 +1,357 @@
+# This file was auto-generated by Fern from our API Definition.
+
+# isort: skip_file
+
+import typing
+from importlib import import_module
+
+if typing.TYPE_CHECKING:
+ from .agent_v1agent_audio_done import AgentV1AgentAudioDoneParams
+ from .agent_v1agent_started_speaking import AgentV1AgentStartedSpeakingParams
+ from .agent_v1agent_thinking import AgentV1AgentThinkingParams
+ from .agent_v1conversation_text import AgentV1ConversationTextParams
+ from .agent_v1error import AgentV1ErrorParams
+ from .agent_v1function_call_request import AgentV1FunctionCallRequestParams
+ from .agent_v1function_call_request_functions_item import AgentV1FunctionCallRequestFunctionsItemParams
+ from .agent_v1inject_agent_message import AgentV1InjectAgentMessageParams
+ from .agent_v1inject_user_message import AgentV1InjectUserMessageParams
+ from .agent_v1injection_refused import AgentV1InjectionRefusedParams
+ from .agent_v1keep_alive import AgentV1KeepAliveParams
+ from .agent_v1prompt_updated import AgentV1PromptUpdatedParams
+ from .agent_v1receive_function_call_response import AgentV1ReceiveFunctionCallResponseParams
+ from .agent_v1send_function_call_response import AgentV1SendFunctionCallResponseParams
+ from .agent_v1settings import AgentV1SettingsParams
+ from .agent_v1settings_agent import AgentV1SettingsAgentParams
+ from .agent_v1settings_agent_context import AgentV1SettingsAgentContextParams
+ from .agent_v1settings_agent_context_messages_item import AgentV1SettingsAgentContextMessagesItemParams
+ from .agent_v1settings_agent_context_messages_item_content import (
+ AgentV1SettingsAgentContextMessagesItemContentParams,
+ )
+ from .agent_v1settings_agent_context_messages_item_function_calls import (
+ AgentV1SettingsAgentContextMessagesItemFunctionCallsParams,
+ )
+ from .agent_v1settings_agent_context_messages_item_function_calls_function_calls_item import (
+ AgentV1SettingsAgentContextMessagesItemFunctionCallsFunctionCallsItemParams,
+ )
+ from .agent_v1settings_agent_listen import AgentV1SettingsAgentListenParams
+ from .agent_v1settings_agent_listen_provider import (
+ AgentV1SettingsAgentListenProviderParams,
+ AgentV1SettingsAgentListenProvider_V1Params,
+ AgentV1SettingsAgentListenProvider_V2Params,
+ )
+ from .agent_v1settings_agent_listen_provider_v1 import AgentV1SettingsAgentListenProviderV1Params
+ from .agent_v1settings_agent_listen_provider_v2 import AgentV1SettingsAgentListenProviderV2Params
+ from .agent_v1settings_agent_speak import AgentV1SettingsAgentSpeakParams
+ from .agent_v1settings_agent_speak_endpoint import AgentV1SettingsAgentSpeakEndpointParams
+ from .agent_v1settings_agent_speak_endpoint_endpoint import AgentV1SettingsAgentSpeakEndpointEndpointParams
+ from .agent_v1settings_agent_speak_endpoint_provider import (
+ AgentV1SettingsAgentSpeakEndpointProviderParams,
+ AgentV1SettingsAgentSpeakEndpointProvider_AwsPollyParams,
+ AgentV1SettingsAgentSpeakEndpointProvider_CartesiaParams,
+ AgentV1SettingsAgentSpeakEndpointProvider_DeepgramParams,
+ AgentV1SettingsAgentSpeakEndpointProvider_ElevenLabsParams,
+ AgentV1SettingsAgentSpeakEndpointProvider_OpenAiParams,
+ )
+ from .agent_v1settings_agent_speak_endpoint_provider_aws_polly import (
+ AgentV1SettingsAgentSpeakEndpointProviderAwsPollyParams,
+ )
+ from .agent_v1settings_agent_speak_endpoint_provider_aws_polly_credentials import (
+ AgentV1SettingsAgentSpeakEndpointProviderAwsPollyCredentialsParams,
+ )
+ from .agent_v1settings_agent_speak_endpoint_provider_cartesia_voice import (
+ AgentV1SettingsAgentSpeakEndpointProviderCartesiaVoiceParams,
+ )
+ from .agent_v1settings_agent_speak_one_item import AgentV1SettingsAgentSpeakOneItemParams
+ from .agent_v1settings_agent_speak_one_item_endpoint import AgentV1SettingsAgentSpeakOneItemEndpointParams
+ from .agent_v1settings_agent_speak_one_item_provider import (
+ AgentV1SettingsAgentSpeakOneItemProviderParams,
+ AgentV1SettingsAgentSpeakOneItemProvider_AwsPollyParams,
+ AgentV1SettingsAgentSpeakOneItemProvider_CartesiaParams,
+ AgentV1SettingsAgentSpeakOneItemProvider_DeepgramParams,
+ AgentV1SettingsAgentSpeakOneItemProvider_ElevenLabsParams,
+ AgentV1SettingsAgentSpeakOneItemProvider_OpenAiParams,
+ )
+ from .agent_v1settings_agent_speak_one_item_provider_aws_polly import (
+ AgentV1SettingsAgentSpeakOneItemProviderAwsPollyParams,
+ )
+ from .agent_v1settings_agent_speak_one_item_provider_aws_polly_credentials import (
+ AgentV1SettingsAgentSpeakOneItemProviderAwsPollyCredentialsParams,
+ )
+ from .agent_v1settings_agent_speak_one_item_provider_cartesia_voice import (
+ AgentV1SettingsAgentSpeakOneItemProviderCartesiaVoiceParams,
+ )
+ from .agent_v1settings_agent_think import AgentV1SettingsAgentThinkParams
+ from .agent_v1settings_agent_think_context_length import AgentV1SettingsAgentThinkContextLengthParams
+ from .agent_v1settings_agent_think_endpoint import AgentV1SettingsAgentThinkEndpointParams
+ from .agent_v1settings_agent_think_functions_item import AgentV1SettingsAgentThinkFunctionsItemParams
+ from .agent_v1settings_agent_think_functions_item_endpoint import (
+ AgentV1SettingsAgentThinkFunctionsItemEndpointParams,
+ )
+ from .agent_v1settings_agent_think_provider import (
+ AgentV1SettingsAgentThinkProviderParams,
+ AgentV1SettingsAgentThinkProvider_AnthropicParams,
+ AgentV1SettingsAgentThinkProvider_AwsBedrockParams,
+ AgentV1SettingsAgentThinkProvider_GoogleParams,
+ AgentV1SettingsAgentThinkProvider_GroqParams,
+ AgentV1SettingsAgentThinkProvider_OpenAiParams,
+ )
+ from .agent_v1settings_agent_think_provider_aws_bedrock import AgentV1SettingsAgentThinkProviderAwsBedrockParams
+ from .agent_v1settings_agent_think_provider_aws_bedrock_credentials import (
+ AgentV1SettingsAgentThinkProviderAwsBedrockCredentialsParams,
+ )
+ from .agent_v1settings_applied import AgentV1SettingsAppliedParams
+ from .agent_v1settings_audio import AgentV1SettingsAudioParams
+ from .agent_v1settings_audio_input import AgentV1SettingsAudioInputParams
+ from .agent_v1settings_audio_output import AgentV1SettingsAudioOutputParams
+ from .agent_v1settings_flags import AgentV1SettingsFlagsParams
+ from .agent_v1speak_updated import AgentV1SpeakUpdatedParams
+ from .agent_v1update_prompt import AgentV1UpdatePromptParams
+ from .agent_v1update_speak import AgentV1UpdateSpeakParams
+ from .agent_v1update_speak_speak import AgentV1UpdateSpeakSpeakParams
+ from .agent_v1update_speak_speak_endpoint import AgentV1UpdateSpeakSpeakEndpointParams
+ from .agent_v1update_speak_speak_provider import (
+ AgentV1UpdateSpeakSpeakProviderParams,
+ AgentV1UpdateSpeakSpeakProvider_AwsPollyParams,
+ AgentV1UpdateSpeakSpeakProvider_CartesiaParams,
+ AgentV1UpdateSpeakSpeakProvider_DeepgramParams,
+ AgentV1UpdateSpeakSpeakProvider_ElevenLabsParams,
+ AgentV1UpdateSpeakSpeakProvider_OpenAiParams,
+ )
+ from .agent_v1update_speak_speak_provider_aws_polly import AgentV1UpdateSpeakSpeakProviderAwsPollyParams
+ from .agent_v1update_speak_speak_provider_aws_polly_credentials import (
+ AgentV1UpdateSpeakSpeakProviderAwsPollyCredentialsParams,
+ )
+ from .agent_v1update_speak_speak_provider_cartesia import AgentV1UpdateSpeakSpeakProviderCartesiaParams
+ from .agent_v1update_speak_speak_provider_cartesia_voice import AgentV1UpdateSpeakSpeakProviderCartesiaVoiceParams
+ from .agent_v1update_speak_speak_provider_deepgram import AgentV1UpdateSpeakSpeakProviderDeepgramParams
+ from .agent_v1update_speak_speak_provider_eleven_labs import AgentV1UpdateSpeakSpeakProviderElevenLabsParams
+ from .agent_v1update_speak_speak_provider_open_ai import AgentV1UpdateSpeakSpeakProviderOpenAiParams
+ from .agent_v1user_started_speaking import AgentV1UserStartedSpeakingParams
+ from .agent_v1warning import AgentV1WarningParams
+ from .agent_v1welcome import AgentV1WelcomeParams
+ from .anthropic import AnthropicParams
+ from .cartesia import CartesiaParams
+ from .deepgram import DeepgramParams
+ from .eleven_labs import ElevenLabsParams
+ from .google import GoogleParams
+ from .groq import GroqParams
+ from .open_ai import OpenAiParams
+_dynamic_imports: typing.Dict[str, str] = {
+ "AgentV1AgentAudioDoneParams": ".agent_v1agent_audio_done",
+ "AgentV1AgentStartedSpeakingParams": ".agent_v1agent_started_speaking",
+ "AgentV1AgentThinkingParams": ".agent_v1agent_thinking",
+ "AgentV1ConversationTextParams": ".agent_v1conversation_text",
+ "AgentV1ErrorParams": ".agent_v1error",
+ "AgentV1FunctionCallRequestFunctionsItemParams": ".agent_v1function_call_request_functions_item",
+ "AgentV1FunctionCallRequestParams": ".agent_v1function_call_request",
+ "AgentV1InjectAgentMessageParams": ".agent_v1inject_agent_message",
+ "AgentV1InjectUserMessageParams": ".agent_v1inject_user_message",
+ "AgentV1InjectionRefusedParams": ".agent_v1injection_refused",
+ "AgentV1KeepAliveParams": ".agent_v1keep_alive",
+ "AgentV1PromptUpdatedParams": ".agent_v1prompt_updated",
+ "AgentV1ReceiveFunctionCallResponseParams": ".agent_v1receive_function_call_response",
+ "AgentV1SendFunctionCallResponseParams": ".agent_v1send_function_call_response",
+ "AgentV1SettingsAgentContextMessagesItemContentParams": ".agent_v1settings_agent_context_messages_item_content",
+ "AgentV1SettingsAgentContextMessagesItemFunctionCallsFunctionCallsItemParams": ".agent_v1settings_agent_context_messages_item_function_calls_function_calls_item",
+ "AgentV1SettingsAgentContextMessagesItemFunctionCallsParams": ".agent_v1settings_agent_context_messages_item_function_calls",
+ "AgentV1SettingsAgentContextMessagesItemParams": ".agent_v1settings_agent_context_messages_item",
+ "AgentV1SettingsAgentContextParams": ".agent_v1settings_agent_context",
+ "AgentV1SettingsAgentListenParams": ".agent_v1settings_agent_listen",
+ "AgentV1SettingsAgentListenProviderParams": ".agent_v1settings_agent_listen_provider",
+ "AgentV1SettingsAgentListenProviderV1Params": ".agent_v1settings_agent_listen_provider_v1",
+ "AgentV1SettingsAgentListenProviderV2Params": ".agent_v1settings_agent_listen_provider_v2",
+ "AgentV1SettingsAgentListenProvider_V1Params": ".agent_v1settings_agent_listen_provider",
+ "AgentV1SettingsAgentListenProvider_V2Params": ".agent_v1settings_agent_listen_provider",
+ "AgentV1SettingsAgentParams": ".agent_v1settings_agent",
+ "AgentV1SettingsAgentSpeakEndpointEndpointParams": ".agent_v1settings_agent_speak_endpoint_endpoint",
+ "AgentV1SettingsAgentSpeakEndpointParams": ".agent_v1settings_agent_speak_endpoint",
+ "AgentV1SettingsAgentSpeakEndpointProviderAwsPollyCredentialsParams": ".agent_v1settings_agent_speak_endpoint_provider_aws_polly_credentials",
+ "AgentV1SettingsAgentSpeakEndpointProviderAwsPollyParams": ".agent_v1settings_agent_speak_endpoint_provider_aws_polly",
+ "AgentV1SettingsAgentSpeakEndpointProviderCartesiaVoiceParams": ".agent_v1settings_agent_speak_endpoint_provider_cartesia_voice",
+ "AgentV1SettingsAgentSpeakEndpointProviderParams": ".agent_v1settings_agent_speak_endpoint_provider",
+ "AgentV1SettingsAgentSpeakEndpointProvider_AwsPollyParams": ".agent_v1settings_agent_speak_endpoint_provider",
+ "AgentV1SettingsAgentSpeakEndpointProvider_CartesiaParams": ".agent_v1settings_agent_speak_endpoint_provider",
+ "AgentV1SettingsAgentSpeakEndpointProvider_DeepgramParams": ".agent_v1settings_agent_speak_endpoint_provider",
+ "AgentV1SettingsAgentSpeakEndpointProvider_ElevenLabsParams": ".agent_v1settings_agent_speak_endpoint_provider",
+ "AgentV1SettingsAgentSpeakEndpointProvider_OpenAiParams": ".agent_v1settings_agent_speak_endpoint_provider",
+ "AgentV1SettingsAgentSpeakOneItemEndpointParams": ".agent_v1settings_agent_speak_one_item_endpoint",
+ "AgentV1SettingsAgentSpeakOneItemParams": ".agent_v1settings_agent_speak_one_item",
+ "AgentV1SettingsAgentSpeakOneItemProviderAwsPollyCredentialsParams": ".agent_v1settings_agent_speak_one_item_provider_aws_polly_credentials",
+ "AgentV1SettingsAgentSpeakOneItemProviderAwsPollyParams": ".agent_v1settings_agent_speak_one_item_provider_aws_polly",
+ "AgentV1SettingsAgentSpeakOneItemProviderCartesiaVoiceParams": ".agent_v1settings_agent_speak_one_item_provider_cartesia_voice",
+ "AgentV1SettingsAgentSpeakOneItemProviderParams": ".agent_v1settings_agent_speak_one_item_provider",
+ "AgentV1SettingsAgentSpeakOneItemProvider_AwsPollyParams": ".agent_v1settings_agent_speak_one_item_provider",
+ "AgentV1SettingsAgentSpeakOneItemProvider_CartesiaParams": ".agent_v1settings_agent_speak_one_item_provider",
+ "AgentV1SettingsAgentSpeakOneItemProvider_DeepgramParams": ".agent_v1settings_agent_speak_one_item_provider",
+ "AgentV1SettingsAgentSpeakOneItemProvider_ElevenLabsParams": ".agent_v1settings_agent_speak_one_item_provider",
+ "AgentV1SettingsAgentSpeakOneItemProvider_OpenAiParams": ".agent_v1settings_agent_speak_one_item_provider",
+ "AgentV1SettingsAgentSpeakParams": ".agent_v1settings_agent_speak",
+ "AgentV1SettingsAgentThinkContextLengthParams": ".agent_v1settings_agent_think_context_length",
+ "AgentV1SettingsAgentThinkEndpointParams": ".agent_v1settings_agent_think_endpoint",
+ "AgentV1SettingsAgentThinkFunctionsItemEndpointParams": ".agent_v1settings_agent_think_functions_item_endpoint",
+ "AgentV1SettingsAgentThinkFunctionsItemParams": ".agent_v1settings_agent_think_functions_item",
+ "AgentV1SettingsAgentThinkParams": ".agent_v1settings_agent_think",
+ "AgentV1SettingsAgentThinkProviderAwsBedrockCredentialsParams": ".agent_v1settings_agent_think_provider_aws_bedrock_credentials",
+ "AgentV1SettingsAgentThinkProviderAwsBedrockParams": ".agent_v1settings_agent_think_provider_aws_bedrock",
+ "AgentV1SettingsAgentThinkProviderParams": ".agent_v1settings_agent_think_provider",
+ "AgentV1SettingsAgentThinkProvider_AnthropicParams": ".agent_v1settings_agent_think_provider",
+ "AgentV1SettingsAgentThinkProvider_AwsBedrockParams": ".agent_v1settings_agent_think_provider",
+ "AgentV1SettingsAgentThinkProvider_GoogleParams": ".agent_v1settings_agent_think_provider",
+ "AgentV1SettingsAgentThinkProvider_GroqParams": ".agent_v1settings_agent_think_provider",
+ "AgentV1SettingsAgentThinkProvider_OpenAiParams": ".agent_v1settings_agent_think_provider",
+ "AgentV1SettingsAppliedParams": ".agent_v1settings_applied",
+ "AgentV1SettingsAudioInputParams": ".agent_v1settings_audio_input",
+ "AgentV1SettingsAudioOutputParams": ".agent_v1settings_audio_output",
+ "AgentV1SettingsAudioParams": ".agent_v1settings_audio",
+ "AgentV1SettingsFlagsParams": ".agent_v1settings_flags",
+ "AgentV1SettingsParams": ".agent_v1settings",
+ "AgentV1SpeakUpdatedParams": ".agent_v1speak_updated",
+ "AgentV1UpdatePromptParams": ".agent_v1update_prompt",
+ "AgentV1UpdateSpeakParams": ".agent_v1update_speak",
+ "AgentV1UpdateSpeakSpeakEndpointParams": ".agent_v1update_speak_speak_endpoint",
+ "AgentV1UpdateSpeakSpeakParams": ".agent_v1update_speak_speak",
+ "AgentV1UpdateSpeakSpeakProviderAwsPollyCredentialsParams": ".agent_v1update_speak_speak_provider_aws_polly_credentials",
+ "AgentV1UpdateSpeakSpeakProviderAwsPollyParams": ".agent_v1update_speak_speak_provider_aws_polly",
+ "AgentV1UpdateSpeakSpeakProviderCartesiaParams": ".agent_v1update_speak_speak_provider_cartesia",
+ "AgentV1UpdateSpeakSpeakProviderCartesiaVoiceParams": ".agent_v1update_speak_speak_provider_cartesia_voice",
+ "AgentV1UpdateSpeakSpeakProviderDeepgramParams": ".agent_v1update_speak_speak_provider_deepgram",
+ "AgentV1UpdateSpeakSpeakProviderElevenLabsParams": ".agent_v1update_speak_speak_provider_eleven_labs",
+ "AgentV1UpdateSpeakSpeakProviderOpenAiParams": ".agent_v1update_speak_speak_provider_open_ai",
+ "AgentV1UpdateSpeakSpeakProviderParams": ".agent_v1update_speak_speak_provider",
+ "AgentV1UpdateSpeakSpeakProvider_AwsPollyParams": ".agent_v1update_speak_speak_provider",
+ "AgentV1UpdateSpeakSpeakProvider_CartesiaParams": ".agent_v1update_speak_speak_provider",
+ "AgentV1UpdateSpeakSpeakProvider_DeepgramParams": ".agent_v1update_speak_speak_provider",
+ "AgentV1UpdateSpeakSpeakProvider_ElevenLabsParams": ".agent_v1update_speak_speak_provider",
+ "AgentV1UpdateSpeakSpeakProvider_OpenAiParams": ".agent_v1update_speak_speak_provider",
+ "AgentV1UserStartedSpeakingParams": ".agent_v1user_started_speaking",
+ "AgentV1WarningParams": ".agent_v1warning",
+ "AgentV1WelcomeParams": ".agent_v1welcome",
+ "AnthropicParams": ".anthropic",
+ "CartesiaParams": ".cartesia",
+ "DeepgramParams": ".deepgram",
+ "ElevenLabsParams": ".eleven_labs",
+ "GoogleParams": ".google",
+ "GroqParams": ".groq",
+ "OpenAiParams": ".open_ai",
+}
+
+
+def __getattr__(attr_name: str) -> typing.Any:
+ module_name = _dynamic_imports.get(attr_name)
+ if module_name is None:
+ raise AttributeError(f"No {attr_name} found in _dynamic_imports for module name -> {__name__}")
+ try:
+ module = import_module(module_name, __package__)
+ if module_name == f".{attr_name}":
+ return module
+ else:
+ return getattr(module, attr_name)
+ except ImportError as e:
+ raise ImportError(f"Failed to import {attr_name} from {module_name}: {e}") from e
+ except AttributeError as e:
+ raise AttributeError(f"Failed to get {attr_name} from {module_name}: {e}") from e
+
+
+def __dir__():
+ lazy_attrs = list(_dynamic_imports.keys())
+ return sorted(lazy_attrs)
+
+
+__all__ = [
+ "AgentV1AgentAudioDoneParams",
+ "AgentV1AgentStartedSpeakingParams",
+ "AgentV1AgentThinkingParams",
+ "AgentV1ConversationTextParams",
+ "AgentV1ErrorParams",
+ "AgentV1FunctionCallRequestFunctionsItemParams",
+ "AgentV1FunctionCallRequestParams",
+ "AgentV1InjectAgentMessageParams",
+ "AgentV1InjectUserMessageParams",
+ "AgentV1InjectionRefusedParams",
+ "AgentV1KeepAliveParams",
+ "AgentV1PromptUpdatedParams",
+ "AgentV1ReceiveFunctionCallResponseParams",
+ "AgentV1SendFunctionCallResponseParams",
+ "AgentV1SettingsAgentContextMessagesItemContentParams",
+ "AgentV1SettingsAgentContextMessagesItemFunctionCallsFunctionCallsItemParams",
+ "AgentV1SettingsAgentContextMessagesItemFunctionCallsParams",
+ "AgentV1SettingsAgentContextMessagesItemParams",
+ "AgentV1SettingsAgentContextParams",
+ "AgentV1SettingsAgentListenParams",
+ "AgentV1SettingsAgentListenProviderParams",
+ "AgentV1SettingsAgentListenProviderV1Params",
+ "AgentV1SettingsAgentListenProviderV2Params",
+ "AgentV1SettingsAgentListenProvider_V1Params",
+ "AgentV1SettingsAgentListenProvider_V2Params",
+ "AgentV1SettingsAgentParams",
+ "AgentV1SettingsAgentSpeakEndpointEndpointParams",
+ "AgentV1SettingsAgentSpeakEndpointParams",
+ "AgentV1SettingsAgentSpeakEndpointProviderAwsPollyCredentialsParams",
+ "AgentV1SettingsAgentSpeakEndpointProviderAwsPollyParams",
+ "AgentV1SettingsAgentSpeakEndpointProviderCartesiaVoiceParams",
+ "AgentV1SettingsAgentSpeakEndpointProviderParams",
+ "AgentV1SettingsAgentSpeakEndpointProvider_AwsPollyParams",
+ "AgentV1SettingsAgentSpeakEndpointProvider_CartesiaParams",
+ "AgentV1SettingsAgentSpeakEndpointProvider_DeepgramParams",
+ "AgentV1SettingsAgentSpeakEndpointProvider_ElevenLabsParams",
+ "AgentV1SettingsAgentSpeakEndpointProvider_OpenAiParams",
+ "AgentV1SettingsAgentSpeakOneItemEndpointParams",
+ "AgentV1SettingsAgentSpeakOneItemParams",
+ "AgentV1SettingsAgentSpeakOneItemProviderAwsPollyCredentialsParams",
+ "AgentV1SettingsAgentSpeakOneItemProviderAwsPollyParams",
+ "AgentV1SettingsAgentSpeakOneItemProviderCartesiaVoiceParams",
+ "AgentV1SettingsAgentSpeakOneItemProviderParams",
+ "AgentV1SettingsAgentSpeakOneItemProvider_AwsPollyParams",
+ "AgentV1SettingsAgentSpeakOneItemProvider_CartesiaParams",
+ "AgentV1SettingsAgentSpeakOneItemProvider_DeepgramParams",
+ "AgentV1SettingsAgentSpeakOneItemProvider_ElevenLabsParams",
+ "AgentV1SettingsAgentSpeakOneItemProvider_OpenAiParams",
+ "AgentV1SettingsAgentSpeakParams",
+ "AgentV1SettingsAgentThinkContextLengthParams",
+ "AgentV1SettingsAgentThinkEndpointParams",
+ "AgentV1SettingsAgentThinkFunctionsItemEndpointParams",
+ "AgentV1SettingsAgentThinkFunctionsItemParams",
+ "AgentV1SettingsAgentThinkParams",
+ "AgentV1SettingsAgentThinkProviderAwsBedrockCredentialsParams",
+ "AgentV1SettingsAgentThinkProviderAwsBedrockParams",
+ "AgentV1SettingsAgentThinkProviderParams",
+ "AgentV1SettingsAgentThinkProvider_AnthropicParams",
+ "AgentV1SettingsAgentThinkProvider_AwsBedrockParams",
+ "AgentV1SettingsAgentThinkProvider_GoogleParams",
+ "AgentV1SettingsAgentThinkProvider_GroqParams",
+ "AgentV1SettingsAgentThinkProvider_OpenAiParams",
+ "AgentV1SettingsAppliedParams",
+ "AgentV1SettingsAudioInputParams",
+ "AgentV1SettingsAudioOutputParams",
+ "AgentV1SettingsAudioParams",
+ "AgentV1SettingsFlagsParams",
+ "AgentV1SettingsParams",
+ "AgentV1SpeakUpdatedParams",
+ "AgentV1UpdatePromptParams",
+ "AgentV1UpdateSpeakParams",
+ "AgentV1UpdateSpeakSpeakEndpointParams",
+ "AgentV1UpdateSpeakSpeakParams",
+ "AgentV1UpdateSpeakSpeakProviderAwsPollyCredentialsParams",
+ "AgentV1UpdateSpeakSpeakProviderAwsPollyParams",
+ "AgentV1UpdateSpeakSpeakProviderCartesiaParams",
+ "AgentV1UpdateSpeakSpeakProviderCartesiaVoiceParams",
+ "AgentV1UpdateSpeakSpeakProviderDeepgramParams",
+ "AgentV1UpdateSpeakSpeakProviderElevenLabsParams",
+ "AgentV1UpdateSpeakSpeakProviderOpenAiParams",
+ "AgentV1UpdateSpeakSpeakProviderParams",
+ "AgentV1UpdateSpeakSpeakProvider_AwsPollyParams",
+ "AgentV1UpdateSpeakSpeakProvider_CartesiaParams",
+ "AgentV1UpdateSpeakSpeakProvider_DeepgramParams",
+ "AgentV1UpdateSpeakSpeakProvider_ElevenLabsParams",
+ "AgentV1UpdateSpeakSpeakProvider_OpenAiParams",
+ "AgentV1UserStartedSpeakingParams",
+ "AgentV1WarningParams",
+ "AgentV1WelcomeParams",
+ "AnthropicParams",
+ "CartesiaParams",
+ "DeepgramParams",
+ "ElevenLabsParams",
+ "GoogleParams",
+ "GroqParams",
+ "OpenAiParams",
+]
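
The `_dynamic_imports` table and module-level `__getattr__` above implement PEP 562 lazy imports: a submodule under `requests/` is only imported the first time one of its names is accessed. A standalone sketch of that lookup, where the `resolve` helper and the one-entry table are illustrative and the package path is taken from the file layout:

    import typing
    from importlib import import_module

    # One entry standing in for the generated table above.
    _dynamic_imports: typing.Dict[str, str] = {
        "AgentV1SettingsParams": ".agent_v1settings",
    }

    def resolve(attr_name: str) -> typing.Any:
        # Mirrors the generated __getattr__: find the owning submodule,
        # import it relative to the requests package, then fetch the name.
        module_name = _dynamic_imports[attr_name]
        module = import_module(module_name, "deepgram.agent.v1.requests")
        return getattr(module, attr_name)

    # Equivalent to: from deepgram.agent.v1.requests import AgentV1SettingsParams
    AgentV1SettingsParams = resolve("AgentV1SettingsParams")
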
diff --git a/src/deepgram/agent/v1/requests/agent_v1agent_audio_done.py b/src/deepgram/agent/v1/requests/agent_v1agent_audio_done.py
new file mode 100644
index 00000000..43b4f013
--- /dev/null
+++ b/src/deepgram/agent/v1/requests/agent_v1agent_audio_done.py
@@ -0,0 +1,12 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import typing_extensions
+
+
+class AgentV1AgentAudioDoneParams(typing_extensions.TypedDict):
+ type: typing.Literal["AgentAudioDone"]
+ """
+ Message type identifier indicating the agent has finished sending audio
+ """
diff --git a/src/deepgram/agent/v1/requests/agent_v1agent_started_speaking.py b/src/deepgram/agent/v1/requests/agent_v1agent_started_speaking.py
new file mode 100644
index 00000000..39861c94
--- /dev/null
+++ b/src/deepgram/agent/v1/requests/agent_v1agent_started_speaking.py
@@ -0,0 +1,27 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import typing_extensions
+
+
+class AgentV1AgentStartedSpeakingParams(typing_extensions.TypedDict):
+ type: typing.Literal["AgentStartedSpeaking"]
+ """
+ Message type identifier for agent started speaking
+ """
+
+ total_latency: float
+ """
+ Seconds from receiving the user's utterance to producing the agent's reply
+ """
+
+ tts_latency: float
+ """
+ The portion of total latency attributable to text-to-speech
+ """
+
+ ttt_latency: float
+ """
+ The portion of total latency attributable to text-to-text (usually an LLM)
+ """
diff --git a/src/deepgram/agent/v1/requests/agent_v1agent_thinking.py b/src/deepgram/agent/v1/requests/agent_v1agent_thinking.py
new file mode 100644
index 00000000..13434cbc
--- /dev/null
+++ b/src/deepgram/agent/v1/requests/agent_v1agent_thinking.py
@@ -0,0 +1,17 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import typing_extensions
+
+
+class AgentV1AgentThinkingParams(typing_extensions.TypedDict):
+ type: typing.Literal["AgentThinking"]
+ """
+ Message type identifier for agent thinking
+ """
+
+ content: str
+ """
+ The text of the agent's thought process
+ """
diff --git a/src/deepgram/agent/v1/requests/agent_v1conversation_text.py b/src/deepgram/agent/v1/requests/agent_v1conversation_text.py
new file mode 100644
index 00000000..ea0601e3
--- /dev/null
+++ b/src/deepgram/agent/v1/requests/agent_v1conversation_text.py
@@ -0,0 +1,23 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import typing_extensions
+from ..types.agent_v1conversation_text_role import AgentV1ConversationTextRole
+
+
+class AgentV1ConversationTextParams(typing_extensions.TypedDict):
+ type: typing.Literal["ConversationText"]
+ """
+ Message type identifier for conversation text
+ """
+
+ role: AgentV1ConversationTextRole
+ """
+ Identifies who spoke the statement
+ """
+
+ content: str
+ """
+ The actual statement that was spoken
+ """
diff --git a/src/deepgram/agent/v1/requests/agent_v1error.py b/src/deepgram/agent/v1/requests/agent_v1error.py
new file mode 100644
index 00000000..23547cb7
--- /dev/null
+++ b/src/deepgram/agent/v1/requests/agent_v1error.py
@@ -0,0 +1,22 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import typing_extensions
+
+
+class AgentV1ErrorParams(typing_extensions.TypedDict):
+ type: typing.Literal["Error"]
+ """
+ Message type identifier for error responses
+ """
+
+ description: str
+ """
+ A description of what went wrong
+ """
+
+ code: str
+ """
+ Error code identifying the type of error
+ """
diff --git a/src/deepgram/agent/v1/requests/agent_v1function_call_request.py b/src/deepgram/agent/v1/requests/agent_v1function_call_request.py
new file mode 100644
index 00000000..b00cc6d4
--- /dev/null
+++ b/src/deepgram/agent/v1/requests/agent_v1function_call_request.py
@@ -0,0 +1,18 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import typing_extensions
+from .agent_v1function_call_request_functions_item import AgentV1FunctionCallRequestFunctionsItemParams
+
+
+class AgentV1FunctionCallRequestParams(typing_extensions.TypedDict):
+ type: typing.Literal["FunctionCallRequest"]
+ """
+ Message type identifier for function call requests
+ """
+
+ functions: typing.Sequence[AgentV1FunctionCallRequestFunctionsItemParams]
+ """
+ Array of functions to be called
+ """
diff --git a/src/deepgram/agent/v1/requests/agent_v1function_call_request_functions_item.py b/src/deepgram/agent/v1/requests/agent_v1function_call_request_functions_item.py
new file mode 100644
index 00000000..bdc26719
--- /dev/null
+++ b/src/deepgram/agent/v1/requests/agent_v1function_call_request_functions_item.py
@@ -0,0 +1,25 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+
+
+class AgentV1FunctionCallRequestFunctionsItemParams(typing_extensions.TypedDict):
+ id: str
+ """
+ Unique identifier for the function call
+ """
+
+ name: str
+ """
+ The name of the function to call
+ """
+
+ arguments: str
+ """
+ JSON string containing the function arguments
+ """
+
+ client_side: bool
+ """
+ Whether the function should be executed client-side
+ """
diff --git a/src/deepgram/agent/v1/requests/agent_v1inject_agent_message.py b/src/deepgram/agent/v1/requests/agent_v1inject_agent_message.py
new file mode 100644
index 00000000..8fb718bd
--- /dev/null
+++ b/src/deepgram/agent/v1/requests/agent_v1inject_agent_message.py
@@ -0,0 +1,17 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import typing_extensions
+
+
+class AgentV1InjectAgentMessageParams(typing_extensions.TypedDict):
+ type: typing.Literal["InjectAgentMessage"]
+ """
+ Message type identifier for injecting an agent message
+ """
+
+ message: str
+ """
+ The statement that the agent should say
+ """
diff --git a/src/deepgram/agent/v1/requests/agent_v1inject_user_message.py b/src/deepgram/agent/v1/requests/agent_v1inject_user_message.py
new file mode 100644
index 00000000..86583a81
--- /dev/null
+++ b/src/deepgram/agent/v1/requests/agent_v1inject_user_message.py
@@ -0,0 +1,17 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import typing_extensions
+
+
+class AgentV1InjectUserMessageParams(typing_extensions.TypedDict):
+ type: typing.Literal["InjectUserMessage"]
+ """
+ Message type identifier for injecting a user message
+ """
+
+ content: str
+ """
+ The specific phrase or statement the agent should respond to
+ """
diff --git a/src/deepgram/agent/v1/requests/agent_v1injection_refused.py b/src/deepgram/agent/v1/requests/agent_v1injection_refused.py
new file mode 100644
index 00000000..e19f3241
--- /dev/null
+++ b/src/deepgram/agent/v1/requests/agent_v1injection_refused.py
@@ -0,0 +1,17 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import typing_extensions
+
+
+class AgentV1InjectionRefusedParams(typing_extensions.TypedDict):
+ type: typing.Literal["InjectionRefused"]
+ """
+ Message type identifier for injection refused
+ """
+
+ message: str
+ """
+ Details about why the injection was refused
+ """
diff --git a/src/deepgram/agent/v1/requests/agent_v1keep_alive.py b/src/deepgram/agent/v1/requests/agent_v1keep_alive.py
new file mode 100644
index 00000000..125eb8ae
--- /dev/null
+++ b/src/deepgram/agent/v1/requests/agent_v1keep_alive.py
@@ -0,0 +1,16 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import typing_extensions
+
+
+class AgentV1KeepAliveParams(typing_extensions.TypedDict):
+ """
+ Send a control message to the agent
+ """
+
+ type: typing.Literal["KeepAlive"]
+ """
+ Message type identifier
+ """
diff --git a/src/deepgram/agent/v1/requests/agent_v1prompt_updated.py b/src/deepgram/agent/v1/requests/agent_v1prompt_updated.py
new file mode 100644
index 00000000..40d5a426
--- /dev/null
+++ b/src/deepgram/agent/v1/requests/agent_v1prompt_updated.py
@@ -0,0 +1,12 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import typing_extensions
+
+
+class AgentV1PromptUpdatedParams(typing_extensions.TypedDict):
+ type: typing.Literal["PromptUpdated"]
+ """
+ Message type identifier for prompt update confirmation
+ """
diff --git a/src/deepgram/agent/v1/requests/agent_v1receive_function_call_response.py b/src/deepgram/agent/v1/requests/agent_v1receive_function_call_response.py
new file mode 100644
index 00000000..f26fda70
--- /dev/null
+++ b/src/deepgram/agent/v1/requests/agent_v1receive_function_call_response.py
@@ -0,0 +1,44 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import typing_extensions
+
+
+class AgentV1ReceiveFunctionCallResponseParams(typing_extensions.TypedDict):
+ """
+ Function call response message used bidirectionally:
+
+ • **Client → Server**: Response after client executes a function
+ marked as client_side: true
+ • **Server → Client**: Response after server executes a function
+ marked as client_side: false
+
+ The same message structure serves both directions, enabling a unified
+ interface for function call responses regardless of execution location.
+ """
+
+ type: typing.Literal["FunctionCallResponse"]
+ """
+ Message type identifier for function call responses
+ """
+
+ id: typing_extensions.NotRequired[str]
+ """
+ The unique identifier for the function call.
+
+ • **Required for client responses**: Should match the id from
+ the corresponding `FunctionCallRequest`
+ • **Optional for server responses**: Server may omit when responding
+ to internal function executions
+ """
+
+ name: str
+ """
+ The name of the function being called
+ """
+
+ content: str
+ """
+ The content or result of the function call
+ """
diff --git a/src/deepgram/agent/v1/requests/agent_v1send_function_call_response.py b/src/deepgram/agent/v1/requests/agent_v1send_function_call_response.py
new file mode 100644
index 00000000..8d75edeb
--- /dev/null
+++ b/src/deepgram/agent/v1/requests/agent_v1send_function_call_response.py
@@ -0,0 +1,44 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import typing_extensions
+
+
+class AgentV1SendFunctionCallResponseParams(typing_extensions.TypedDict):
+ """
+ Function call response message used bidirectionally:
+
+ • **Client → Server**: Response after client executes a function
+ marked as client_side: true
+ • **Server → Client**: Response after server executes a function
+ marked as client_side: false
+
+ The same message structure serves both directions, enabling a unified
+ interface for function call responses regardless of execution location.
+ """
+
+ type: typing.Literal["FunctionCallResponse"]
+ """
+ Message type identifier for function call responses
+ """
+
+ id: typing_extensions.NotRequired[str]
+ """
+ The unique identifier for the function call.
+
+ • **Required for client responses**: Should match the id from
+ the corresponding `FunctionCallRequest`
+ • **Optional for server responses**: Server may omit when responding
+ to internal function executions
+ """
+
+ name: str
+ """
+ The name of the function being called
+ """
+
+ content: str
+ """
+ The content or result of the function call
+ """
diff --git a/src/deepgram/agent/v1/requests/agent_v1settings.py b/src/deepgram/agent/v1/requests/agent_v1settings.py
new file mode 100644
index 00000000..2f748dcd
--- /dev/null
+++ b/src/deepgram/agent/v1/requests/agent_v1settings.py
@@ -0,0 +1,30 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import typing_extensions
+from .agent_v1settings_agent import AgentV1SettingsAgentParams
+from .agent_v1settings_audio import AgentV1SettingsAudioParams
+from .agent_v1settings_flags import AgentV1SettingsFlagsParams
+
+
+class AgentV1SettingsParams(typing_extensions.TypedDict):
+ type: typing.Literal["Settings"]
+ tags: typing_extensions.NotRequired[typing.Sequence[str]]
+ """
+ Tags to associate with the request
+ """
+
+ experimental: typing_extensions.NotRequired[bool]
+ """
+ To enable experimental features
+ """
+
+ flags: typing_extensions.NotRequired[AgentV1SettingsFlagsParams]
+ mip_opt_out: typing_extensions.NotRequired[bool]
+ """
+ To opt out of the Deepgram Model Improvement Program
+ """
+
+ audio: AgentV1SettingsAudioParams
+ agent: AgentV1SettingsAgentParams
diff --git a/src/deepgram/agent/v1/requests/agent_v1settings_agent.py b/src/deepgram/agent/v1/requests/agent_v1settings_agent.py
new file mode 100644
index 00000000..01bffc21
--- /dev/null
+++ b/src/deepgram/agent/v1/requests/agent_v1settings_agent.py
@@ -0,0 +1,27 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+from .agent_v1settings_agent_context import AgentV1SettingsAgentContextParams
+from .agent_v1settings_agent_listen import AgentV1SettingsAgentListenParams
+from .agent_v1settings_agent_speak import AgentV1SettingsAgentSpeakParams
+from .agent_v1settings_agent_think import AgentV1SettingsAgentThinkParams
+
+
+class AgentV1SettingsAgentParams(typing_extensions.TypedDict):
+ language: typing_extensions.NotRequired[str]
+ """
+ Deprecated. Use `listen.provider.language` and `speak.provider.language` fields instead.
+ """
+
+ context: typing_extensions.NotRequired[AgentV1SettingsAgentContextParams]
+ """
+ Conversation context including the history of messages and function calls
+ """
+
+ listen: typing_extensions.NotRequired[AgentV1SettingsAgentListenParams]
+ think: typing_extensions.NotRequired[AgentV1SettingsAgentThinkParams]
+ speak: typing_extensions.NotRequired[AgentV1SettingsAgentSpeakParams]
+ greeting: typing_extensions.NotRequired[str]
+ """
+ Optional message that the agent will speak at the start
+ """
diff --git a/src/deepgram/agent/v1/requests/agent_v1settings_agent_context.py b/src/deepgram/agent/v1/requests/agent_v1settings_agent_context.py
new file mode 100644
index 00000000..a27f848a
--- /dev/null
+++ b/src/deepgram/agent/v1/requests/agent_v1settings_agent_context.py
@@ -0,0 +1,17 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import typing_extensions
+from .agent_v1settings_agent_context_messages_item import AgentV1SettingsAgentContextMessagesItemParams
+
+
+class AgentV1SettingsAgentContextParams(typing_extensions.TypedDict):
+ """
+ Conversation context including the history of messages and function calls
+ """
+
+ messages: typing_extensions.NotRequired[typing.Sequence[AgentV1SettingsAgentContextMessagesItemParams]]
+ """
+ Conversation history as a list of messages and function calls
+ """
diff --git a/src/deepgram/agent/v1/requests/agent_v1settings_agent_context_messages_item.py b/src/deepgram/agent/v1/requests/agent_v1settings_agent_context_messages_item.py
new file mode 100644
index 00000000..cf31d658
--- /dev/null
+++ b/src/deepgram/agent/v1/requests/agent_v1settings_agent_context_messages_item.py
@@ -0,0 +1,12 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+from .agent_v1settings_agent_context_messages_item_content import AgentV1SettingsAgentContextMessagesItemContentParams
+from .agent_v1settings_agent_context_messages_item_function_calls import (
+ AgentV1SettingsAgentContextMessagesItemFunctionCallsParams,
+)
+
+AgentV1SettingsAgentContextMessagesItemParams = typing.Union[
+ AgentV1SettingsAgentContextMessagesItemContentParams, AgentV1SettingsAgentContextMessagesItemFunctionCallsParams
+]
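
The history union above accepts either a text turn or a function-call record; both carry the `"History"` discriminator, so they are told apart structurally (presence of `role`/`content` versus `function_calls`). A sketch of a context value; the role values are assumed from the Role enum, which is not shown in this diff:

# Conversation context mixing a text turn and a function-call record.
context = {
    "messages": [
        {   # AgentV1SettingsAgentContextMessagesItemContentParams
            "type": "History",
            "role": "user",
            "content": "What's the weather in Fremont?",
        },
        {   # AgentV1SettingsAgentContextMessagesItemFunctionCallsParams
            "type": "History",
            "function_calls": [
                {
                    "id": "call_0",
                    "name": "get_weather",
                    "client_side": True,
                    "arguments": '{"city": "Fremont"}',
                    "response": '{"temperature_f": 68}',
                }
            ],
        },
    ]
}
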
diff --git a/src/deepgram/agent/v1/requests/agent_v1settings_agent_context_messages_item_content.py b/src/deepgram/agent/v1/requests/agent_v1settings_agent_context_messages_item_content.py
new file mode 100644
index 00000000..1a541ffc
--- /dev/null
+++ b/src/deepgram/agent/v1/requests/agent_v1settings_agent_context_messages_item_content.py
@@ -0,0 +1,29 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import typing_extensions
+from ..types.agent_v1settings_agent_context_messages_item_content_role import (
+ AgentV1SettingsAgentContextMessagesItemContentRole,
+)
+
+
+class AgentV1SettingsAgentContextMessagesItemContentParams(typing_extensions.TypedDict):
+ """
+ Conversation text as part of the conversation history
+ """
+
+ type: typing.Literal["History"]
+ """
+ Message type identifier for conversation text
+ """
+
+ role: AgentV1SettingsAgentContextMessagesItemContentRole
+ """
+ Identifies who spoke the statement
+ """
+
+ content: str
+ """
+ The actual statement that was spoken
+ """
diff --git a/src/deepgram/agent/v1/requests/agent_v1settings_agent_context_messages_item_function_calls.py b/src/deepgram/agent/v1/requests/agent_v1settings_agent_context_messages_item_function_calls.py
new file mode 100644
index 00000000..cdc5733c
--- /dev/null
+++ b/src/deepgram/agent/v1/requests/agent_v1settings_agent_context_messages_item_function_calls.py
@@ -0,0 +1,20 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import typing_extensions
+from .agent_v1settings_agent_context_messages_item_function_calls_function_calls_item import (
+ AgentV1SettingsAgentContextMessagesItemFunctionCallsFunctionCallsItemParams,
+)
+
+
+class AgentV1SettingsAgentContextMessagesItemFunctionCallsParams(typing_extensions.TypedDict):
+ """
+ Client-side or server-side function call request and response as part of the conversation history
+ """
+
+ type: typing.Literal["History"]
+ function_calls: typing.Sequence[AgentV1SettingsAgentContextMessagesItemFunctionCallsFunctionCallsItemParams]
+ """
+ List of function call objects
+ """
diff --git a/src/deepgram/agent/v1/requests/agent_v1settings_agent_context_messages_item_function_calls_function_calls_item.py b/src/deepgram/agent/v1/requests/agent_v1settings_agent_context_messages_item_function_calls_function_calls_item.py
new file mode 100644
index 00000000..9efeb23e
--- /dev/null
+++ b/src/deepgram/agent/v1/requests/agent_v1settings_agent_context_messages_item_function_calls_function_calls_item.py
@@ -0,0 +1,30 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+
+
+class AgentV1SettingsAgentContextMessagesItemFunctionCallsFunctionCallsItemParams(typing_extensions.TypedDict):
+ id: str
+ """
+ Unique identifier for the function call
+ """
+
+ name: str
+ """
+ Name of the function called
+ """
+
+ client_side: bool
+ """
+ Indicates if the call was client-side or server-side
+ """
+
+ arguments: str
+ """
+ Arguments passed to the function
+ """
+
+ response: str
+ """
+ Response from the function call
+ """
diff --git a/src/deepgram/agent/v1/requests/agent_v1settings_agent_listen.py b/src/deepgram/agent/v1/requests/agent_v1settings_agent_listen.py
new file mode 100644
index 00000000..3eb2aa41
--- /dev/null
+++ b/src/deepgram/agent/v1/requests/agent_v1settings_agent_listen.py
@@ -0,0 +1,8 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+from .agent_v1settings_agent_listen_provider import AgentV1SettingsAgentListenProviderParams
+
+
+class AgentV1SettingsAgentListenParams(typing_extensions.TypedDict):
+ provider: typing_extensions.NotRequired[AgentV1SettingsAgentListenProviderParams]
diff --git a/src/deepgram/agent/v1/requests/agent_v1settings_agent_listen_provider.py b/src/deepgram/agent/v1/requests/agent_v1settings_agent_listen_provider.py
new file mode 100644
index 00000000..1b5b47e8
--- /dev/null
+++ b/src/deepgram/agent/v1/requests/agent_v1settings_agent_listen_provider.py
@@ -0,0 +1,28 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from __future__ import annotations
+
+import typing
+
+import typing_extensions
+
+
+class AgentV1SettingsAgentListenProvider_V1Params(typing_extensions.TypedDict):
+ version: typing.Literal["v1"]
+ type: typing.Literal["deepgram"]
+ model: typing_extensions.NotRequired[str]
+ language: typing_extensions.NotRequired[str]
+ keyterms: typing_extensions.NotRequired[typing.Sequence[str]]
+ smart_format: typing_extensions.NotRequired[bool]
+
+
+class AgentV1SettingsAgentListenProvider_V2Params(typing_extensions.TypedDict):
+ version: typing.Literal["v2"]
+ type: typing.Literal["deepgram"]
+ model: str
+ keyterms: typing_extensions.NotRequired[typing.Sequence[str]]
+
+
+AgentV1SettingsAgentListenProviderParams = typing.Union[
+ AgentV1SettingsAgentListenProvider_V1Params, AgentV1SettingsAgentListenProvider_V2Params
+]
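
This union is discriminated by `version`: the `"v1"` variant leaves `model` optional, while the `"v2"` variant requires it (e.g. a Flux model). A sketch of both shapes with illustrative model names:

# Version-discriminated listen providers; "v2" requires `model`, "v1" does not.
listen_v1 = {
    "version": "v1",
    "type": "deepgram",
    "model": "nova-3",
    "smart_format": True,
}

listen_v2 = {
    "version": "v2",
    "type": "deepgram",
    "model": "flux-general-en",  # required for the v2 variant
}
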
diff --git a/src/deepgram/agent/v1/requests/agent_v1settings_agent_listen_provider_v1.py b/src/deepgram/agent/v1/requests/agent_v1settings_agent_listen_provider_v1.py
new file mode 100644
index 00000000..0fd4e61d
--- /dev/null
+++ b/src/deepgram/agent/v1/requests/agent_v1settings_agent_listen_provider_v1.py
@@ -0,0 +1,32 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import typing_extensions
+
+
+class AgentV1SettingsAgentListenProviderV1Params(typing_extensions.TypedDict):
+ type: typing.Literal["deepgram"]
+ """
+ Provider type for speech-to-text
+ """
+
+ model: typing_extensions.NotRequired[str]
+ """
+ Model to use for speech-to-text using the V1 API (e.g. Nova-3, Nova-2)
+ """
+
+ language: typing_extensions.NotRequired[str]
+ """
+ Language code to use for speech-to-text. Can be a BCP-47 language tag (e.g. `en`), or `multi` for code-switching transcription
+ """
+
+ keyterms: typing_extensions.NotRequired[typing.Sequence[str]]
+ """
+ Prompt keyterm recognition to improve Keyword Recall Rate
+ """
+
+ smart_format: typing_extensions.NotRequired[bool]
+ """
+ Applies smart formatting to improve transcript readability
+ """
diff --git a/src/deepgram/agent/v1/requests/agent_v1settings_agent_listen_provider_v2.py b/src/deepgram/agent/v1/requests/agent_v1settings_agent_listen_provider_v2.py
new file mode 100644
index 00000000..0e0e5c5a
--- /dev/null
+++ b/src/deepgram/agent/v1/requests/agent_v1settings_agent_listen_provider_v2.py
@@ -0,0 +1,22 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import typing_extensions
+
+
+class AgentV1SettingsAgentListenProviderV2Params(typing_extensions.TypedDict):
+ type: typing.Literal["deepgram"]
+ """
+ Provider type for speech-to-text
+ """
+
+ model: str
+ """
+ Model to use for speech-to-text using the V2 API (e.g. flux-general-en)
+ """
+
+ keyterms: typing_extensions.NotRequired[typing.Sequence[str]]
+ """
+ Prompt keyterm recognition to improve Keyword Recall Rate
+ """
diff --git a/src/deepgram/agent/v1/requests/agent_v1settings_agent_speak.py b/src/deepgram/agent/v1/requests/agent_v1settings_agent_speak.py
new file mode 100644
index 00000000..3ae1f7c6
--- /dev/null
+++ b/src/deepgram/agent/v1/requests/agent_v1settings_agent_speak.py
@@ -0,0 +1,10 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+from .agent_v1settings_agent_speak_endpoint import AgentV1SettingsAgentSpeakEndpointParams
+from .agent_v1settings_agent_speak_one_item import AgentV1SettingsAgentSpeakOneItemParams
+
+AgentV1SettingsAgentSpeakParams = typing.Union[
+ AgentV1SettingsAgentSpeakEndpointParams, typing.Sequence[AgentV1SettingsAgentSpeakOneItemParams]
+]
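
The speak configuration is either a single provider entry or a sequence of entries. The list form is sketched below with a Deepgram entry followed by an Eleven Labs entry carrying the endpoint that non-Deepgram providers require; treating later entries as fallbacks is an assumption, and the URL and model ID are illustrative:

# Single speak config, or a list of speak configs.
speak_single = {
    "provider": {"type": "deepgram", "model": "aura-2-thalia-en"},
}

speak_list = [
    {"provider": {"type": "deepgram", "model": "aura-2-thalia-en"}},
    {
        "provider": {"type": "eleven_labs", "model_id": "eleven_turbo_v2"},
        "endpoint": {
            "url": "wss://api.elevenlabs.io/v1/text-to-speech",  # illustrative URL
            "headers": {"xi-api-key": "YOUR_ELEVENLABS_KEY"},
        },
    },
]
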
diff --git a/src/deepgram/agent/v1/requests/agent_v1settings_agent_speak_endpoint.py b/src/deepgram/agent/v1/requests/agent_v1settings_agent_speak_endpoint.py
new file mode 100644
index 00000000..d90614be
--- /dev/null
+++ b/src/deepgram/agent/v1/requests/agent_v1settings_agent_speak_endpoint.py
@@ -0,0 +1,14 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+from .agent_v1settings_agent_speak_endpoint_endpoint import AgentV1SettingsAgentSpeakEndpointEndpointParams
+from .agent_v1settings_agent_speak_endpoint_provider import AgentV1SettingsAgentSpeakEndpointProviderParams
+
+
+class AgentV1SettingsAgentSpeakEndpointParams(typing_extensions.TypedDict):
+ provider: AgentV1SettingsAgentSpeakEndpointProviderParams
+ endpoint: typing_extensions.NotRequired[AgentV1SettingsAgentSpeakEndpointEndpointParams]
+ """
+ Optional if provider is Deepgram. Required for non-Deepgram TTS providers.
+ When present, must include a url field and a headers object. Valid schemes are https and wss, with wss only supported for Eleven Labs.
+ """
diff --git a/src/deepgram/agent/v1/requests/agent_v1settings_agent_speak_endpoint_endpoint.py b/src/deepgram/agent/v1/requests/agent_v1settings_agent_speak_endpoint_endpoint.py
new file mode 100644
index 00000000..3bc9c86f
--- /dev/null
+++ b/src/deepgram/agent/v1/requests/agent_v1settings_agent_speak_endpoint_endpoint.py
@@ -0,0 +1,20 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import typing_extensions
+
+
+class AgentV1SettingsAgentSpeakEndpointEndpointParams(typing_extensions.TypedDict):
+ """
+ Optional if provider is Deepgram. Required for non-Deepgram TTS providers.
+ When present, must include a url field and a headers object. Valid schemes are https and wss, with wss only supported for Eleven Labs.
+ """
+
+ url: typing_extensions.NotRequired[str]
+ """
+ Custom TTS endpoint URL. Cannot contain `output_format` or `model_id` query
+ parameters when the provider is Eleven Labs.
+ """
+
+ headers: typing_extensions.NotRequired[typing.Dict[str, str]]
diff --git a/src/deepgram/agent/v1/requests/agent_v1settings_agent_speak_endpoint_provider.py b/src/deepgram/agent/v1/requests/agent_v1settings_agent_speak_endpoint_provider.py
new file mode 100644
index 00000000..a826e227
--- /dev/null
+++ b/src/deepgram/agent/v1/requests/agent_v1settings_agent_speak_endpoint_provider.py
@@ -0,0 +1,81 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from __future__ import annotations
+
+import typing
+
+import typing_extensions
+from ..types.agent_v1settings_agent_speak_endpoint_provider_aws_polly_engine import (
+ AgentV1SettingsAgentSpeakEndpointProviderAwsPollyEngine,
+)
+from ..types.agent_v1settings_agent_speak_endpoint_provider_aws_polly_voice import (
+ AgentV1SettingsAgentSpeakEndpointProviderAwsPollyVoice,
+)
+from ..types.agent_v1settings_agent_speak_one_item_provider_cartesia_model_id import (
+ AgentV1SettingsAgentSpeakOneItemProviderCartesiaModelId,
+)
+from ..types.agent_v1settings_agent_speak_one_item_provider_deepgram_model import (
+ AgentV1SettingsAgentSpeakOneItemProviderDeepgramModel,
+)
+from ..types.agent_v1settings_agent_speak_one_item_provider_eleven_labs_model_id import (
+ AgentV1SettingsAgentSpeakOneItemProviderElevenLabsModelId,
+)
+from ..types.agent_v1settings_agent_speak_one_item_provider_open_ai_model import (
+ AgentV1SettingsAgentSpeakOneItemProviderOpenAiModel,
+)
+from ..types.agent_v1settings_agent_speak_one_item_provider_open_ai_voice import (
+ AgentV1SettingsAgentSpeakOneItemProviderOpenAiVoice,
+)
+from .agent_v1settings_agent_speak_endpoint_provider_aws_polly_credentials import (
+ AgentV1SettingsAgentSpeakEndpointProviderAwsPollyCredentialsParams,
+)
+from .agent_v1settings_agent_speak_one_item_provider_cartesia_voice import (
+ AgentV1SettingsAgentSpeakOneItemProviderCartesiaVoiceParams,
+)
+
+
+class AgentV1SettingsAgentSpeakEndpointProvider_DeepgramParams(typing_extensions.TypedDict):
+ type: typing.Literal["deepgram"]
+ version: typing_extensions.NotRequired[typing.Literal["v1"]]
+ model: AgentV1SettingsAgentSpeakOneItemProviderDeepgramModel
+
+
+class AgentV1SettingsAgentSpeakEndpointProvider_ElevenLabsParams(typing_extensions.TypedDict):
+ type: typing.Literal["eleven_labs"]
+ version: typing_extensions.NotRequired[typing.Literal["v1"]]
+ model_id: AgentV1SettingsAgentSpeakOneItemProviderElevenLabsModelId
+ language: typing_extensions.NotRequired[str]
+ language_code: typing_extensions.NotRequired[str]
+
+
+class AgentV1SettingsAgentSpeakEndpointProvider_CartesiaParams(typing_extensions.TypedDict):
+ type: typing.Literal["cartesia"]
+ version: typing_extensions.NotRequired[typing.Literal["2025-03-17"]]
+ model_id: AgentV1SettingsAgentSpeakOneItemProviderCartesiaModelId
+ voice: AgentV1SettingsAgentSpeakOneItemProviderCartesiaVoiceParams
+ language: typing_extensions.NotRequired[str]
+
+
+class AgentV1SettingsAgentSpeakEndpointProvider_OpenAiParams(typing_extensions.TypedDict):
+ type: typing.Literal["open_ai"]
+ version: typing_extensions.NotRequired[typing.Literal["v1"]]
+ model: AgentV1SettingsAgentSpeakOneItemProviderOpenAiModel
+ voice: AgentV1SettingsAgentSpeakOneItemProviderOpenAiVoice
+
+
+class AgentV1SettingsAgentSpeakEndpointProvider_AwsPollyParams(typing_extensions.TypedDict):
+ type: typing.Literal["aws_polly"]
+ voice: AgentV1SettingsAgentSpeakEndpointProviderAwsPollyVoice
+ language: str
+ language_code: typing_extensions.NotRequired[str]
+ engine: AgentV1SettingsAgentSpeakEndpointProviderAwsPollyEngine
+ credentials: AgentV1SettingsAgentSpeakEndpointProviderAwsPollyCredentialsParams
+
+
+AgentV1SettingsAgentSpeakEndpointProviderParams = typing.Union[
+ AgentV1SettingsAgentSpeakEndpointProvider_DeepgramParams,
+ AgentV1SettingsAgentSpeakEndpointProvider_ElevenLabsParams,
+ AgentV1SettingsAgentSpeakEndpointProvider_CartesiaParams,
+ AgentV1SettingsAgentSpeakEndpointProvider_OpenAiParams,
+ AgentV1SettingsAgentSpeakEndpointProvider_AwsPollyParams,
+]
diff --git a/src/deepgram/agent/v1/requests/agent_v1settings_agent_speak_endpoint_provider_aws_polly.py b/src/deepgram/agent/v1/requests/agent_v1settings_agent_speak_endpoint_provider_aws_polly.py
new file mode 100644
index 00000000..7e5c1ede
--- /dev/null
+++ b/src/deepgram/agent/v1/requests/agent_v1settings_agent_speak_endpoint_provider_aws_polly.py
@@ -0,0 +1,32 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+from ..types.agent_v1settings_agent_speak_endpoint_provider_aws_polly_engine import (
+ AgentV1SettingsAgentSpeakEndpointProviderAwsPollyEngine,
+)
+from ..types.agent_v1settings_agent_speak_endpoint_provider_aws_polly_voice import (
+ AgentV1SettingsAgentSpeakEndpointProviderAwsPollyVoice,
+)
+from .agent_v1settings_agent_speak_endpoint_provider_aws_polly_credentials import (
+ AgentV1SettingsAgentSpeakEndpointProviderAwsPollyCredentialsParams,
+)
+
+
+class AgentV1SettingsAgentSpeakEndpointProviderAwsPollyParams(typing_extensions.TypedDict):
+ voice: AgentV1SettingsAgentSpeakEndpointProviderAwsPollyVoice
+ """
+ AWS Polly voice name
+ """
+
+ language: str
+ """
+ Language code to use, e.g. 'en-US'. Corresponds to the `language_code` parameter in the AWS Polly API
+ """
+
+ language_code: typing_extensions.NotRequired[str]
+ """
+ Use the `language` field instead.
+ """
+
+ engine: AgentV1SettingsAgentSpeakEndpointProviderAwsPollyEngine
+ credentials: AgentV1SettingsAgentSpeakEndpointProviderAwsPollyCredentialsParams
diff --git a/src/deepgram/agent/v1/requests/agent_v1settings_agent_speak_endpoint_provider_aws_polly_credentials.py b/src/deepgram/agent/v1/requests/agent_v1settings_agent_speak_endpoint_provider_aws_polly_credentials.py
new file mode 100644
index 00000000..97ad74b1
--- /dev/null
+++ b/src/deepgram/agent/v1/requests/agent_v1settings_agent_speak_endpoint_provider_aws_polly_credentials.py
@@ -0,0 +1,17 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+from ..types.agent_v1settings_agent_speak_endpoint_provider_aws_polly_credentials_type import (
+ AgentV1SettingsAgentSpeakEndpointProviderAwsPollyCredentialsType,
+)
+
+
+class AgentV1SettingsAgentSpeakEndpointProviderAwsPollyCredentialsParams(typing_extensions.TypedDict):
+ type: AgentV1SettingsAgentSpeakEndpointProviderAwsPollyCredentialsType
+ region: str
+ access_key_id: str
+ secret_access_key: str
+ session_token: typing_extensions.NotRequired[str]
+ """
+ Required for STS only
+ """
diff --git a/src/deepgram/agent/v1/requests/agent_v1settings_agent_speak_endpoint_provider_cartesia_voice.py b/src/deepgram/agent/v1/requests/agent_v1settings_agent_speak_endpoint_provider_cartesia_voice.py
new file mode 100644
index 00000000..51bd279a
--- /dev/null
+++ b/src/deepgram/agent/v1/requests/agent_v1settings_agent_speak_endpoint_provider_cartesia_voice.py
@@ -0,0 +1,15 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+
+
+class AgentV1SettingsAgentSpeakEndpointProviderCartesiaVoiceParams(typing_extensions.TypedDict):
+ mode: str
+ """
+ Cartesia voice mode
+ """
+
+ id: str
+ """
+ Cartesia voice ID
+ """
diff --git a/src/deepgram/agent/v1/requests/agent_v1settings_agent_speak_one_item.py b/src/deepgram/agent/v1/requests/agent_v1settings_agent_speak_one_item.py
new file mode 100644
index 00000000..c892106c
--- /dev/null
+++ b/src/deepgram/agent/v1/requests/agent_v1settings_agent_speak_one_item.py
@@ -0,0 +1,14 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+from .agent_v1settings_agent_speak_one_item_endpoint import AgentV1SettingsAgentSpeakOneItemEndpointParams
+from .agent_v1settings_agent_speak_one_item_provider import AgentV1SettingsAgentSpeakOneItemProviderParams
+
+
+class AgentV1SettingsAgentSpeakOneItemParams(typing_extensions.TypedDict):
+ provider: AgentV1SettingsAgentSpeakOneItemProviderParams
+ endpoint: typing_extensions.NotRequired[AgentV1SettingsAgentSpeakOneItemEndpointParams]
+ """
+ Optional if provider is Deepgram. Required for non-Deepgram TTS providers.
+ When present, must include a url field and a headers object. Valid schemes are https and wss, with wss only supported for Eleven Labs.
+ """
diff --git a/src/deepgram/agent/v1/requests/agent_v1settings_agent_speak_one_item_endpoint.py b/src/deepgram/agent/v1/requests/agent_v1settings_agent_speak_one_item_endpoint.py
new file mode 100644
index 00000000..b7ff0269
--- /dev/null
+++ b/src/deepgram/agent/v1/requests/agent_v1settings_agent_speak_one_item_endpoint.py
@@ -0,0 +1,20 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import typing_extensions
+
+
+class AgentV1SettingsAgentSpeakOneItemEndpointParams(typing_extensions.TypedDict):
+ """
+ Optional if provider is Deepgram. Required for non-Deepgram TTS providers.
+ When present, must include a url field and a headers object. Valid schemes are https and wss, with wss only supported for Eleven Labs.
+ """
+
+ url: typing_extensions.NotRequired[str]
+ """
+ Custom TTS endpoint URL. Cannot contain `output_format` or `model_id` query
+ parameters when the provider is Eleven Labs.
+ """
+
+ headers: typing_extensions.NotRequired[typing.Dict[str, str]]
diff --git a/src/deepgram/agent/v1/requests/agent_v1settings_agent_speak_one_item_provider.py b/src/deepgram/agent/v1/requests/agent_v1settings_agent_speak_one_item_provider.py
new file mode 100644
index 00000000..f163893b
--- /dev/null
+++ b/src/deepgram/agent/v1/requests/agent_v1settings_agent_speak_one_item_provider.py
@@ -0,0 +1,81 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from __future__ import annotations
+
+import typing
+
+import typing_extensions
+from ..types.agent_v1settings_agent_speak_one_item_provider_aws_polly_engine import (
+ AgentV1SettingsAgentSpeakOneItemProviderAwsPollyEngine,
+)
+from ..types.agent_v1settings_agent_speak_one_item_provider_aws_polly_voice import (
+ AgentV1SettingsAgentSpeakOneItemProviderAwsPollyVoice,
+)
+from ..types.agent_v1settings_agent_speak_one_item_provider_cartesia_model_id import (
+ AgentV1SettingsAgentSpeakOneItemProviderCartesiaModelId,
+)
+from ..types.agent_v1settings_agent_speak_one_item_provider_deepgram_model import (
+ AgentV1SettingsAgentSpeakOneItemProviderDeepgramModel,
+)
+from ..types.agent_v1settings_agent_speak_one_item_provider_eleven_labs_model_id import (
+ AgentV1SettingsAgentSpeakOneItemProviderElevenLabsModelId,
+)
+from ..types.agent_v1settings_agent_speak_one_item_provider_open_ai_model import (
+ AgentV1SettingsAgentSpeakOneItemProviderOpenAiModel,
+)
+from ..types.agent_v1settings_agent_speak_one_item_provider_open_ai_voice import (
+ AgentV1SettingsAgentSpeakOneItemProviderOpenAiVoice,
+)
+from .agent_v1settings_agent_speak_one_item_provider_aws_polly_credentials import (
+ AgentV1SettingsAgentSpeakOneItemProviderAwsPollyCredentialsParams,
+)
+from .agent_v1settings_agent_speak_one_item_provider_cartesia_voice import (
+ AgentV1SettingsAgentSpeakOneItemProviderCartesiaVoiceParams,
+)
+
+
+class AgentV1SettingsAgentSpeakOneItemProvider_DeepgramParams(typing_extensions.TypedDict):
+ type: typing.Literal["deepgram"]
+ version: typing_extensions.NotRequired[typing.Literal["v1"]]
+ model: AgentV1SettingsAgentSpeakOneItemProviderDeepgramModel
+
+
+class AgentV1SettingsAgentSpeakOneItemProvider_ElevenLabsParams(typing_extensions.TypedDict):
+ type: typing.Literal["eleven_labs"]
+ version: typing_extensions.NotRequired[typing.Literal["v1"]]
+ model_id: AgentV1SettingsAgentSpeakOneItemProviderElevenLabsModelId
+ language: typing_extensions.NotRequired[str]
+ language_code: typing_extensions.NotRequired[str]
+
+
+class AgentV1SettingsAgentSpeakOneItemProvider_CartesiaParams(typing_extensions.TypedDict):
+ type: typing.Literal["cartesia"]
+ version: typing_extensions.NotRequired[typing.Literal["2025-03-17"]]
+ model_id: AgentV1SettingsAgentSpeakOneItemProviderCartesiaModelId
+ voice: AgentV1SettingsAgentSpeakOneItemProviderCartesiaVoiceParams
+ language: typing_extensions.NotRequired[str]
+
+
+class AgentV1SettingsAgentSpeakOneItemProvider_OpenAiParams(typing_extensions.TypedDict):
+ type: typing.Literal["open_ai"]
+ version: typing_extensions.NotRequired[typing.Literal["v1"]]
+ model: AgentV1SettingsAgentSpeakOneItemProviderOpenAiModel
+ voice: AgentV1SettingsAgentSpeakOneItemProviderOpenAiVoice
+
+
+class AgentV1SettingsAgentSpeakOneItemProvider_AwsPollyParams(typing_extensions.TypedDict):
+ type: typing.Literal["aws_polly"]
+ voice: AgentV1SettingsAgentSpeakOneItemProviderAwsPollyVoice
+ language: str
+ language_code: typing_extensions.NotRequired[str]
+ engine: AgentV1SettingsAgentSpeakOneItemProviderAwsPollyEngine
+ credentials: AgentV1SettingsAgentSpeakOneItemProviderAwsPollyCredentialsParams
+
+
+AgentV1SettingsAgentSpeakOneItemProviderParams = typing.Union[
+ AgentV1SettingsAgentSpeakOneItemProvider_DeepgramParams,
+ AgentV1SettingsAgentSpeakOneItemProvider_ElevenLabsParams,
+ AgentV1SettingsAgentSpeakOneItemProvider_CartesiaParams,
+ AgentV1SettingsAgentSpeakOneItemProvider_OpenAiParams,
+ AgentV1SettingsAgentSpeakOneItemProvider_AwsPollyParams,
+]
diff --git a/src/deepgram/agent/v1/requests/agent_v1settings_agent_speak_one_item_provider_aws_polly.py b/src/deepgram/agent/v1/requests/agent_v1settings_agent_speak_one_item_provider_aws_polly.py
new file mode 100644
index 00000000..4e175186
--- /dev/null
+++ b/src/deepgram/agent/v1/requests/agent_v1settings_agent_speak_one_item_provider_aws_polly.py
@@ -0,0 +1,32 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+from ..types.agent_v1settings_agent_speak_one_item_provider_aws_polly_engine import (
+ AgentV1SettingsAgentSpeakOneItemProviderAwsPollyEngine,
+)
+from ..types.agent_v1settings_agent_speak_one_item_provider_aws_polly_voice import (
+ AgentV1SettingsAgentSpeakOneItemProviderAwsPollyVoice,
+)
+from .agent_v1settings_agent_speak_one_item_provider_aws_polly_credentials import (
+ AgentV1SettingsAgentSpeakOneItemProviderAwsPollyCredentialsParams,
+)
+
+
+class AgentV1SettingsAgentSpeakOneItemProviderAwsPollyParams(typing_extensions.TypedDict):
+ voice: AgentV1SettingsAgentSpeakOneItemProviderAwsPollyVoice
+ """
+ AWS Polly voice name
+ """
+
+ language: str
+ """
+ Language code to use, e.g. 'en-US'. Corresponds to the `language_code` parameter in the AWS Polly API
+ """
+
+ language_code: typing_extensions.NotRequired[str]
+ """
+ Use the `language` field instead.
+ """
+
+ engine: AgentV1SettingsAgentSpeakOneItemProviderAwsPollyEngine
+ credentials: AgentV1SettingsAgentSpeakOneItemProviderAwsPollyCredentialsParams
diff --git a/src/deepgram/agent/v1/requests/agent_v1settings_agent_speak_one_item_provider_aws_polly_credentials.py b/src/deepgram/agent/v1/requests/agent_v1settings_agent_speak_one_item_provider_aws_polly_credentials.py
new file mode 100644
index 00000000..7d94447a
--- /dev/null
+++ b/src/deepgram/agent/v1/requests/agent_v1settings_agent_speak_one_item_provider_aws_polly_credentials.py
@@ -0,0 +1,17 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+from ..types.agent_v1settings_agent_speak_one_item_provider_aws_polly_credentials_type import (
+ AgentV1SettingsAgentSpeakOneItemProviderAwsPollyCredentialsType,
+)
+
+
+class AgentV1SettingsAgentSpeakOneItemProviderAwsPollyCredentialsParams(typing_extensions.TypedDict):
+ type: AgentV1SettingsAgentSpeakOneItemProviderAwsPollyCredentialsType
+ region: str
+ access_key_id: str
+ secret_access_key: str
+ session_token: typing_extensions.NotRequired[str]
+ """
+ Required for STS only
+ """
diff --git a/src/deepgram/agent/v1/requests/agent_v1settings_agent_speak_one_item_provider_cartesia_voice.py b/src/deepgram/agent/v1/requests/agent_v1settings_agent_speak_one_item_provider_cartesia_voice.py
new file mode 100644
index 00000000..b2765075
--- /dev/null
+++ b/src/deepgram/agent/v1/requests/agent_v1settings_agent_speak_one_item_provider_cartesia_voice.py
@@ -0,0 +1,15 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+
+
+class AgentV1SettingsAgentSpeakOneItemProviderCartesiaVoiceParams(typing_extensions.TypedDict):
+ mode: str
+ """
+ Cartesia voice mode
+ """
+
+ id: str
+ """
+ Cartesia voice ID
+ """
diff --git a/src/deepgram/agent/v1/requests/agent_v1settings_agent_think.py b/src/deepgram/agent/v1/requests/agent_v1settings_agent_think.py
new file mode 100644
index 00000000..2d5d22c2
--- /dev/null
+++ b/src/deepgram/agent/v1/requests/agent_v1settings_agent_think.py
@@ -0,0 +1,24 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import typing_extensions
+from .agent_v1settings_agent_think_context_length import AgentV1SettingsAgentThinkContextLengthParams
+from .agent_v1settings_agent_think_endpoint import AgentV1SettingsAgentThinkEndpointParams
+from .agent_v1settings_agent_think_functions_item import AgentV1SettingsAgentThinkFunctionsItemParams
+from .agent_v1settings_agent_think_provider import AgentV1SettingsAgentThinkProviderParams
+
+
+class AgentV1SettingsAgentThinkParams(typing_extensions.TypedDict):
+ provider: AgentV1SettingsAgentThinkProviderParams
+ endpoint: typing_extensions.NotRequired[AgentV1SettingsAgentThinkEndpointParams]
+ """
+ Optional for non-Deepgram LLM providers. When present, must include a url field and a headers object
+ """
+
+ functions: typing_extensions.NotRequired[typing.Sequence[AgentV1SettingsAgentThinkFunctionsItemParams]]
+ prompt: typing_extensions.NotRequired[str]
+ context_length: typing_extensions.NotRequired[AgentV1SettingsAgentThinkContextLengthParams]
+ """
+ Specifies the number of characters retained in context between user messages, agent responses, and function calls. This setting is only configurable when a custom think endpoint is used
+ """
diff --git a/src/deepgram/agent/v1/requests/agent_v1settings_agent_think_context_length.py b/src/deepgram/agent/v1/requests/agent_v1settings_agent_think_context_length.py
new file mode 100644
index 00000000..f96cd4c3
--- /dev/null
+++ b/src/deepgram/agent/v1/requests/agent_v1settings_agent_think_context_length.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+AgentV1SettingsAgentThinkContextLengthParams = typing.Union[typing.Literal["max"], float]
diff --git a/src/deepgram/agent/v1/requests/agent_v1settings_agent_think_endpoint.py b/src/deepgram/agent/v1/requests/agent_v1settings_agent_think_endpoint.py
new file mode 100644
index 00000000..396bbc07
--- /dev/null
+++ b/src/deepgram/agent/v1/requests/agent_v1settings_agent_think_endpoint.py
@@ -0,0 +1,21 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import typing_extensions
+
+
+class AgentV1SettingsAgentThinkEndpointParams(typing_extensions.TypedDict):
+ """
+ Optional for non-Deepgram LLM providers. When present, must include a url field and a headers object
+ """
+
+ url: typing_extensions.NotRequired[str]
+ """
+ Custom LLM endpoint URL
+ """
+
+ headers: typing_extensions.NotRequired[typing.Dict[str, str]]
+ """
+ Custom headers for the endpoint
+ """
diff --git a/src/deepgram/agent/v1/requests/agent_v1settings_agent_think_functions_item.py b/src/deepgram/agent/v1/requests/agent_v1settings_agent_think_functions_item.py
new file mode 100644
index 00000000..b316f5f0
--- /dev/null
+++ b/src/deepgram/agent/v1/requests/agent_v1settings_agent_think_functions_item.py
@@ -0,0 +1,28 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import typing_extensions
+from .agent_v1settings_agent_think_functions_item_endpoint import AgentV1SettingsAgentThinkFunctionsItemEndpointParams
+
+
+class AgentV1SettingsAgentThinkFunctionsItemParams(typing_extensions.TypedDict):
+ name: typing_extensions.NotRequired[str]
+ """
+ Function name
+ """
+
+ description: typing_extensions.NotRequired[str]
+ """
+ Function description
+ """
+
+ parameters: typing_extensions.NotRequired[typing.Dict[str, typing.Any]]
+ """
+ Function parameters
+ """
+
+ endpoint: typing_extensions.NotRequired[AgentV1SettingsAgentThinkFunctionsItemEndpointParams]
+ """
+ The function endpoint to call. If not passed, the function is called client-side
+ """
diff --git a/src/deepgram/agent/v1/requests/agent_v1settings_agent_think_functions_item_endpoint.py b/src/deepgram/agent/v1/requests/agent_v1settings_agent_think_functions_item_endpoint.py
new file mode 100644
index 00000000..19e76bdc
--- /dev/null
+++ b/src/deepgram/agent/v1/requests/agent_v1settings_agent_think_functions_item_endpoint.py
@@ -0,0 +1,23 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import typing_extensions
+
+
+class AgentV1SettingsAgentThinkFunctionsItemEndpointParams(typing_extensions.TypedDict):
+ """
+ The function endpoint to call. If not passed, the function is called client-side
+ """
+
+ url: typing_extensions.NotRequired[str]
+ """
+ Endpoint URL
+ """
+
+ method: typing_extensions.NotRequired[str]
+ """
+ HTTP method
+ """
+
+ headers: typing_extensions.NotRequired[typing.Dict[str, str]]
diff --git a/src/deepgram/agent/v1/requests/agent_v1settings_agent_think_provider.py b/src/deepgram/agent/v1/requests/agent_v1settings_agent_think_provider.py
new file mode 100644
index 00000000..49e61c0f
--- /dev/null
+++ b/src/deepgram/agent/v1/requests/agent_v1settings_agent_think_provider.py
@@ -0,0 +1,67 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from __future__ import annotations
+
+import typing
+
+import typing_extensions
+from ..types.agent_v1settings_agent_speak_one_item_provider_open_ai_model import (
+ AgentV1SettingsAgentSpeakOneItemProviderOpenAiModel,
+)
+from ..types.agent_v1settings_agent_speak_one_item_provider_open_ai_voice import (
+ AgentV1SettingsAgentSpeakOneItemProviderOpenAiVoice,
+)
+from ..types.agent_v1settings_agent_think_provider_anthropic_model import (
+ AgentV1SettingsAgentThinkProviderAnthropicModel,
+)
+from ..types.agent_v1settings_agent_think_provider_aws_bedrock_model import (
+ AgentV1SettingsAgentThinkProviderAwsBedrockModel,
+)
+from ..types.agent_v1settings_agent_think_provider_google_model import AgentV1SettingsAgentThinkProviderGoogleModel
+from .agent_v1settings_agent_think_provider_aws_bedrock_credentials import (
+ AgentV1SettingsAgentThinkProviderAwsBedrockCredentialsParams,
+)
+
+
+class AgentV1SettingsAgentThinkProvider_OpenAiParams(typing_extensions.TypedDict):
+ type: typing.Literal["open_ai"]
+ version: typing_extensions.NotRequired[typing.Literal["v1"]]
+ model: AgentV1SettingsAgentSpeakOneItemProviderOpenAiModel
+ voice: AgentV1SettingsAgentSpeakOneItemProviderOpenAiVoice
+
+
+class AgentV1SettingsAgentThinkProvider_AwsBedrockParams(typing_extensions.TypedDict):
+ type: typing.Literal["aws_bedrock"]
+ model: AgentV1SettingsAgentThinkProviderAwsBedrockModel
+ temperature: typing_extensions.NotRequired[float]
+ credentials: typing_extensions.NotRequired[AgentV1SettingsAgentThinkProviderAwsBedrockCredentialsParams]
+
+
+class AgentV1SettingsAgentThinkProvider_AnthropicParams(typing_extensions.TypedDict):
+ type: typing.Literal["anthropic"]
+ version: typing_extensions.NotRequired[typing.Literal["v1"]]
+ model: AgentV1SettingsAgentThinkProviderAnthropicModel
+ temperature: typing_extensions.NotRequired[float]
+
+
+class AgentV1SettingsAgentThinkProvider_GoogleParams(typing_extensions.TypedDict):
+ type: typing.Literal["google"]
+ version: typing_extensions.NotRequired[typing.Literal["v1beta"]]
+ model: AgentV1SettingsAgentThinkProviderGoogleModel
+ temperature: typing_extensions.NotRequired[float]
+
+
+class AgentV1SettingsAgentThinkProvider_GroqParams(typing_extensions.TypedDict):
+ type: typing.Literal["groq"]
+ version: typing_extensions.NotRequired[typing.Literal["v1"]]
+ model: typing.Literal["openai/gpt-oss-20b"]
+ temperature: typing_extensions.NotRequired[float]
+
+
+AgentV1SettingsAgentThinkProviderParams = typing.Union[
+ AgentV1SettingsAgentThinkProvider_OpenAiParams,
+ AgentV1SettingsAgentThinkProvider_AwsBedrockParams,
+ AgentV1SettingsAgentThinkProvider_AnthropicParams,
+ AgentV1SettingsAgentThinkProvider_GoogleParams,
+ AgentV1SettingsAgentThinkProvider_GroqParams,
+]
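
The think provider union is discriminated by `type`; note that the generated OpenAI variant also carries a required `voice` field (shared with the speak OpenAI type). A sketch of two variants with illustrative model and voice names:

# Two think-provider variants from the union; names must match the generated
# enum types referenced above.
think_open_ai = {
    "type": "open_ai",
    "model": "gpt-4o-mini",
    "voice": "alloy",  # required by the generated OpenAI variant
}

think_anthropic = {
    "type": "anthropic",
    "model": "claude-3-5-haiku-latest",
    "temperature": 0.5,
}
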
diff --git a/src/deepgram/agent/v1/requests/agent_v1settings_agent_think_provider_aws_bedrock.py b/src/deepgram/agent/v1/requests/agent_v1settings_agent_think_provider_aws_bedrock.py
new file mode 100644
index 00000000..bf1ebfea
--- /dev/null
+++ b/src/deepgram/agent/v1/requests/agent_v1settings_agent_think_provider_aws_bedrock.py
@@ -0,0 +1,26 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+from ..types.agent_v1settings_agent_think_provider_aws_bedrock_model import (
+ AgentV1SettingsAgentThinkProviderAwsBedrockModel,
+)
+from .agent_v1settings_agent_think_provider_aws_bedrock_credentials import (
+ AgentV1SettingsAgentThinkProviderAwsBedrockCredentialsParams,
+)
+
+
+class AgentV1SettingsAgentThinkProviderAwsBedrockParams(typing_extensions.TypedDict):
+ model: AgentV1SettingsAgentThinkProviderAwsBedrockModel
+ """
+ AWS Bedrock model to use
+ """
+
+ temperature: typing_extensions.NotRequired[float]
+ """
+ AWS Bedrock temperature (0-2)
+ """
+
+ credentials: typing_extensions.NotRequired[AgentV1SettingsAgentThinkProviderAwsBedrockCredentialsParams]
+ """
+ AWS credentials type (STS short-lived or IAM long-lived)
+ """
diff --git a/src/deepgram/agent/v1/requests/agent_v1settings_agent_think_provider_aws_bedrock_credentials.py b/src/deepgram/agent/v1/requests/agent_v1settings_agent_think_provider_aws_bedrock_credentials.py
new file mode 100644
index 00000000..a76accbe
--- /dev/null
+++ b/src/deepgram/agent/v1/requests/agent_v1settings_agent_think_provider_aws_bedrock_credentials.py
@@ -0,0 +1,37 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+from ..types.agent_v1settings_agent_think_provider_aws_bedrock_credentials_type import (
+ AgentV1SettingsAgentThinkProviderAwsBedrockCredentialsType,
+)
+
+
+class AgentV1SettingsAgentThinkProviderAwsBedrockCredentialsParams(typing_extensions.TypedDict):
+ """
+ AWS credentials type (STS short-lived or IAM long-lived)
+ """
+
+ type: typing_extensions.NotRequired[AgentV1SettingsAgentThinkProviderAwsBedrockCredentialsType]
+ """
+ AWS credentials type (STS short-lived or IAM long-lived)
+ """
+
+ region: typing_extensions.NotRequired[str]
+ """
+ AWS region
+ """
+
+ access_key_id: typing_extensions.NotRequired[str]
+ """
+ AWS access key
+ """
+
+ secret_access_key: typing_extensions.NotRequired[str]
+ """
+ AWS secret access key
+ """
+
+ session_token: typing_extensions.NotRequired[str]
+ """
+ AWS session token (required for STS only)
+ """
diff --git a/src/deepgram/agent/v1/requests/agent_v1settings_applied.py b/src/deepgram/agent/v1/requests/agent_v1settings_applied.py
new file mode 100644
index 00000000..32bca304
--- /dev/null
+++ b/src/deepgram/agent/v1/requests/agent_v1settings_applied.py
@@ -0,0 +1,12 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import typing_extensions
+
+
+class AgentV1SettingsAppliedParams(typing_extensions.TypedDict):
+ type: typing.Literal["SettingsApplied"]
+ """
+ Message type identifier for settings applied confirmation
+ """
diff --git a/src/deepgram/agent/v1/requests/agent_v1settings_audio.py b/src/deepgram/agent/v1/requests/agent_v1settings_audio.py
new file mode 100644
index 00000000..0c09d60f
--- /dev/null
+++ b/src/deepgram/agent/v1/requests/agent_v1settings_audio.py
@@ -0,0 +1,17 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+from .agent_v1settings_audio_input import AgentV1SettingsAudioInputParams
+from .agent_v1settings_audio_output import AgentV1SettingsAudioOutputParams
+
+
+class AgentV1SettingsAudioParams(typing_extensions.TypedDict):
+ input: typing_extensions.NotRequired[AgentV1SettingsAudioInputParams]
+ """
+ Audio input configuration settings. If omitted, defaults to encoding=linear16 and sample_rate=24000. Higher sample rates like 44100 Hz provide better audio quality.
+ """
+
+ output: typing_extensions.NotRequired[AgentV1SettingsAudioOutputParams]
+ """
+ Audio output configuration settings
+ """
diff --git a/src/deepgram/agent/v1/requests/agent_v1settings_audio_input.py b/src/deepgram/agent/v1/requests/agent_v1settings_audio_input.py
new file mode 100644
index 00000000..91931180
--- /dev/null
+++ b/src/deepgram/agent/v1/requests/agent_v1settings_audio_input.py
@@ -0,0 +1,20 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+from ..types.agent_v1settings_audio_input_encoding import AgentV1SettingsAudioInputEncoding
+
+
+class AgentV1SettingsAudioInputParams(typing_extensions.TypedDict):
+ """
+ Audio input configuration settings. If omitted, defaults to encoding=linear16 and sample_rate=24000. Higher sample rates like 44100 Hz provide better audio quality.
+ """
+
+ encoding: AgentV1SettingsAudioInputEncoding
+ """
+ Audio encoding format
+ """
+
+ sample_rate: float
+ """
+ Sample rate in Hz. Common values are 16000, 24000, 44100, 48000
+ """
diff --git a/src/deepgram/agent/v1/requests/agent_v1settings_audio_output.py b/src/deepgram/agent/v1/requests/agent_v1settings_audio_output.py
new file mode 100644
index 00000000..32273699
--- /dev/null
+++ b/src/deepgram/agent/v1/requests/agent_v1settings_audio_output.py
@@ -0,0 +1,30 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+from ..types.agent_v1settings_audio_output_encoding import AgentV1SettingsAudioOutputEncoding
+
+
+class AgentV1SettingsAudioOutputParams(typing_extensions.TypedDict):
+ """
+ Audio output configuration settings
+ """
+
+ encoding: typing_extensions.NotRequired[AgentV1SettingsAudioOutputEncoding]
+ """
+ Audio encoding format for streaming TTS output
+ """
+
+ sample_rate: typing_extensions.NotRequired[float]
+ """
+ Sample rate in Hz
+ """
+
+ bitrate: typing_extensions.NotRequired[float]
+ """
+ Audio bitrate in bits per second
+ """
+
+ container: typing_extensions.NotRequired[str]
+ """
+ Audio container format. If omitted, defaults to 'none'
+ """
diff --git a/src/deepgram/agent/v1/requests/agent_v1settings_flags.py b/src/deepgram/agent/v1/requests/agent_v1settings_flags.py
new file mode 100644
index 00000000..737233a4
--- /dev/null
+++ b/src/deepgram/agent/v1/requests/agent_v1settings_flags.py
@@ -0,0 +1,10 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+
+
+class AgentV1SettingsFlagsParams(typing_extensions.TypedDict):
+ history: typing_extensions.NotRequired[bool]
+ """
+ Enable or disable history message reporting
+ """
diff --git a/src/deepgram/agent/v1/requests/agent_v1speak_updated.py b/src/deepgram/agent/v1/requests/agent_v1speak_updated.py
new file mode 100644
index 00000000..908d6639
--- /dev/null
+++ b/src/deepgram/agent/v1/requests/agent_v1speak_updated.py
@@ -0,0 +1,12 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import typing_extensions
+
+
+class AgentV1SpeakUpdatedParams(typing_extensions.TypedDict):
+ type: typing.Literal["SpeakUpdated"]
+ """
+ Message type identifier for speak update confirmation
+ """
diff --git a/src/deepgram/agent/v1/requests/agent_v1update_prompt.py b/src/deepgram/agent/v1/requests/agent_v1update_prompt.py
new file mode 100644
index 00000000..8f363a56
--- /dev/null
+++ b/src/deepgram/agent/v1/requests/agent_v1update_prompt.py
@@ -0,0 +1,17 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import typing_extensions
+
+
+class AgentV1UpdatePromptParams(typing_extensions.TypedDict):
+ type: typing.Literal["UpdatePrompt"]
+ """
+ Message type identifier for prompt update request
+ """
+
+ prompt: str
+ """
+ The new system prompt to be used by the agent
+ """
diff --git a/src/deepgram/agent/v1/requests/agent_v1update_speak.py b/src/deepgram/agent/v1/requests/agent_v1update_speak.py
new file mode 100644
index 00000000..b86d1240
--- /dev/null
+++ b/src/deepgram/agent/v1/requests/agent_v1update_speak.py
@@ -0,0 +1,18 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import typing_extensions
+from .agent_v1update_speak_speak import AgentV1UpdateSpeakSpeakParams
+
+
+class AgentV1UpdateSpeakParams(typing_extensions.TypedDict):
+ type: typing.Literal["UpdateSpeak"]
+ """
+ Message type identifier for updating the speak model
+ """
+
+ speak: AgentV1UpdateSpeakSpeakParams
+ """
+ Configuration for the speak model. Optional; defaults to the latest Deepgram TTS model
+ """
diff --git a/src/deepgram/agent/v1/requests/agent_v1update_speak_speak.py b/src/deepgram/agent/v1/requests/agent_v1update_speak_speak.py
new file mode 100644
index 00000000..16a16e01
--- /dev/null
+++ b/src/deepgram/agent/v1/requests/agent_v1update_speak_speak.py
@@ -0,0 +1,18 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+from .agent_v1update_speak_speak_endpoint import AgentV1UpdateSpeakSpeakEndpointParams
+from .agent_v1update_speak_speak_provider import AgentV1UpdateSpeakSpeakProviderParams
+
+
+class AgentV1UpdateSpeakSpeakParams(typing_extensions.TypedDict):
+ """
+ Configuration for the speak model. Optional; defaults to the latest Deepgram TTS model
+ """
+
+ provider: AgentV1UpdateSpeakSpeakProviderParams
+ endpoint: typing_extensions.NotRequired[AgentV1UpdateSpeakSpeakEndpointParams]
+ """
+ Optional if provider is Deepgram. Required for non-Deepgram TTS providers.
+ When present, must include a url field and a headers object. Valid schemes are https and wss, with wss only supported for Eleven Labs.
+ """
diff --git a/src/deepgram/agent/v1/requests/agent_v1update_speak_speak_endpoint.py b/src/deepgram/agent/v1/requests/agent_v1update_speak_speak_endpoint.py
new file mode 100644
index 00000000..43cdb3af
--- /dev/null
+++ b/src/deepgram/agent/v1/requests/agent_v1update_speak_speak_endpoint.py
@@ -0,0 +1,20 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import typing_extensions
+
+
+class AgentV1UpdateSpeakSpeakEndpointParams(typing_extensions.TypedDict):
+ """
+ Optional if provider is Deepgram. Required for non-Deepgram TTS providers.
+ When present, must include a url field and a headers object. Valid schemes are https and wss, with wss only supported for Eleven Labs.
+ """
+
+ url: typing_extensions.NotRequired[str]
+ """
+ Custom TTS endpoint URL. Cannot contain `output_format` or `model_id` query
+ parameters when the provider is Eleven Labs.
+ """
+
+ headers: typing_extensions.NotRequired[typing.Dict[str, str]]
diff --git a/src/deepgram/agent/v1/requests/agent_v1update_speak_speak_provider.py b/src/deepgram/agent/v1/requests/agent_v1update_speak_speak_provider.py
new file mode 100644
index 00000000..0817df32
--- /dev/null
+++ b/src/deepgram/agent/v1/requests/agent_v1update_speak_speak_provider.py
@@ -0,0 +1,67 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from __future__ import annotations
+
+import typing
+
+import typing_extensions
+from ..types.agent_v1update_speak_speak_provider_aws_polly_engine import AgentV1UpdateSpeakSpeakProviderAwsPollyEngine
+from ..types.agent_v1update_speak_speak_provider_aws_polly_voice import AgentV1UpdateSpeakSpeakProviderAwsPollyVoice
+from ..types.agent_v1update_speak_speak_provider_cartesia_model_id import AgentV1UpdateSpeakSpeakProviderCartesiaModelId
+from ..types.agent_v1update_speak_speak_provider_deepgram_model import AgentV1UpdateSpeakSpeakProviderDeepgramModel
+from ..types.agent_v1update_speak_speak_provider_eleven_labs_model_id import (
+ AgentV1UpdateSpeakSpeakProviderElevenLabsModelId,
+)
+from ..types.agent_v1update_speak_speak_provider_open_ai_model import AgentV1UpdateSpeakSpeakProviderOpenAiModel
+from ..types.agent_v1update_speak_speak_provider_open_ai_voice import AgentV1UpdateSpeakSpeakProviderOpenAiVoice
+from .agent_v1update_speak_speak_provider_aws_polly_credentials import (
+ AgentV1UpdateSpeakSpeakProviderAwsPollyCredentialsParams,
+)
+from .agent_v1update_speak_speak_provider_cartesia_voice import AgentV1UpdateSpeakSpeakProviderCartesiaVoiceParams
+
+
+class AgentV1UpdateSpeakSpeakProvider_DeepgramParams(typing_extensions.TypedDict):
+ type: typing.Literal["deepgram"]
+ version: typing_extensions.NotRequired[typing.Literal["v1"]]
+ model: AgentV1UpdateSpeakSpeakProviderDeepgramModel
+
+
+class AgentV1UpdateSpeakSpeakProvider_ElevenLabsParams(typing_extensions.TypedDict):
+ type: typing.Literal["eleven_labs"]
+ version: typing_extensions.NotRequired[typing.Literal["v1"]]
+ model_id: AgentV1UpdateSpeakSpeakProviderElevenLabsModelId
+ language: typing_extensions.NotRequired[str]
+ language_code: typing_extensions.NotRequired[str]
+
+
+class AgentV1UpdateSpeakSpeakProvider_CartesiaParams(typing_extensions.TypedDict):
+ type: typing.Literal["cartesia"]
+ version: typing_extensions.NotRequired[typing.Literal["2025-03-17"]]
+ model_id: AgentV1UpdateSpeakSpeakProviderCartesiaModelId
+ voice: AgentV1UpdateSpeakSpeakProviderCartesiaVoiceParams
+ language: typing_extensions.NotRequired[str]
+
+
+class AgentV1UpdateSpeakSpeakProvider_OpenAiParams(typing_extensions.TypedDict):
+ type: typing.Literal["open_ai"]
+ version: typing_extensions.NotRequired[typing.Literal["v1"]]
+ model: AgentV1UpdateSpeakSpeakProviderOpenAiModel
+ voice: AgentV1UpdateSpeakSpeakProviderOpenAiVoice
+
+
+class AgentV1UpdateSpeakSpeakProvider_AwsPollyParams(typing_extensions.TypedDict):
+ type: typing.Literal["aws_polly"]
+ voice: AgentV1UpdateSpeakSpeakProviderAwsPollyVoice
+ language: str
+ language_code: typing_extensions.NotRequired[str]
+ engine: AgentV1UpdateSpeakSpeakProviderAwsPollyEngine
+ credentials: AgentV1UpdateSpeakSpeakProviderAwsPollyCredentialsParams
+
+
+AgentV1UpdateSpeakSpeakProviderParams = typing.Union[
+ AgentV1UpdateSpeakSpeakProvider_DeepgramParams,
+ AgentV1UpdateSpeakSpeakProvider_ElevenLabsParams,
+ AgentV1UpdateSpeakSpeakProvider_CartesiaParams,
+ AgentV1UpdateSpeakSpeakProvider_OpenAiParams,
+ AgentV1UpdateSpeakSpeakProvider_AwsPollyParams,
+]
diff --git a/src/deepgram/agent/v1/requests/agent_v1update_speak_speak_provider_aws_polly.py b/src/deepgram/agent/v1/requests/agent_v1update_speak_speak_provider_aws_polly.py
new file mode 100644
index 00000000..adb96400
--- /dev/null
+++ b/src/deepgram/agent/v1/requests/agent_v1update_speak_speak_provider_aws_polly.py
@@ -0,0 +1,28 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+from ..types.agent_v1update_speak_speak_provider_aws_polly_engine import AgentV1UpdateSpeakSpeakProviderAwsPollyEngine
+from ..types.agent_v1update_speak_speak_provider_aws_polly_voice import AgentV1UpdateSpeakSpeakProviderAwsPollyVoice
+from .agent_v1update_speak_speak_provider_aws_polly_credentials import (
+ AgentV1UpdateSpeakSpeakProviderAwsPollyCredentialsParams,
+)
+
+
+class AgentV1UpdateSpeakSpeakProviderAwsPollyParams(typing_extensions.TypedDict):
+ voice: AgentV1UpdateSpeakSpeakProviderAwsPollyVoice
+ """
+ AWS Polly voice name
+ """
+
+ language: str
+ """
+ Language code to use, e.g. 'en-US'. Corresponds to the `language_code` parameter in the AWS Polly API
+ """
+
+ language_code: typing_extensions.NotRequired[str]
+ """
+ Use the `language` field instead.
+ """
+
+ engine: AgentV1UpdateSpeakSpeakProviderAwsPollyEngine
+ credentials: AgentV1UpdateSpeakSpeakProviderAwsPollyCredentialsParams
diff --git a/src/deepgram/agent/v1/requests/agent_v1update_speak_speak_provider_aws_polly_credentials.py b/src/deepgram/agent/v1/requests/agent_v1update_speak_speak_provider_aws_polly_credentials.py
new file mode 100644
index 00000000..ff643278
--- /dev/null
+++ b/src/deepgram/agent/v1/requests/agent_v1update_speak_speak_provider_aws_polly_credentials.py
@@ -0,0 +1,17 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+from ..types.agent_v1update_speak_speak_provider_aws_polly_credentials_type import (
+ AgentV1UpdateSpeakSpeakProviderAwsPollyCredentialsType,
+)
+
+
+class AgentV1UpdateSpeakSpeakProviderAwsPollyCredentialsParams(typing_extensions.TypedDict):
+ type: AgentV1UpdateSpeakSpeakProviderAwsPollyCredentialsType
+ region: str
+ access_key_id: str
+ secret_access_key: str
+ session_token: typing_extensions.NotRequired[str]
+ """
+ Required for STS only
+ """
diff --git a/src/deepgram/agent/v1/requests/agent_v1update_speak_speak_provider_cartesia.py b/src/deepgram/agent/v1/requests/agent_v1update_speak_speak_provider_cartesia.py
new file mode 100644
index 00000000..72e6c4a4
--- /dev/null
+++ b/src/deepgram/agent/v1/requests/agent_v1update_speak_speak_provider_cartesia.py
@@ -0,0 +1,25 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import typing_extensions
+from ..types.agent_v1update_speak_speak_provider_cartesia_model_id import AgentV1UpdateSpeakSpeakProviderCartesiaModelId
+from .agent_v1update_speak_speak_provider_cartesia_voice import AgentV1UpdateSpeakSpeakProviderCartesiaVoiceParams
+
+
+class AgentV1UpdateSpeakSpeakProviderCartesiaParams(typing_extensions.TypedDict):
+ version: typing_extensions.NotRequired[typing.Literal["2025-03-17"]]
+ """
+ The API version header for the Cartesia text-to-speech API
+ """
+
+ model_id: AgentV1UpdateSpeakSpeakProviderCartesiaModelId
+ """
+ Cartesia model ID
+ """
+
+ voice: AgentV1UpdateSpeakSpeakProviderCartesiaVoiceParams
+ language: typing_extensions.NotRequired[str]
+ """
+ Cartesia language code
+ """
diff --git a/src/deepgram/agent/v1/requests/agent_v1update_speak_speak_provider_cartesia_voice.py b/src/deepgram/agent/v1/requests/agent_v1update_speak_speak_provider_cartesia_voice.py
new file mode 100644
index 00000000..3ff2e8be
--- /dev/null
+++ b/src/deepgram/agent/v1/requests/agent_v1update_speak_speak_provider_cartesia_voice.py
@@ -0,0 +1,15 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+
+
+class AgentV1UpdateSpeakSpeakProviderCartesiaVoiceParams(typing_extensions.TypedDict):
+ mode: str
+ """
+ Cartesia voice mode
+ """
+
+ id: str
+ """
+ Cartesia voice ID
+ """
diff --git a/src/deepgram/agent/v1/requests/agent_v1update_speak_speak_provider_deepgram.py b/src/deepgram/agent/v1/requests/agent_v1update_speak_speak_provider_deepgram.py
new file mode 100644
index 00000000..54c067c7
--- /dev/null
+++ b/src/deepgram/agent/v1/requests/agent_v1update_speak_speak_provider_deepgram.py
@@ -0,0 +1,18 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import typing_extensions
+from ..types.agent_v1update_speak_speak_provider_deepgram_model import AgentV1UpdateSpeakSpeakProviderDeepgramModel
+
+
+class AgentV1UpdateSpeakSpeakProviderDeepgramParams(typing_extensions.TypedDict):
+ version: typing_extensions.NotRequired[typing.Literal["v1"]]
+ """
+ The REST API version for the Deepgram text-to-speech API
+ """
+
+ model: AgentV1UpdateSpeakSpeakProviderDeepgramModel
+ """
+ Deepgram TTS model
+ """
diff --git a/src/deepgram/agent/v1/requests/agent_v1update_speak_speak_provider_eleven_labs.py b/src/deepgram/agent/v1/requests/agent_v1update_speak_speak_provider_eleven_labs.py
new file mode 100644
index 00000000..9cd15531
--- /dev/null
+++ b/src/deepgram/agent/v1/requests/agent_v1update_speak_speak_provider_eleven_labs.py
@@ -0,0 +1,30 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import typing_extensions
+from ..types.agent_v1update_speak_speak_provider_eleven_labs_model_id import (
+ AgentV1UpdateSpeakSpeakProviderElevenLabsModelId,
+)
+
+
+class AgentV1UpdateSpeakSpeakProviderElevenLabsParams(typing_extensions.TypedDict):
+ version: typing_extensions.NotRequired[typing.Literal["v1"]]
+ """
+ The REST API version for the ElevenLabs text-to-speech API
+ """
+
+ model_id: AgentV1UpdateSpeakSpeakProviderElevenLabsModelId
+ """
+ Eleven Labs model ID
+ """
+
+ language: typing_extensions.NotRequired[str]
+ """
+ Language code to use, e.g. 'en-US'. Corresponds to the `language_code` parameter in the ElevenLabs API
+ """
+
+ language_code: typing_extensions.NotRequired[str]
+ """
+ Use the `language` field instead.
+ """
diff --git a/src/deepgram/agent/v1/requests/agent_v1update_speak_speak_provider_open_ai.py b/src/deepgram/agent/v1/requests/agent_v1update_speak_speak_provider_open_ai.py
new file mode 100644
index 00000000..f12d8a7d
--- /dev/null
+++ b/src/deepgram/agent/v1/requests/agent_v1update_speak_speak_provider_open_ai.py
@@ -0,0 +1,24 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import typing_extensions
+from ..types.agent_v1update_speak_speak_provider_open_ai_model import AgentV1UpdateSpeakSpeakProviderOpenAiModel
+from ..types.agent_v1update_speak_speak_provider_open_ai_voice import AgentV1UpdateSpeakSpeakProviderOpenAiVoice
+
+
+class AgentV1UpdateSpeakSpeakProviderOpenAiParams(typing_extensions.TypedDict):
+ version: typing_extensions.NotRequired[typing.Literal["v1"]]
+ """
+ The REST API version for the OpenAI text-to-speech API
+ """
+
+ model: AgentV1UpdateSpeakSpeakProviderOpenAiModel
+ """
+ OpenAI TTS model
+ """
+
+ voice: AgentV1UpdateSpeakSpeakProviderOpenAiVoice
+ """
+ OpenAI voice
+ """
diff --git a/src/deepgram/agent/v1/requests/agent_v1user_started_speaking.py b/src/deepgram/agent/v1/requests/agent_v1user_started_speaking.py
new file mode 100644
index 00000000..c883119c
--- /dev/null
+++ b/src/deepgram/agent/v1/requests/agent_v1user_started_speaking.py
@@ -0,0 +1,12 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import typing_extensions
+
+
+class AgentV1UserStartedSpeakingParams(typing_extensions.TypedDict):
+ type: typing.Literal["UserStartedSpeaking"]
+ """
+ Message type identifier indicating that the user has begun speaking
+ """
diff --git a/src/deepgram/agent/v1/requests/agent_v1warning.py b/src/deepgram/agent/v1/requests/agent_v1warning.py
new file mode 100644
index 00000000..f1e75051
--- /dev/null
+++ b/src/deepgram/agent/v1/requests/agent_v1warning.py
@@ -0,0 +1,26 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import typing_extensions
+
+
+class AgentV1WarningParams(typing_extensions.TypedDict):
+ """
+ Notifies the client of non-fatal errors or warnings
+ """
+
+ type: typing.Literal["Warning"]
+ """
+ Message type identifier for warnings
+ """
+
+ description: str
+ """
+ Description of the warning
+ """
+
+ code: str
+ """
+ Warning code identifier
+ """
diff --git a/src/deepgram/agent/v1/requests/agent_v1welcome.py b/src/deepgram/agent/v1/requests/agent_v1welcome.py
new file mode 100644
index 00000000..5168a4f0
--- /dev/null
+++ b/src/deepgram/agent/v1/requests/agent_v1welcome.py
@@ -0,0 +1,17 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import typing_extensions
+
+
+class AgentV1WelcomeParams(typing_extensions.TypedDict):
+ type: typing.Literal["Welcome"]
+ """
+ Message type identifier for welcome message
+ """
+
+ request_id: str
+ """
+ Unique identifier for the request
+ """
diff --git a/src/deepgram/agent/v1/requests/anthropic.py b/src/deepgram/agent/v1/requests/anthropic.py
new file mode 100644
index 00000000..98ccdcba
--- /dev/null
+++ b/src/deepgram/agent/v1/requests/anthropic.py
@@ -0,0 +1,25 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import typing_extensions
+from ..types.agent_v1settings_agent_think_provider_anthropic_model import (
+ AgentV1SettingsAgentThinkProviderAnthropicModel,
+)
+
+
+class AnthropicParams(typing_extensions.TypedDict):
+ version: typing_extensions.NotRequired[typing.Literal["v1"]]
+ """
+ The REST API version for the Anthropic Messages API
+ """
+
+ model: AgentV1SettingsAgentThinkProviderAnthropicModel
+ """
+ Anthropic model to use
+ """
+
+ temperature: typing_extensions.NotRequired[float]
+ """
+ Anthropic temperature (0-1)
+ """
diff --git a/src/deepgram/agent/v1/requests/cartesia.py b/src/deepgram/agent/v1/requests/cartesia.py
new file mode 100644
index 00000000..0c85d598
--- /dev/null
+++ b/src/deepgram/agent/v1/requests/cartesia.py
@@ -0,0 +1,29 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import typing_extensions
+from ..types.agent_v1settings_agent_speak_one_item_provider_cartesia_model_id import (
+ AgentV1SettingsAgentSpeakOneItemProviderCartesiaModelId,
+)
+from .agent_v1settings_agent_speak_one_item_provider_cartesia_voice import (
+ AgentV1SettingsAgentSpeakOneItemProviderCartesiaVoiceParams,
+)
+
+
+class CartesiaParams(typing_extensions.TypedDict):
+ version: typing_extensions.NotRequired[typing.Literal["2025-03-17"]]
+ """
+ The API version header for the Cartesia text-to-speech API
+ """
+
+ model_id: AgentV1SettingsAgentSpeakOneItemProviderCartesiaModelId
+ """
+ Cartesia model ID
+ """
+
+ voice: AgentV1SettingsAgentSpeakOneItemProviderCartesiaVoiceParams
+ language: typing_extensions.NotRequired[str]
+ """
+ Cartesia language code
+ """
diff --git a/src/deepgram/agent/v1/requests/deepgram.py b/src/deepgram/agent/v1/requests/deepgram.py
new file mode 100644
index 00000000..add51c6b
--- /dev/null
+++ b/src/deepgram/agent/v1/requests/deepgram.py
@@ -0,0 +1,20 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import typing_extensions
+from ..types.agent_v1settings_agent_speak_one_item_provider_deepgram_model import (
+ AgentV1SettingsAgentSpeakOneItemProviderDeepgramModel,
+)
+
+
+class DeepgramParams(typing_extensions.TypedDict):
+ version: typing_extensions.NotRequired[typing.Literal["v1"]]
+ """
+ The REST API version for the Deepgram text-to-speech API
+ """
+
+ model: AgentV1SettingsAgentSpeakOneItemProviderDeepgramModel
+ """
+ Deepgram TTS model
+ """
diff --git a/src/deepgram/agent/v1/requests/eleven_labs.py b/src/deepgram/agent/v1/requests/eleven_labs.py
new file mode 100644
index 00000000..6ffb82cb
--- /dev/null
+++ b/src/deepgram/agent/v1/requests/eleven_labs.py
@@ -0,0 +1,30 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import typing_extensions
+from ..types.agent_v1settings_agent_speak_one_item_provider_eleven_labs_model_id import (
+ AgentV1SettingsAgentSpeakOneItemProviderElevenLabsModelId,
+)
+
+
+class ElevenLabsParams(typing_extensions.TypedDict):
+ version: typing_extensions.NotRequired[typing.Literal["v1"]]
+ """
+ The REST API version for the ElevenLabs text-to-speech API
+ """
+
+ model_id: AgentV1SettingsAgentSpeakOneItemProviderElevenLabsModelId
+ """
+ Eleven Labs model ID
+ """
+
+ language: typing_extensions.NotRequired[str]
+ """
+ Optional language to use, e.g. 'en-US'. Corresponds to the `language_code` parameter in the ElevenLabs API
+ """
+
+ language_code: typing_extensions.NotRequired[str]
+ """
+ Deprecated. Use the `language` field instead.
+ """
diff --git a/src/deepgram/agent/v1/requests/google.py b/src/deepgram/agent/v1/requests/google.py
new file mode 100644
index 00000000..85883233
--- /dev/null
+++ b/src/deepgram/agent/v1/requests/google.py
@@ -0,0 +1,23 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import typing_extensions
+from ..types.agent_v1settings_agent_think_provider_google_model import AgentV1SettingsAgentThinkProviderGoogleModel
+
+
+class GoogleParams(typing_extensions.TypedDict):
+ version: typing_extensions.NotRequired[typing.Literal["v1beta"]]
+ """
+ The REST API version for the Google generative language API
+ """
+
+ model: AgentV1SettingsAgentThinkProviderGoogleModel
+ """
+ Google model to use
+ """
+
+ temperature: typing_extensions.NotRequired[float]
+ """
+ Google temperature (0-2)
+ """
diff --git a/src/deepgram/agent/v1/requests/groq.py b/src/deepgram/agent/v1/requests/groq.py
new file mode 100644
index 00000000..526931f1
--- /dev/null
+++ b/src/deepgram/agent/v1/requests/groq.py
@@ -0,0 +1,22 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import typing_extensions
+
+
+class GroqParams(typing_extensions.TypedDict):
+ version: typing_extensions.NotRequired[typing.Literal["v1"]]
+ """
+ The REST API version for Groq's chat completions API (mostly OpenAI-compatible)
+ """
+
+ model: typing.Literal["openai/gpt-oss-20b"]
+ """
+ Groq model to use
+ """
+
+ temperature: typing_extensions.NotRequired[float]
+ """
+ Groq temperature (0-2)
+ """
diff --git a/src/deepgram/agent/v1/requests/open_ai.py b/src/deepgram/agent/v1/requests/open_ai.py
new file mode 100644
index 00000000..3726f5c3
--- /dev/null
+++ b/src/deepgram/agent/v1/requests/open_ai.py
@@ -0,0 +1,28 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import typing_extensions
+from ..types.agent_v1settings_agent_speak_one_item_provider_open_ai_model import (
+ AgentV1SettingsAgentSpeakOneItemProviderOpenAiModel,
+)
+from ..types.agent_v1settings_agent_speak_one_item_provider_open_ai_voice import (
+ AgentV1SettingsAgentSpeakOneItemProviderOpenAiVoice,
+)
+
+
+class OpenAiParams(typing_extensions.TypedDict):
+ version: typing_extensions.NotRequired[typing.Literal["v1"]]
+ """
+ The REST API version for the OpenAI text-to-speech API
+ """
+
+ model: AgentV1SettingsAgentSpeakOneItemProviderOpenAiModel
+ """
+ OpenAI TTS model
+ """
+
+ voice: AgentV1SettingsAgentSpeakOneItemProviderOpenAiVoice
+ """
+ OpenAI voice
+ """
diff --git a/src/deepgram/agent/v1/settings/think/models/raw_client.py b/src/deepgram/agent/v1/settings/think/models/raw_client.py
index 5f01114b..a48447d8 100644
--- a/src/deepgram/agent/v1/settings/think/models/raw_client.py
+++ b/src/deepgram/agent/v1/settings/think/models/raw_client.py
@@ -52,9 +52,9 @@ def list(
raise BadRequestError(
headers=dict(_response.headers),
body=typing.cast(
- typing.Optional[typing.Any],
+ typing.Any,
parse_obj_as(
- type_=typing.Optional[typing.Any], # type: ignore
+ type_=typing.Any, # type: ignore
object_=_response.json(),
),
),
@@ -105,9 +105,9 @@ async def list(
raise BadRequestError(
headers=dict(_response.headers),
body=typing.cast(
- typing.Optional[typing.Any],
+ typing.Any,
parse_obj_as(
- type_=typing.Optional[typing.Any], # type: ignore
+ type_=typing.Any, # type: ignore
object_=_response.json(),
),
),
diff --git a/src/deepgram/agent/v1/types/__init__.py b/src/deepgram/agent/v1/types/__init__.py
new file mode 100644
index 00000000..190b29a4
--- /dev/null
+++ b/src/deepgram/agent/v1/types/__init__.py
@@ -0,0 +1,494 @@
+# This file was auto-generated by Fern from our API Definition.
+
+# isort: skip_file
+
+import typing
+from importlib import import_module
+
+if typing.TYPE_CHECKING:
+ from .agent_v1agent_audio_done import AgentV1AgentAudioDone
+ from .agent_v1agent_started_speaking import AgentV1AgentStartedSpeaking
+ from .agent_v1agent_thinking import AgentV1AgentThinking
+ from .agent_v1conversation_text import AgentV1ConversationText
+ from .agent_v1conversation_text_role import AgentV1ConversationTextRole
+ from .agent_v1error import AgentV1Error
+ from .agent_v1function_call_request import AgentV1FunctionCallRequest
+ from .agent_v1function_call_request_functions_item import AgentV1FunctionCallRequestFunctionsItem
+ from .agent_v1inject_agent_message import AgentV1InjectAgentMessage
+ from .agent_v1inject_user_message import AgentV1InjectUserMessage
+ from .agent_v1injection_refused import AgentV1InjectionRefused
+ from .agent_v1keep_alive import AgentV1KeepAlive
+ from .agent_v1prompt_updated import AgentV1PromptUpdated
+ from .agent_v1receive_function_call_response import AgentV1ReceiveFunctionCallResponse
+ from .agent_v1send_function_call_response import AgentV1SendFunctionCallResponse
+ from .agent_v1settings import AgentV1Settings
+ from .agent_v1settings_agent import AgentV1SettingsAgent
+ from .agent_v1settings_agent_context import AgentV1SettingsAgentContext
+ from .agent_v1settings_agent_context_messages_item import AgentV1SettingsAgentContextMessagesItem
+ from .agent_v1settings_agent_context_messages_item_content import AgentV1SettingsAgentContextMessagesItemContent
+ from .agent_v1settings_agent_context_messages_item_content_role import (
+ AgentV1SettingsAgentContextMessagesItemContentRole,
+ )
+ from .agent_v1settings_agent_context_messages_item_function_calls import (
+ AgentV1SettingsAgentContextMessagesItemFunctionCalls,
+ )
+ from .agent_v1settings_agent_context_messages_item_function_calls_function_calls_item import (
+ AgentV1SettingsAgentContextMessagesItemFunctionCallsFunctionCallsItem,
+ )
+ from .agent_v1settings_agent_listen import AgentV1SettingsAgentListen
+ from .agent_v1settings_agent_listen_provider import (
+ AgentV1SettingsAgentListenProvider,
+ AgentV1SettingsAgentListenProvider_V1,
+ AgentV1SettingsAgentListenProvider_V2,
+ )
+ from .agent_v1settings_agent_listen_provider_v1 import AgentV1SettingsAgentListenProviderV1
+ from .agent_v1settings_agent_listen_provider_v2 import AgentV1SettingsAgentListenProviderV2
+ from .agent_v1settings_agent_speak import AgentV1SettingsAgentSpeak
+ from .agent_v1settings_agent_speak_endpoint import AgentV1SettingsAgentSpeakEndpoint
+ from .agent_v1settings_agent_speak_endpoint_endpoint import AgentV1SettingsAgentSpeakEndpointEndpoint
+ from .agent_v1settings_agent_speak_endpoint_provider import (
+ AgentV1SettingsAgentSpeakEndpointProvider,
+ AgentV1SettingsAgentSpeakEndpointProvider_AwsPolly,
+ AgentV1SettingsAgentSpeakEndpointProvider_Cartesia,
+ AgentV1SettingsAgentSpeakEndpointProvider_Deepgram,
+ AgentV1SettingsAgentSpeakEndpointProvider_ElevenLabs,
+ AgentV1SettingsAgentSpeakEndpointProvider_OpenAi,
+ )
+ from .agent_v1settings_agent_speak_endpoint_provider_aws_polly import (
+ AgentV1SettingsAgentSpeakEndpointProviderAwsPolly,
+ )
+ from .agent_v1settings_agent_speak_endpoint_provider_aws_polly_credentials import (
+ AgentV1SettingsAgentSpeakEndpointProviderAwsPollyCredentials,
+ )
+ from .agent_v1settings_agent_speak_endpoint_provider_aws_polly_credentials_type import (
+ AgentV1SettingsAgentSpeakEndpointProviderAwsPollyCredentialsType,
+ )
+ from .agent_v1settings_agent_speak_endpoint_provider_aws_polly_engine import (
+ AgentV1SettingsAgentSpeakEndpointProviderAwsPollyEngine,
+ )
+ from .agent_v1settings_agent_speak_endpoint_provider_aws_polly_voice import (
+ AgentV1SettingsAgentSpeakEndpointProviderAwsPollyVoice,
+ )
+ from .agent_v1settings_agent_speak_endpoint_provider_cartesia_model_id import (
+ AgentV1SettingsAgentSpeakEndpointProviderCartesiaModelId,
+ )
+ from .agent_v1settings_agent_speak_endpoint_provider_cartesia_voice import (
+ AgentV1SettingsAgentSpeakEndpointProviderCartesiaVoice,
+ )
+ from .agent_v1settings_agent_speak_endpoint_provider_deepgram_model import (
+ AgentV1SettingsAgentSpeakEndpointProviderDeepgramModel,
+ )
+ from .agent_v1settings_agent_speak_endpoint_provider_eleven_labs_model_id import (
+ AgentV1SettingsAgentSpeakEndpointProviderElevenLabsModelId,
+ )
+ from .agent_v1settings_agent_speak_endpoint_provider_open_ai_model import (
+ AgentV1SettingsAgentSpeakEndpointProviderOpenAiModel,
+ )
+ from .agent_v1settings_agent_speak_endpoint_provider_open_ai_voice import (
+ AgentV1SettingsAgentSpeakEndpointProviderOpenAiVoice,
+ )
+ from .agent_v1settings_agent_speak_one_item import AgentV1SettingsAgentSpeakOneItem
+ from .agent_v1settings_agent_speak_one_item_endpoint import AgentV1SettingsAgentSpeakOneItemEndpoint
+ from .agent_v1settings_agent_speak_one_item_provider import (
+ AgentV1SettingsAgentSpeakOneItemProvider,
+ AgentV1SettingsAgentSpeakOneItemProvider_AwsPolly,
+ AgentV1SettingsAgentSpeakOneItemProvider_Cartesia,
+ AgentV1SettingsAgentSpeakOneItemProvider_Deepgram,
+ AgentV1SettingsAgentSpeakOneItemProvider_ElevenLabs,
+ AgentV1SettingsAgentSpeakOneItemProvider_OpenAi,
+ )
+ from .agent_v1settings_agent_speak_one_item_provider_aws_polly import (
+ AgentV1SettingsAgentSpeakOneItemProviderAwsPolly,
+ )
+ from .agent_v1settings_agent_speak_one_item_provider_aws_polly_credentials import (
+ AgentV1SettingsAgentSpeakOneItemProviderAwsPollyCredentials,
+ )
+ from .agent_v1settings_agent_speak_one_item_provider_aws_polly_credentials_type import (
+ AgentV1SettingsAgentSpeakOneItemProviderAwsPollyCredentialsType,
+ )
+ from .agent_v1settings_agent_speak_one_item_provider_aws_polly_engine import (
+ AgentV1SettingsAgentSpeakOneItemProviderAwsPollyEngine,
+ )
+ from .agent_v1settings_agent_speak_one_item_provider_aws_polly_voice import (
+ AgentV1SettingsAgentSpeakOneItemProviderAwsPollyVoice,
+ )
+ from .agent_v1settings_agent_speak_one_item_provider_cartesia_model_id import (
+ AgentV1SettingsAgentSpeakOneItemProviderCartesiaModelId,
+ )
+ from .agent_v1settings_agent_speak_one_item_provider_cartesia_voice import (
+ AgentV1SettingsAgentSpeakOneItemProviderCartesiaVoice,
+ )
+ from .agent_v1settings_agent_speak_one_item_provider_deepgram_model import (
+ AgentV1SettingsAgentSpeakOneItemProviderDeepgramModel,
+ )
+ from .agent_v1settings_agent_speak_one_item_provider_eleven_labs_model_id import (
+ AgentV1SettingsAgentSpeakOneItemProviderElevenLabsModelId,
+ )
+ from .agent_v1settings_agent_speak_one_item_provider_open_ai_model import (
+ AgentV1SettingsAgentSpeakOneItemProviderOpenAiModel,
+ )
+ from .agent_v1settings_agent_speak_one_item_provider_open_ai_voice import (
+ AgentV1SettingsAgentSpeakOneItemProviderOpenAiVoice,
+ )
+ from .agent_v1settings_agent_think import AgentV1SettingsAgentThink
+ from .agent_v1settings_agent_think_context_length import AgentV1SettingsAgentThinkContextLength
+ from .agent_v1settings_agent_think_endpoint import AgentV1SettingsAgentThinkEndpoint
+ from .agent_v1settings_agent_think_functions_item import AgentV1SettingsAgentThinkFunctionsItem
+ from .agent_v1settings_agent_think_functions_item_endpoint import AgentV1SettingsAgentThinkFunctionsItemEndpoint
+ from .agent_v1settings_agent_think_provider import (
+ AgentV1SettingsAgentThinkProvider,
+ AgentV1SettingsAgentThinkProvider_Anthropic,
+ AgentV1SettingsAgentThinkProvider_AwsBedrock,
+ AgentV1SettingsAgentThinkProvider_Google,
+ AgentV1SettingsAgentThinkProvider_Groq,
+ AgentV1SettingsAgentThinkProvider_OpenAi,
+ )
+ from .agent_v1settings_agent_think_provider_anthropic_model import AgentV1SettingsAgentThinkProviderAnthropicModel
+ from .agent_v1settings_agent_think_provider_aws_bedrock import AgentV1SettingsAgentThinkProviderAwsBedrock
+ from .agent_v1settings_agent_think_provider_aws_bedrock_credentials import (
+ AgentV1SettingsAgentThinkProviderAwsBedrockCredentials,
+ )
+ from .agent_v1settings_agent_think_provider_aws_bedrock_credentials_type import (
+ AgentV1SettingsAgentThinkProviderAwsBedrockCredentialsType,
+ )
+ from .agent_v1settings_agent_think_provider_aws_bedrock_model import (
+ AgentV1SettingsAgentThinkProviderAwsBedrockModel,
+ )
+ from .agent_v1settings_agent_think_provider_google_model import AgentV1SettingsAgentThinkProviderGoogleModel
+ from .agent_v1settings_agent_think_provider_open_ai_model import AgentV1SettingsAgentThinkProviderOpenAiModel
+ from .agent_v1settings_applied import AgentV1SettingsApplied
+ from .agent_v1settings_audio import AgentV1SettingsAudio
+ from .agent_v1settings_audio_input import AgentV1SettingsAudioInput
+ from .agent_v1settings_audio_input_encoding import AgentV1SettingsAudioInputEncoding
+ from .agent_v1settings_audio_output import AgentV1SettingsAudioOutput
+ from .agent_v1settings_audio_output_encoding import AgentV1SettingsAudioOutputEncoding
+ from .agent_v1settings_flags import AgentV1SettingsFlags
+ from .agent_v1speak_updated import AgentV1SpeakUpdated
+ from .agent_v1update_prompt import AgentV1UpdatePrompt
+ from .agent_v1update_speak import AgentV1UpdateSpeak
+ from .agent_v1update_speak_speak import AgentV1UpdateSpeakSpeak
+ from .agent_v1update_speak_speak_endpoint import AgentV1UpdateSpeakSpeakEndpoint
+ from .agent_v1update_speak_speak_provider import (
+ AgentV1UpdateSpeakSpeakProvider,
+ AgentV1UpdateSpeakSpeakProvider_AwsPolly,
+ AgentV1UpdateSpeakSpeakProvider_Cartesia,
+ AgentV1UpdateSpeakSpeakProvider_Deepgram,
+ AgentV1UpdateSpeakSpeakProvider_ElevenLabs,
+ AgentV1UpdateSpeakSpeakProvider_OpenAi,
+ )
+ from .agent_v1update_speak_speak_provider_aws_polly import AgentV1UpdateSpeakSpeakProviderAwsPolly
+ from .agent_v1update_speak_speak_provider_aws_polly_credentials import (
+ AgentV1UpdateSpeakSpeakProviderAwsPollyCredentials,
+ )
+ from .agent_v1update_speak_speak_provider_aws_polly_credentials_type import (
+ AgentV1UpdateSpeakSpeakProviderAwsPollyCredentialsType,
+ )
+ from .agent_v1update_speak_speak_provider_aws_polly_engine import AgentV1UpdateSpeakSpeakProviderAwsPollyEngine
+ from .agent_v1update_speak_speak_provider_aws_polly_voice import AgentV1UpdateSpeakSpeakProviderAwsPollyVoice
+ from .agent_v1update_speak_speak_provider_cartesia import AgentV1UpdateSpeakSpeakProviderCartesia
+ from .agent_v1update_speak_speak_provider_cartesia_model_id import AgentV1UpdateSpeakSpeakProviderCartesiaModelId
+ from .agent_v1update_speak_speak_provider_cartesia_voice import AgentV1UpdateSpeakSpeakProviderCartesiaVoice
+ from .agent_v1update_speak_speak_provider_deepgram import AgentV1UpdateSpeakSpeakProviderDeepgram
+ from .agent_v1update_speak_speak_provider_deepgram_model import AgentV1UpdateSpeakSpeakProviderDeepgramModel
+ from .agent_v1update_speak_speak_provider_eleven_labs import AgentV1UpdateSpeakSpeakProviderElevenLabs
+ from .agent_v1update_speak_speak_provider_eleven_labs_model_id import (
+ AgentV1UpdateSpeakSpeakProviderElevenLabsModelId,
+ )
+ from .agent_v1update_speak_speak_provider_open_ai import AgentV1UpdateSpeakSpeakProviderOpenAi
+ from .agent_v1update_speak_speak_provider_open_ai_model import AgentV1UpdateSpeakSpeakProviderOpenAiModel
+ from .agent_v1update_speak_speak_provider_open_ai_voice import AgentV1UpdateSpeakSpeakProviderOpenAiVoice
+ from .agent_v1user_started_speaking import AgentV1UserStartedSpeaking
+ from .agent_v1warning import AgentV1Warning
+ from .agent_v1welcome import AgentV1Welcome
+ from .anthropic import Anthropic
+ from .cartesia import Cartesia
+ from .deepgram import Deepgram
+ from .eleven_labs import ElevenLabs
+ from .google import Google
+ from .groq import Groq
+ from .open_ai import OpenAi
+_dynamic_imports: typing.Dict[str, str] = {
+ "AgentV1AgentAudioDone": ".agent_v1agent_audio_done",
+ "AgentV1AgentStartedSpeaking": ".agent_v1agent_started_speaking",
+ "AgentV1AgentThinking": ".agent_v1agent_thinking",
+ "AgentV1ConversationText": ".agent_v1conversation_text",
+ "AgentV1ConversationTextRole": ".agent_v1conversation_text_role",
+ "AgentV1Error": ".agent_v1error",
+ "AgentV1FunctionCallRequest": ".agent_v1function_call_request",
+ "AgentV1FunctionCallRequestFunctionsItem": ".agent_v1function_call_request_functions_item",
+ "AgentV1InjectAgentMessage": ".agent_v1inject_agent_message",
+ "AgentV1InjectUserMessage": ".agent_v1inject_user_message",
+ "AgentV1InjectionRefused": ".agent_v1injection_refused",
+ "AgentV1KeepAlive": ".agent_v1keep_alive",
+ "AgentV1PromptUpdated": ".agent_v1prompt_updated",
+ "AgentV1ReceiveFunctionCallResponse": ".agent_v1receive_function_call_response",
+ "AgentV1SendFunctionCallResponse": ".agent_v1send_function_call_response",
+ "AgentV1Settings": ".agent_v1settings",
+ "AgentV1SettingsAgent": ".agent_v1settings_agent",
+ "AgentV1SettingsAgentContext": ".agent_v1settings_agent_context",
+ "AgentV1SettingsAgentContextMessagesItem": ".agent_v1settings_agent_context_messages_item",
+ "AgentV1SettingsAgentContextMessagesItemContent": ".agent_v1settings_agent_context_messages_item_content",
+ "AgentV1SettingsAgentContextMessagesItemContentRole": ".agent_v1settings_agent_context_messages_item_content_role",
+ "AgentV1SettingsAgentContextMessagesItemFunctionCalls": ".agent_v1settings_agent_context_messages_item_function_calls",
+ "AgentV1SettingsAgentContextMessagesItemFunctionCallsFunctionCallsItem": ".agent_v1settings_agent_context_messages_item_function_calls_function_calls_item",
+ "AgentV1SettingsAgentListen": ".agent_v1settings_agent_listen",
+ "AgentV1SettingsAgentListenProvider": ".agent_v1settings_agent_listen_provider",
+ "AgentV1SettingsAgentListenProviderV1": ".agent_v1settings_agent_listen_provider_v1",
+ "AgentV1SettingsAgentListenProviderV2": ".agent_v1settings_agent_listen_provider_v2",
+ "AgentV1SettingsAgentListenProvider_V1": ".agent_v1settings_agent_listen_provider",
+ "AgentV1SettingsAgentListenProvider_V2": ".agent_v1settings_agent_listen_provider",
+ "AgentV1SettingsAgentSpeak": ".agent_v1settings_agent_speak",
+ "AgentV1SettingsAgentSpeakEndpoint": ".agent_v1settings_agent_speak_endpoint",
+ "AgentV1SettingsAgentSpeakEndpointEndpoint": ".agent_v1settings_agent_speak_endpoint_endpoint",
+ "AgentV1SettingsAgentSpeakEndpointProvider": ".agent_v1settings_agent_speak_endpoint_provider",
+ "AgentV1SettingsAgentSpeakEndpointProviderAwsPolly": ".agent_v1settings_agent_speak_endpoint_provider_aws_polly",
+ "AgentV1SettingsAgentSpeakEndpointProviderAwsPollyCredentials": ".agent_v1settings_agent_speak_endpoint_provider_aws_polly_credentials",
+ "AgentV1SettingsAgentSpeakEndpointProviderAwsPollyCredentialsType": ".agent_v1settings_agent_speak_endpoint_provider_aws_polly_credentials_type",
+ "AgentV1SettingsAgentSpeakEndpointProviderAwsPollyEngine": ".agent_v1settings_agent_speak_endpoint_provider_aws_polly_engine",
+ "AgentV1SettingsAgentSpeakEndpointProviderAwsPollyVoice": ".agent_v1settings_agent_speak_endpoint_provider_aws_polly_voice",
+ "AgentV1SettingsAgentSpeakEndpointProviderCartesiaModelId": ".agent_v1settings_agent_speak_endpoint_provider_cartesia_model_id",
+ "AgentV1SettingsAgentSpeakEndpointProviderCartesiaVoice": ".agent_v1settings_agent_speak_endpoint_provider_cartesia_voice",
+ "AgentV1SettingsAgentSpeakEndpointProviderDeepgramModel": ".agent_v1settings_agent_speak_endpoint_provider_deepgram_model",
+ "AgentV1SettingsAgentSpeakEndpointProviderElevenLabsModelId": ".agent_v1settings_agent_speak_endpoint_provider_eleven_labs_model_id",
+ "AgentV1SettingsAgentSpeakEndpointProviderOpenAiModel": ".agent_v1settings_agent_speak_endpoint_provider_open_ai_model",
+ "AgentV1SettingsAgentSpeakEndpointProviderOpenAiVoice": ".agent_v1settings_agent_speak_endpoint_provider_open_ai_voice",
+ "AgentV1SettingsAgentSpeakEndpointProvider_AwsPolly": ".agent_v1settings_agent_speak_endpoint_provider",
+ "AgentV1SettingsAgentSpeakEndpointProvider_Cartesia": ".agent_v1settings_agent_speak_endpoint_provider",
+ "AgentV1SettingsAgentSpeakEndpointProvider_Deepgram": ".agent_v1settings_agent_speak_endpoint_provider",
+ "AgentV1SettingsAgentSpeakEndpointProvider_ElevenLabs": ".agent_v1settings_agent_speak_endpoint_provider",
+ "AgentV1SettingsAgentSpeakEndpointProvider_OpenAi": ".agent_v1settings_agent_speak_endpoint_provider",
+ "AgentV1SettingsAgentSpeakOneItem": ".agent_v1settings_agent_speak_one_item",
+ "AgentV1SettingsAgentSpeakOneItemEndpoint": ".agent_v1settings_agent_speak_one_item_endpoint",
+ "AgentV1SettingsAgentSpeakOneItemProvider": ".agent_v1settings_agent_speak_one_item_provider",
+ "AgentV1SettingsAgentSpeakOneItemProviderAwsPolly": ".agent_v1settings_agent_speak_one_item_provider_aws_polly",
+ "AgentV1SettingsAgentSpeakOneItemProviderAwsPollyCredentials": ".agent_v1settings_agent_speak_one_item_provider_aws_polly_credentials",
+ "AgentV1SettingsAgentSpeakOneItemProviderAwsPollyCredentialsType": ".agent_v1settings_agent_speak_one_item_provider_aws_polly_credentials_type",
+ "AgentV1SettingsAgentSpeakOneItemProviderAwsPollyEngine": ".agent_v1settings_agent_speak_one_item_provider_aws_polly_engine",
+ "AgentV1SettingsAgentSpeakOneItemProviderAwsPollyVoice": ".agent_v1settings_agent_speak_one_item_provider_aws_polly_voice",
+ "AgentV1SettingsAgentSpeakOneItemProviderCartesiaModelId": ".agent_v1settings_agent_speak_one_item_provider_cartesia_model_id",
+ "AgentV1SettingsAgentSpeakOneItemProviderCartesiaVoice": ".agent_v1settings_agent_speak_one_item_provider_cartesia_voice",
+ "AgentV1SettingsAgentSpeakOneItemProviderDeepgramModel": ".agent_v1settings_agent_speak_one_item_provider_deepgram_model",
+ "AgentV1SettingsAgentSpeakOneItemProviderElevenLabsModelId": ".agent_v1settings_agent_speak_one_item_provider_eleven_labs_model_id",
+ "AgentV1SettingsAgentSpeakOneItemProviderOpenAiModel": ".agent_v1settings_agent_speak_one_item_provider_open_ai_model",
+ "AgentV1SettingsAgentSpeakOneItemProviderOpenAiVoice": ".agent_v1settings_agent_speak_one_item_provider_open_ai_voice",
+ "AgentV1SettingsAgentSpeakOneItemProvider_AwsPolly": ".agent_v1settings_agent_speak_one_item_provider",
+ "AgentV1SettingsAgentSpeakOneItemProvider_Cartesia": ".agent_v1settings_agent_speak_one_item_provider",
+ "AgentV1SettingsAgentSpeakOneItemProvider_Deepgram": ".agent_v1settings_agent_speak_one_item_provider",
+ "AgentV1SettingsAgentSpeakOneItemProvider_ElevenLabs": ".agent_v1settings_agent_speak_one_item_provider",
+ "AgentV1SettingsAgentSpeakOneItemProvider_OpenAi": ".agent_v1settings_agent_speak_one_item_provider",
+ "AgentV1SettingsAgentThink": ".agent_v1settings_agent_think",
+ "AgentV1SettingsAgentThinkContextLength": ".agent_v1settings_agent_think_context_length",
+ "AgentV1SettingsAgentThinkEndpoint": ".agent_v1settings_agent_think_endpoint",
+ "AgentV1SettingsAgentThinkFunctionsItem": ".agent_v1settings_agent_think_functions_item",
+ "AgentV1SettingsAgentThinkFunctionsItemEndpoint": ".agent_v1settings_agent_think_functions_item_endpoint",
+ "AgentV1SettingsAgentThinkProvider": ".agent_v1settings_agent_think_provider",
+ "AgentV1SettingsAgentThinkProviderAnthropicModel": ".agent_v1settings_agent_think_provider_anthropic_model",
+ "AgentV1SettingsAgentThinkProviderAwsBedrock": ".agent_v1settings_agent_think_provider_aws_bedrock",
+ "AgentV1SettingsAgentThinkProviderAwsBedrockCredentials": ".agent_v1settings_agent_think_provider_aws_bedrock_credentials",
+ "AgentV1SettingsAgentThinkProviderAwsBedrockCredentialsType": ".agent_v1settings_agent_think_provider_aws_bedrock_credentials_type",
+ "AgentV1SettingsAgentThinkProviderAwsBedrockModel": ".agent_v1settings_agent_think_provider_aws_bedrock_model",
+ "AgentV1SettingsAgentThinkProviderGoogleModel": ".agent_v1settings_agent_think_provider_google_model",
+ "AgentV1SettingsAgentThinkProviderOpenAiModel": ".agent_v1settings_agent_think_provider_open_ai_model",
+ "AgentV1SettingsAgentThinkProvider_Anthropic": ".agent_v1settings_agent_think_provider",
+ "AgentV1SettingsAgentThinkProvider_AwsBedrock": ".agent_v1settings_agent_think_provider",
+ "AgentV1SettingsAgentThinkProvider_Google": ".agent_v1settings_agent_think_provider",
+ "AgentV1SettingsAgentThinkProvider_Groq": ".agent_v1settings_agent_think_provider",
+ "AgentV1SettingsAgentThinkProvider_OpenAi": ".agent_v1settings_agent_think_provider",
+ "AgentV1SettingsApplied": ".agent_v1settings_applied",
+ "AgentV1SettingsAudio": ".agent_v1settings_audio",
+ "AgentV1SettingsAudioInput": ".agent_v1settings_audio_input",
+ "AgentV1SettingsAudioInputEncoding": ".agent_v1settings_audio_input_encoding",
+ "AgentV1SettingsAudioOutput": ".agent_v1settings_audio_output",
+ "AgentV1SettingsAudioOutputEncoding": ".agent_v1settings_audio_output_encoding",
+ "AgentV1SettingsFlags": ".agent_v1settings_flags",
+ "AgentV1SpeakUpdated": ".agent_v1speak_updated",
+ "AgentV1UpdatePrompt": ".agent_v1update_prompt",
+ "AgentV1UpdateSpeak": ".agent_v1update_speak",
+ "AgentV1UpdateSpeakSpeak": ".agent_v1update_speak_speak",
+ "AgentV1UpdateSpeakSpeakEndpoint": ".agent_v1update_speak_speak_endpoint",
+ "AgentV1UpdateSpeakSpeakProvider": ".agent_v1update_speak_speak_provider",
+ "AgentV1UpdateSpeakSpeakProviderAwsPolly": ".agent_v1update_speak_speak_provider_aws_polly",
+ "AgentV1UpdateSpeakSpeakProviderAwsPollyCredentials": ".agent_v1update_speak_speak_provider_aws_polly_credentials",
+ "AgentV1UpdateSpeakSpeakProviderAwsPollyCredentialsType": ".agent_v1update_speak_speak_provider_aws_polly_credentials_type",
+ "AgentV1UpdateSpeakSpeakProviderAwsPollyEngine": ".agent_v1update_speak_speak_provider_aws_polly_engine",
+ "AgentV1UpdateSpeakSpeakProviderAwsPollyVoice": ".agent_v1update_speak_speak_provider_aws_polly_voice",
+ "AgentV1UpdateSpeakSpeakProviderCartesia": ".agent_v1update_speak_speak_provider_cartesia",
+ "AgentV1UpdateSpeakSpeakProviderCartesiaModelId": ".agent_v1update_speak_speak_provider_cartesia_model_id",
+ "AgentV1UpdateSpeakSpeakProviderCartesiaVoice": ".agent_v1update_speak_speak_provider_cartesia_voice",
+ "AgentV1UpdateSpeakSpeakProviderDeepgram": ".agent_v1update_speak_speak_provider_deepgram",
+ "AgentV1UpdateSpeakSpeakProviderDeepgramModel": ".agent_v1update_speak_speak_provider_deepgram_model",
+ "AgentV1UpdateSpeakSpeakProviderElevenLabs": ".agent_v1update_speak_speak_provider_eleven_labs",
+ "AgentV1UpdateSpeakSpeakProviderElevenLabsModelId": ".agent_v1update_speak_speak_provider_eleven_labs_model_id",
+ "AgentV1UpdateSpeakSpeakProviderOpenAi": ".agent_v1update_speak_speak_provider_open_ai",
+ "AgentV1UpdateSpeakSpeakProviderOpenAiModel": ".agent_v1update_speak_speak_provider_open_ai_model",
+ "AgentV1UpdateSpeakSpeakProviderOpenAiVoice": ".agent_v1update_speak_speak_provider_open_ai_voice",
+ "AgentV1UpdateSpeakSpeakProvider_AwsPolly": ".agent_v1update_speak_speak_provider",
+ "AgentV1UpdateSpeakSpeakProvider_Cartesia": ".agent_v1update_speak_speak_provider",
+ "AgentV1UpdateSpeakSpeakProvider_Deepgram": ".agent_v1update_speak_speak_provider",
+ "AgentV1UpdateSpeakSpeakProvider_ElevenLabs": ".agent_v1update_speak_speak_provider",
+ "AgentV1UpdateSpeakSpeakProvider_OpenAi": ".agent_v1update_speak_speak_provider",
+ "AgentV1UserStartedSpeaking": ".agent_v1user_started_speaking",
+ "AgentV1Warning": ".agent_v1warning",
+ "AgentV1Welcome": ".agent_v1welcome",
+ "Anthropic": ".anthropic",
+ "Cartesia": ".cartesia",
+ "Deepgram": ".deepgram",
+ "ElevenLabs": ".eleven_labs",
+ "Google": ".google",
+ "Groq": ".groq",
+ "OpenAi": ".open_ai",
+}
+
+
+def __getattr__(attr_name: str) -> typing.Any:
+ module_name = _dynamic_imports.get(attr_name)
+ if module_name is None:
+ raise AttributeError(f"No {attr_name} found in _dynamic_imports for module name -> {__name__}")
+ try:
+ module = import_module(module_name, __package__)
+ if module_name == f".{attr_name}":
+ return module
+ else:
+ return getattr(module, attr_name)
+ except ImportError as e:
+ raise ImportError(f"Failed to import {attr_name} from {module_name}: {e}") from e
+ except AttributeError as e:
+ raise AttributeError(f"Failed to get {attr_name} from {module_name}: {e}") from e
+
+
+def __dir__():
+ lazy_attrs = list(_dynamic_imports.keys())
+ return sorted(lazy_attrs)
+
+
+__all__ = [
+ "AgentV1AgentAudioDone",
+ "AgentV1AgentStartedSpeaking",
+ "AgentV1AgentThinking",
+ "AgentV1ConversationText",
+ "AgentV1ConversationTextRole",
+ "AgentV1Error",
+ "AgentV1FunctionCallRequest",
+ "AgentV1FunctionCallRequestFunctionsItem",
+ "AgentV1InjectAgentMessage",
+ "AgentV1InjectUserMessage",
+ "AgentV1InjectionRefused",
+ "AgentV1KeepAlive",
+ "AgentV1PromptUpdated",
+ "AgentV1ReceiveFunctionCallResponse",
+ "AgentV1SendFunctionCallResponse",
+ "AgentV1Settings",
+ "AgentV1SettingsAgent",
+ "AgentV1SettingsAgentContext",
+ "AgentV1SettingsAgentContextMessagesItem",
+ "AgentV1SettingsAgentContextMessagesItemContent",
+ "AgentV1SettingsAgentContextMessagesItemContentRole",
+ "AgentV1SettingsAgentContextMessagesItemFunctionCalls",
+ "AgentV1SettingsAgentContextMessagesItemFunctionCallsFunctionCallsItem",
+ "AgentV1SettingsAgentListen",
+ "AgentV1SettingsAgentListenProvider",
+ "AgentV1SettingsAgentListenProviderV1",
+ "AgentV1SettingsAgentListenProviderV2",
+ "AgentV1SettingsAgentListenProvider_V1",
+ "AgentV1SettingsAgentListenProvider_V2",
+ "AgentV1SettingsAgentSpeak",
+ "AgentV1SettingsAgentSpeakEndpoint",
+ "AgentV1SettingsAgentSpeakEndpointEndpoint",
+ "AgentV1SettingsAgentSpeakEndpointProvider",
+ "AgentV1SettingsAgentSpeakEndpointProviderAwsPolly",
+ "AgentV1SettingsAgentSpeakEndpointProviderAwsPollyCredentials",
+ "AgentV1SettingsAgentSpeakEndpointProviderAwsPollyCredentialsType",
+ "AgentV1SettingsAgentSpeakEndpointProviderAwsPollyEngine",
+ "AgentV1SettingsAgentSpeakEndpointProviderAwsPollyVoice",
+ "AgentV1SettingsAgentSpeakEndpointProviderCartesiaModelId",
+ "AgentV1SettingsAgentSpeakEndpointProviderCartesiaVoice",
+ "AgentV1SettingsAgentSpeakEndpointProviderDeepgramModel",
+ "AgentV1SettingsAgentSpeakEndpointProviderElevenLabsModelId",
+ "AgentV1SettingsAgentSpeakEndpointProviderOpenAiModel",
+ "AgentV1SettingsAgentSpeakEndpointProviderOpenAiVoice",
+ "AgentV1SettingsAgentSpeakEndpointProvider_AwsPolly",
+ "AgentV1SettingsAgentSpeakEndpointProvider_Cartesia",
+ "AgentV1SettingsAgentSpeakEndpointProvider_Deepgram",
+ "AgentV1SettingsAgentSpeakEndpointProvider_ElevenLabs",
+ "AgentV1SettingsAgentSpeakEndpointProvider_OpenAi",
+ "AgentV1SettingsAgentSpeakOneItem",
+ "AgentV1SettingsAgentSpeakOneItemEndpoint",
+ "AgentV1SettingsAgentSpeakOneItemProvider",
+ "AgentV1SettingsAgentSpeakOneItemProviderAwsPolly",
+ "AgentV1SettingsAgentSpeakOneItemProviderAwsPollyCredentials",
+ "AgentV1SettingsAgentSpeakOneItemProviderAwsPollyCredentialsType",
+ "AgentV1SettingsAgentSpeakOneItemProviderAwsPollyEngine",
+ "AgentV1SettingsAgentSpeakOneItemProviderAwsPollyVoice",
+ "AgentV1SettingsAgentSpeakOneItemProviderCartesiaModelId",
+ "AgentV1SettingsAgentSpeakOneItemProviderCartesiaVoice",
+ "AgentV1SettingsAgentSpeakOneItemProviderDeepgramModel",
+ "AgentV1SettingsAgentSpeakOneItemProviderElevenLabsModelId",
+ "AgentV1SettingsAgentSpeakOneItemProviderOpenAiModel",
+ "AgentV1SettingsAgentSpeakOneItemProviderOpenAiVoice",
+ "AgentV1SettingsAgentSpeakOneItemProvider_AwsPolly",
+ "AgentV1SettingsAgentSpeakOneItemProvider_Cartesia",
+ "AgentV1SettingsAgentSpeakOneItemProvider_Deepgram",
+ "AgentV1SettingsAgentSpeakOneItemProvider_ElevenLabs",
+ "AgentV1SettingsAgentSpeakOneItemProvider_OpenAi",
+ "AgentV1SettingsAgentThink",
+ "AgentV1SettingsAgentThinkContextLength",
+ "AgentV1SettingsAgentThinkEndpoint",
+ "AgentV1SettingsAgentThinkFunctionsItem",
+ "AgentV1SettingsAgentThinkFunctionsItemEndpoint",
+ "AgentV1SettingsAgentThinkProvider",
+ "AgentV1SettingsAgentThinkProviderAnthropicModel",
+ "AgentV1SettingsAgentThinkProviderAwsBedrock",
+ "AgentV1SettingsAgentThinkProviderAwsBedrockCredentials",
+ "AgentV1SettingsAgentThinkProviderAwsBedrockCredentialsType",
+ "AgentV1SettingsAgentThinkProviderAwsBedrockModel",
+ "AgentV1SettingsAgentThinkProviderGoogleModel",
+ "AgentV1SettingsAgentThinkProviderOpenAiModel",
+ "AgentV1SettingsAgentThinkProvider_Anthropic",
+ "AgentV1SettingsAgentThinkProvider_AwsBedrock",
+ "AgentV1SettingsAgentThinkProvider_Google",
+ "AgentV1SettingsAgentThinkProvider_Groq",
+ "AgentV1SettingsAgentThinkProvider_OpenAi",
+ "AgentV1SettingsApplied",
+ "AgentV1SettingsAudio",
+ "AgentV1SettingsAudioInput",
+ "AgentV1SettingsAudioInputEncoding",
+ "AgentV1SettingsAudioOutput",
+ "AgentV1SettingsAudioOutputEncoding",
+ "AgentV1SettingsFlags",
+ "AgentV1SpeakUpdated",
+ "AgentV1UpdatePrompt",
+ "AgentV1UpdateSpeak",
+ "AgentV1UpdateSpeakSpeak",
+ "AgentV1UpdateSpeakSpeakEndpoint",
+ "AgentV1UpdateSpeakSpeakProvider",
+ "AgentV1UpdateSpeakSpeakProviderAwsPolly",
+ "AgentV1UpdateSpeakSpeakProviderAwsPollyCredentials",
+ "AgentV1UpdateSpeakSpeakProviderAwsPollyCredentialsType",
+ "AgentV1UpdateSpeakSpeakProviderAwsPollyEngine",
+ "AgentV1UpdateSpeakSpeakProviderAwsPollyVoice",
+ "AgentV1UpdateSpeakSpeakProviderCartesia",
+ "AgentV1UpdateSpeakSpeakProviderCartesiaModelId",
+ "AgentV1UpdateSpeakSpeakProviderCartesiaVoice",
+ "AgentV1UpdateSpeakSpeakProviderDeepgram",
+ "AgentV1UpdateSpeakSpeakProviderDeepgramModel",
+ "AgentV1UpdateSpeakSpeakProviderElevenLabs",
+ "AgentV1UpdateSpeakSpeakProviderElevenLabsModelId",
+ "AgentV1UpdateSpeakSpeakProviderOpenAi",
+ "AgentV1UpdateSpeakSpeakProviderOpenAiModel",
+ "AgentV1UpdateSpeakSpeakProviderOpenAiVoice",
+ "AgentV1UpdateSpeakSpeakProvider_AwsPolly",
+ "AgentV1UpdateSpeakSpeakProvider_Cartesia",
+ "AgentV1UpdateSpeakSpeakProvider_Deepgram",
+ "AgentV1UpdateSpeakSpeakProvider_ElevenLabs",
+ "AgentV1UpdateSpeakSpeakProvider_OpenAi",
+ "AgentV1UserStartedSpeaking",
+ "AgentV1Warning",
+ "AgentV1Welcome",
+ "Anthropic",
+ "Cartesia",
+ "Deepgram",
+ "ElevenLabs",
+ "Google",
+ "Groq",
+ "OpenAi",
+]
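
Note: the types/__init__.py added above exports everything lazily — __getattr__ consults _dynamic_imports and only imports a submodule the first time one of its names is accessed. A minimal usage sketch under that assumption (the identifiers come from the mapping above; the example values are placeholders, not part of this change):

    import deepgram.agent.v1.types as agent_types

    # First attribute access triggers import of .agent_v1welcome via __getattr__;
    # no other submodule in the package is loaded yet.
    welcome_cls = agent_types.AgentV1Welcome
    msg = welcome_cls(type="Welcome", request_id="req-123")  # placeholder request id

    # __dir__ lists the lazily importable names without loading their modules.
    assert "AgentV1Settings" in dir(agent_types)
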
diff --git a/src/deepgram/agent/v1/types/agent_v1agent_audio_done.py b/src/deepgram/agent/v1/types/agent_v1agent_audio_done.py
new file mode 100644
index 00000000..95f3f376
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1agent_audio_done.py
@@ -0,0 +1,22 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+
+
+class AgentV1AgentAudioDone(UniversalBaseModel):
+ type: typing.Literal["AgentAudioDone"] = pydantic.Field(default="AgentAudioDone")
+ """
+ Message type identifier indicating the agent has finished sending audio
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/agent/v1/types/agent_v1agent_started_speaking.py b/src/deepgram/agent/v1/types/agent_v1agent_started_speaking.py
new file mode 100644
index 00000000..e6c47c6f
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1agent_started_speaking.py
@@ -0,0 +1,37 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+
+
+class AgentV1AgentStartedSpeaking(UniversalBaseModel):
+ type: typing.Literal["AgentStartedSpeaking"] = pydantic.Field(default="AgentStartedSpeaking")
+ """
+ Message type identifier for agent started speaking
+ """
+
+ total_latency: float = pydantic.Field()
+ """
+ Seconds from receiving the user's utterance to producing the agent's reply
+ """
+
+ tts_latency: float = pydantic.Field()
+ """
+ The portion of total latency attributable to text-to-speech
+ """
+
+ ttt_latency: float = pydantic.Field()
+ """
+ The portion of total latency attributable to text-to-text (usually an LLM)
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/agent/v1/types/agent_v1agent_thinking.py b/src/deepgram/agent/v1/types/agent_v1agent_thinking.py
new file mode 100644
index 00000000..4b63c92f
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1agent_thinking.py
@@ -0,0 +1,27 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+
+
+class AgentV1AgentThinking(UniversalBaseModel):
+ type: typing.Literal["AgentThinking"] = pydantic.Field(default="AgentThinking")
+ """
+ Message type identifier for agent thinking
+ """
+
+ content: str = pydantic.Field()
+ """
+ The text of the agent's thought process
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/agent/v1/types/agent_v1conversation_text.py b/src/deepgram/agent/v1/types/agent_v1conversation_text.py
new file mode 100644
index 00000000..9888c93b
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1conversation_text.py
@@ -0,0 +1,33 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+from .agent_v1conversation_text_role import AgentV1ConversationTextRole
+
+
+class AgentV1ConversationText(UniversalBaseModel):
+ type: typing.Literal["ConversationText"] = pydantic.Field(default="ConversationText")
+ """
+ Message type identifier for conversation text
+ """
+
+ role: AgentV1ConversationTextRole = pydantic.Field()
+ """
+ Identifies who spoke the statement
+ """
+
+ content: str = pydantic.Field()
+ """
+ The actual statement that was spoken
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/agent/v1/types/agent_v1conversation_text_role.py b/src/deepgram/agent/v1/types/agent_v1conversation_text_role.py
new file mode 100644
index 00000000..785333dd
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1conversation_text_role.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+AgentV1ConversationTextRole = typing.Union[typing.Literal["user", "assistant"], typing.Any]
diff --git a/src/deepgram/agent/v1/types/agent_v1error.py b/src/deepgram/agent/v1/types/agent_v1error.py
new file mode 100644
index 00000000..e0c01466
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1error.py
@@ -0,0 +1,32 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+
+
+class AgentV1Error(UniversalBaseModel):
+ type: typing.Literal["Error"] = pydantic.Field(default="Error")
+ """
+ Message type identifier for error responses
+ """
+
+ description: str = pydantic.Field()
+ """
+ A description of what went wrong
+ """
+
+ code: str = pydantic.Field()
+ """
+ Error code identifying the type of error
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/agent/v1/types/agent_v1function_call_request.py b/src/deepgram/agent/v1/types/agent_v1function_call_request.py
new file mode 100644
index 00000000..4c5e7c4a
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1function_call_request.py
@@ -0,0 +1,28 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+from .agent_v1function_call_request_functions_item import AgentV1FunctionCallRequestFunctionsItem
+
+
+class AgentV1FunctionCallRequest(UniversalBaseModel):
+ type: typing.Literal["FunctionCallRequest"] = pydantic.Field(default="FunctionCallRequest")
+ """
+ Message type identifier for function call requests
+ """
+
+ functions: typing.List[AgentV1FunctionCallRequestFunctionsItem] = pydantic.Field()
+ """
+ Array of functions to be called
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/agent/v1/types/agent_v1function_call_request_functions_item.py b/src/deepgram/agent/v1/types/agent_v1function_call_request_functions_item.py
new file mode 100644
index 00000000..dcd75dd4
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1function_call_request_functions_item.py
@@ -0,0 +1,37 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+
+
+class AgentV1FunctionCallRequestFunctionsItem(UniversalBaseModel):
+ id: str = pydantic.Field()
+ """
+ Unique identifier for the function call
+ """
+
+ name: str = pydantic.Field()
+ """
+ The name of the function to call
+ """
+
+ arguments: str = pydantic.Field()
+ """
+ JSON string containing the function arguments
+ """
+
+ client_side: bool = pydantic.Field()
+ """
+ Whether the function should be executed client-side
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/agent/v1/types/agent_v1inject_agent_message.py b/src/deepgram/agent/v1/types/agent_v1inject_agent_message.py
new file mode 100644
index 00000000..6711f6dc
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1inject_agent_message.py
@@ -0,0 +1,27 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+
+
+class AgentV1InjectAgentMessage(UniversalBaseModel):
+ type: typing.Literal["InjectAgentMessage"] = pydantic.Field(default="InjectAgentMessage")
+ """
+ Message type identifier for injecting an agent message
+ """
+
+ message: str = pydantic.Field()
+ """
+ The statement that the agent should say
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/agent/v1/types/agent_v1inject_user_message.py b/src/deepgram/agent/v1/types/agent_v1inject_user_message.py
new file mode 100644
index 00000000..78a3ebf9
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1inject_user_message.py
@@ -0,0 +1,27 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+
+
+class AgentV1InjectUserMessage(UniversalBaseModel):
+ type: typing.Literal["InjectUserMessage"] = pydantic.Field(default="InjectUserMessage")
+ """
+ Message type identifier for injecting a user message
+ """
+
+ content: str = pydantic.Field()
+ """
+ The specific phrase or statement the agent should respond to
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/agent/v1/types/agent_v1injection_refused.py b/src/deepgram/agent/v1/types/agent_v1injection_refused.py
new file mode 100644
index 00000000..b185fccc
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1injection_refused.py
@@ -0,0 +1,27 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+
+
+class AgentV1InjectionRefused(UniversalBaseModel):
+ type: typing.Literal["InjectionRefused"] = pydantic.Field(default="InjectionRefused")
+ """
+ Message type identifier for injection refused
+ """
+
+ message: str = pydantic.Field()
+ """
+ Details about why the injection was refused
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/agent/v1/types/agent_v1keep_alive.py b/src/deepgram/agent/v1/types/agent_v1keep_alive.py
new file mode 100644
index 00000000..49266088
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1keep_alive.py
@@ -0,0 +1,26 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+
+
+class AgentV1KeepAlive(UniversalBaseModel):
+ """
+ Send a control message to the agent
+ """
+
+ type: typing.Literal["KeepAlive"] = pydantic.Field(default="KeepAlive")
+ """
+ Message type identifier
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/agent/v1/types/agent_v1prompt_updated.py b/src/deepgram/agent/v1/types/agent_v1prompt_updated.py
new file mode 100644
index 00000000..f4827a96
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1prompt_updated.py
@@ -0,0 +1,22 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+
+
+class AgentV1PromptUpdated(UniversalBaseModel):
+ type: typing.Literal["PromptUpdated"] = pydantic.Field(default="PromptUpdated")
+ """
+ Message type identifier for prompt update confirmation
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/agent/v1/types/agent_v1receive_function_call_response.py b/src/deepgram/agent/v1/types/agent_v1receive_function_call_response.py
new file mode 100644
index 00000000..852aec2b
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1receive_function_call_response.py
@@ -0,0 +1,54 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+
+
+class AgentV1ReceiveFunctionCallResponse(UniversalBaseModel):
+ """
+ Function call response message used bidirectionally:
+
+ • **Client → Server**: Response after client executes a function
+ marked as client_side: true
+ • **Server → Client**: Response after server executes a function
+ marked as client_side: false
+
+ The same message structure serves both directions, enabling a unified
+ interface for function call responses regardless of execution location.
+ """
+
+ type: typing.Literal["FunctionCallResponse"] = pydantic.Field(default="FunctionCallResponse")
+ """
+ Message type identifier for function call responses
+ """
+
+ id: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ The unique identifier for the function call.
+
+ • **Required for client responses**: Should match the id from
+ the corresponding `FunctionCallRequest`
+ • **Optional for server responses**: Server may omit when responding
+ to internal function executions
+ """
+
+ name: str = pydantic.Field()
+ """
+ The name of the function being called
+ """
+
+ content: str = pydantic.Field()
+ """
+ The content or result of the function call
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/agent/v1/types/agent_v1send_function_call_response.py b/src/deepgram/agent/v1/types/agent_v1send_function_call_response.py
new file mode 100644
index 00000000..9366862f
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1send_function_call_response.py
@@ -0,0 +1,54 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+
+
+class AgentV1SendFunctionCallResponse(UniversalBaseModel):
+ """
+ Function call response message used bidirectionally:
+
+ • **Client → Server**: Response after client executes a function
+ marked as client_side: true
+ • **Server → Client**: Response after server executes a function
+ marked as client_side: false
+
+ The same message structure serves both directions, enabling a unified
+ interface for function call responses regardless of execution location.
+ """
+
+ type: typing.Literal["FunctionCallResponse"] = pydantic.Field(default="FunctionCallResponse")
+ """
+ Message type identifier for function call responses
+ """
+
+ id: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ The unique identifier for the function call.
+
+ • **Required for client responses**: Should match the id from
+ the corresponding `FunctionCallRequest`
+ • **Optional for server responses**: Server may omit when responding
+ to internal function executions
+ """
+
+ name: str = pydantic.Field()
+ """
+ The name of the function being called
+ """
+
+ content: str = pydantic.Field()
+ """
+ The content or result of the function call
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
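
Note: the receive/send FunctionCallResponse models above share one wire shape, so a client reply to a FunctionCallRequest is this model with the request's id echoed back. A hedged sketch of the client → server direction only (the id, function name, and result below are invented placeholders; serialization shown is plain pydantic, not a Deepgram-specific helper):

    from deepgram.agent.v1.types import AgentV1SendFunctionCallResponse

    # Reply to a FunctionCallRequest whose function was marked client_side=True.
    response = AgentV1SendFunctionCallResponse(
        id="fc_123",                      # must match the id from FunctionCallRequest.functions
        name="get_weather",               # placeholder function name
        content='{"temperature_f": 72}',  # function result serialized as a string
    )
    payload = response.json()  # pydantic-style JSON export (method may vary by pydantic version)
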
diff --git a/src/deepgram/agent/v1/types/agent_v1settings.py b/src/deepgram/agent/v1/types/agent_v1settings.py
new file mode 100644
index 00000000..201533d3
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1settings.py
@@ -0,0 +1,40 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+from .agent_v1settings_agent import AgentV1SettingsAgent
+from .agent_v1settings_audio import AgentV1SettingsAudio
+from .agent_v1settings_flags import AgentV1SettingsFlags
+
+
+class AgentV1Settings(UniversalBaseModel):
+ type: typing.Literal["Settings"] = "Settings"
+ tags: typing.Optional[typing.List[str]] = pydantic.Field(default=None)
+ """
+ Tags to associate with the request
+ """
+
+ experimental: typing.Optional[bool] = pydantic.Field(default=None)
+ """
+ To enable experimental features
+ """
+
+ flags: typing.Optional[AgentV1SettingsFlags] = None
+ mip_opt_out: typing.Optional[bool] = pydantic.Field(default=None)
+ """
+ To opt out of the Deepgram Model Improvement Program
+ """
+
+ audio: AgentV1SettingsAudio
+ agent: AgentV1SettingsAgent
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent.py b/src/deepgram/agent/v1/types/agent_v1settings_agent.py
new file mode 100644
index 00000000..b51a0247
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1settings_agent.py
@@ -0,0 +1,39 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+from .agent_v1settings_agent_context import AgentV1SettingsAgentContext
+from .agent_v1settings_agent_listen import AgentV1SettingsAgentListen
+from .agent_v1settings_agent_speak import AgentV1SettingsAgentSpeak
+from .agent_v1settings_agent_think import AgentV1SettingsAgentThink
+
+
+class AgentV1SettingsAgent(UniversalBaseModel):
+ language: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Deprecated. Use `listen.provider.language` and `speak.provider.language` fields instead.
+ """
+
+ context: typing.Optional[AgentV1SettingsAgentContext] = pydantic.Field(default=None)
+ """
+ Conversation context including the history of messages and function calls
+ """
+
+ listen: typing.Optional[AgentV1SettingsAgentListen] = None
+ think: typing.Optional[AgentV1SettingsAgentThink] = None
+ speak: typing.Optional[AgentV1SettingsAgentSpeak] = None
+ greeting: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Optional message that the agent will speak at the start
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
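
Every field on the agent model above is optional, so a tiny construction sketch may help orient readers of the nested types that follow. This is a sketch only: it assumes the generated module path mirrors the file layout, and the greeting text is purely illustrative.

    from deepgram.agent.v1.types.agent_v1settings_agent import AgentV1SettingsAgent

    # Minimal agent configuration; listen, think, and speak can be filled in with
    # the nested provider models defined in the files below.
    agent = AgentV1SettingsAgent(
        greeting="Hello! How can I help you today?",
    )
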
diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_context.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_context.py
new file mode 100644
index 00000000..635f68b5
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1settings_agent_context.py
@@ -0,0 +1,27 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+from .agent_v1settings_agent_context_messages_item import AgentV1SettingsAgentContextMessagesItem
+
+
+class AgentV1SettingsAgentContext(UniversalBaseModel):
+ """
+ Conversation context including the history of messages and function calls
+ """
+
+ messages: typing.Optional[typing.List[AgentV1SettingsAgentContextMessagesItem]] = pydantic.Field(default=None)
+ """
+ Conversation history as a list of messages and function calls
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_context_messages_item.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_context_messages_item.py
new file mode 100644
index 00000000..2061fd2d
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1settings_agent_context_messages_item.py
@@ -0,0 +1,12 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+from .agent_v1settings_agent_context_messages_item_content import AgentV1SettingsAgentContextMessagesItemContent
+from .agent_v1settings_agent_context_messages_item_function_calls import (
+ AgentV1SettingsAgentContextMessagesItemFunctionCalls,
+)
+
+AgentV1SettingsAgentContextMessagesItem = typing.Union[
+ AgentV1SettingsAgentContextMessagesItemContent, AgentV1SettingsAgentContextMessagesItemFunctionCalls
+]
diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_context_messages_item_content.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_context_messages_item_content.py
new file mode 100644
index 00000000..9bc207ab
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1settings_agent_context_messages_item_content.py
@@ -0,0 +1,39 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+from .agent_v1settings_agent_context_messages_item_content_role import (
+ AgentV1SettingsAgentContextMessagesItemContentRole,
+)
+
+
+class AgentV1SettingsAgentContextMessagesItemContent(UniversalBaseModel):
+ """
+ Conversation text as part of the conversation history
+ """
+
+ type: typing.Literal["History"] = pydantic.Field(default="History")
+ """
+ Message type identifier for conversation text
+ """
+
+ role: AgentV1SettingsAgentContextMessagesItemContentRole = pydantic.Field()
+ """
+ Identifies who spoke the statement
+ """
+
+ content: str = pydantic.Field()
+ """
+ The actual statement that was spoken
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_context_messages_item_content_role.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_context_messages_item_content_role.py
new file mode 100644
index 00000000..19a3bcc0
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1settings_agent_context_messages_item_content_role.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+AgentV1SettingsAgentContextMessagesItemContentRole = typing.Union[typing.Literal["user", "assistant"], typing.Any]
diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_context_messages_item_function_calls.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_context_messages_item_function_calls.py
new file mode 100644
index 00000000..6759f8b3
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1settings_agent_context_messages_item_function_calls.py
@@ -0,0 +1,32 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+from .agent_v1settings_agent_context_messages_item_function_calls_function_calls_item import (
+ AgentV1SettingsAgentContextMessagesItemFunctionCallsFunctionCallsItem,
+)
+
+
+class AgentV1SettingsAgentContextMessagesItemFunctionCalls(UniversalBaseModel):
+ """
+ Client-side or server-side function call request and response as part of the conversation history
+ """
+
+ type: typing.Literal["History"] = "History"
+ function_calls: typing.List[AgentV1SettingsAgentContextMessagesItemFunctionCallsFunctionCallsItem] = (
+ pydantic.Field()
+ )
+ """
+ List of function call objects
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_context_messages_item_function_calls_function_calls_item.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_context_messages_item_function_calls_function_calls_item.py
new file mode 100644
index 00000000..9fead900
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1settings_agent_context_messages_item_function_calls_function_calls_item.py
@@ -0,0 +1,42 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+
+
+class AgentV1SettingsAgentContextMessagesItemFunctionCallsFunctionCallsItem(UniversalBaseModel):
+ id: str = pydantic.Field()
+ """
+ Unique identifier for the function call
+ """
+
+ name: str = pydantic.Field()
+ """
+ Name of the function called
+ """
+
+ client_side: bool = pydantic.Field()
+ """
+ Indicates if the call was client-side or server-side
+ """
+
+ arguments: str = pydantic.Field()
+ """
+ Arguments passed to the function
+ """
+
+ response: str = pydantic.Field()
+ """
+ Response from the function call
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
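
The four history types above compose into a conversation context. A minimal sketch follows, assuming the module paths mirror the file layout; the id, function name, and payloads are hypothetical placeholders.

    from deepgram.agent.v1.types.agent_v1settings_agent_context import AgentV1SettingsAgentContext
    from deepgram.agent.v1.types.agent_v1settings_agent_context_messages_item_content import (
        AgentV1SettingsAgentContextMessagesItemContent,
    )
    from deepgram.agent.v1.types.agent_v1settings_agent_context_messages_item_function_calls import (
        AgentV1SettingsAgentContextMessagesItemFunctionCalls,
    )
    from deepgram.agent.v1.types.agent_v1settings_agent_context_messages_item_function_calls_function_calls_item import (
        AgentV1SettingsAgentContextMessagesItemFunctionCallsFunctionCallsItem,
    )

    context = AgentV1SettingsAgentContext(
        messages=[
            # Plain conversation text; role is "user" or "assistant".
            AgentV1SettingsAgentContextMessagesItemContent(
                role="user", content="What's the weather in Fresno?"
            ),
            # A previously completed function call recorded in the history.
            AgentV1SettingsAgentContextMessagesItemFunctionCalls(
                function_calls=[
                    AgentV1SettingsAgentContextMessagesItemFunctionCallsFunctionCallsItem(
                        id="call_123",  # illustrative id
                        name="get_weather",  # hypothetical function name
                        client_side=True,
                        arguments='{"location": "Fresno"}',
                        response='{"temp_f": 72}',
                    )
                ]
            ),
        ]
    )
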
diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_listen.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_listen.py
new file mode 100644
index 00000000..22951f00
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1settings_agent_listen.py
@@ -0,0 +1,20 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+from .agent_v1settings_agent_listen_provider import AgentV1SettingsAgentListenProvider
+
+
+class AgentV1SettingsAgentListen(UniversalBaseModel):
+ provider: typing.Optional[AgentV1SettingsAgentListenProvider] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_listen_provider.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_listen_provider.py
new file mode 100644
index 00000000..273b3157
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1settings_agent_listen_provider.py
@@ -0,0 +1,49 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from __future__ import annotations
+
+import typing
+
+import pydantic
+import typing_extensions
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+
+
+class AgentV1SettingsAgentListenProvider_V1(UniversalBaseModel):
+ version: typing.Literal["v1"] = "v1"
+ type: typing.Literal["deepgram"] = "deepgram"
+ model: typing.Optional[str] = None
+ language: typing.Optional[str] = None
+ keyterms: typing.Optional[typing.List[str]] = None
+ smart_format: typing.Optional[bool] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
+
+
+class AgentV1SettingsAgentListenProvider_V2(UniversalBaseModel):
+ version: typing.Literal["v2"] = "v2"
+ type: typing.Literal["deepgram"] = "deepgram"
+ model: str
+ keyterms: typing.Optional[typing.List[str]] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
+
+
+AgentV1SettingsAgentListenProvider = typing_extensions.Annotated[
+ typing.Union[AgentV1SettingsAgentListenProvider_V1, AgentV1SettingsAgentListenProvider_V2],
+ pydantic.Field(discriminator="version"),
+]
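
The listen provider is a discriminated union keyed on `version`, so pydantic selects the V1 or V2 variant automatically when parsing. A brief sketch of constructing both variants directly, assuming the module paths mirror the file layout and using illustrative model names:

    from deepgram.agent.v1.types.agent_v1settings_agent_listen import AgentV1SettingsAgentListen
    from deepgram.agent.v1.types.agent_v1settings_agent_listen_provider import (
        AgentV1SettingsAgentListenProvider_V1,
        AgentV1SettingsAgentListenProvider_V2,
    )

    # V1 API: model, language, keyterms, and smart_format are all optional.
    listen_v1 = AgentV1SettingsAgentListen(
        provider=AgentV1SettingsAgentListenProvider_V1(model="nova-3", language="en", smart_format=True)
    )

    # V2 API: model is required (e.g. flux-general-en).
    listen_v2 = AgentV1SettingsAgentListen(
        provider=AgentV1SettingsAgentListenProvider_V2(model="flux-general-en")
    )
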
diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_listen_provider_v1.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_listen_provider_v1.py
new file mode 100644
index 00000000..1012454d
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1settings_agent_listen_provider_v1.py
@@ -0,0 +1,42 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+
+
+class AgentV1SettingsAgentListenProviderV1(UniversalBaseModel):
+ type: typing.Literal["deepgram"] = pydantic.Field(default="deepgram")
+ """
+ Provider type for speech-to-text
+ """
+
+ model: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Model to use for speech-to-text using the V1 API (e.g. Nova-3, Nova-2)
+ """
+
+ language: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Language code to use for speech-to-text. Can be a BCP-47 language tag (e.g. `en`), or `multi` for code-switching transcription
+ """
+
+ keyterms: typing.Optional[typing.List[str]] = pydantic.Field(default=None)
+ """
+ Prompt keyterm recognition to improve Keyword Recall Rate
+ """
+
+ smart_format: typing.Optional[bool] = pydantic.Field(default=None)
+ """
+ Applies smart formatting to improve transcript readability
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_listen_provider_v2.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_listen_provider_v2.py
new file mode 100644
index 00000000..cf33274f
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1settings_agent_listen_provider_v2.py
@@ -0,0 +1,32 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+
+
+class AgentV1SettingsAgentListenProviderV2(UniversalBaseModel):
+ type: typing.Literal["deepgram"] = pydantic.Field(default="deepgram")
+ """
+ Provider type for speech-to-text
+ """
+
+ model: str = pydantic.Field()
+ """
+ Model to use for speech-to-text using the V2 API (e.g. flux-general-en)
+ """
+
+ keyterms: typing.Optional[typing.List[str]] = pydantic.Field(default=None)
+ """
+ Prompt keyterm recognition to improve Keyword Recall Rate
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_speak.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak.py
new file mode 100644
index 00000000..0ad6d1c5
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak.py
@@ -0,0 +1,10 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+from .agent_v1settings_agent_speak_endpoint import AgentV1SettingsAgentSpeakEndpoint
+from .agent_v1settings_agent_speak_one_item import AgentV1SettingsAgentSpeakOneItem
+
+AgentV1SettingsAgentSpeak = typing.Union[
+ AgentV1SettingsAgentSpeakEndpoint, typing.List[AgentV1SettingsAgentSpeakOneItem]
+]
diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint.py
new file mode 100644
index 00000000..e6647bee
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint.py
@@ -0,0 +1,26 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+from .agent_v1settings_agent_speak_endpoint_endpoint import AgentV1SettingsAgentSpeakEndpointEndpoint
+from .agent_v1settings_agent_speak_endpoint_provider import AgentV1SettingsAgentSpeakEndpointProvider
+
+
+class AgentV1SettingsAgentSpeakEndpoint(UniversalBaseModel):
+ provider: AgentV1SettingsAgentSpeakEndpointProvider
+ endpoint: typing.Optional[AgentV1SettingsAgentSpeakEndpointEndpoint] = pydantic.Field(default=None)
+ """
+ Optional if the provider is Deepgram. Required for non-Deepgram TTS providers.
+ When present, must include a url field and a headers object. Valid schemes are https and wss; wss is only supported for Eleven Labs.
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint_endpoint.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint_endpoint.py
new file mode 100644
index 00000000..e4dfb433
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint_endpoint.py
@@ -0,0 +1,30 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+
+
+class AgentV1SettingsAgentSpeakEndpointEndpoint(UniversalBaseModel):
+ """
+ Optional if the provider is Deepgram. Required for non-Deepgram TTS providers.
+ When present, must include a url field and a headers object. Valid schemes are https and wss; wss is only supported for Eleven Labs.
+ """
+
+ url: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Custom TTS endpoint URL. Cannot contain `output_format` or `model_id` query
+ parameters when the provider is Eleven Labs.
+ """
+
+ headers: typing.Optional[typing.Dict[str, str]] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint_provider.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint_provider.py
new file mode 100644
index 00000000..9f9a08ac
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint_provider.py
@@ -0,0 +1,131 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from __future__ import annotations
+
+import typing
+
+import pydantic
+import typing_extensions
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+from .agent_v1settings_agent_speak_endpoint_provider_aws_polly_credentials import (
+ AgentV1SettingsAgentSpeakEndpointProviderAwsPollyCredentials,
+)
+from .agent_v1settings_agent_speak_endpoint_provider_aws_polly_engine import (
+ AgentV1SettingsAgentSpeakEndpointProviderAwsPollyEngine,
+)
+from .agent_v1settings_agent_speak_endpoint_provider_aws_polly_voice import (
+ AgentV1SettingsAgentSpeakEndpointProviderAwsPollyVoice,
+)
+from .agent_v1settings_agent_speak_one_item_provider_cartesia_model_id import (
+ AgentV1SettingsAgentSpeakOneItemProviderCartesiaModelId,
+)
+from .agent_v1settings_agent_speak_one_item_provider_cartesia_voice import (
+ AgentV1SettingsAgentSpeakOneItemProviderCartesiaVoice,
+)
+from .agent_v1settings_agent_speak_one_item_provider_deepgram_model import (
+ AgentV1SettingsAgentSpeakOneItemProviderDeepgramModel,
+)
+from .agent_v1settings_agent_speak_one_item_provider_eleven_labs_model_id import (
+ AgentV1SettingsAgentSpeakOneItemProviderElevenLabsModelId,
+)
+from .agent_v1settings_agent_speak_one_item_provider_open_ai_model import (
+ AgentV1SettingsAgentSpeakOneItemProviderOpenAiModel,
+)
+from .agent_v1settings_agent_speak_one_item_provider_open_ai_voice import (
+ AgentV1SettingsAgentSpeakOneItemProviderOpenAiVoice,
+)
+
+
+class AgentV1SettingsAgentSpeakEndpointProvider_Deepgram(UniversalBaseModel):
+ type: typing.Literal["deepgram"] = "deepgram"
+ version: typing.Optional[typing.Literal["v1"]] = None
+ model: AgentV1SettingsAgentSpeakOneItemProviderDeepgramModel
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
+
+
+class AgentV1SettingsAgentSpeakEndpointProvider_ElevenLabs(UniversalBaseModel):
+ type: typing.Literal["eleven_labs"] = "eleven_labs"
+ version: typing.Optional[typing.Literal["v1"]] = None
+ model_id: AgentV1SettingsAgentSpeakOneItemProviderElevenLabsModelId
+ language: typing.Optional[str] = None
+ language_code: typing.Optional[str] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
+
+
+class AgentV1SettingsAgentSpeakEndpointProvider_Cartesia(UniversalBaseModel):
+ type: typing.Literal["cartesia"] = "cartesia"
+ version: typing.Optional[typing.Literal["2025-03-17"]] = None
+ model_id: AgentV1SettingsAgentSpeakOneItemProviderCartesiaModelId
+ voice: AgentV1SettingsAgentSpeakOneItemProviderCartesiaVoice
+ language: typing.Optional[str] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
+
+
+class AgentV1SettingsAgentSpeakEndpointProvider_OpenAi(UniversalBaseModel):
+ type: typing.Literal["open_ai"] = "open_ai"
+ version: typing.Optional[typing.Literal["v1"]] = None
+ model: AgentV1SettingsAgentSpeakOneItemProviderOpenAiModel
+ voice: AgentV1SettingsAgentSpeakOneItemProviderOpenAiVoice
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
+
+
+class AgentV1SettingsAgentSpeakEndpointProvider_AwsPolly(UniversalBaseModel):
+ type: typing.Literal["aws_polly"] = "aws_polly"
+ voice: AgentV1SettingsAgentSpeakEndpointProviderAwsPollyVoice
+ language: str
+ language_code: typing.Optional[str] = None
+ engine: AgentV1SettingsAgentSpeakEndpointProviderAwsPollyEngine
+ credentials: AgentV1SettingsAgentSpeakEndpointProviderAwsPollyCredentials
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
+
+
+AgentV1SettingsAgentSpeakEndpointProvider = typing_extensions.Annotated[
+ typing.Union[
+ AgentV1SettingsAgentSpeakEndpointProvider_Deepgram,
+ AgentV1SettingsAgentSpeakEndpointProvider_ElevenLabs,
+ AgentV1SettingsAgentSpeakEndpointProvider_Cartesia,
+ AgentV1SettingsAgentSpeakEndpointProvider_OpenAi,
+ AgentV1SettingsAgentSpeakEndpointProvider_AwsPolly,
+ ],
+ pydantic.Field(discriminator="type"),
+]
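
This provider union is discriminated on `type`, so parsed settings resolve to the matching variant. A minimal sketch of the Deepgram variant, assuming the module path mirrors the file layout:

    from deepgram.agent.v1.types.agent_v1settings_agent_speak_endpoint_provider import (
        AgentV1SettingsAgentSpeakEndpointProvider_Deepgram,
    )

    # type defaults to "deepgram" and version is optional, so only the model is needed.
    provider = AgentV1SettingsAgentSpeakEndpointProvider_Deepgram(model="aura-2-thalia-en")

The other variants (eleven_labs, cartesia, open_ai, aws_polly) are built the same way, supplying their required model and voice fields.
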
diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint_provider_aws_polly.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint_provider_aws_polly.py
new file mode 100644
index 00000000..60126fc6
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint_provider_aws_polly.py
@@ -0,0 +1,44 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+from .agent_v1settings_agent_speak_endpoint_provider_aws_polly_credentials import (
+ AgentV1SettingsAgentSpeakEndpointProviderAwsPollyCredentials,
+)
+from .agent_v1settings_agent_speak_endpoint_provider_aws_polly_engine import (
+ AgentV1SettingsAgentSpeakEndpointProviderAwsPollyEngine,
+)
+from .agent_v1settings_agent_speak_endpoint_provider_aws_polly_voice import (
+ AgentV1SettingsAgentSpeakEndpointProviderAwsPollyVoice,
+)
+
+
+class AgentV1SettingsAgentSpeakEndpointProviderAwsPolly(UniversalBaseModel):
+ voice: AgentV1SettingsAgentSpeakEndpointProviderAwsPollyVoice = pydantic.Field()
+ """
+ AWS Polly voice name
+ """
+
+ language: str = pydantic.Field()
+ """
+ Language to use, e.g. 'en-US'. Corresponds to the `language_code` parameter in the AWS Polly API
+ """
+
+ language_code: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Deprecated. Use the `language` field instead.
+ """
+
+ engine: AgentV1SettingsAgentSpeakEndpointProviderAwsPollyEngine
+ credentials: AgentV1SettingsAgentSpeakEndpointProviderAwsPollyCredentials
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint_provider_aws_polly_credentials.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint_provider_aws_polly_credentials.py
new file mode 100644
index 00000000..3aef30ab
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint_provider_aws_polly_credentials.py
@@ -0,0 +1,29 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+from .agent_v1settings_agent_speak_endpoint_provider_aws_polly_credentials_type import (
+ AgentV1SettingsAgentSpeakEndpointProviderAwsPollyCredentialsType,
+)
+
+
+class AgentV1SettingsAgentSpeakEndpointProviderAwsPollyCredentials(UniversalBaseModel):
+ type: AgentV1SettingsAgentSpeakEndpointProviderAwsPollyCredentialsType
+ region: str
+ access_key_id: str
+ secret_access_key: str
+ session_token: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Required for STS only
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint_provider_aws_polly_credentials_type.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint_provider_aws_polly_credentials_type.py
new file mode 100644
index 00000000..515f0617
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint_provider_aws_polly_credentials_type.py
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+AgentV1SettingsAgentSpeakEndpointProviderAwsPollyCredentialsType = typing.Union[
+ typing.Literal["sts", "iam"], typing.Any
+]
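
The credentials model supports both long-lived IAM keys and temporary STS credentials; per the field docstring, `session_token` is required for STS only. A sketch with placeholder values, assuming the module path mirrors the file layout:

    from deepgram.agent.v1.types.agent_v1settings_agent_speak_endpoint_provider_aws_polly_credentials import (
        AgentV1SettingsAgentSpeakEndpointProviderAwsPollyCredentials,
    )

    # Long-lived IAM credentials: no session token.
    iam_creds = AgentV1SettingsAgentSpeakEndpointProviderAwsPollyCredentials(
        type="iam",
        region="us-east-1",
        access_key_id="AKIA...",  # placeholder
        secret_access_key="...",  # placeholder
    )

    # Temporary STS credentials: session_token must also be supplied.
    sts_creds = AgentV1SettingsAgentSpeakEndpointProviderAwsPollyCredentials(
        type="sts",
        region="us-east-1",
        access_key_id="ASIA...",  # placeholder
        secret_access_key="...",  # placeholder
        session_token="...",  # placeholder
    )
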
diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint_provider_aws_polly_engine.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint_provider_aws_polly_engine.py
new file mode 100644
index 00000000..2f182419
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint_provider_aws_polly_engine.py
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+AgentV1SettingsAgentSpeakEndpointProviderAwsPollyEngine = typing.Union[
+ typing.Literal["generative", "long-form", "standard", "neural"], typing.Any
+]
diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint_provider_aws_polly_voice.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint_provider_aws_polly_voice.py
new file mode 100644
index 00000000..0079e7b3
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint_provider_aws_polly_voice.py
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+AgentV1SettingsAgentSpeakEndpointProviderAwsPollyVoice = typing.Union[
+ typing.Literal["Matthew", "Joanna", "Amy", "Emma", "Brian", "Arthur", "Aria", "Ayanda"], typing.Any
+]
diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint_provider_cartesia_model_id.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint_provider_cartesia_model_id.py
new file mode 100644
index 00000000..b81e30b3
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint_provider_cartesia_model_id.py
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+AgentV1SettingsAgentSpeakEndpointProviderCartesiaModelId = typing.Union[
+ typing.Literal["sonic-2", "sonic-multilingual"], typing.Any
+]
diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint_provider_cartesia_voice.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint_provider_cartesia_voice.py
new file mode 100644
index 00000000..52d23801
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint_provider_cartesia_voice.py
@@ -0,0 +1,27 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+
+
+class AgentV1SettingsAgentSpeakEndpointProviderCartesiaVoice(UniversalBaseModel):
+ mode: str = pydantic.Field()
+ """
+ Cartesia voice mode
+ """
+
+ id: str = pydantic.Field()
+ """
+ Cartesia voice ID
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint_provider_deepgram_model.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint_provider_deepgram_model.py
new file mode 100644
index 00000000..161119d8
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint_provider_deepgram_model.py
@@ -0,0 +1,72 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+AgentV1SettingsAgentSpeakEndpointProviderDeepgramModel = typing.Union[
+ typing.Literal[
+ "aura-asteria-en",
+ "aura-luna-en",
+ "aura-stella-en",
+ "aura-athena-en",
+ "aura-hera-en",
+ "aura-orion-en",
+ "aura-arcas-en",
+ "aura-perseus-en",
+ "aura-angus-en",
+ "aura-orpheus-en",
+ "aura-helios-en",
+ "aura-zeus-en",
+ "aura-2-amalthea-en",
+ "aura-2-andromeda-en",
+ "aura-2-apollo-en",
+ "aura-2-arcas-en",
+ "aura-2-aries-en",
+ "aura-2-asteria-en",
+ "aura-2-athena-en",
+ "aura-2-atlas-en",
+ "aura-2-aurora-en",
+ "aura-2-callista-en",
+ "aura-2-cora-en",
+ "aura-2-cordelia-en",
+ "aura-2-delia-en",
+ "aura-2-draco-en",
+ "aura-2-electra-en",
+ "aura-2-harmonia-en",
+ "aura-2-helena-en",
+ "aura-2-hera-en",
+ "aura-2-hermes-en",
+ "aura-2-hyperion-en",
+ "aura-2-iris-en",
+ "aura-2-janus-en",
+ "aura-2-juno-en",
+ "aura-2-jupiter-en",
+ "aura-2-luna-en",
+ "aura-2-mars-en",
+ "aura-2-minerva-en",
+ "aura-2-neptune-en",
+ "aura-2-odysseus-en",
+ "aura-2-ophelia-en",
+ "aura-2-orion-en",
+ "aura-2-orpheus-en",
+ "aura-2-pandora-en",
+ "aura-2-phoebe-en",
+ "aura-2-pluto-en",
+ "aura-2-saturn-en",
+ "aura-2-selene-en",
+ "aura-2-thalia-en",
+ "aura-2-theia-en",
+ "aura-2-vesta-en",
+ "aura-2-zeus-en",
+ "aura-2-sirio-es",
+ "aura-2-nestor-es",
+ "aura-2-carina-es",
+ "aura-2-celeste-es",
+ "aura-2-alvaro-es",
+ "aura-2-diana-es",
+ "aura-2-aquila-es",
+ "aura-2-selena-es",
+ "aura-2-estrella-es",
+ "aura-2-javier-es",
+ ],
+ typing.Any,
+]
diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint_provider_eleven_labs_model_id.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint_provider_eleven_labs_model_id.py
new file mode 100644
index 00000000..4ed8c7e8
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint_provider_eleven_labs_model_id.py
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+AgentV1SettingsAgentSpeakEndpointProviderElevenLabsModelId = typing.Union[
+ typing.Literal["eleven_turbo_v2_5", "eleven_monolingual_v1", "eleven_multilingual_v2"], typing.Any
+]
diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint_provider_open_ai_model.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint_provider_open_ai_model.py
new file mode 100644
index 00000000..f83a1943
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint_provider_open_ai_model.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+AgentV1SettingsAgentSpeakEndpointProviderOpenAiModel = typing.Union[typing.Literal["tts-1", "tts-1-hd"], typing.Any]
diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint_provider_open_ai_voice.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint_provider_open_ai_voice.py
new file mode 100644
index 00000000..0e8a10eb
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint_provider_open_ai_voice.py
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+AgentV1SettingsAgentSpeakEndpointProviderOpenAiVoice = typing.Union[
+ typing.Literal["alloy", "echo", "fable", "onyx", "nova", "shimmer"], typing.Any
+]
diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_one_item.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_one_item.py
new file mode 100644
index 00000000..5b98ebe9
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_one_item.py
@@ -0,0 +1,26 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+from .agent_v1settings_agent_speak_one_item_endpoint import AgentV1SettingsAgentSpeakOneItemEndpoint
+from .agent_v1settings_agent_speak_one_item_provider import AgentV1SettingsAgentSpeakOneItemProvider
+
+
+class AgentV1SettingsAgentSpeakOneItem(UniversalBaseModel):
+ provider: AgentV1SettingsAgentSpeakOneItemProvider
+ endpoint: typing.Optional[AgentV1SettingsAgentSpeakOneItemEndpoint] = pydantic.Field(default=None)
+ """
+ Optional if the provider is Deepgram. Required for non-Deepgram TTS providers.
+ When present, must include a url field and a headers object. Valid schemes are https and wss; wss is only supported for Eleven Labs.
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_one_item_endpoint.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_one_item_endpoint.py
new file mode 100644
index 00000000..80057ae3
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_one_item_endpoint.py
@@ -0,0 +1,30 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+
+
+class AgentV1SettingsAgentSpeakOneItemEndpoint(UniversalBaseModel):
+ """
+ Optional if the provider is Deepgram. Required for non-Deepgram TTS providers.
+ When present, must include a url field and a headers object. Valid schemes are https and wss; wss is only supported for Eleven Labs.
+ """
+
+ url: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Custom TTS endpoint URL. Cannot contain `output_format` or `model_id` query
+ parameters when the provider is Eleven Labs.
+ """
+
+ headers: typing.Optional[typing.Dict[str, str]] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_one_item_provider.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_one_item_provider.py
new file mode 100644
index 00000000..5a1ae21d
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_one_item_provider.py
@@ -0,0 +1,131 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from __future__ import annotations
+
+import typing
+
+import pydantic
+import typing_extensions
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+from .agent_v1settings_agent_speak_one_item_provider_aws_polly_credentials import (
+ AgentV1SettingsAgentSpeakOneItemProviderAwsPollyCredentials,
+)
+from .agent_v1settings_agent_speak_one_item_provider_aws_polly_engine import (
+ AgentV1SettingsAgentSpeakOneItemProviderAwsPollyEngine,
+)
+from .agent_v1settings_agent_speak_one_item_provider_aws_polly_voice import (
+ AgentV1SettingsAgentSpeakOneItemProviderAwsPollyVoice,
+)
+from .agent_v1settings_agent_speak_one_item_provider_cartesia_model_id import (
+ AgentV1SettingsAgentSpeakOneItemProviderCartesiaModelId,
+)
+from .agent_v1settings_agent_speak_one_item_provider_cartesia_voice import (
+ AgentV1SettingsAgentSpeakOneItemProviderCartesiaVoice,
+)
+from .agent_v1settings_agent_speak_one_item_provider_deepgram_model import (
+ AgentV1SettingsAgentSpeakOneItemProviderDeepgramModel,
+)
+from .agent_v1settings_agent_speak_one_item_provider_eleven_labs_model_id import (
+ AgentV1SettingsAgentSpeakOneItemProviderElevenLabsModelId,
+)
+from .agent_v1settings_agent_speak_one_item_provider_open_ai_model import (
+ AgentV1SettingsAgentSpeakOneItemProviderOpenAiModel,
+)
+from .agent_v1settings_agent_speak_one_item_provider_open_ai_voice import (
+ AgentV1SettingsAgentSpeakOneItemProviderOpenAiVoice,
+)
+
+
+class AgentV1SettingsAgentSpeakOneItemProvider_Deepgram(UniversalBaseModel):
+ type: typing.Literal["deepgram"] = "deepgram"
+ version: typing.Optional[typing.Literal["v1"]] = None
+ model: AgentV1SettingsAgentSpeakOneItemProviderDeepgramModel
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
+
+
+class AgentV1SettingsAgentSpeakOneItemProvider_ElevenLabs(UniversalBaseModel):
+ type: typing.Literal["eleven_labs"] = "eleven_labs"
+ version: typing.Optional[typing.Literal["v1"]] = None
+ model_id: AgentV1SettingsAgentSpeakOneItemProviderElevenLabsModelId
+ language: typing.Optional[str] = None
+ language_code: typing.Optional[str] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
+
+
+class AgentV1SettingsAgentSpeakOneItemProvider_Cartesia(UniversalBaseModel):
+ type: typing.Literal["cartesia"] = "cartesia"
+ version: typing.Optional[typing.Literal["2025-03-17"]] = None
+ model_id: AgentV1SettingsAgentSpeakOneItemProviderCartesiaModelId
+ voice: AgentV1SettingsAgentSpeakOneItemProviderCartesiaVoice
+ language: typing.Optional[str] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
+
+
+class AgentV1SettingsAgentSpeakOneItemProvider_OpenAi(UniversalBaseModel):
+ type: typing.Literal["open_ai"] = "open_ai"
+ version: typing.Optional[typing.Literal["v1"]] = None
+ model: AgentV1SettingsAgentSpeakOneItemProviderOpenAiModel
+ voice: AgentV1SettingsAgentSpeakOneItemProviderOpenAiVoice
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
+
+
+class AgentV1SettingsAgentSpeakOneItemProvider_AwsPolly(UniversalBaseModel):
+ type: typing.Literal["aws_polly"] = "aws_polly"
+ voice: AgentV1SettingsAgentSpeakOneItemProviderAwsPollyVoice
+ language: str
+ language_code: typing.Optional[str] = None
+ engine: AgentV1SettingsAgentSpeakOneItemProviderAwsPollyEngine
+ credentials: AgentV1SettingsAgentSpeakOneItemProviderAwsPollyCredentials
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
+
+
+AgentV1SettingsAgentSpeakOneItemProvider = typing_extensions.Annotated[
+ typing.Union[
+ AgentV1SettingsAgentSpeakOneItemProvider_Deepgram,
+ AgentV1SettingsAgentSpeakOneItemProvider_ElevenLabs,
+ AgentV1SettingsAgentSpeakOneItemProvider_Cartesia,
+ AgentV1SettingsAgentSpeakOneItemProvider_OpenAi,
+ AgentV1SettingsAgentSpeakOneItemProvider_AwsPolly,
+ ],
+ pydantic.Field(discriminator="type"),
+]
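
`AgentV1SettingsAgentSpeak` (defined earlier in this diff) accepts either a single speak configuration or a list of them. A sketch of both shapes, assuming the module paths mirror the file layout; model and voice values are illustrative:

    from deepgram.agent.v1.types.agent_v1settings_agent_speak_endpoint import AgentV1SettingsAgentSpeakEndpoint
    from deepgram.agent.v1.types.agent_v1settings_agent_speak_endpoint_provider import (
        AgentV1SettingsAgentSpeakEndpointProvider_Deepgram,
    )
    from deepgram.agent.v1.types.agent_v1settings_agent_speak_one_item import AgentV1SettingsAgentSpeakOneItem
    from deepgram.agent.v1.types.agent_v1settings_agent_speak_one_item_provider import (
        AgentV1SettingsAgentSpeakOneItemProvider_Deepgram,
        AgentV1SettingsAgentSpeakOneItemProvider_OpenAi,
    )

    # Shape 1: a single speak configuration.
    speak_single = AgentV1SettingsAgentSpeakEndpoint(
        provider=AgentV1SettingsAgentSpeakEndpointProvider_Deepgram(model="aura-2-thalia-en")
    )

    # Shape 2: a list of speak configurations.
    speak_list = [
        AgentV1SettingsAgentSpeakOneItem(
            provider=AgentV1SettingsAgentSpeakOneItemProvider_Deepgram(model="aura-2-thalia-en")
        ),
        AgentV1SettingsAgentSpeakOneItem(
            # Per the endpoint docstring, non-Deepgram providers also require an
            # endpoint (url plus headers); omitted here for brevity.
            provider=AgentV1SettingsAgentSpeakOneItemProvider_OpenAi(model="tts-1", voice="alloy")
        ),
    ]
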
diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_one_item_provider_aws_polly.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_one_item_provider_aws_polly.py
new file mode 100644
index 00000000..b1745b2a
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_one_item_provider_aws_polly.py
@@ -0,0 +1,44 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+from .agent_v1settings_agent_speak_one_item_provider_aws_polly_credentials import (
+ AgentV1SettingsAgentSpeakOneItemProviderAwsPollyCredentials,
+)
+from .agent_v1settings_agent_speak_one_item_provider_aws_polly_engine import (
+ AgentV1SettingsAgentSpeakOneItemProviderAwsPollyEngine,
+)
+from .agent_v1settings_agent_speak_one_item_provider_aws_polly_voice import (
+ AgentV1SettingsAgentSpeakOneItemProviderAwsPollyVoice,
+)
+
+
+class AgentV1SettingsAgentSpeakOneItemProviderAwsPolly(UniversalBaseModel):
+ voice: AgentV1SettingsAgentSpeakOneItemProviderAwsPollyVoice = pydantic.Field()
+ """
+ AWS Polly voice name
+ """
+
+ language: str = pydantic.Field()
+ """
+ Language code to use, e.g. 'en-US'. Corresponds to the `language_code` parameter in the AWS Polly API
+ """
+
+ language_code: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Deprecated. Use the `language` field instead.
+ """
+
+ engine: AgentV1SettingsAgentSpeakOneItemProviderAwsPollyEngine
+ credentials: AgentV1SettingsAgentSpeakOneItemProviderAwsPollyCredentials
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_one_item_provider_aws_polly_credentials.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_one_item_provider_aws_polly_credentials.py
new file mode 100644
index 00000000..a2ff5808
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_one_item_provider_aws_polly_credentials.py
@@ -0,0 +1,29 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+from .agent_v1settings_agent_speak_one_item_provider_aws_polly_credentials_type import (
+ AgentV1SettingsAgentSpeakOneItemProviderAwsPollyCredentialsType,
+)
+
+
+class AgentV1SettingsAgentSpeakOneItemProviderAwsPollyCredentials(UniversalBaseModel):
+ type: AgentV1SettingsAgentSpeakOneItemProviderAwsPollyCredentialsType
+ region: str
+ access_key_id: str
+ secret_access_key: str
+ session_token: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Required for STS only
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_one_item_provider_aws_polly_credentials_type.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_one_item_provider_aws_polly_credentials_type.py
new file mode 100644
index 00000000..3cd1c64f
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_one_item_provider_aws_polly_credentials_type.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+AgentV1SettingsAgentSpeakOneItemProviderAwsPollyCredentialsType = typing.Union[typing.Literal["sts", "iam"], typing.Any]
diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_one_item_provider_aws_polly_engine.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_one_item_provider_aws_polly_engine.py
new file mode 100644
index 00000000..7313bd82
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_one_item_provider_aws_polly_engine.py
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+AgentV1SettingsAgentSpeakOneItemProviderAwsPollyEngine = typing.Union[
+ typing.Literal["generative", "long-form", "standard", "neural"], typing.Any
+]
diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_one_item_provider_aws_polly_voice.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_one_item_provider_aws_polly_voice.py
new file mode 100644
index 00000000..ad77ee0f
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_one_item_provider_aws_polly_voice.py
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+AgentV1SettingsAgentSpeakOneItemProviderAwsPollyVoice = typing.Union[
+ typing.Literal["Matthew", "Joanna", "Amy", "Emma", "Brian", "Arthur", "Aria", "Ayanda"], typing.Any
+]
diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_one_item_provider_cartesia_model_id.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_one_item_provider_cartesia_model_id.py
new file mode 100644
index 00000000..8d062938
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_one_item_provider_cartesia_model_id.py
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+AgentV1SettingsAgentSpeakOneItemProviderCartesiaModelId = typing.Union[
+ typing.Literal["sonic-2", "sonic-multilingual"], typing.Any
+]
diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_one_item_provider_cartesia_voice.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_one_item_provider_cartesia_voice.py
new file mode 100644
index 00000000..58197431
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_one_item_provider_cartesia_voice.py
@@ -0,0 +1,27 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+
+
+class AgentV1SettingsAgentSpeakOneItemProviderCartesiaVoice(UniversalBaseModel):
+ mode: str = pydantic.Field()
+ """
+ Cartesia voice mode
+ """
+
+ id: str = pydantic.Field()
+ """
+ Cartesia voice ID
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_one_item_provider_deepgram_model.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_one_item_provider_deepgram_model.py
new file mode 100644
index 00000000..2c896d8d
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_one_item_provider_deepgram_model.py
@@ -0,0 +1,72 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+AgentV1SettingsAgentSpeakOneItemProviderDeepgramModel = typing.Union[
+ typing.Literal[
+ "aura-asteria-en",
+ "aura-luna-en",
+ "aura-stella-en",
+ "aura-athena-en",
+ "aura-hera-en",
+ "aura-orion-en",
+ "aura-arcas-en",
+ "aura-perseus-en",
+ "aura-angus-en",
+ "aura-orpheus-en",
+ "aura-helios-en",
+ "aura-zeus-en",
+ "aura-2-amalthea-en",
+ "aura-2-andromeda-en",
+ "aura-2-apollo-en",
+ "aura-2-arcas-en",
+ "aura-2-aries-en",
+ "aura-2-asteria-en",
+ "aura-2-athena-en",
+ "aura-2-atlas-en",
+ "aura-2-aurora-en",
+ "aura-2-callista-en",
+ "aura-2-cora-en",
+ "aura-2-cordelia-en",
+ "aura-2-delia-en",
+ "aura-2-draco-en",
+ "aura-2-electra-en",
+ "aura-2-harmonia-en",
+ "aura-2-helena-en",
+ "aura-2-hera-en",
+ "aura-2-hermes-en",
+ "aura-2-hyperion-en",
+ "aura-2-iris-en",
+ "aura-2-janus-en",
+ "aura-2-juno-en",
+ "aura-2-jupiter-en",
+ "aura-2-luna-en",
+ "aura-2-mars-en",
+ "aura-2-minerva-en",
+ "aura-2-neptune-en",
+ "aura-2-odysseus-en",
+ "aura-2-ophelia-en",
+ "aura-2-orion-en",
+ "aura-2-orpheus-en",
+ "aura-2-pandora-en",
+ "aura-2-phoebe-en",
+ "aura-2-pluto-en",
+ "aura-2-saturn-en",
+ "aura-2-selene-en",
+ "aura-2-thalia-en",
+ "aura-2-theia-en",
+ "aura-2-vesta-en",
+ "aura-2-zeus-en",
+ "aura-2-sirio-es",
+ "aura-2-nestor-es",
+ "aura-2-carina-es",
+ "aura-2-celeste-es",
+ "aura-2-alvaro-es",
+ "aura-2-diana-es",
+ "aura-2-aquila-es",
+ "aura-2-selena-es",
+ "aura-2-estrella-es",
+ "aura-2-javier-es",
+ ],
+ typing.Any,
+]
diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_one_item_provider_eleven_labs_model_id.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_one_item_provider_eleven_labs_model_id.py
new file mode 100644
index 00000000..f2bbee02
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_one_item_provider_eleven_labs_model_id.py
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+AgentV1SettingsAgentSpeakOneItemProviderElevenLabsModelId = typing.Union[
+ typing.Literal["eleven_turbo_v2_5", "eleven_monolingual_v1", "eleven_multilingual_v2"], typing.Any
+]
diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_one_item_provider_open_ai_model.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_one_item_provider_open_ai_model.py
new file mode 100644
index 00000000..21ea5697
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_one_item_provider_open_ai_model.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+AgentV1SettingsAgentSpeakOneItemProviderOpenAiModel = typing.Union[typing.Literal["tts-1", "tts-1-hd"], typing.Any]
diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_one_item_provider_open_ai_voice.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_one_item_provider_open_ai_voice.py
new file mode 100644
index 00000000..7e4bc122
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_one_item_provider_open_ai_voice.py
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+AgentV1SettingsAgentSpeakOneItemProviderOpenAiVoice = typing.Union[
+ typing.Literal["alloy", "echo", "fable", "onyx", "nova", "shimmer"], typing.Any
+]
diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_think.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_think.py
new file mode 100644
index 00000000..7247b0ea
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1settings_agent_think.py
@@ -0,0 +1,34 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+from .agent_v1settings_agent_think_context_length import AgentV1SettingsAgentThinkContextLength
+from .agent_v1settings_agent_think_endpoint import AgentV1SettingsAgentThinkEndpoint
+from .agent_v1settings_agent_think_functions_item import AgentV1SettingsAgentThinkFunctionsItem
+from .agent_v1settings_agent_think_provider import AgentV1SettingsAgentThinkProvider
+
+
+class AgentV1SettingsAgentThink(UniversalBaseModel):
+ provider: AgentV1SettingsAgentThinkProvider
+ endpoint: typing.Optional[AgentV1SettingsAgentThinkEndpoint] = pydantic.Field(default=None)
+ """
+ Optional for non-Deepgram LLM providers. When present, must include a url field and a headers object.
+ """
+
+ functions: typing.Optional[typing.List[AgentV1SettingsAgentThinkFunctionsItem]] = None
+ prompt: typing.Optional[str] = None
+ context_length: typing.Optional[AgentV1SettingsAgentThinkContextLength] = pydantic.Field(default=None)
+ """
+ Specifies the number of characters retained in context between user messages, agent responses, and function calls. This setting is only configurable when a custom think endpoint is used
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_think_context_length.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_think_context_length.py
new file mode 100644
index 00000000..daac7703
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1settings_agent_think_context_length.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+AgentV1SettingsAgentThinkContextLength = typing.Union[typing.Literal["max"], float]
diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_think_endpoint.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_think_endpoint.py
new file mode 100644
index 00000000..1e17900b
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1settings_agent_think_endpoint.py
@@ -0,0 +1,31 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+
+
+class AgentV1SettingsAgentThinkEndpoint(UniversalBaseModel):
+ """
+ Optional for non-Deepgram LLM providers. When present, must include a url field and a headers object.
+ """
+
+ url: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Custom LLM endpoint URL
+ """
+
+ headers: typing.Optional[typing.Dict[str, str]] = pydantic.Field(default=None)
+ """
+ Custom headers for the endpoint
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_think_functions_item.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_think_functions_item.py
new file mode 100644
index 00000000..05a77bba
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1settings_agent_think_functions_item.py
@@ -0,0 +1,38 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+from .agent_v1settings_agent_think_functions_item_endpoint import AgentV1SettingsAgentThinkFunctionsItemEndpoint
+
+
+class AgentV1SettingsAgentThinkFunctionsItem(UniversalBaseModel):
+ name: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Function name
+ """
+
+ description: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Function description
+ """
+
+ parameters: typing.Optional[typing.Dict[str, typing.Any]] = pydantic.Field(default=None)
+ """
+ Function parameters
+ """
+
+ endpoint: typing.Optional[AgentV1SettingsAgentThinkFunctionsItemEndpoint] = pydantic.Field(default=None)
+ """
+ The function endpoint to call. If not provided, the function is called client-side
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_think_functions_item_endpoint.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_think_functions_item_endpoint.py
new file mode 100644
index 00000000..e8e48f39
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1settings_agent_think_functions_item_endpoint.py
@@ -0,0 +1,33 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+
+
+class AgentV1SettingsAgentThinkFunctionsItemEndpoint(UniversalBaseModel):
+ """
+ The function endpoint to call. If not provided, the function is called client-side
+ """
+
+ url: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Endpoint URL
+ """
+
+ method: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ HTTP method
+ """
+
+ headers: typing.Optional[typing.Dict[str, str]] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
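
Functions are registered on the think model via `AgentV1SettingsAgentThinkFunctionsItem`; supplying an endpoint makes the call server-side, while omitting it keeps execution client-side (handled via `FunctionCallRequest`/`FunctionCallResponse`). A sketch with a hypothetical function name, schema, and URL, assuming the module paths mirror the file layout:

    from deepgram.agent.v1.types.agent_v1settings_agent_think_functions_item import (
        AgentV1SettingsAgentThinkFunctionsItem,
    )
    from deepgram.agent.v1.types.agent_v1settings_agent_think_functions_item_endpoint import (
        AgentV1SettingsAgentThinkFunctionsItemEndpoint,
    )

    # Server-side function: the agent service calls this endpoint itself.
    get_weather = AgentV1SettingsAgentThinkFunctionsItem(
        name="get_weather",  # hypothetical function name
        description="Look up the current weather for a location",
        parameters={
            "type": "object",
            "properties": {"location": {"type": "string"}},
            "required": ["location"],
        },
        endpoint=AgentV1SettingsAgentThinkFunctionsItemEndpoint(
            url="https://example.com/weather",  # placeholder URL
            method="post",
            headers={"authorization": "Bearer ..."},  # placeholder header
        ),
    )

    # Omitting `endpoint` makes the function client-side: the server sends a
    # FunctionCallRequest and expects a FunctionCallResponse over the socket.
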
diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_think_provider.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_think_provider.py
new file mode 100644
index 00000000..44fe4418
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1settings_agent_think_provider.py
@@ -0,0 +1,113 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from __future__ import annotations
+
+import typing
+
+import pydantic
+import typing_extensions
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+from .agent_v1settings_agent_speak_one_item_provider_open_ai_model import (
+ AgentV1SettingsAgentSpeakOneItemProviderOpenAiModel,
+)
+from .agent_v1settings_agent_speak_one_item_provider_open_ai_voice import (
+ AgentV1SettingsAgentSpeakOneItemProviderOpenAiVoice,
+)
+from .agent_v1settings_agent_think_provider_anthropic_model import AgentV1SettingsAgentThinkProviderAnthropicModel
+from .agent_v1settings_agent_think_provider_aws_bedrock_credentials import (
+ AgentV1SettingsAgentThinkProviderAwsBedrockCredentials,
+)
+from .agent_v1settings_agent_think_provider_aws_bedrock_model import AgentV1SettingsAgentThinkProviderAwsBedrockModel
+from .agent_v1settings_agent_think_provider_google_model import AgentV1SettingsAgentThinkProviderGoogleModel
+
+
+class AgentV1SettingsAgentThinkProvider_OpenAi(UniversalBaseModel):
+ type: typing.Literal["open_ai"] = "open_ai"
+ version: typing.Optional[typing.Literal["v1"]] = None
+ model: AgentV1SettingsAgentSpeakOneItemProviderOpenAiModel
+ voice: AgentV1SettingsAgentSpeakOneItemProviderOpenAiVoice
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
+
+
+class AgentV1SettingsAgentThinkProvider_AwsBedrock(UniversalBaseModel):
+ type: typing.Literal["aws_bedrock"] = "aws_bedrock"
+ model: AgentV1SettingsAgentThinkProviderAwsBedrockModel
+ temperature: typing.Optional[float] = None
+ credentials: typing.Optional[AgentV1SettingsAgentThinkProviderAwsBedrockCredentials] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
+
+
+class AgentV1SettingsAgentThinkProvider_Anthropic(UniversalBaseModel):
+ type: typing.Literal["anthropic"] = "anthropic"
+ version: typing.Optional[typing.Literal["v1"]] = None
+ model: AgentV1SettingsAgentThinkProviderAnthropicModel
+ temperature: typing.Optional[float] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
+
+
+class AgentV1SettingsAgentThinkProvider_Google(UniversalBaseModel):
+ type: typing.Literal["google"] = "google"
+ version: typing.Optional[typing.Literal["v1beta"]] = None
+ model: AgentV1SettingsAgentThinkProviderGoogleModel
+ temperature: typing.Optional[float] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
+
+
+class AgentV1SettingsAgentThinkProvider_Groq(UniversalBaseModel):
+ type: typing.Literal["groq"] = "groq"
+ version: typing.Optional[typing.Literal["v1"]] = None
+ model: typing.Literal["openai/gpt-oss-20b"] = "openai/gpt-oss-20b"
+ temperature: typing.Optional[float] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
+
+
+AgentV1SettingsAgentThinkProvider = typing_extensions.Annotated[
+ typing.Union[
+ AgentV1SettingsAgentThinkProvider_OpenAi,
+ AgentV1SettingsAgentThinkProvider_AwsBedrock,
+ AgentV1SettingsAgentThinkProvider_Anthropic,
+ AgentV1SettingsAgentThinkProvider_Google,
+ AgentV1SettingsAgentThinkProvider_Groq,
+ ],
+ pydantic.Field(discriminator="type"),
+]
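The classes above form a discriminated union keyed on the `type` field. As a minimal sketch (assuming the package is importable from this branch and Pydantic v2 is installed), a raw settings payload resolves to the matching variant like this:

    import pydantic

    from deepgram.agent.v1.types.agent_v1settings_agent_think_provider import (
        AgentV1SettingsAgentThinkProvider,
        AgentV1SettingsAgentThinkProvider_Anthropic,
    )

    # TypeAdapter dispatches on the "type" discriminator declared in the Annotated union.
    adapter = pydantic.TypeAdapter(AgentV1SettingsAgentThinkProvider)
    provider = adapter.validate_python(
        {"type": "anthropic", "model": "claude-3-5-haiku-latest", "temperature": 0.3}
    )
    assert isinstance(provider, AgentV1SettingsAgentThinkProvider_Anthropic)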
diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_think_provider_anthropic_model.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_think_provider_anthropic_model.py
new file mode 100644
index 00000000..5fdf5752
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1settings_agent_think_provider_anthropic_model.py
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+AgentV1SettingsAgentThinkProviderAnthropicModel = typing.Union[
+ typing.Literal["claude-3-5-haiku-latest", "claude-sonnet-4-20250514"], typing.Any
+]
diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_think_provider_aws_bedrock.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_think_provider_aws_bedrock.py
new file mode 100644
index 00000000..d83d33cc
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1settings_agent_think_provider_aws_bedrock.py
@@ -0,0 +1,36 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+from .agent_v1settings_agent_think_provider_aws_bedrock_credentials import (
+ AgentV1SettingsAgentThinkProviderAwsBedrockCredentials,
+)
+from .agent_v1settings_agent_think_provider_aws_bedrock_model import AgentV1SettingsAgentThinkProviderAwsBedrockModel
+
+
+class AgentV1SettingsAgentThinkProviderAwsBedrock(UniversalBaseModel):
+ model: AgentV1SettingsAgentThinkProviderAwsBedrockModel = pydantic.Field()
+ """
+ AWS Bedrock model to use
+ """
+
+ temperature: typing.Optional[float] = pydantic.Field(default=None)
+ """
+ AWS Bedrock temperature (0-2)
+ """
+
+ credentials: typing.Optional[AgentV1SettingsAgentThinkProviderAwsBedrockCredentials] = pydantic.Field(default=None)
+ """
+ AWS credentials type (STS short-lived or IAM long-lived)
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_think_provider_aws_bedrock_credentials.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_think_provider_aws_bedrock_credentials.py
new file mode 100644
index 00000000..a990a383
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1settings_agent_think_provider_aws_bedrock_credentials.py
@@ -0,0 +1,49 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+from .agent_v1settings_agent_think_provider_aws_bedrock_credentials_type import (
+ AgentV1SettingsAgentThinkProviderAwsBedrockCredentialsType,
+)
+
+
+class AgentV1SettingsAgentThinkProviderAwsBedrockCredentials(UniversalBaseModel):
+ """
+ AWS credentials type (STS short-lived or IAM long-lived)
+ """
+
+ type: typing.Optional[AgentV1SettingsAgentThinkProviderAwsBedrockCredentialsType] = pydantic.Field(default=None)
+ """
+ AWS credentials type (STS short-lived or IAM long-lived)
+ """
+
+ region: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ AWS region
+ """
+
+ access_key_id: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ AWS access key
+ """
+
+ secret_access_key: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ AWS secret access key
+ """
+
+ session_token: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ AWS session token (required for STS only)
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_think_provider_aws_bedrock_credentials_type.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_think_provider_aws_bedrock_credentials_type.py
new file mode 100644
index 00000000..4e684119
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1settings_agent_think_provider_aws_bedrock_credentials_type.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+AgentV1SettingsAgentThinkProviderAwsBedrockCredentialsType = typing.Union[typing.Literal["sts", "iam"], typing.Any]
diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_think_provider_aws_bedrock_model.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_think_provider_aws_bedrock_model.py
new file mode 100644
index 00000000..6f65754b
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1settings_agent_think_provider_aws_bedrock_model.py
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+AgentV1SettingsAgentThinkProviderAwsBedrockModel = typing.Union[
+ typing.Literal["anthropic/claude-3-5-sonnet-20240620-v1:0", "anthropic/claude-3-5-haiku-20240307-v1:0"], typing.Any
+]
diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_think_provider_google_model.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_think_provider_google_model.py
new file mode 100644
index 00000000..5a73f7cb
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1settings_agent_think_provider_google_model.py
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+AgentV1SettingsAgentThinkProviderGoogleModel = typing.Union[
+ typing.Literal["gemini-2.0-flash", "gemini-2.0-flash-lite", "gemini-2.5-flash"], typing.Any
+]
diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_think_provider_open_ai_model.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_think_provider_open_ai_model.py
new file mode 100644
index 00000000..d124ae22
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1settings_agent_think_provider_open_ai_model.py
@@ -0,0 +1,10 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+AgentV1SettingsAgentThinkProviderOpenAiModel = typing.Union[
+ typing.Literal[
+ "gpt-5", "gpt-5-mini", "gpt-5-nano", "gpt-4.1", "gpt-4.1-mini", "gpt-4.1-nano", "gpt-4o", "gpt-4o-mini"
+ ],
+ typing.Any,
+]
diff --git a/src/deepgram/agent/v1/types/agent_v1settings_applied.py b/src/deepgram/agent/v1/types/agent_v1settings_applied.py
new file mode 100644
index 00000000..a17ad602
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1settings_applied.py
@@ -0,0 +1,22 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+
+
+class AgentV1SettingsApplied(UniversalBaseModel):
+ type: typing.Literal["SettingsApplied"] = pydantic.Field(default="SettingsApplied")
+ """
+ Message type identifier for settings applied confirmation
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/agent/v1/types/agent_v1settings_audio.py b/src/deepgram/agent/v1/types/agent_v1settings_audio.py
new file mode 100644
index 00000000..29350538
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1settings_audio.py
@@ -0,0 +1,29 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+from .agent_v1settings_audio_input import AgentV1SettingsAudioInput
+from .agent_v1settings_audio_output import AgentV1SettingsAudioOutput
+
+
+class AgentV1SettingsAudio(UniversalBaseModel):
+ input: typing.Optional[AgentV1SettingsAudioInput] = pydantic.Field(default=None)
+ """
+ Audio input configuration settings. If omitted, defaults to encoding=linear16 and sample_rate=24000. Higher sample rates like 44100 Hz provide better audio quality.
+ """
+
+ output: typing.Optional[AgentV1SettingsAudioOutput] = pydantic.Field(default=None)
+ """
+ Audio output configuration settings
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/agent/v1/types/agent_v1settings_audio_input.py b/src/deepgram/agent/v1/types/agent_v1settings_audio_input.py
new file mode 100644
index 00000000..8b9cae76
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1settings_audio_input.py
@@ -0,0 +1,32 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+from .agent_v1settings_audio_input_encoding import AgentV1SettingsAudioInputEncoding
+
+
+class AgentV1SettingsAudioInput(UniversalBaseModel):
+ """
+ Audio input configuration settings. If omitted, defaults to encoding=linear16 and sample_rate=24000. Higher sample rates like 44100 Hz provide better audio quality.
+ """
+
+ encoding: AgentV1SettingsAudioInputEncoding = pydantic.Field()
+ """
+ Audio encoding format
+ """
+
+ sample_rate: float = pydantic.Field()
+ """
+ Sample rate in Hz. Common values are 16000, 24000, 44100, 48000
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/agent/v1/types/agent_v1settings_audio_input_encoding.py b/src/deepgram/agent/v1/types/agent_v1settings_audio_input_encoding.py
new file mode 100644
index 00000000..232072d3
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1settings_audio_input_encoding.py
@@ -0,0 +1,10 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+AgentV1SettingsAudioInputEncoding = typing.Union[
+ typing.Literal[
+ "linear16", "linear32", "flac", "alaw", "mulaw", "amr-nb", "amr-wb", "opus", "ogg-opus", "speex", "g729"
+ ],
+ typing.Any,
+]
diff --git a/src/deepgram/agent/v1/types/agent_v1settings_audio_output.py b/src/deepgram/agent/v1/types/agent_v1settings_audio_output.py
new file mode 100644
index 00000000..e0c0efc8
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1settings_audio_output.py
@@ -0,0 +1,42 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+from .agent_v1settings_audio_output_encoding import AgentV1SettingsAudioOutputEncoding
+
+
+class AgentV1SettingsAudioOutput(UniversalBaseModel):
+ """
+ Audio output configuration settings
+ """
+
+ encoding: typing.Optional[AgentV1SettingsAudioOutputEncoding] = pydantic.Field(default=None)
+ """
+ Audio encoding format for streaming TTS output
+ """
+
+ sample_rate: typing.Optional[float] = pydantic.Field(default=None)
+ """
+ Sample rate in Hz
+ """
+
+ bitrate: typing.Optional[float] = pydantic.Field(default=None)
+ """
+ Audio bitrate in bits per second
+ """
+
+ container: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Audio container format. If omitted, defaults to 'none'
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/agent/v1/types/agent_v1settings_audio_output_encoding.py b/src/deepgram/agent/v1/types/agent_v1settings_audio_output_encoding.py
new file mode 100644
index 00000000..f4b7ca42
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1settings_audio_output_encoding.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+AgentV1SettingsAudioOutputEncoding = typing.Union[typing.Literal["linear16", "mulaw", "alaw"], typing.Any]
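Taken together, the audio settings types above compose into a single `AgentV1SettingsAudio` object; a hedged construction sketch (the encoding and sample-rate values are illustrative examples drawn from the docstrings, not SDK defaults):

    from deepgram.agent.v1.types.agent_v1settings_audio import AgentV1SettingsAudio
    from deepgram.agent.v1.types.agent_v1settings_audio_input import AgentV1SettingsAudioInput
    from deepgram.agent.v1.types.agent_v1settings_audio_output import AgentV1SettingsAudioOutput

    # Input requires encoding and sample_rate; all output fields are optional.
    audio = AgentV1SettingsAudio(
        input=AgentV1SettingsAudioInput(encoding="linear16", sample_rate=16000),
        output=AgentV1SettingsAudioOutput(encoding="linear16", sample_rate=24000, container="none"),
    )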
diff --git a/src/deepgram/agent/v1/types/agent_v1settings_flags.py b/src/deepgram/agent/v1/types/agent_v1settings_flags.py
new file mode 100644
index 00000000..db3e96cd
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1settings_flags.py
@@ -0,0 +1,22 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+
+
+class AgentV1SettingsFlags(UniversalBaseModel):
+ history: typing.Optional[bool] = pydantic.Field(default=None)
+ """
+ Enable or disable history message reporting
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/agent/v1/types/agent_v1speak_updated.py b/src/deepgram/agent/v1/types/agent_v1speak_updated.py
new file mode 100644
index 00000000..aeba09d8
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1speak_updated.py
@@ -0,0 +1,22 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+
+
+class AgentV1SpeakUpdated(UniversalBaseModel):
+ type: typing.Literal["SpeakUpdated"] = pydantic.Field(default="SpeakUpdated")
+ """
+ Message type identifier for speak update confirmation
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/agent/v1/types/agent_v1update_prompt.py b/src/deepgram/agent/v1/types/agent_v1update_prompt.py
new file mode 100644
index 00000000..a479b01b
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1update_prompt.py
@@ -0,0 +1,27 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+
+
+class AgentV1UpdatePrompt(UniversalBaseModel):
+ type: typing.Literal["UpdatePrompt"] = pydantic.Field(default="UpdatePrompt")
+ """
+ Message type identifier for prompt update request
+ """
+
+ prompt: str = pydantic.Field()
+ """
+ The new system prompt to be used by the agent
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/agent/v1/types/agent_v1update_speak.py b/src/deepgram/agent/v1/types/agent_v1update_speak.py
new file mode 100644
index 00000000..f776f945
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1update_speak.py
@@ -0,0 +1,28 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+from .agent_v1update_speak_speak import AgentV1UpdateSpeakSpeak
+
+
+class AgentV1UpdateSpeak(UniversalBaseModel):
+ type: typing.Literal["UpdateSpeak"] = pydantic.Field(default="UpdateSpeak")
+ """
+ Message type identifier for updating the speak model
+ """
+
+ speak: AgentV1UpdateSpeakSpeak = pydantic.Field()
+ """
+ Configuration for the speak model. Optional; defaults to the latest Deepgram TTS model
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/agent/v1/types/agent_v1update_speak_speak.py b/src/deepgram/agent/v1/types/agent_v1update_speak_speak.py
new file mode 100644
index 00000000..3ad7b53a
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1update_speak_speak.py
@@ -0,0 +1,30 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+from .agent_v1update_speak_speak_endpoint import AgentV1UpdateSpeakSpeakEndpoint
+from .agent_v1update_speak_speak_provider import AgentV1UpdateSpeakSpeakProvider
+
+
+class AgentV1UpdateSpeakSpeak(UniversalBaseModel):
+ """
+ Configuration for the speak model. Optional; defaults to the latest Deepgram TTS model
+ """
+
+ provider: AgentV1UpdateSpeakSpeakProvider
+ endpoint: typing.Optional[AgentV1UpdateSpeakSpeakEndpoint] = pydantic.Field(default=None)
+ """
+ Optional if provider is Deepgram. Required for non-Deepgram TTS providers.
+ When present, it must include a url field and a headers object. Valid schemes are https and wss, with wss supported only for Eleven Labs.
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/agent/v1/types/agent_v1update_speak_speak_endpoint.py b/src/deepgram/agent/v1/types/agent_v1update_speak_speak_endpoint.py
new file mode 100644
index 00000000..2c65c454
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1update_speak_speak_endpoint.py
@@ -0,0 +1,30 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+
+
+class AgentV1UpdateSpeakSpeakEndpoint(UniversalBaseModel):
+ """
+ Optional if provider is Deepgram. Required for non-Deepgram TTS providers.
+ When present, it must include a url field and a headers object. Valid schemes are https and wss, with wss supported only for Eleven Labs.
+ """
+
+ url: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Custom TTS endpoint URL. Cannot contain `output_format` or `model_id` query
+ parameters when the provider is Eleven Labs.
+ """
+
+ headers: typing.Optional[typing.Dict[str, str]] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/agent/v1/types/agent_v1update_speak_speak_provider.py b/src/deepgram/agent/v1/types/agent_v1update_speak_speak_provider.py
new file mode 100644
index 00000000..606ae9ce
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1update_speak_speak_provider.py
@@ -0,0 +1,115 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from __future__ import annotations
+
+import typing
+
+import pydantic
+import typing_extensions
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+from .agent_v1update_speak_speak_provider_aws_polly_credentials import (
+ AgentV1UpdateSpeakSpeakProviderAwsPollyCredentials,
+)
+from .agent_v1update_speak_speak_provider_aws_polly_engine import AgentV1UpdateSpeakSpeakProviderAwsPollyEngine
+from .agent_v1update_speak_speak_provider_aws_polly_voice import AgentV1UpdateSpeakSpeakProviderAwsPollyVoice
+from .agent_v1update_speak_speak_provider_cartesia_model_id import AgentV1UpdateSpeakSpeakProviderCartesiaModelId
+from .agent_v1update_speak_speak_provider_cartesia_voice import AgentV1UpdateSpeakSpeakProviderCartesiaVoice
+from .agent_v1update_speak_speak_provider_deepgram_model import AgentV1UpdateSpeakSpeakProviderDeepgramModel
+from .agent_v1update_speak_speak_provider_eleven_labs_model_id import AgentV1UpdateSpeakSpeakProviderElevenLabsModelId
+from .agent_v1update_speak_speak_provider_open_ai_model import AgentV1UpdateSpeakSpeakProviderOpenAiModel
+from .agent_v1update_speak_speak_provider_open_ai_voice import AgentV1UpdateSpeakSpeakProviderOpenAiVoice
+
+
+class AgentV1UpdateSpeakSpeakProvider_Deepgram(UniversalBaseModel):
+ type: typing.Literal["deepgram"] = "deepgram"
+ version: typing.Optional[typing.Literal["v1"]] = None
+ model: AgentV1UpdateSpeakSpeakProviderDeepgramModel
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
+
+
+class AgentV1UpdateSpeakSpeakProvider_ElevenLabs(UniversalBaseModel):
+ type: typing.Literal["eleven_labs"] = "eleven_labs"
+ version: typing.Optional[typing.Literal["v1"]] = None
+ model_id: AgentV1UpdateSpeakSpeakProviderElevenLabsModelId
+ language: typing.Optional[str] = None
+ language_code: typing.Optional[str] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
+
+
+class AgentV1UpdateSpeakSpeakProvider_Cartesia(UniversalBaseModel):
+ type: typing.Literal["cartesia"] = "cartesia"
+ version: typing.Optional[typing.Literal["2025-03-17"]] = None
+ model_id: AgentV1UpdateSpeakSpeakProviderCartesiaModelId
+ voice: AgentV1UpdateSpeakSpeakProviderCartesiaVoice
+ language: typing.Optional[str] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
+
+
+class AgentV1UpdateSpeakSpeakProvider_OpenAi(UniversalBaseModel):
+ type: typing.Literal["open_ai"] = "open_ai"
+ version: typing.Optional[typing.Literal["v1"]] = None
+ model: AgentV1UpdateSpeakSpeakProviderOpenAiModel
+ voice: AgentV1UpdateSpeakSpeakProviderOpenAiVoice
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
+
+
+class AgentV1UpdateSpeakSpeakProvider_AwsPolly(UniversalBaseModel):
+ type: typing.Literal["aws_polly"] = "aws_polly"
+ voice: AgentV1UpdateSpeakSpeakProviderAwsPollyVoice
+ language: str
+ language_code: typing.Optional[str] = None
+ engine: AgentV1UpdateSpeakSpeakProviderAwsPollyEngine
+ credentials: AgentV1UpdateSpeakSpeakProviderAwsPollyCredentials
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
+
+
+AgentV1UpdateSpeakSpeakProvider = typing_extensions.Annotated[
+ typing.Union[
+ AgentV1UpdateSpeakSpeakProvider_Deepgram,
+ AgentV1UpdateSpeakSpeakProvider_ElevenLabs,
+ AgentV1UpdateSpeakSpeakProvider_Cartesia,
+ AgentV1UpdateSpeakSpeakProvider_OpenAi,
+ AgentV1UpdateSpeakSpeakProvider_AwsPolly,
+ ],
+ pydantic.Field(discriminator="type"),
+]
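These provider variants plug into the `AgentV1UpdateSpeak` message defined earlier in this diff. A sketch of composing and serializing one (Pydantic v2 shown; under v1 the models expose `.json()` rather than `.model_dump_json()`):

    from deepgram.agent.v1.types.agent_v1update_speak import AgentV1UpdateSpeak
    from deepgram.agent.v1.types.agent_v1update_speak_speak import AgentV1UpdateSpeakSpeak
    from deepgram.agent.v1.types.agent_v1update_speak_speak_provider import (
        AgentV1UpdateSpeakSpeakProvider_Deepgram,
    )

    # "type" defaults on both the message ("UpdateSpeak") and the provider ("deepgram"),
    # so only the required model needs to be supplied.
    message = AgentV1UpdateSpeak(
        speak=AgentV1UpdateSpeakSpeak(
            provider=AgentV1UpdateSpeakSpeakProvider_Deepgram(model="aura-2-thalia-en"),
        ),
    )
    payload = message.model_dump_json()  # JSON text for the agent websocket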
diff --git a/src/deepgram/agent/v1/types/agent_v1update_speak_speak_provider_aws_polly.py b/src/deepgram/agent/v1/types/agent_v1update_speak_speak_provider_aws_polly.py
new file mode 100644
index 00000000..73970576
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1update_speak_speak_provider_aws_polly.py
@@ -0,0 +1,40 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+from .agent_v1update_speak_speak_provider_aws_polly_credentials import (
+ AgentV1UpdateSpeakSpeakProviderAwsPollyCredentials,
+)
+from .agent_v1update_speak_speak_provider_aws_polly_engine import AgentV1UpdateSpeakSpeakProviderAwsPollyEngine
+from .agent_v1update_speak_speak_provider_aws_polly_voice import AgentV1UpdateSpeakSpeakProviderAwsPollyVoice
+
+
+class AgentV1UpdateSpeakSpeakProviderAwsPolly(UniversalBaseModel):
+ voice: AgentV1UpdateSpeakSpeakProviderAwsPollyVoice = pydantic.Field()
+ """
+ AWS Polly voice name
+ """
+
+ language: str = pydantic.Field()
+ """
+ Language code to use, e.g. 'en-US'. Corresponds to the `language_code` parameter in the AWS Polly API
+ """
+
+ language_code: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Use the `language` field instead.
+ """
+
+ engine: AgentV1UpdateSpeakSpeakProviderAwsPollyEngine
+ credentials: AgentV1UpdateSpeakSpeakProviderAwsPollyCredentials
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/agent/v1/types/agent_v1update_speak_speak_provider_aws_polly_credentials.py b/src/deepgram/agent/v1/types/agent_v1update_speak_speak_provider_aws_polly_credentials.py
new file mode 100644
index 00000000..0ec682b2
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1update_speak_speak_provider_aws_polly_credentials.py
@@ -0,0 +1,29 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+from .agent_v1update_speak_speak_provider_aws_polly_credentials_type import (
+ AgentV1UpdateSpeakSpeakProviderAwsPollyCredentialsType,
+)
+
+
+class AgentV1UpdateSpeakSpeakProviderAwsPollyCredentials(UniversalBaseModel):
+ type: AgentV1UpdateSpeakSpeakProviderAwsPollyCredentialsType
+ region: str
+ access_key_id: str
+ secret_access_key: str
+ session_token: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Required for STS only
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/agent/v1/types/agent_v1update_speak_speak_provider_aws_polly_credentials_type.py b/src/deepgram/agent/v1/types/agent_v1update_speak_speak_provider_aws_polly_credentials_type.py
new file mode 100644
index 00000000..984051d8
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1update_speak_speak_provider_aws_polly_credentials_type.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+AgentV1UpdateSpeakSpeakProviderAwsPollyCredentialsType = typing.Union[typing.Literal["sts", "iam"], typing.Any]
diff --git a/src/deepgram/agent/v1/types/agent_v1update_speak_speak_provider_aws_polly_engine.py b/src/deepgram/agent/v1/types/agent_v1update_speak_speak_provider_aws_polly_engine.py
new file mode 100644
index 00000000..2a641f24
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1update_speak_speak_provider_aws_polly_engine.py
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+AgentV1UpdateSpeakSpeakProviderAwsPollyEngine = typing.Union[
+ typing.Literal["generative", "long-form", "standard", "neural"], typing.Any
+]
diff --git a/src/deepgram/agent/v1/types/agent_v1update_speak_speak_provider_aws_polly_voice.py b/src/deepgram/agent/v1/types/agent_v1update_speak_speak_provider_aws_polly_voice.py
new file mode 100644
index 00000000..2be92987
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1update_speak_speak_provider_aws_polly_voice.py
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+AgentV1UpdateSpeakSpeakProviderAwsPollyVoice = typing.Union[
+ typing.Literal["Matthew", "Joanna", "Amy", "Emma", "Brian", "Arthur", "Aria", "Ayanda"], typing.Any
+]
diff --git a/src/deepgram/agent/v1/types/agent_v1update_speak_speak_provider_cartesia.py b/src/deepgram/agent/v1/types/agent_v1update_speak_speak_provider_cartesia.py
new file mode 100644
index 00000000..c52aa44f
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1update_speak_speak_provider_cartesia.py
@@ -0,0 +1,35 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+from .agent_v1update_speak_speak_provider_cartesia_model_id import AgentV1UpdateSpeakSpeakProviderCartesiaModelId
+from .agent_v1update_speak_speak_provider_cartesia_voice import AgentV1UpdateSpeakSpeakProviderCartesiaVoice
+
+
+class AgentV1UpdateSpeakSpeakProviderCartesia(UniversalBaseModel):
+ version: typing.Optional[typing.Literal["2025-03-17"]] = pydantic.Field(default=None)
+ """
+ The API version header for the Cartesia text-to-speech API
+ """
+
+ model_id: AgentV1UpdateSpeakSpeakProviderCartesiaModelId = pydantic.Field()
+ """
+ Cartesia model ID
+ """
+
+ voice: AgentV1UpdateSpeakSpeakProviderCartesiaVoice
+ language: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Cartesia language code
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/agent/v1/types/agent_v1update_speak_speak_provider_cartesia_model_id.py b/src/deepgram/agent/v1/types/agent_v1update_speak_speak_provider_cartesia_model_id.py
new file mode 100644
index 00000000..0cee24cd
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1update_speak_speak_provider_cartesia_model_id.py
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+AgentV1UpdateSpeakSpeakProviderCartesiaModelId = typing.Union[
+ typing.Literal["sonic-2", "sonic-multilingual"], typing.Any
+]
diff --git a/src/deepgram/agent/v1/types/agent_v1update_speak_speak_provider_cartesia_voice.py b/src/deepgram/agent/v1/types/agent_v1update_speak_speak_provider_cartesia_voice.py
new file mode 100644
index 00000000..2a6a918f
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1update_speak_speak_provider_cartesia_voice.py
@@ -0,0 +1,27 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+
+
+class AgentV1UpdateSpeakSpeakProviderCartesiaVoice(UniversalBaseModel):
+ mode: str = pydantic.Field()
+ """
+ Cartesia voice mode
+ """
+
+ id: str = pydantic.Field()
+ """
+ Cartesia voice ID
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/agent/v1/types/agent_v1update_speak_speak_provider_deepgram.py b/src/deepgram/agent/v1/types/agent_v1update_speak_speak_provider_deepgram.py
new file mode 100644
index 00000000..c6648566
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1update_speak_speak_provider_deepgram.py
@@ -0,0 +1,28 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+from .agent_v1update_speak_speak_provider_deepgram_model import AgentV1UpdateSpeakSpeakProviderDeepgramModel
+
+
+class AgentV1UpdateSpeakSpeakProviderDeepgram(UniversalBaseModel):
+ version: typing.Optional[typing.Literal["v1"]] = pydantic.Field(default=None)
+ """
+ The REST API version for the Deepgram text-to-speech API
+ """
+
+ model: AgentV1UpdateSpeakSpeakProviderDeepgramModel = pydantic.Field()
+ """
+ Deepgram TTS model
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/agent/v1/types/agent_v1update_speak_speak_provider_deepgram_model.py b/src/deepgram/agent/v1/types/agent_v1update_speak_speak_provider_deepgram_model.py
new file mode 100644
index 00000000..2e0a9ab9
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1update_speak_speak_provider_deepgram_model.py
@@ -0,0 +1,72 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+AgentV1UpdateSpeakSpeakProviderDeepgramModel = typing.Union[
+ typing.Literal[
+ "aura-asteria-en",
+ "aura-luna-en",
+ "aura-stella-en",
+ "aura-athena-en",
+ "aura-hera-en",
+ "aura-orion-en",
+ "aura-arcas-en",
+ "aura-perseus-en",
+ "aura-angus-en",
+ "aura-orpheus-en",
+ "aura-helios-en",
+ "aura-zeus-en",
+ "aura-2-amalthea-en",
+ "aura-2-andromeda-en",
+ "aura-2-apollo-en",
+ "aura-2-arcas-en",
+ "aura-2-aries-en",
+ "aura-2-asteria-en",
+ "aura-2-athena-en",
+ "aura-2-atlas-en",
+ "aura-2-aurora-en",
+ "aura-2-callista-en",
+ "aura-2-cora-en",
+ "aura-2-cordelia-en",
+ "aura-2-delia-en",
+ "aura-2-draco-en",
+ "aura-2-electra-en",
+ "aura-2-harmonia-en",
+ "aura-2-helena-en",
+ "aura-2-hera-en",
+ "aura-2-hermes-en",
+ "aura-2-hyperion-en",
+ "aura-2-iris-en",
+ "aura-2-janus-en",
+ "aura-2-juno-en",
+ "aura-2-jupiter-en",
+ "aura-2-luna-en",
+ "aura-2-mars-en",
+ "aura-2-minerva-en",
+ "aura-2-neptune-en",
+ "aura-2-odysseus-en",
+ "aura-2-ophelia-en",
+ "aura-2-orion-en",
+ "aura-2-orpheus-en",
+ "aura-2-pandora-en",
+ "aura-2-phoebe-en",
+ "aura-2-pluto-en",
+ "aura-2-saturn-en",
+ "aura-2-selene-en",
+ "aura-2-thalia-en",
+ "aura-2-theia-en",
+ "aura-2-vesta-en",
+ "aura-2-zeus-en",
+ "aura-2-sirio-es",
+ "aura-2-nestor-es",
+ "aura-2-carina-es",
+ "aura-2-celeste-es",
+ "aura-2-alvaro-es",
+ "aura-2-diana-es",
+ "aura-2-aquila-es",
+ "aura-2-selena-es",
+ "aura-2-estrella-es",
+ "aura-2-javier-es",
+ ],
+ typing.Any,
+]
diff --git a/src/deepgram/agent/v1/types/agent_v1update_speak_speak_provider_eleven_labs.py b/src/deepgram/agent/v1/types/agent_v1update_speak_speak_provider_eleven_labs.py
new file mode 100644
index 00000000..91f1edeb
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1update_speak_speak_provider_eleven_labs.py
@@ -0,0 +1,38 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+from .agent_v1update_speak_speak_provider_eleven_labs_model_id import AgentV1UpdateSpeakSpeakProviderElevenLabsModelId
+
+
+class AgentV1UpdateSpeakSpeakProviderElevenLabs(UniversalBaseModel):
+ version: typing.Optional[typing.Literal["v1"]] = pydantic.Field(default=None)
+ """
+ The REST API version for the ElevenLabs text-to-speech API
+ """
+
+ model_id: AgentV1UpdateSpeakSpeakProviderElevenLabsModelId = pydantic.Field()
+ """
+ Eleven Labs model ID
+ """
+
+ language: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Language code to use, e.g. 'en-US'. Corresponds to the `language_code` parameter in the ElevenLabs API
+ """
+
+ language_code: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Use the `language` field instead.
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/agent/v1/types/agent_v1update_speak_speak_provider_eleven_labs_model_id.py b/src/deepgram/agent/v1/types/agent_v1update_speak_speak_provider_eleven_labs_model_id.py
new file mode 100644
index 00000000..fdbba96c
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1update_speak_speak_provider_eleven_labs_model_id.py
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+AgentV1UpdateSpeakSpeakProviderElevenLabsModelId = typing.Union[
+ typing.Literal["eleven_turbo_v2_5", "eleven_monolingual_v1", "eleven_multilingual_v2"], typing.Any
+]
diff --git a/src/deepgram/agent/v1/types/agent_v1update_speak_speak_provider_open_ai.py b/src/deepgram/agent/v1/types/agent_v1update_speak_speak_provider_open_ai.py
new file mode 100644
index 00000000..c05301fa
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1update_speak_speak_provider_open_ai.py
@@ -0,0 +1,34 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+from .agent_v1update_speak_speak_provider_open_ai_model import AgentV1UpdateSpeakSpeakProviderOpenAiModel
+from .agent_v1update_speak_speak_provider_open_ai_voice import AgentV1UpdateSpeakSpeakProviderOpenAiVoice
+
+
+class AgentV1UpdateSpeakSpeakProviderOpenAi(UniversalBaseModel):
+ version: typing.Optional[typing.Literal["v1"]] = pydantic.Field(default=None)
+ """
+ The REST API version for the OpenAI text-to-speech API
+ """
+
+ model: AgentV1UpdateSpeakSpeakProviderOpenAiModel = pydantic.Field()
+ """
+ OpenAI TTS model
+ """
+
+ voice: AgentV1UpdateSpeakSpeakProviderOpenAiVoice = pydantic.Field()
+ """
+ OpenAI voice
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/agent/v1/types/agent_v1update_speak_speak_provider_open_ai_model.py b/src/deepgram/agent/v1/types/agent_v1update_speak_speak_provider_open_ai_model.py
new file mode 100644
index 00000000..94c2069a
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1update_speak_speak_provider_open_ai_model.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+AgentV1UpdateSpeakSpeakProviderOpenAiModel = typing.Union[typing.Literal["tts-1", "tts-1-hd"], typing.Any]
diff --git a/src/deepgram/agent/v1/types/agent_v1update_speak_speak_provider_open_ai_voice.py b/src/deepgram/agent/v1/types/agent_v1update_speak_speak_provider_open_ai_voice.py
new file mode 100644
index 00000000..bc5fdeb9
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1update_speak_speak_provider_open_ai_voice.py
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+AgentV1UpdateSpeakSpeakProviderOpenAiVoice = typing.Union[
+ typing.Literal["alloy", "echo", "fable", "onyx", "nova", "shimmer"], typing.Any
+]
diff --git a/src/deepgram/agent/v1/types/agent_v1user_started_speaking.py b/src/deepgram/agent/v1/types/agent_v1user_started_speaking.py
new file mode 100644
index 00000000..ac4d838a
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1user_started_speaking.py
@@ -0,0 +1,22 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+
+
+class AgentV1UserStartedSpeaking(UniversalBaseModel):
+ type: typing.Literal["UserStartedSpeaking"] = pydantic.Field(default="UserStartedSpeaking")
+ """
+ Message type identifier indicating that the user has begun speaking
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/agent/v1/types/agent_v1warning.py b/src/deepgram/agent/v1/types/agent_v1warning.py
new file mode 100644
index 00000000..cdfacea7
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1warning.py
@@ -0,0 +1,36 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+
+
+class AgentV1Warning(UniversalBaseModel):
+ """
+ Notifies the client of non-fatal errors or warnings
+ """
+
+ type: typing.Literal["Warning"] = pydantic.Field(default="Warning")
+ """
+ Message type identifier for warnings
+ """
+
+ description: str = pydantic.Field()
+ """
+ Description of the warning
+ """
+
+ code: str = pydantic.Field()
+ """
+ Warning code identifier
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/agent/v1/types/agent_v1welcome.py b/src/deepgram/agent/v1/types/agent_v1welcome.py
new file mode 100644
index 00000000..972f8104
--- /dev/null
+++ b/src/deepgram/agent/v1/types/agent_v1welcome.py
@@ -0,0 +1,27 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+
+
+class AgentV1Welcome(UniversalBaseModel):
+ type: typing.Literal["Welcome"] = pydantic.Field(default="Welcome")
+ """
+ Message type identifier for welcome message
+ """
+
+ request_id: str = pydantic.Field()
+ """
+ Unique identifier for the request
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/agent/v1/types/anthropic.py b/src/deepgram/agent/v1/types/anthropic.py
new file mode 100644
index 00000000..ba7cf771
--- /dev/null
+++ b/src/deepgram/agent/v1/types/anthropic.py
@@ -0,0 +1,33 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+from .agent_v1settings_agent_think_provider_anthropic_model import AgentV1SettingsAgentThinkProviderAnthropicModel
+
+
+class Anthropic(UniversalBaseModel):
+ version: typing.Optional[typing.Literal["v1"]] = pydantic.Field(default=None)
+ """
+ The REST API version for the Anthropic Messages API
+ """
+
+ model: AgentV1SettingsAgentThinkProviderAnthropicModel = pydantic.Field()
+ """
+ Anthropic model to use
+ """
+
+ temperature: typing.Optional[float] = pydantic.Field(default=None)
+ """
+ Anthropic temperature (0-1)
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/agent/v1/types/cartesia.py b/src/deepgram/agent/v1/types/cartesia.py
new file mode 100644
index 00000000..6511e77d
--- /dev/null
+++ b/src/deepgram/agent/v1/types/cartesia.py
@@ -0,0 +1,39 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+from .agent_v1settings_agent_speak_one_item_provider_cartesia_model_id import (
+ AgentV1SettingsAgentSpeakOneItemProviderCartesiaModelId,
+)
+from .agent_v1settings_agent_speak_one_item_provider_cartesia_voice import (
+ AgentV1SettingsAgentSpeakOneItemProviderCartesiaVoice,
+)
+
+
+class Cartesia(UniversalBaseModel):
+ version: typing.Optional[typing.Literal["2025-03-17"]] = pydantic.Field(default=None)
+ """
+ The API version header for the Cartesia text-to-speech API
+ """
+
+ model_id: AgentV1SettingsAgentSpeakOneItemProviderCartesiaModelId = pydantic.Field()
+ """
+ Cartesia model ID
+ """
+
+ voice: AgentV1SettingsAgentSpeakOneItemProviderCartesiaVoice
+ language: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Cartesia language code
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/agent/v1/types/deepgram.py b/src/deepgram/agent/v1/types/deepgram.py
new file mode 100644
index 00000000..c259f057
--- /dev/null
+++ b/src/deepgram/agent/v1/types/deepgram.py
@@ -0,0 +1,30 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+from .agent_v1settings_agent_speak_one_item_provider_deepgram_model import (
+ AgentV1SettingsAgentSpeakOneItemProviderDeepgramModel,
+)
+
+
+class Deepgram(UniversalBaseModel):
+ version: typing.Optional[typing.Literal["v1"]] = pydantic.Field(default=None)
+ """
+ The REST API version for the Deepgram text-to-speech API
+ """
+
+ model: AgentV1SettingsAgentSpeakOneItemProviderDeepgramModel = pydantic.Field()
+ """
+ Deepgram TTS model
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/agent/v1/types/eleven_labs.py b/src/deepgram/agent/v1/types/eleven_labs.py
new file mode 100644
index 00000000..9c10bed4
--- /dev/null
+++ b/src/deepgram/agent/v1/types/eleven_labs.py
@@ -0,0 +1,40 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+from .agent_v1settings_agent_speak_one_item_provider_eleven_labs_model_id import (
+ AgentV1SettingsAgentSpeakOneItemProviderElevenLabsModelId,
+)
+
+
+class ElevenLabs(UniversalBaseModel):
+ version: typing.Optional[typing.Literal["v1"]] = pydantic.Field(default=None)
+ """
+ The REST API version for the ElevenLabs text-to-speech API
+ """
+
+ model_id: AgentV1SettingsAgentSpeakOneItemProviderElevenLabsModelId = pydantic.Field()
+ """
+ Eleven Labs model ID
+ """
+
+ language: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Optional language to use, e.g. 'en-US'. Corresponds to the `language_code` parameter in the ElevenLabs API
+ """
+
+ language_code: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Use the `language` field instead.
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/agent/v1/types/google.py b/src/deepgram/agent/v1/types/google.py
new file mode 100644
index 00000000..0c1e7a05
--- /dev/null
+++ b/src/deepgram/agent/v1/types/google.py
@@ -0,0 +1,33 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+from .agent_v1settings_agent_think_provider_google_model import AgentV1SettingsAgentThinkProviderGoogleModel
+
+
+class Google(UniversalBaseModel):
+ version: typing.Optional[typing.Literal["v1beta"]] = pydantic.Field(default=None)
+ """
+ The REST API version for the Google generative language API
+ """
+
+ model: AgentV1SettingsAgentThinkProviderGoogleModel = pydantic.Field()
+ """
+ Google model to use
+ """
+
+ temperature: typing.Optional[float] = pydantic.Field(default=None)
+ """
+ Google temperature (0-2)
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/agent/v1/types/groq.py b/src/deepgram/agent/v1/types/groq.py
new file mode 100644
index 00000000..34ba97dd
--- /dev/null
+++ b/src/deepgram/agent/v1/types/groq.py
@@ -0,0 +1,32 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+
+
+class Groq(UniversalBaseModel):
+ version: typing.Optional[typing.Literal["v1"]] = pydantic.Field(default=None)
+ """
+ The REST API version for Groq's chat completions API (mostly OpenAI-compatible)
+ """
+
+ model: typing.Literal["openai/gpt-oss-20b"] = pydantic.Field(default="openai/gpt-oss-20b")
+ """
+ Groq model to use
+ """
+
+ temperature: typing.Optional[float] = pydantic.Field(default=None)
+ """
+ Groq temperature (0-2)
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/agent/v1/types/open_ai.py b/src/deepgram/agent/v1/types/open_ai.py
new file mode 100644
index 00000000..5837c0cc
--- /dev/null
+++ b/src/deepgram/agent/v1/types/open_ai.py
@@ -0,0 +1,38 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+from .agent_v1settings_agent_speak_one_item_provider_open_ai_model import (
+ AgentV1SettingsAgentSpeakOneItemProviderOpenAiModel,
+)
+from .agent_v1settings_agent_speak_one_item_provider_open_ai_voice import (
+ AgentV1SettingsAgentSpeakOneItemProviderOpenAiVoice,
+)
+
+
+class OpenAi(UniversalBaseModel):
+ version: typing.Optional[typing.Literal["v1"]] = pydantic.Field(default=None)
+ """
+ The REST API version for the OpenAI text-to-speech API
+ """
+
+ model: AgentV1SettingsAgentSpeakOneItemProviderOpenAiModel = pydantic.Field()
+ """
+ OpenAI TTS model
+ """
+
+ voice: AgentV1SettingsAgentSpeakOneItemProviderOpenAiVoice = pydantic.Field()
+ """
+ OpenAI voice
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/auth/v1/tokens/raw_client.py b/src/deepgram/auth/v1/tokens/raw_client.py
index ee5c535a..b8d45f79 100644
--- a/src/deepgram/auth/v1/tokens/raw_client.py
+++ b/src/deepgram/auth/v1/tokens/raw_client.py
@@ -65,9 +65,9 @@ def grant(
raise BadRequestError(
headers=dict(_response.headers),
body=typing.cast(
- typing.Optional[typing.Any],
+ typing.Any,
parse_obj_as(
- type_=typing.Optional[typing.Any], # type: ignore
+ type_=typing.Any, # type: ignore
object_=_response.json(),
),
),
@@ -128,9 +128,9 @@ async def grant(
raise BadRequestError(
headers=dict(_response.headers),
body=typing.cast(
- typing.Optional[typing.Any],
+ typing.Any,
parse_obj_as(
- type_=typing.Optional[typing.Any], # type: ignore
+ type_=typing.Any, # type: ignore
object_=_response.json(),
),
),
diff --git a/src/deepgram/core/http_client.py b/src/deepgram/core/http_client.py
index e4173f99..f4a7c071 100644
--- a/src/deepgram/core/http_client.py
+++ b/src/deepgram/core/http_client.py
@@ -14,13 +14,13 @@
from .force_multipart import FORCE_MULTIPART
from .jsonable_encoder import jsonable_encoder
from .query_encoder import encode_query
-from .remove_none_from_dict import remove_none_from_dict
+from .remove_none_from_dict import remove_none_from_dict as remove_none_from_dict
from .request_options import RequestOptions
from httpx._types import RequestFiles
-INITIAL_RETRY_DELAY_SECONDS = 0.5
-MAX_RETRY_DELAY_SECONDS = 10
-MAX_RETRY_DELAY_SECONDS_FROM_HEADER = 30
+INITIAL_RETRY_DELAY_SECONDS = 1.0
+MAX_RETRY_DELAY_SECONDS = 60.0
+JITTER_FACTOR = 0.2 # 20% random jitter
def _parse_retry_after(response_headers: httpx.Headers) -> typing.Optional[float]:
@@ -64,6 +64,38 @@ def _parse_retry_after(response_headers: httpx.Headers) -> typing.Optional[float
return seconds
+def _add_positive_jitter(delay: float) -> float:
+ """Add positive jitter (0-20%) to prevent thundering herd."""
+ jitter_multiplier = 1 + random() * JITTER_FACTOR
+ return delay * jitter_multiplier
+
+
+def _add_symmetric_jitter(delay: float) -> float:
+ """Add symmetric jitter (±10%) for exponential backoff."""
+ jitter_multiplier = 1 + (random() - 0.5) * JITTER_FACTOR
+ return delay * jitter_multiplier
+
+
+def _parse_x_ratelimit_reset(response_headers: httpx.Headers) -> typing.Optional[float]:
+ """
+ Parse the X-RateLimit-Reset header (Unix timestamp in seconds).
+ Returns seconds to wait, or None if header is missing/invalid.
+ """
+ reset_time_str = response_headers.get("x-ratelimit-reset")
+ if reset_time_str is None:
+ return None
+
+ try:
+ reset_time = int(reset_time_str)
+ delay = reset_time - time.time()
+ if delay > 0:
+ return delay
+ except (ValueError, TypeError):
+ pass
+
+ return None
+
+
def _retry_timeout(response: httpx.Response, retries: int) -> float:
"""
Determine the amount of time to wait before retrying a request.
@@ -71,17 +103,19 @@ def _retry_timeout(response: httpx.Response, retries: int) -> float:
with a jitter to determine the number of seconds to wait.
"""
- # If the API asks us to wait a certain amount of time (and it's a reasonable amount), just do what it says.
+ # 1. Check Retry-After header first
retry_after = _parse_retry_after(response.headers)
- if retry_after is not None and retry_after <= MAX_RETRY_DELAY_SECONDS_FROM_HEADER:
- return retry_after
+ if retry_after is not None and retry_after > 0:
+ return min(retry_after, MAX_RETRY_DELAY_SECONDS)
- # Apply exponential backoff, capped at MAX_RETRY_DELAY_SECONDS.
- retry_delay = min(INITIAL_RETRY_DELAY_SECONDS * pow(2.0, retries), MAX_RETRY_DELAY_SECONDS)
+ # 2. Check X-RateLimit-Reset header (with positive jitter)
+ ratelimit_reset = _parse_x_ratelimit_reset(response.headers)
+ if ratelimit_reset is not None:
+ return _add_positive_jitter(min(ratelimit_reset, MAX_RETRY_DELAY_SECONDS))
- # Add a randomness / jitter to the retry delay to avoid overwhelming the server with retries.
- timeout = retry_delay * (1 - 0.25 * random())
- return timeout if timeout >= 0 else 0
+ # 3. Fall back to exponential backoff (with symmetric jitter)
+ backoff = min(INITIAL_RETRY_DELAY_SECONDS * pow(2.0, retries), MAX_RETRY_DELAY_SECONDS)
+ return _add_symmetric_jitter(backoff)
def _should_retry(response: httpx.Response) -> bool:
@@ -89,6 +123,21 @@ def _should_retry(response: httpx.Response) -> bool:
return response.status_code >= 500 or response.status_code in retryable_400s
+def _maybe_filter_none_from_multipart_data(
+ data: typing.Optional[typing.Any],
+ request_files: typing.Optional[RequestFiles],
+ force_multipart: typing.Optional[bool],
+) -> typing.Optional[typing.Any]:
+ """
+ Filter None values from data body for multipart/form requests.
+ This prevents httpx from converting None to empty strings in multipart encoding.
+ Only applies when files are present or force_multipart is True.
+ """
+ if data is not None and isinstance(data, typing.Mapping) and (request_files or force_multipart):
+ return remove_none_from_dict(data)
+ return data
+
+
def remove_omit_from_dict(
original: typing.Dict[str, typing.Optional[typing.Any]],
omit: typing.Optional[typing.Any],
@@ -210,6 +259,8 @@ def request(
if (request_files is None or len(request_files) == 0) and force_multipart:
request_files = FORCE_MULTIPART
+ data_body = _maybe_filter_none_from_multipart_data(data_body, request_files, force_multipart)
+
response = self.httpx_client.request(
method=method,
url=urllib.parse.urljoin(f"{base_url}/", path),
@@ -307,6 +358,8 @@ def stream(
json_body, data_body = get_request_body(json=json, data=data, request_options=request_options, omit=omit)
+ data_body = _maybe_filter_none_from_multipart_data(data_body, request_files, force_multipart)
+
with self.httpx_client.stream(
method=method,
url=urllib.parse.urljoin(f"{base_url}/", path),
@@ -353,12 +406,19 @@ def __init__(
base_timeout: typing.Callable[[], typing.Optional[float]],
base_headers: typing.Callable[[], typing.Dict[str, str]],
base_url: typing.Optional[typing.Callable[[], str]] = None,
+ async_base_headers: typing.Optional[typing.Callable[[], typing.Awaitable[typing.Dict[str, str]]]] = None,
):
self.base_url = base_url
self.base_timeout = base_timeout
self.base_headers = base_headers
+ self.async_base_headers = async_base_headers
self.httpx_client = httpx_client
+ async def _get_headers(self) -> typing.Dict[str, str]:
+ if self.async_base_headers is not None:
+ return await self.async_base_headers()
+ return self.base_headers()
+
def get_base_url(self, maybe_base_url: typing.Optional[str]) -> str:
base_url = maybe_base_url
if self.base_url is not None and base_url is None:
@@ -408,6 +468,11 @@ async def request(
json_body, data_body = get_request_body(json=json, data=data, request_options=request_options, omit=omit)
+ data_body = _maybe_filter_none_from_multipart_data(data_body, request_files, force_multipart)
+
+ # Get headers (supports async token providers)
+ _headers = await self._get_headers()
+
# Add the input to each of these and do None-safety checks
response = await self.httpx_client.request(
method=method,
@@ -415,7 +480,7 @@ async def request(
headers=jsonable_encoder(
remove_none_from_dict(
{
- **self.base_headers(),
+ **_headers,
**(headers if headers is not None else {}),
**(request_options.get("additional_headers", {}) or {} if request_options is not None else {}),
}
@@ -505,13 +570,18 @@ async def stream(
json_body, data_body = get_request_body(json=json, data=data, request_options=request_options, omit=omit)
+ data_body = _maybe_filter_none_from_multipart_data(data_body, request_files, force_multipart)
+
+ # Get headers (supports async token providers)
+ _headers = await self._get_headers()
+
async with self.httpx_client.stream(
method=method,
url=urllib.parse.urljoin(f"{base_url}/", path),
headers=jsonable_encoder(
remove_none_from_dict(
{
- **self.base_headers(),
+ **_headers,
**(headers if headers is not None else {}),
**(request_options.get("additional_headers", {}) if request_options is not None else {}),
}
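As a rough, simplified sketch (not part of the diff), the new retry-delay selection can be reproduced standalone: Retry-After is honored first, then X-RateLimit-Reset with 0-20% positive jitter, then exponential backoff with +/-10% symmetric jitter. The header values and retry counts below are hypothetical.

import random

INITIAL, MAX_DELAY, JITTER = 1.0, 60.0, 0.2

def sketch_retry_delay(headers: dict, retries: int, now: float) -> float:
    # 1. Retry-After wins outright, capped at the maximum delay.
    if "retry-after" in headers:
        return min(float(headers["retry-after"]), MAX_DELAY)
    # 2. X-RateLimit-Reset is a Unix timestamp; wait until it, plus 0-20% positive jitter.
    if "x-ratelimit-reset" in headers:
        wait = min(int(headers["x-ratelimit-reset"]) - now, MAX_DELAY)
        return wait * (1 + random.random() * JITTER)
    # 3. Otherwise, exponential backoff capped at the maximum, with +/-10% symmetric jitter.
    backoff = min(INITIAL * 2.0 ** retries, MAX_DELAY)
    return backoff * (1 + (random.random() - 0.5) * JITTER)

print(sketch_retry_delay({"retry-after": "5"}, retries=0, now=0.0))  # 5.0
print(sketch_retry_delay({}, retries=3, now=0.0))                    # roughly 8 seconds, +/-10%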
diff --git a/src/deepgram/core/http_sse/__init__.py b/src/deepgram/core/http_sse/__init__.py
new file mode 100644
index 00000000..730e5a33
--- /dev/null
+++ b/src/deepgram/core/http_sse/__init__.py
@@ -0,0 +1,42 @@
+# This file was auto-generated by Fern from our API Definition.
+
+# isort: skip_file
+
+import typing
+from importlib import import_module
+
+if typing.TYPE_CHECKING:
+ from ._api import EventSource, aconnect_sse, connect_sse
+ from ._exceptions import SSEError
+ from ._models import ServerSentEvent
+_dynamic_imports: typing.Dict[str, str] = {
+ "EventSource": "._api",
+ "SSEError": "._exceptions",
+ "ServerSentEvent": "._models",
+ "aconnect_sse": "._api",
+ "connect_sse": "._api",
+}
+
+
+def __getattr__(attr_name: str) -> typing.Any:
+ module_name = _dynamic_imports.get(attr_name)
+ if module_name is None:
+ raise AttributeError(f"No {attr_name} found in _dynamic_imports for module name -> {__name__}")
+ try:
+ module = import_module(module_name, __package__)
+ if module_name == f".{attr_name}":
+ return module
+ else:
+ return getattr(module, attr_name)
+ except ImportError as e:
+ raise ImportError(f"Failed to import {attr_name} from {module_name}: {e}") from e
+ except AttributeError as e:
+ raise AttributeError(f"Failed to get {attr_name} from {module_name}: {e}") from e
+
+
+def __dir__():
+ lazy_attrs = list(_dynamic_imports.keys())
+ return sorted(lazy_attrs)
+
+
+__all__ = ["EventSource", "SSEError", "ServerSentEvent", "aconnect_sse", "connect_sse"]
diff --git a/src/deepgram/core/http_sse/_api.py b/src/deepgram/core/http_sse/_api.py
new file mode 100644
index 00000000..f900b3b6
--- /dev/null
+++ b/src/deepgram/core/http_sse/_api.py
@@ -0,0 +1,112 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import re
+from contextlib import asynccontextmanager, contextmanager
+from typing import Any, AsyncGenerator, AsyncIterator, Iterator, cast
+
+import httpx
+from ._decoders import SSEDecoder
+from ._exceptions import SSEError
+from ._models import ServerSentEvent
+
+
+class EventSource:
+ def __init__(self, response: httpx.Response) -> None:
+ self._response = response
+
+ def _check_content_type(self) -> None:
+ content_type = self._response.headers.get("content-type", "").partition(";")[0]
+ if "text/event-stream" not in content_type:
+ raise SSEError(
+ f"Expected response header Content-Type to contain 'text/event-stream', got {content_type!r}"
+ )
+
+ def _get_charset(self) -> str:
+ """Extract charset from Content-Type header, fallback to UTF-8."""
+ content_type = self._response.headers.get("content-type", "")
+
+ # Parse charset parameter using regex
+ charset_match = re.search(r"charset=([^;\s]+)", content_type, re.IGNORECASE)
+ if charset_match:
+ charset = charset_match.group(1).strip("\"'")
+ # Validate that it's a known encoding
+ try:
+ # Test if the charset is valid by trying to encode/decode
+ "test".encode(charset).decode(charset)
+ return charset
+ except (LookupError, UnicodeError):
+ # If charset is invalid, fall back to UTF-8
+ pass
+
+ # Default to UTF-8 if no charset specified or invalid charset
+ return "utf-8"
+
+ @property
+ def response(self) -> httpx.Response:
+ return self._response
+
+ def iter_sse(self) -> Iterator[ServerSentEvent]:
+ self._check_content_type()
+ decoder = SSEDecoder()
+ charset = self._get_charset()
+
+ buffer = ""
+ for chunk in self._response.iter_bytes():
+ # Decode chunk using detected charset
+ text_chunk = chunk.decode(charset, errors="replace")
+ buffer += text_chunk
+
+ # Process complete lines
+ while "\n" in buffer:
+ line, buffer = buffer.split("\n", 1)
+ line = line.rstrip("\r")
+ sse = decoder.decode(line)
+ # A blank line (the "\n\n" event terminator) yields line == ""
+ # and makes the decoder emit the buffered event, if any
+ if sse is not None:
+ yield sse
+
+ # Process any remaining data in buffer
+ if buffer.strip():
+ line = buffer.rstrip("\r")
+ sse = decoder.decode(line)
+ if sse is not None:
+ yield sse
+
+ async def aiter_sse(self) -> AsyncGenerator[ServerSentEvent, None]:
+ self._check_content_type()
+ decoder = SSEDecoder()
+ lines = cast(AsyncGenerator[str, None], self._response.aiter_lines())
+ try:
+ async for line in lines:
+ line = line.rstrip("\n")
+ sse = decoder.decode(line)
+ if sse is not None:
+ yield sse
+ finally:
+ await lines.aclose()
+
+
+@contextmanager
+def connect_sse(client: httpx.Client, method: str, url: str, **kwargs: Any) -> Iterator[EventSource]:
+ headers = kwargs.pop("headers", {})
+ headers["Accept"] = "text/event-stream"
+ headers["Cache-Control"] = "no-store"
+
+ with client.stream(method, url, headers=headers, **kwargs) as response:
+ yield EventSource(response)
+
+
+@asynccontextmanager
+async def aconnect_sse(
+ client: httpx.AsyncClient,
+ method: str,
+ url: str,
+ **kwargs: Any,
+) -> AsyncIterator[EventSource]:
+ headers = kwargs.pop("headers", {})
+ headers["Accept"] = "text/event-stream"
+ headers["Cache-Control"] = "no-store"
+
+ async with client.stream(method, url, headers=headers, **kwargs) as response:
+ yield EventSource(response)
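A hedged usage sketch for the new SSE helpers; the endpoint URL is hypothetical, and iter_sse() raises SSEError unless the response carries Content-Type: text/event-stream.

import httpx

from deepgram.core.http_sse import connect_sse

with httpx.Client() as client:
    # connect_sse sets Accept: text/event-stream and Cache-Control: no-store on the request.
    with connect_sse(client, "GET", "https://example.com/stream") as source:
        for event in source.iter_sse():
            print(event.event, event.id, event.data)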
diff --git a/src/deepgram/core/http_sse/_decoders.py b/src/deepgram/core/http_sse/_decoders.py
new file mode 100644
index 00000000..339b0890
--- /dev/null
+++ b/src/deepgram/core/http_sse/_decoders.py
@@ -0,0 +1,61 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from typing import List, Optional
+
+from ._models import ServerSentEvent
+
+
+class SSEDecoder:
+ def __init__(self) -> None:
+ self._event = ""
+ self._data: List[str] = []
+ self._last_event_id = ""
+ self._retry: Optional[int] = None
+
+ def decode(self, line: str) -> Optional[ServerSentEvent]:
+ # See: https://html.spec.whatwg.org/multipage/server-sent-events.html#event-stream-interpretation # noqa: E501
+
+ if not line:
+ if not self._event and not self._data and not self._last_event_id and self._retry is None:
+ return None
+
+ sse = ServerSentEvent(
+ event=self._event,
+ data="\n".join(self._data),
+ id=self._last_event_id,
+ retry=self._retry,
+ )
+
+ # NOTE: as per the SSE spec, do not reset last_event_id.
+ self._event = ""
+ self._data = []
+ self._retry = None
+
+ return sse
+
+ if line.startswith(":"):
+ return None
+
+ fieldname, _, value = line.partition(":")
+
+ if value.startswith(" "):
+ value = value[1:]
+
+ if fieldname == "event":
+ self._event = value
+ elif fieldname == "data":
+ self._data.append(value)
+ elif fieldname == "id":
+ if "\0" in value:
+ pass
+ else:
+ self._last_event_id = value
+ elif fieldname == "retry":
+ try:
+ self._retry = int(value)
+ except (TypeError, ValueError):
+ pass
+ else:
+ pass # Field is ignored.
+
+ return None
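A minimal sketch of the decoder's line-by-line contract: comment lines (leading ':') are ignored, field lines accumulate state, and a blank line emits the buffered event. The payload is invented.

from deepgram.core.http_sse._decoders import SSEDecoder

decoder = SSEDecoder()
for line in [": keep-alive comment", "event: Results", 'data: {"duration": 1.0}', ""]:
    event = decoder.decode(line)
    if event is not None:
        print(event.event)   # Results
        print(event.json())  # {'duration': 1.0}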
diff --git a/src/deepgram/core/http_sse/_exceptions.py b/src/deepgram/core/http_sse/_exceptions.py
new file mode 100644
index 00000000..81605a8a
--- /dev/null
+++ b/src/deepgram/core/http_sse/_exceptions.py
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import httpx
+
+
+class SSEError(httpx.TransportError):
+ pass
diff --git a/src/deepgram/core/http_sse/_models.py b/src/deepgram/core/http_sse/_models.py
new file mode 100644
index 00000000..1af57f8f
--- /dev/null
+++ b/src/deepgram/core/http_sse/_models.py
@@ -0,0 +1,17 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import json
+from dataclasses import dataclass
+from typing import Any, Optional
+
+
+@dataclass(frozen=True)
+class ServerSentEvent:
+ event: str = "message"
+ data: str = ""
+ id: str = ""
+ retry: Optional[int] = None
+
+ def json(self) -> Any:
+ """Parse the data field as JSON."""
+ return json.loads(self.data)
diff --git a/src/deepgram/core/pydantic_utilities.py b/src/deepgram/core/pydantic_utilities.py
index 8906cdfa..185e5c4f 100644
--- a/src/deepgram/core/pydantic_utilities.py
+++ b/src/deepgram/core/pydantic_utilities.py
@@ -220,7 +220,9 @@ def universal_root_validator(
) -> Callable[[AnyCallable], AnyCallable]:
def decorator(func: AnyCallable) -> AnyCallable:
if IS_PYDANTIC_V2:
- return cast(AnyCallable, pydantic.model_validator(mode="before" if pre else "after")(func)) # type: ignore[attr-defined]
+ # In Pydantic v2, root validators on RootModel always run in "before" mode;
+ # the custom validators transform the input value before the model is created.
+ return cast(AnyCallable, pydantic.model_validator(mode="before")(func)) # type: ignore[attr-defined]
return cast(AnyCallable, pydantic.root_validator(pre=pre)(func)) # type: ignore[call-overload]
return decorator
diff --git a/src/deepgram/errors/bad_request_error.py b/src/deepgram/errors/bad_request_error.py
index baf5be4f..ec78e269 100644
--- a/src/deepgram/errors/bad_request_error.py
+++ b/src/deepgram/errors/bad_request_error.py
@@ -6,5 +6,5 @@
class BadRequestError(ApiError):
- def __init__(self, body: typing.Optional[typing.Any], headers: typing.Optional[typing.Dict[str, str]] = None):
+ def __init__(self, body: typing.Any, headers: typing.Optional[typing.Dict[str, str]] = None):
super().__init__(status_code=400, headers=headers, body=body)
diff --git a/src/deepgram/listen/__init__.py b/src/deepgram/listen/__init__.py
index 6186f5b4..42a8267f 100644
--- a/src/deepgram/listen/__init__.py
+++ b/src/deepgram/listen/__init__.py
@@ -7,7 +7,96 @@
if typing.TYPE_CHECKING:
from . import v1, v2
-_dynamic_imports: typing.Dict[str, str] = {"v1": ".v1", "v2": ".v2"}
+ from .v1 import (
+ ListenV1CloseStream,
+ ListenV1CloseStreamParams,
+ ListenV1CloseStreamType,
+ ListenV1Finalize,
+ ListenV1FinalizeParams,
+ ListenV1FinalizeType,
+ ListenV1KeepAlive,
+ ListenV1KeepAliveParams,
+ ListenV1KeepAliveType,
+ ListenV1Metadata,
+ ListenV1MetadataParams,
+ ListenV1Results,
+ ListenV1ResultsChannel,
+ ListenV1ResultsChannelAlternativesItem,
+ ListenV1ResultsChannelAlternativesItemParams,
+ ListenV1ResultsChannelAlternativesItemWordsItem,
+ ListenV1ResultsChannelAlternativesItemWordsItemParams,
+ ListenV1ResultsChannelParams,
+ ListenV1ResultsEntitiesItem,
+ ListenV1ResultsEntitiesItemParams,
+ ListenV1ResultsMetadata,
+ ListenV1ResultsMetadataModelInfo,
+ ListenV1ResultsMetadataModelInfoParams,
+ ListenV1ResultsMetadataParams,
+ ListenV1ResultsParams,
+ ListenV1SpeechStarted,
+ ListenV1SpeechStartedParams,
+ ListenV1UtteranceEnd,
+ ListenV1UtteranceEndParams,
+ )
+ from .v2 import (
+ ListenV2CloseStream,
+ ListenV2CloseStreamParams,
+ ListenV2CloseStreamType,
+ ListenV2Connected,
+ ListenV2ConnectedParams,
+ ListenV2FatalError,
+ ListenV2FatalErrorParams,
+ ListenV2TurnInfo,
+ ListenV2TurnInfoEvent,
+ ListenV2TurnInfoParams,
+ ListenV2TurnInfoWordsItem,
+ ListenV2TurnInfoWordsItemParams,
+ )
+_dynamic_imports: typing.Dict[str, str] = {
+ "ListenV1CloseStream": ".v1",
+ "ListenV1CloseStreamParams": ".v1",
+ "ListenV1CloseStreamType": ".v1",
+ "ListenV1Finalize": ".v1",
+ "ListenV1FinalizeParams": ".v1",
+ "ListenV1FinalizeType": ".v1",
+ "ListenV1KeepAlive": ".v1",
+ "ListenV1KeepAliveParams": ".v1",
+ "ListenV1KeepAliveType": ".v1",
+ "ListenV1Metadata": ".v1",
+ "ListenV1MetadataParams": ".v1",
+ "ListenV1Results": ".v1",
+ "ListenV1ResultsChannel": ".v1",
+ "ListenV1ResultsChannelAlternativesItem": ".v1",
+ "ListenV1ResultsChannelAlternativesItemParams": ".v1",
+ "ListenV1ResultsChannelAlternativesItemWordsItem": ".v1",
+ "ListenV1ResultsChannelAlternativesItemWordsItemParams": ".v1",
+ "ListenV1ResultsChannelParams": ".v1",
+ "ListenV1ResultsEntitiesItem": ".v1",
+ "ListenV1ResultsEntitiesItemParams": ".v1",
+ "ListenV1ResultsMetadata": ".v1",
+ "ListenV1ResultsMetadataModelInfo": ".v1",
+ "ListenV1ResultsMetadataModelInfoParams": ".v1",
+ "ListenV1ResultsMetadataParams": ".v1",
+ "ListenV1ResultsParams": ".v1",
+ "ListenV1SpeechStarted": ".v1",
+ "ListenV1SpeechStartedParams": ".v1",
+ "ListenV1UtteranceEnd": ".v1",
+ "ListenV1UtteranceEndParams": ".v1",
+ "ListenV2CloseStream": ".v2",
+ "ListenV2CloseStreamParams": ".v2",
+ "ListenV2CloseStreamType": ".v2",
+ "ListenV2Connected": ".v2",
+ "ListenV2ConnectedParams": ".v2",
+ "ListenV2FatalError": ".v2",
+ "ListenV2FatalErrorParams": ".v2",
+ "ListenV2TurnInfo": ".v2",
+ "ListenV2TurnInfoEvent": ".v2",
+ "ListenV2TurnInfoParams": ".v2",
+ "ListenV2TurnInfoWordsItem": ".v2",
+ "ListenV2TurnInfoWordsItemParams": ".v2",
+ "v1": ".v1",
+ "v2": ".v2",
+}
def __getattr__(attr_name: str) -> typing.Any:
@@ -31,4 +120,48 @@ def __dir__():
return sorted(lazy_attrs)
-__all__ = ["v1", "v2"]
+__all__ = [
+ "ListenV1CloseStream",
+ "ListenV1CloseStreamParams",
+ "ListenV1CloseStreamType",
+ "ListenV1Finalize",
+ "ListenV1FinalizeParams",
+ "ListenV1FinalizeType",
+ "ListenV1KeepAlive",
+ "ListenV1KeepAliveParams",
+ "ListenV1KeepAliveType",
+ "ListenV1Metadata",
+ "ListenV1MetadataParams",
+ "ListenV1Results",
+ "ListenV1ResultsChannel",
+ "ListenV1ResultsChannelAlternativesItem",
+ "ListenV1ResultsChannelAlternativesItemParams",
+ "ListenV1ResultsChannelAlternativesItemWordsItem",
+ "ListenV1ResultsChannelAlternativesItemWordsItemParams",
+ "ListenV1ResultsChannelParams",
+ "ListenV1ResultsEntitiesItem",
+ "ListenV1ResultsEntitiesItemParams",
+ "ListenV1ResultsMetadata",
+ "ListenV1ResultsMetadataModelInfo",
+ "ListenV1ResultsMetadataModelInfoParams",
+ "ListenV1ResultsMetadataParams",
+ "ListenV1ResultsParams",
+ "ListenV1SpeechStarted",
+ "ListenV1SpeechStartedParams",
+ "ListenV1UtteranceEnd",
+ "ListenV1UtteranceEndParams",
+ "ListenV2CloseStream",
+ "ListenV2CloseStreamParams",
+ "ListenV2CloseStreamType",
+ "ListenV2Connected",
+ "ListenV2ConnectedParams",
+ "ListenV2FatalError",
+ "ListenV2FatalErrorParams",
+ "ListenV2TurnInfo",
+ "ListenV2TurnInfoEvent",
+ "ListenV2TurnInfoParams",
+ "ListenV2TurnInfoWordsItem",
+ "ListenV2TurnInfoWordsItemParams",
+ "v1",
+ "v2",
+]
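With the widened re-exports, the websocket message types can be imported straight from the listen root and are resolved lazily through __getattr__; for example:

from deepgram.listen import ListenV1Results, ListenV2TurnInfo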
diff --git a/src/deepgram/listen/v1/__init__.py b/src/deepgram/listen/v1/__init__.py
index a3dcf43f..8b43c4ff 100644
--- a/src/deepgram/listen/v1/__init__.py
+++ b/src/deepgram/listen/v1/__init__.py
@@ -6,6 +6,24 @@
from importlib import import_module
if typing.TYPE_CHECKING:
+ from .types import (
+ ListenV1CloseStream,
+ ListenV1CloseStreamType,
+ ListenV1Finalize,
+ ListenV1FinalizeType,
+ ListenV1KeepAlive,
+ ListenV1KeepAliveType,
+ ListenV1Metadata,
+ ListenV1Results,
+ ListenV1ResultsChannel,
+ ListenV1ResultsChannelAlternativesItem,
+ ListenV1ResultsChannelAlternativesItemWordsItem,
+ ListenV1ResultsEntitiesItem,
+ ListenV1ResultsMetadata,
+ ListenV1ResultsMetadataModelInfo,
+ ListenV1SpeechStarted,
+ ListenV1UtteranceEnd,
+ )
from . import media
from .media import (
MediaTranscribeRequestCallbackMethod,
@@ -18,7 +36,51 @@
MediaTranscribeResponse,
MediaTranscribeResponseParams,
)
+ from .requests import (
+ ListenV1CloseStreamParams,
+ ListenV1FinalizeParams,
+ ListenV1KeepAliveParams,
+ ListenV1MetadataParams,
+ ListenV1ResultsChannelAlternativesItemParams,
+ ListenV1ResultsChannelAlternativesItemWordsItemParams,
+ ListenV1ResultsChannelParams,
+ ListenV1ResultsEntitiesItemParams,
+ ListenV1ResultsMetadataModelInfoParams,
+ ListenV1ResultsMetadataParams,
+ ListenV1ResultsParams,
+ ListenV1SpeechStartedParams,
+ ListenV1UtteranceEndParams,
+ )
_dynamic_imports: typing.Dict[str, str] = {
+ "ListenV1CloseStream": ".types",
+ "ListenV1CloseStreamParams": ".requests",
+ "ListenV1CloseStreamType": ".types",
+ "ListenV1Finalize": ".types",
+ "ListenV1FinalizeParams": ".requests",
+ "ListenV1FinalizeType": ".types",
+ "ListenV1KeepAlive": ".types",
+ "ListenV1KeepAliveParams": ".requests",
+ "ListenV1KeepAliveType": ".types",
+ "ListenV1Metadata": ".types",
+ "ListenV1MetadataParams": ".requests",
+ "ListenV1Results": ".types",
+ "ListenV1ResultsChannel": ".types",
+ "ListenV1ResultsChannelAlternativesItem": ".types",
+ "ListenV1ResultsChannelAlternativesItemParams": ".requests",
+ "ListenV1ResultsChannelAlternativesItemWordsItem": ".types",
+ "ListenV1ResultsChannelAlternativesItemWordsItemParams": ".requests",
+ "ListenV1ResultsChannelParams": ".requests",
+ "ListenV1ResultsEntitiesItem": ".types",
+ "ListenV1ResultsEntitiesItemParams": ".requests",
+ "ListenV1ResultsMetadata": ".types",
+ "ListenV1ResultsMetadataModelInfo": ".types",
+ "ListenV1ResultsMetadataModelInfoParams": ".requests",
+ "ListenV1ResultsMetadataParams": ".requests",
+ "ListenV1ResultsParams": ".requests",
+ "ListenV1SpeechStarted": ".types",
+ "ListenV1SpeechStartedParams": ".requests",
+ "ListenV1UtteranceEnd": ".types",
+ "ListenV1UtteranceEndParams": ".requests",
"MediaTranscribeRequestCallbackMethod": ".media",
"MediaTranscribeRequestCustomIntentMode": ".media",
"MediaTranscribeRequestCustomTopicMode": ".media",
@@ -54,6 +116,35 @@ def __dir__():
__all__ = [
+ "ListenV1CloseStream",
+ "ListenV1CloseStreamParams",
+ "ListenV1CloseStreamType",
+ "ListenV1Finalize",
+ "ListenV1FinalizeParams",
+ "ListenV1FinalizeType",
+ "ListenV1KeepAlive",
+ "ListenV1KeepAliveParams",
+ "ListenV1KeepAliveType",
+ "ListenV1Metadata",
+ "ListenV1MetadataParams",
+ "ListenV1Results",
+ "ListenV1ResultsChannel",
+ "ListenV1ResultsChannelAlternativesItem",
+ "ListenV1ResultsChannelAlternativesItemParams",
+ "ListenV1ResultsChannelAlternativesItemWordsItem",
+ "ListenV1ResultsChannelAlternativesItemWordsItemParams",
+ "ListenV1ResultsChannelParams",
+ "ListenV1ResultsEntitiesItem",
+ "ListenV1ResultsEntitiesItemParams",
+ "ListenV1ResultsMetadata",
+ "ListenV1ResultsMetadataModelInfo",
+ "ListenV1ResultsMetadataModelInfoParams",
+ "ListenV1ResultsMetadataParams",
+ "ListenV1ResultsParams",
+ "ListenV1SpeechStarted",
+ "ListenV1SpeechStartedParams",
+ "ListenV1UtteranceEnd",
+ "ListenV1UtteranceEndParams",
"MediaTranscribeRequestCallbackMethod",
"MediaTranscribeRequestCustomIntentMode",
"MediaTranscribeRequestCustomTopicMode",
diff --git a/src/deepgram/listen/v1/client.py b/src/deepgram/listen/v1/client.py
index c6cba2d8..f9aea635 100644
--- a/src/deepgram/listen/v1/client.py
+++ b/src/deepgram/listen/v1/client.py
@@ -47,6 +47,7 @@ def connect(
callback: typing.Optional[str] = None,
callback_method: typing.Optional[str] = None,
channels: typing.Optional[str] = None,
+ detect_entities: typing.Optional[str] = None,
diarize: typing.Optional[str] = None,
dictation: typing.Optional[str] = None,
encoding: typing.Optional[str] = None,
@@ -85,6 +86,8 @@ def connect(
channels : typing.Optional[str]
+ detect_entities : typing.Optional[str]
+
diarize : typing.Optional[str]
dictation : typing.Optional[str]
@@ -154,6 +157,8 @@ def connect(
query_params = query_params.add("callback_method", callback_method)
if channels is not None:
query_params = query_params.add("channels", channels)
+ if detect_entities is not None:
+ query_params = query_params.add("detect_entities", detect_entities)
if diarize is not None:
query_params = query_params.add("diarize", diarize)
if dictation is not None:
@@ -258,6 +263,7 @@ async def connect(
callback: typing.Optional[str] = None,
callback_method: typing.Optional[str] = None,
channels: typing.Optional[str] = None,
+ detect_entities: typing.Optional[str] = None,
diarize: typing.Optional[str] = None,
dictation: typing.Optional[str] = None,
encoding: typing.Optional[str] = None,
@@ -296,6 +302,8 @@ async def connect(
channels : typing.Optional[str]
+ detect_entities : typing.Optional[str]
+
diarize : typing.Optional[str]
dictation : typing.Optional[str]
@@ -365,6 +373,8 @@ async def connect(
query_params = query_params.add("callback_method", callback_method)
if channels is not None:
query_params = query_params.add("channels", channels)
+ if detect_entities is not None:
+ query_params = query_params.add("detect_entities", detect_entities)
if diarize is not None:
query_params = query_params.add("diarize", diarize)
if dictation is not None:
diff --git a/src/deepgram/listen/v1/media/client.py b/src/deepgram/listen/v1/media/client.py
index 047dfac4..ab81ae3b 100644
--- a/src/deepgram/listen/v1/media/client.py
+++ b/src/deepgram/listen/v1/media/client.py
@@ -206,6 +206,41 @@ def transcribe_url(
api_key="YOUR_API_KEY",
)
client.listen.v1.media.transcribe_url(
+ callback="callback",
+ callback_method="POST",
+ extra="extra",
+ sentiment=True,
+ summarize="v2",
+ tag="tag",
+ topics=True,
+ custom_topic="custom_topic",
+ custom_topic_mode="extended",
+ intents=True,
+ custom_intent="custom_intent",
+ custom_intent_mode="extended",
+ detect_entities=True,
+ detect_language=True,
+ diarize=True,
+ dictation=True,
+ encoding="linear16",
+ filler_words=True,
+ keywords="keywords",
+ language="language",
+ measurements=True,
+ model="nova-3",
+ multichannel=True,
+ numerals=True,
+ paragraphs=True,
+ profanity_filter=True,
+ punctuate=True,
+ redact="redact",
+ replace="replace",
+ search="search",
+ smart_format=True,
+ utterances=True,
+ utt_split=1.1,
+ version="latest",
+ mip_opt_out=True,
url="https://dpgr.am/spacewalk.wav",
)
"""
@@ -661,6 +696,41 @@ async def transcribe_url(
async def main() -> None:
await client.listen.v1.media.transcribe_url(
+ callback="callback",
+ callback_method="POST",
+ extra="extra",
+ sentiment=True,
+ summarize="v2",
+ tag="tag",
+ topics=True,
+ custom_topic="custom_topic",
+ custom_topic_mode="extended",
+ intents=True,
+ custom_intent="custom_intent",
+ custom_intent_mode="extended",
+ detect_entities=True,
+ detect_language=True,
+ diarize=True,
+ dictation=True,
+ encoding="linear16",
+ filler_words=True,
+ keywords="keywords",
+ language="language",
+ measurements=True,
+ model="nova-3",
+ multichannel=True,
+ numerals=True,
+ paragraphs=True,
+ profanity_filter=True,
+ punctuate=True,
+ redact="redact",
+ replace="replace",
+ search="search",
+ smart_format=True,
+ utterances=True,
+ utt_split=1.1,
+ version="latest",
+ mip_opt_out=True,
url="https://dpgr.am/spacewalk.wav",
)
diff --git a/src/deepgram/listen/v1/media/raw_client.py b/src/deepgram/listen/v1/media/raw_client.py
index 6cdeccc8..d1c7d675 100644
--- a/src/deepgram/listen/v1/media/raw_client.py
+++ b/src/deepgram/listen/v1/media/raw_client.py
@@ -256,9 +256,9 @@ def transcribe_url(
raise BadRequestError(
headers=dict(_response.headers),
body=typing.cast(
- typing.Optional[typing.Any],
+ typing.Any,
parse_obj_as(
- type_=typing.Optional[typing.Any], # type: ignore
+ type_=typing.Any, # type: ignore
object_=_response.json(),
),
),
@@ -496,9 +496,9 @@ def transcribe_file(
raise BadRequestError(
headers=dict(_response.headers),
body=typing.cast(
- typing.Optional[typing.Any],
+ typing.Any,
parse_obj_as(
- type_=typing.Optional[typing.Any], # type: ignore
+ type_=typing.Any, # type: ignore
object_=_response.json(),
),
),
@@ -743,9 +743,9 @@ async def transcribe_url(
raise BadRequestError(
headers=dict(_response.headers),
body=typing.cast(
- typing.Optional[typing.Any],
+ typing.Any,
parse_obj_as(
- type_=typing.Optional[typing.Any], # type: ignore
+ type_=typing.Any, # type: ignore
object_=_response.json(),
),
),
@@ -983,9 +983,9 @@ async def transcribe_file(
raise BadRequestError(
headers=dict(_response.headers),
body=typing.cast(
- typing.Optional[typing.Any],
+ typing.Any,
parse_obj_as(
- type_=typing.Optional[typing.Any], # type: ignore
+ type_=typing.Any, # type: ignore
object_=_response.json(),
),
),
diff --git a/src/deepgram/listen/v1/media/types/media_transcribe_request_summarize.py b/src/deepgram/listen/v1/media/types/media_transcribe_request_summarize.py
index f09d6785..cc44a279 100644
--- a/src/deepgram/listen/v1/media/types/media_transcribe_request_summarize.py
+++ b/src/deepgram/listen/v1/media/types/media_transcribe_request_summarize.py
@@ -2,4 +2,4 @@
import typing
-MediaTranscribeRequestSummarize = typing.Union[typing.Literal["v2", "v1"], typing.Any]
+MediaTranscribeRequestSummarize = typing.Union[typing.Literal["v2"], typing.Any]
diff --git a/src/deepgram/listen/v1/raw_client.py b/src/deepgram/listen/v1/raw_client.py
index 81311319..c6768c52 100644
--- a/src/deepgram/listen/v1/raw_client.py
+++ b/src/deepgram/listen/v1/raw_client.py
@@ -28,6 +28,7 @@ def connect(
callback: typing.Optional[str] = None,
callback_method: typing.Optional[str] = None,
channels: typing.Optional[str] = None,
+ detect_entities: typing.Optional[str] = None,
diarize: typing.Optional[str] = None,
dictation: typing.Optional[str] = None,
encoding: typing.Optional[str] = None,
@@ -66,6 +67,8 @@ def connect(
channels : typing.Optional[str]
+ detect_entities : typing.Optional[str]
+
diarize : typing.Optional[str]
dictation : typing.Optional[str]
@@ -135,6 +138,8 @@ def connect(
query_params = query_params.add("callback_method", callback_method)
if channels is not None:
query_params = query_params.add("channels", channels)
+ if detect_entities is not None:
+ query_params = query_params.add("detect_entities", detect_entities)
if diarize is not None:
query_params = query_params.add("diarize", diarize)
if dictation is not None:
@@ -218,6 +223,7 @@ async def connect(
callback: typing.Optional[str] = None,
callback_method: typing.Optional[str] = None,
channels: typing.Optional[str] = None,
+ detect_entities: typing.Optional[str] = None,
diarize: typing.Optional[str] = None,
dictation: typing.Optional[str] = None,
encoding: typing.Optional[str] = None,
@@ -256,6 +262,8 @@ async def connect(
channels : typing.Optional[str]
+ detect_entities : typing.Optional[str]
+
diarize : typing.Optional[str]
dictation : typing.Optional[str]
@@ -325,6 +333,8 @@ async def connect(
query_params = query_params.add("callback_method", callback_method)
if channels is not None:
query_params = query_params.add("channels", channels)
+ if detect_entities is not None:
+ query_params = query_params.add("detect_entities", detect_entities)
if diarize is not None:
query_params = query_params.add("diarize", diarize)
if dictation is not None:
diff --git a/src/deepgram/listen/v1/requests/__init__.py b/src/deepgram/listen/v1/requests/__init__.py
new file mode 100644
index 00000000..828c41d2
--- /dev/null
+++ b/src/deepgram/listen/v1/requests/__init__.py
@@ -0,0 +1,76 @@
+# This file was auto-generated by Fern from our API Definition.
+
+# isort: skip_file
+
+import typing
+from importlib import import_module
+
+if typing.TYPE_CHECKING:
+ from .listen_v1close_stream import ListenV1CloseStreamParams
+ from .listen_v1finalize import ListenV1FinalizeParams
+ from .listen_v1keep_alive import ListenV1KeepAliveParams
+ from .listen_v1metadata import ListenV1MetadataParams
+ from .listen_v1results import ListenV1ResultsParams
+ from .listen_v1results_channel import ListenV1ResultsChannelParams
+ from .listen_v1results_channel_alternatives_item import ListenV1ResultsChannelAlternativesItemParams
+ from .listen_v1results_channel_alternatives_item_words_item import (
+ ListenV1ResultsChannelAlternativesItemWordsItemParams,
+ )
+ from .listen_v1results_entities_item import ListenV1ResultsEntitiesItemParams
+ from .listen_v1results_metadata import ListenV1ResultsMetadataParams
+ from .listen_v1results_metadata_model_info import ListenV1ResultsMetadataModelInfoParams
+ from .listen_v1speech_started import ListenV1SpeechStartedParams
+ from .listen_v1utterance_end import ListenV1UtteranceEndParams
+_dynamic_imports: typing.Dict[str, str] = {
+ "ListenV1CloseStreamParams": ".listen_v1close_stream",
+ "ListenV1FinalizeParams": ".listen_v1finalize",
+ "ListenV1KeepAliveParams": ".listen_v1keep_alive",
+ "ListenV1MetadataParams": ".listen_v1metadata",
+ "ListenV1ResultsChannelAlternativesItemParams": ".listen_v1results_channel_alternatives_item",
+ "ListenV1ResultsChannelAlternativesItemWordsItemParams": ".listen_v1results_channel_alternatives_item_words_item",
+ "ListenV1ResultsChannelParams": ".listen_v1results_channel",
+ "ListenV1ResultsEntitiesItemParams": ".listen_v1results_entities_item",
+ "ListenV1ResultsMetadataModelInfoParams": ".listen_v1results_metadata_model_info",
+ "ListenV1ResultsMetadataParams": ".listen_v1results_metadata",
+ "ListenV1ResultsParams": ".listen_v1results",
+ "ListenV1SpeechStartedParams": ".listen_v1speech_started",
+ "ListenV1UtteranceEndParams": ".listen_v1utterance_end",
+}
+
+
+def __getattr__(attr_name: str) -> typing.Any:
+ module_name = _dynamic_imports.get(attr_name)
+ if module_name is None:
+ raise AttributeError(f"No {attr_name} found in _dynamic_imports for module name -> {__name__}")
+ try:
+ module = import_module(module_name, __package__)
+ if module_name == f".{attr_name}":
+ return module
+ else:
+ return getattr(module, attr_name)
+ except ImportError as e:
+ raise ImportError(f"Failed to import {attr_name} from {module_name}: {e}") from e
+ except AttributeError as e:
+ raise AttributeError(f"Failed to get {attr_name} from {module_name}: {e}") from e
+
+
+def __dir__():
+ lazy_attrs = list(_dynamic_imports.keys())
+ return sorted(lazy_attrs)
+
+
+__all__ = [
+ "ListenV1CloseStreamParams",
+ "ListenV1FinalizeParams",
+ "ListenV1KeepAliveParams",
+ "ListenV1MetadataParams",
+ "ListenV1ResultsChannelAlternativesItemParams",
+ "ListenV1ResultsChannelAlternativesItemWordsItemParams",
+ "ListenV1ResultsChannelParams",
+ "ListenV1ResultsEntitiesItemParams",
+ "ListenV1ResultsMetadataModelInfoParams",
+ "ListenV1ResultsMetadataParams",
+ "ListenV1ResultsParams",
+ "ListenV1SpeechStartedParams",
+ "ListenV1UtteranceEndParams",
+]
diff --git a/src/deepgram/listen/v1/requests/listen_v1close_stream.py b/src/deepgram/listen/v1/requests/listen_v1close_stream.py
new file mode 100644
index 00000000..c75ad0e1
--- /dev/null
+++ b/src/deepgram/listen/v1/requests/listen_v1close_stream.py
@@ -0,0 +1,11 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+from ..types.listen_v1close_stream_type import ListenV1CloseStreamType
+
+
+class ListenV1CloseStreamParams(typing_extensions.TypedDict):
+ type: ListenV1CloseStreamType
+ """
+ Message type identifier
+ """
diff --git a/src/deepgram/listen/v1/requests/listen_v1finalize.py b/src/deepgram/listen/v1/requests/listen_v1finalize.py
new file mode 100644
index 00000000..8dd6d16e
--- /dev/null
+++ b/src/deepgram/listen/v1/requests/listen_v1finalize.py
@@ -0,0 +1,11 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+from ..types.listen_v1finalize_type import ListenV1FinalizeType
+
+
+class ListenV1FinalizeParams(typing_extensions.TypedDict):
+ type: ListenV1FinalizeType
+ """
+ Message type identifier
+ """
diff --git a/src/deepgram/listen/v1/requests/listen_v1keep_alive.py b/src/deepgram/listen/v1/requests/listen_v1keep_alive.py
new file mode 100644
index 00000000..b40242bd
--- /dev/null
+++ b/src/deepgram/listen/v1/requests/listen_v1keep_alive.py
@@ -0,0 +1,11 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+from ..types.listen_v1keep_alive_type import ListenV1KeepAliveType
+
+
+class ListenV1KeepAliveParams(typing_extensions.TypedDict):
+ type: ListenV1KeepAliveType
+ """
+ Message type identifier
+ """
diff --git a/src/deepgram/listen/v1/requests/listen_v1metadata.py b/src/deepgram/listen/v1/requests/listen_v1metadata.py
new file mode 100644
index 00000000..2b648b48
--- /dev/null
+++ b/src/deepgram/listen/v1/requests/listen_v1metadata.py
@@ -0,0 +1,42 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import typing_extensions
+
+
+class ListenV1MetadataParams(typing_extensions.TypedDict):
+ type: typing.Literal["Metadata"]
+ """
+ Message type identifier
+ """
+
+ transaction_key: str
+ """
+ The transaction key
+ """
+
+ request_id: str
+ """
+ The request ID
+ """
+
+ sha256: str
+ """
+ The SHA-256 checksum of the audio
+ """
+
+ created: str
+ """
+ The timestamp when the request was created
+ """
+
+ duration: float
+ """
+ The duration of the audio in seconds
+ """
+
+ channels: float
+ """
+ The number of channels in the audio
+ """
diff --git a/src/deepgram/listen/v1/requests/listen_v1results.py b/src/deepgram/listen/v1/requests/listen_v1results.py
new file mode 100644
index 00000000..96e152f0
--- /dev/null
+++ b/src/deepgram/listen/v1/requests/listen_v1results.py
@@ -0,0 +1,52 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import typing_extensions
+from .listen_v1results_channel import ListenV1ResultsChannelParams
+from .listen_v1results_entities_item import ListenV1ResultsEntitiesItemParams
+from .listen_v1results_metadata import ListenV1ResultsMetadataParams
+
+
+class ListenV1ResultsParams(typing_extensions.TypedDict):
+ type: typing.Literal["Results"]
+ """
+ Message type identifier
+ """
+
+ channel_index: typing.Sequence[float]
+ """
+ The index of the channel
+ """
+
+ duration: float
+ """
+ The duration of the transcription
+ """
+
+ start: float
+ """
+ The start time of the transcription
+ """
+
+ is_final: typing_extensions.NotRequired[bool]
+ """
+ Whether the transcription is final
+ """
+
+ speech_final: typing_extensions.NotRequired[bool]
+ """
+ Whether the transcription is speech final
+ """
+
+ channel: ListenV1ResultsChannelParams
+ metadata: ListenV1ResultsMetadataParams
+ from_finalize: typing_extensions.NotRequired[bool]
+ """
+ Whether the transcription is from a finalize message
+ """
+
+ entities: typing_extensions.NotRequired[typing.Sequence[ListenV1ResultsEntitiesItemParams]]
+ """
+ Extracted entities from the audio when detect_entities is enabled. Only present in is_final messages. Returns an empty array if no entities are detected
+ """
diff --git a/src/deepgram/listen/v1/requests/listen_v1results_channel.py b/src/deepgram/listen/v1/requests/listen_v1results_channel.py
new file mode 100644
index 00000000..f27e364d
--- /dev/null
+++ b/src/deepgram/listen/v1/requests/listen_v1results_channel.py
@@ -0,0 +1,10 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import typing_extensions
+from .listen_v1results_channel_alternatives_item import ListenV1ResultsChannelAlternativesItemParams
+
+
+class ListenV1ResultsChannelParams(typing_extensions.TypedDict):
+ alternatives: typing.Sequence[ListenV1ResultsChannelAlternativesItemParams]
diff --git a/src/deepgram/listen/v1/requests/listen_v1results_channel_alternatives_item.py b/src/deepgram/listen/v1/requests/listen_v1results_channel_alternatives_item.py
new file mode 100644
index 00000000..c603e8f5
--- /dev/null
+++ b/src/deepgram/listen/v1/requests/listen_v1results_channel_alternatives_item.py
@@ -0,0 +1,21 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import typing_extensions
+from .listen_v1results_channel_alternatives_item_words_item import ListenV1ResultsChannelAlternativesItemWordsItemParams
+
+
+class ListenV1ResultsChannelAlternativesItemParams(typing_extensions.TypedDict):
+ transcript: str
+ """
+ The transcript text of this alternative
+ """
+
+ confidence: float
+ """
+ The confidence of the transcription
+ """
+
+ languages: typing_extensions.NotRequired[typing.Sequence[str]]
+ words: typing.Sequence[ListenV1ResultsChannelAlternativesItemWordsItemParams]
diff --git a/src/deepgram/listen/v1/requests/listen_v1results_channel_alternatives_item_words_item.py b/src/deepgram/listen/v1/requests/listen_v1results_channel_alternatives_item_words_item.py
new file mode 100644
index 00000000..c28d5979
--- /dev/null
+++ b/src/deepgram/listen/v1/requests/listen_v1results_channel_alternatives_item_words_item.py
@@ -0,0 +1,40 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+
+
+class ListenV1ResultsChannelAlternativesItemWordsItemParams(typing_extensions.TypedDict):
+ word: str
+ """
+ The transcribed word
+ """
+
+ start: float
+ """
+ The start time of the word
+ """
+
+ end: float
+ """
+ The end time of the word
+ """
+
+ confidence: float
+ """
+ The confidence of the word
+ """
+
+ language: typing_extensions.NotRequired[str]
+ """
+ The language of the word
+ """
+
+ punctuated_word: typing_extensions.NotRequired[str]
+ """
+ The punctuated form of the word
+ """
+
+ speaker: typing_extensions.NotRequired[float]
+ """
+ The speaker of the word
+ """
diff --git a/src/deepgram/listen/v1/requests/listen_v1results_entities_item.py b/src/deepgram/listen/v1/requests/listen_v1results_entities_item.py
new file mode 100644
index 00000000..6caf7899
--- /dev/null
+++ b/src/deepgram/listen/v1/requests/listen_v1results_entities_item.py
@@ -0,0 +1,35 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+
+
+class ListenV1ResultsEntitiesItemParams(typing_extensions.TypedDict):
+ label: str
+ """
+ The type/category of the entity (e.g., NAME, PHONE_NUMBER, EMAIL_ADDRESS, ORGANIZATION, CARDINAL)
+ """
+
+ value: str
+ """
+ The formatted text representation of the entity
+ """
+
+ raw_value: str
+ """
+ The original spoken text of the entity (present when formatting is enabled)
+ """
+
+ confidence: float
+ """
+ The confidence score of the entity detection
+ """
+
+ start_word: int
+ """
+ The index of the first word of the entity in the transcript (inclusive)
+ """
+
+ end_word: int
+ """
+ The index of the last word of the entity in the transcript (exclusive)
+ """
diff --git a/src/deepgram/listen/v1/requests/listen_v1results_metadata.py b/src/deepgram/listen/v1/requests/listen_v1results_metadata.py
new file mode 100644
index 00000000..fb5037c8
--- /dev/null
+++ b/src/deepgram/listen/v1/requests/listen_v1results_metadata.py
@@ -0,0 +1,17 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+from .listen_v1results_metadata_model_info import ListenV1ResultsMetadataModelInfoParams
+
+
+class ListenV1ResultsMetadataParams(typing_extensions.TypedDict):
+ request_id: str
+ """
+ The request ID
+ """
+
+ model_info: ListenV1ResultsMetadataModelInfoParams
+ model_uuid: str
+ """
+ The model UUID
+ """
diff --git a/src/deepgram/listen/v1/requests/listen_v1results_metadata_model_info.py b/src/deepgram/listen/v1/requests/listen_v1results_metadata_model_info.py
new file mode 100644
index 00000000..f953fdce
--- /dev/null
+++ b/src/deepgram/listen/v1/requests/listen_v1results_metadata_model_info.py
@@ -0,0 +1,20 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+
+
+class ListenV1ResultsMetadataModelInfoParams(typing_extensions.TypedDict):
+ name: str
+ """
+ The name of the model
+ """
+
+ version: str
+ """
+ The version of the model
+ """
+
+ arch: str
+ """
+ The arch of the model
+ """
diff --git a/src/deepgram/listen/v1/requests/listen_v1speech_started.py b/src/deepgram/listen/v1/requests/listen_v1speech_started.py
new file mode 100644
index 00000000..1cc1dcfa
--- /dev/null
+++ b/src/deepgram/listen/v1/requests/listen_v1speech_started.py
@@ -0,0 +1,22 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import typing_extensions
+
+
+class ListenV1SpeechStartedParams(typing_extensions.TypedDict):
+ type: typing.Literal["SpeechStarted"]
+ """
+ Message type identifier
+ """
+
+ channel: typing.Sequence[float]
+ """
+ The channel
+ """
+
+ timestamp: float
+ """
+ The timestamp
+ """
diff --git a/src/deepgram/listen/v1/requests/listen_v1utterance_end.py b/src/deepgram/listen/v1/requests/listen_v1utterance_end.py
new file mode 100644
index 00000000..37ae57b8
--- /dev/null
+++ b/src/deepgram/listen/v1/requests/listen_v1utterance_end.py
@@ -0,0 +1,22 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import typing_extensions
+
+
+class ListenV1UtteranceEndParams(typing_extensions.TypedDict):
+ type: typing.Literal["UtteranceEnd"]
+ """
+ Message type identifier
+ """
+
+ channel: typing.Sequence[float]
+ """
+ The channel
+ """
+
+ last_word_end: float
+ """
+ The end time of the last word in the utterance
+ """
diff --git a/src/deepgram/listen/v1/types/__init__.py b/src/deepgram/listen/v1/types/__init__.py
new file mode 100644
index 00000000..30c6849a
--- /dev/null
+++ b/src/deepgram/listen/v1/types/__init__.py
@@ -0,0 +1,83 @@
+# This file was auto-generated by Fern from our API Definition.
+
+# isort: skip_file
+
+import typing
+from importlib import import_module
+
+if typing.TYPE_CHECKING:
+ from .listen_v1close_stream import ListenV1CloseStream
+ from .listen_v1close_stream_type import ListenV1CloseStreamType
+ from .listen_v1finalize import ListenV1Finalize
+ from .listen_v1finalize_type import ListenV1FinalizeType
+ from .listen_v1keep_alive import ListenV1KeepAlive
+ from .listen_v1keep_alive_type import ListenV1KeepAliveType
+ from .listen_v1metadata import ListenV1Metadata
+ from .listen_v1results import ListenV1Results
+ from .listen_v1results_channel import ListenV1ResultsChannel
+ from .listen_v1results_channel_alternatives_item import ListenV1ResultsChannelAlternativesItem
+ from .listen_v1results_channel_alternatives_item_words_item import ListenV1ResultsChannelAlternativesItemWordsItem
+ from .listen_v1results_entities_item import ListenV1ResultsEntitiesItem
+ from .listen_v1results_metadata import ListenV1ResultsMetadata
+ from .listen_v1results_metadata_model_info import ListenV1ResultsMetadataModelInfo
+ from .listen_v1speech_started import ListenV1SpeechStarted
+ from .listen_v1utterance_end import ListenV1UtteranceEnd
+_dynamic_imports: typing.Dict[str, str] = {
+ "ListenV1CloseStream": ".listen_v1close_stream",
+ "ListenV1CloseStreamType": ".listen_v1close_stream_type",
+ "ListenV1Finalize": ".listen_v1finalize",
+ "ListenV1FinalizeType": ".listen_v1finalize_type",
+ "ListenV1KeepAlive": ".listen_v1keep_alive",
+ "ListenV1KeepAliveType": ".listen_v1keep_alive_type",
+ "ListenV1Metadata": ".listen_v1metadata",
+ "ListenV1Results": ".listen_v1results",
+ "ListenV1ResultsChannel": ".listen_v1results_channel",
+ "ListenV1ResultsChannelAlternativesItem": ".listen_v1results_channel_alternatives_item",
+ "ListenV1ResultsChannelAlternativesItemWordsItem": ".listen_v1results_channel_alternatives_item_words_item",
+ "ListenV1ResultsEntitiesItem": ".listen_v1results_entities_item",
+ "ListenV1ResultsMetadata": ".listen_v1results_metadata",
+ "ListenV1ResultsMetadataModelInfo": ".listen_v1results_metadata_model_info",
+ "ListenV1SpeechStarted": ".listen_v1speech_started",
+ "ListenV1UtteranceEnd": ".listen_v1utterance_end",
+}
+
+
+def __getattr__(attr_name: str) -> typing.Any:
+ module_name = _dynamic_imports.get(attr_name)
+ if module_name is None:
+ raise AttributeError(f"No {attr_name} found in _dynamic_imports for module name -> {__name__}")
+ try:
+ module = import_module(module_name, __package__)
+ if module_name == f".{attr_name}":
+ return module
+ else:
+ return getattr(module, attr_name)
+ except ImportError as e:
+ raise ImportError(f"Failed to import {attr_name} from {module_name}: {e}") from e
+ except AttributeError as e:
+ raise AttributeError(f"Failed to get {attr_name} from {module_name}: {e}") from e
+
+
+def __dir__():
+ lazy_attrs = list(_dynamic_imports.keys())
+ return sorted(lazy_attrs)
+
+
+__all__ = [
+ "ListenV1CloseStream",
+ "ListenV1CloseStreamType",
+ "ListenV1Finalize",
+ "ListenV1FinalizeType",
+ "ListenV1KeepAlive",
+ "ListenV1KeepAliveType",
+ "ListenV1Metadata",
+ "ListenV1Results",
+ "ListenV1ResultsChannel",
+ "ListenV1ResultsChannelAlternativesItem",
+ "ListenV1ResultsChannelAlternativesItemWordsItem",
+ "ListenV1ResultsEntitiesItem",
+ "ListenV1ResultsMetadata",
+ "ListenV1ResultsMetadataModelInfo",
+ "ListenV1SpeechStarted",
+ "ListenV1UtteranceEnd",
+]
diff --git a/src/deepgram/listen/v1/types/listen_v1close_stream.py b/src/deepgram/listen/v1/types/listen_v1close_stream.py
new file mode 100644
index 00000000..6c11646f
--- /dev/null
+++ b/src/deepgram/listen/v1/types/listen_v1close_stream.py
@@ -0,0 +1,23 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+from .listen_v1close_stream_type import ListenV1CloseStreamType
+
+
+class ListenV1CloseStream(UniversalBaseModel):
+ type: ListenV1CloseStreamType = pydantic.Field()
+ """
+ Message type identifier
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/listen/v1/types/listen_v1close_stream_type.py b/src/deepgram/listen/v1/types/listen_v1close_stream_type.py
new file mode 100644
index 00000000..e5332dfd
--- /dev/null
+++ b/src/deepgram/listen/v1/types/listen_v1close_stream_type.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+ListenV1CloseStreamType = typing.Union[typing.Literal["Finalize", "CloseStream", "KeepAlive"], typing.Any]
diff --git a/src/deepgram/listen/v1/types/listen_v1finalize.py b/src/deepgram/listen/v1/types/listen_v1finalize.py
new file mode 100644
index 00000000..ffd48baa
--- /dev/null
+++ b/src/deepgram/listen/v1/types/listen_v1finalize.py
@@ -0,0 +1,23 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+from .listen_v1finalize_type import ListenV1FinalizeType
+
+
+class ListenV1Finalize(UniversalBaseModel):
+ type: ListenV1FinalizeType = pydantic.Field()
+ """
+ Message type identifier
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/listen/v1/types/listen_v1finalize_type.py b/src/deepgram/listen/v1/types/listen_v1finalize_type.py
new file mode 100644
index 00000000..c8e1de82
--- /dev/null
+++ b/src/deepgram/listen/v1/types/listen_v1finalize_type.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+ListenV1FinalizeType = typing.Union[typing.Literal["Finalize", "CloseStream", "KeepAlive"], typing.Any]
diff --git a/src/deepgram/listen/v1/types/listen_v1keep_alive.py b/src/deepgram/listen/v1/types/listen_v1keep_alive.py
new file mode 100644
index 00000000..96d3e67a
--- /dev/null
+++ b/src/deepgram/listen/v1/types/listen_v1keep_alive.py
@@ -0,0 +1,23 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+from .listen_v1keep_alive_type import ListenV1KeepAliveType
+
+
+class ListenV1KeepAlive(UniversalBaseModel):
+ type: ListenV1KeepAliveType = pydantic.Field()
+ """
+ Message type identifier
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/listen/v1/types/listen_v1keep_alive_type.py b/src/deepgram/listen/v1/types/listen_v1keep_alive_type.py
new file mode 100644
index 00000000..36b22ae4
--- /dev/null
+++ b/src/deepgram/listen/v1/types/listen_v1keep_alive_type.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+ListenV1KeepAliveType = typing.Union[typing.Literal["Finalize", "CloseStream", "KeepAlive"], typing.Any]
diff --git a/src/deepgram/listen/v1/types/listen_v1metadata.py b/src/deepgram/listen/v1/types/listen_v1metadata.py
new file mode 100644
index 00000000..d86a5253
--- /dev/null
+++ b/src/deepgram/listen/v1/types/listen_v1metadata.py
@@ -0,0 +1,52 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+
+
+class ListenV1Metadata(UniversalBaseModel):
+ type: typing.Literal["Metadata"] = pydantic.Field(default="Metadata")
+ """
+ Message type identifier
+ """
+
+ transaction_key: str = pydantic.Field()
+ """
+ The transaction key
+ """
+
+ request_id: str = pydantic.Field()
+ """
+ The request ID
+ """
+
+ sha256: str = pydantic.Field()
+ """
+ The SHA-256 checksum of the audio
+ """
+
+ created: str = pydantic.Field()
+ """
+ The timestamp when the request was created
+ """
+
+ duration: float = pydantic.Field()
+ """
+ The duration of the audio in seconds
+ """
+
+ channels: float = pydantic.Field()
+ """
+ The number of channels in the audio
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/listen/v1/types/listen_v1results.py b/src/deepgram/listen/v1/types/listen_v1results.py
new file mode 100644
index 00000000..9479f87d
--- /dev/null
+++ b/src/deepgram/listen/v1/types/listen_v1results.py
@@ -0,0 +1,62 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+from .listen_v1results_channel import ListenV1ResultsChannel
+from .listen_v1results_entities_item import ListenV1ResultsEntitiesItem
+from .listen_v1results_metadata import ListenV1ResultsMetadata
+
+
+class ListenV1Results(UniversalBaseModel):
+ type: typing.Literal["Results"] = pydantic.Field(default="Results")
+ """
+ Message type identifier
+ """
+
+ channel_index: typing.List[float] = pydantic.Field()
+ """
+ The index of the channel
+ """
+
+ duration: float = pydantic.Field()
+ """
+ The duration of the transcription
+ """
+
+ start: float = pydantic.Field()
+ """
+ The start time of the transcription
+ """
+
+ is_final: typing.Optional[bool] = pydantic.Field(default=None)
+ """
+ Whether the transcription is final
+ """
+
+ speech_final: typing.Optional[bool] = pydantic.Field(default=None)
+ """
+ Whether the transcription is speech final
+ """
+
+ channel: ListenV1ResultsChannel
+ metadata: ListenV1ResultsMetadata
+ from_finalize: typing.Optional[bool] = pydantic.Field(default=None)
+ """
+ Whether the transcription is from a finalize message
+ """
+
+ entities: typing.Optional[typing.List[ListenV1ResultsEntitiesItem]] = pydantic.Field(default=None)
+ """
+ Extracted entities from the audio when detect_entities is enabled. Only present in is_final messages. Returns an empty array if no entities are detected
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
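A hedged sketch of validating a raw Results message against the generated model; the payload is a minimal invented example, and unknown server fields are kept because the models allow extra attributes.

from deepgram.listen.v1.types import ListenV1Results

payload = {
    "type": "Results",
    "channel_index": [0],
    "duration": 1.0,
    "start": 0.0,
    "channel": {
        "alternatives": [{"transcript": "hello world", "confidence": 0.98, "words": []}]
    },
    "metadata": {
        "request_id": "11111111-2222-3333-4444-555555555555",
        "model_uuid": "66666666-7777-8888-9999-000000000000",
        "model_info": {"name": "example-model", "version": "0.0.0", "arch": "example"},
    },
}

results = ListenV1Results(**payload)  # nested dicts are coerced into the nested models
print(results.channel.alternatives[0].transcript)  # hello world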
diff --git a/src/deepgram/listen/v1/types/listen_v1results_channel.py b/src/deepgram/listen/v1/types/listen_v1results_channel.py
new file mode 100644
index 00000000..ce58c5f3
--- /dev/null
+++ b/src/deepgram/listen/v1/types/listen_v1results_channel.py
@@ -0,0 +1,20 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+from .listen_v1results_channel_alternatives_item import ListenV1ResultsChannelAlternativesItem
+
+
+class ListenV1ResultsChannel(UniversalBaseModel):
+ alternatives: typing.List[ListenV1ResultsChannelAlternativesItem]
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/listen/v1/types/listen_v1results_channel_alternatives_item.py b/src/deepgram/listen/v1/types/listen_v1results_channel_alternatives_item.py
new file mode 100644
index 00000000..c8e0703c
--- /dev/null
+++ b/src/deepgram/listen/v1/types/listen_v1results_channel_alternatives_item.py
@@ -0,0 +1,31 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+from .listen_v1results_channel_alternatives_item_words_item import ListenV1ResultsChannelAlternativesItemWordsItem
+
+
+class ListenV1ResultsChannelAlternativesItem(UniversalBaseModel):
+ transcript: str = pydantic.Field()
+ """
+ The transcript text of this alternative
+ """
+
+ confidence: float = pydantic.Field()
+ """
+ The confidence of the transcription
+ """
+
+ languages: typing.Optional[typing.List[str]] = None
+ words: typing.List[ListenV1ResultsChannelAlternativesItemWordsItem]
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/listen/v1/types/listen_v1results_channel_alternatives_item_words_item.py b/src/deepgram/listen/v1/types/listen_v1results_channel_alternatives_item_words_item.py
new file mode 100644
index 00000000..2cc3686f
--- /dev/null
+++ b/src/deepgram/listen/v1/types/listen_v1results_channel_alternatives_item_words_item.py
@@ -0,0 +1,52 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+
+
+class ListenV1ResultsChannelAlternativesItemWordsItem(UniversalBaseModel):
+ word: str = pydantic.Field()
+ """
+ The word of the transcription
+ """
+
+ start: float = pydantic.Field()
+ """
+ The start time of the word
+ """
+
+ end: float = pydantic.Field()
+ """
+ The end time of the word
+ """
+
+ confidence: float = pydantic.Field()
+ """
+ The confidence of the word
+ """
+
+ language: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ The language of the word
+ """
+
+ punctuated_word: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ The punctuated form of the word
+ """
+
+ speaker: typing.Optional[float] = pydantic.Field(default=None)
+ """
+ The speaker index associated with the word
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/listen/v1/types/listen_v1results_entities_item.py b/src/deepgram/listen/v1/types/listen_v1results_entities_item.py
new file mode 100644
index 00000000..f8d5fb3a
--- /dev/null
+++ b/src/deepgram/listen/v1/types/listen_v1results_entities_item.py
@@ -0,0 +1,47 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+
+
+class ListenV1ResultsEntitiesItem(UniversalBaseModel):
+ label: str = pydantic.Field()
+ """
+ The type/category of the entity (e.g., NAME, PHONE_NUMBER, EMAIL_ADDRESS, ORGANIZATION, CARDINAL)
+ """
+
+ value: str = pydantic.Field()
+ """
+ The formatted text representation of the entity
+ """
+
+ raw_value: str = pydantic.Field()
+ """
+ The original spoken text of the entity (present when formatting is enabled)
+ """
+
+ confidence: float = pydantic.Field()
+ """
+ The confidence score of the entity detection
+ """
+
+ start_word: int = pydantic.Field()
+ """
+ The index of the first word of the entity in the transcript (inclusive)
+ """
+
+ end_word: int = pydantic.Field()
+ """
+ The index of the last word of the entity in the transcript (exclusive)
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
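Since `entities` is only present on `is_final` messages when `detect_entities` is enabled, a small hedged sketch of filtering the detected entities by label; `result` is a `ListenV1Results` instance as in the earlier sketch, and the label set is taken from the examples in the field description.

```python
import typing

# Hypothetical helper, building on the ListenV1Results sketch above: collect the
# formatted values of detected contact entities from a final result, if any.
CONTACT_LABELS = {"PHONE_NUMBER", "EMAIL_ADDRESS"}


def contact_entities(result) -> typing.List[str]:
    # entities is None unless detect_entities was enabled and this is a final message
    return [e.value for e in (result.entities or []) if e.label in CONTACT_LABELS]
```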
diff --git a/src/deepgram/listen/v1/types/listen_v1results_metadata.py b/src/deepgram/listen/v1/types/listen_v1results_metadata.py
new file mode 100644
index 00000000..92518626
--- /dev/null
+++ b/src/deepgram/listen/v1/types/listen_v1results_metadata.py
@@ -0,0 +1,29 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+from .listen_v1results_metadata_model_info import ListenV1ResultsMetadataModelInfo
+
+
+class ListenV1ResultsMetadata(UniversalBaseModel):
+ request_id: str = pydantic.Field()
+ """
+ The request ID
+ """
+
+ model_info: ListenV1ResultsMetadataModelInfo
+ model_uuid: str = pydantic.Field()
+ """
+ The model UUID
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/listen/v1/types/listen_v1results_metadata_model_info.py b/src/deepgram/listen/v1/types/listen_v1results_metadata_model_info.py
new file mode 100644
index 00000000..19e04fa8
--- /dev/null
+++ b/src/deepgram/listen/v1/types/listen_v1results_metadata_model_info.py
@@ -0,0 +1,32 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+
+
+class ListenV1ResultsMetadataModelInfo(UniversalBaseModel):
+ name: str = pydantic.Field()
+ """
+ The name of the model
+ """
+
+ version: str = pydantic.Field()
+ """
+ The version of the model
+ """
+
+ arch: str = pydantic.Field()
+ """
+ The architecture of the model
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/listen/v1/types/listen_v1speech_started.py b/src/deepgram/listen/v1/types/listen_v1speech_started.py
new file mode 100644
index 00000000..ce42f749
--- /dev/null
+++ b/src/deepgram/listen/v1/types/listen_v1speech_started.py
@@ -0,0 +1,32 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+
+
+class ListenV1SpeechStarted(UniversalBaseModel):
+ type: typing.Literal["SpeechStarted"] = pydantic.Field(default="SpeechStarted")
+ """
+ Message type identifier
+ """
+
+ channel: typing.List[float] = pydantic.Field()
+ """
+ The channel
+ """
+
+ timestamp: float = pydantic.Field()
+ """
+ The timestamp
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/listen/v1/types/listen_v1utterance_end.py b/src/deepgram/listen/v1/types/listen_v1utterance_end.py
new file mode 100644
index 00000000..39cb1100
--- /dev/null
+++ b/src/deepgram/listen/v1/types/listen_v1utterance_end.py
@@ -0,0 +1,32 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+
+
+class ListenV1UtteranceEnd(UniversalBaseModel):
+ type: typing.Literal["UtteranceEnd"] = pydantic.Field(default="UtteranceEnd")
+ """
+ Message type identifier
+ """
+
+ channel: typing.List[float] = pydantic.Field()
+ """
+ The channel
+ """
+
+ last_word_end: float = pydantic.Field()
+ """
+ The end time of the last word in the utterance
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
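Each v1 realtime message pins its `type` field with a `typing.Literal`, so it can act as a discriminator. A hedged sketch of dispatching incoming frames to the matching generated model; it assumes frames arrive as JSON text and that Pydantic v2 is installed.

```python
import json

from deepgram.listen.v1.types.listen_v1results import ListenV1Results
from deepgram.listen.v1.types.listen_v1speech_started import ListenV1SpeechStarted
from deepgram.listen.v1.types.listen_v1utterance_end import ListenV1UtteranceEnd

# The "type" field is the discriminator each model pins with typing.Literal.
_V1_MESSAGE_TYPES = {
    "Results": ListenV1Results,
    "SpeechStarted": ListenV1SpeechStarted,
    "UtteranceEnd": ListenV1UtteranceEnd,
}


def parse_v1_message(raw: str):
    """Decode one websocket frame into the matching model (Pydantic v2 assumed)."""
    data = json.loads(raw)
    model = _V1_MESSAGE_TYPES.get(data.get("type"))
    if model is None:
        return data  # unknown message type: hand back the raw dict unchanged
    return model.model_validate(data)
```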
diff --git a/src/deepgram/listen/v2/__init__.py b/src/deepgram/listen/v2/__init__.py
index 5cde0202..db7c724c 100644
--- a/src/deepgram/listen/v2/__init__.py
+++ b/src/deepgram/listen/v2/__init__.py
@@ -2,3 +2,74 @@
# isort: skip_file
+import typing
+from importlib import import_module
+
+if typing.TYPE_CHECKING:
+ from .types import (
+ ListenV2CloseStream,
+ ListenV2CloseStreamType,
+ ListenV2Connected,
+ ListenV2FatalError,
+ ListenV2TurnInfo,
+ ListenV2TurnInfoEvent,
+ ListenV2TurnInfoWordsItem,
+ )
+ from .requests import (
+ ListenV2CloseStreamParams,
+ ListenV2ConnectedParams,
+ ListenV2FatalErrorParams,
+ ListenV2TurnInfoParams,
+ ListenV2TurnInfoWordsItemParams,
+ )
+_dynamic_imports: typing.Dict[str, str] = {
+ "ListenV2CloseStream": ".types",
+ "ListenV2CloseStreamParams": ".requests",
+ "ListenV2CloseStreamType": ".types",
+ "ListenV2Connected": ".types",
+ "ListenV2ConnectedParams": ".requests",
+ "ListenV2FatalError": ".types",
+ "ListenV2FatalErrorParams": ".requests",
+ "ListenV2TurnInfo": ".types",
+ "ListenV2TurnInfoEvent": ".types",
+ "ListenV2TurnInfoParams": ".requests",
+ "ListenV2TurnInfoWordsItem": ".types",
+ "ListenV2TurnInfoWordsItemParams": ".requests",
+}
+
+
+def __getattr__(attr_name: str) -> typing.Any:
+ module_name = _dynamic_imports.get(attr_name)
+ if module_name is None:
+ raise AttributeError(f"No {attr_name} found in _dynamic_imports for module name -> {__name__}")
+ try:
+ module = import_module(module_name, __package__)
+ if module_name == f".{attr_name}":
+ return module
+ else:
+ return getattr(module, attr_name)
+ except ImportError as e:
+ raise ImportError(f"Failed to import {attr_name} from {module_name}: {e}") from e
+ except AttributeError as e:
+ raise AttributeError(f"Failed to get {attr_name} from {module_name}: {e}") from e
+
+
+def __dir__():
+ lazy_attrs = list(_dynamic_imports.keys())
+ return sorted(lazy_attrs)
+
+
+__all__ = [
+ "ListenV2CloseStream",
+ "ListenV2CloseStreamParams",
+ "ListenV2CloseStreamType",
+ "ListenV2Connected",
+ "ListenV2ConnectedParams",
+ "ListenV2FatalError",
+ "ListenV2FatalErrorParams",
+ "ListenV2TurnInfo",
+ "ListenV2TurnInfoEvent",
+ "ListenV2TurnInfoParams",
+ "ListenV2TurnInfoWordsItem",
+ "ListenV2TurnInfoWordsItemParams",
+]
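The rewritten `__init__.py` defers imports via a module-level `__getattr__` (PEP 562): names listed in `_dynamic_imports` are only imported from `.types` or `.requests` on first access. A short sketch of what that means for callers:

```python
# PEP 562 in action: importing the package does not import any submodules yet.
import deepgram.listen.v2 as listen_v2

# First attribute access triggers import_module(".types", __package__) under the
# hood and returns the requested class.
TurnInfo = listen_v2.ListenV2TurnInfo

# __dir__ advertises exactly the names that can be loaded lazily.
print(dir(listen_v2))
```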
diff --git a/src/deepgram/listen/v2/requests/__init__.py b/src/deepgram/listen/v2/requests/__init__.py
new file mode 100644
index 00000000..96ce5ece
--- /dev/null
+++ b/src/deepgram/listen/v2/requests/__init__.py
@@ -0,0 +1,50 @@
+# This file was auto-generated by Fern from our API Definition.
+
+# isort: skip_file
+
+import typing
+from importlib import import_module
+
+if typing.TYPE_CHECKING:
+ from .listen_v2close_stream import ListenV2CloseStreamParams
+ from .listen_v2connected import ListenV2ConnectedParams
+ from .listen_v2fatal_error import ListenV2FatalErrorParams
+ from .listen_v2turn_info import ListenV2TurnInfoParams
+ from .listen_v2turn_info_words_item import ListenV2TurnInfoWordsItemParams
+_dynamic_imports: typing.Dict[str, str] = {
+ "ListenV2CloseStreamParams": ".listen_v2close_stream",
+ "ListenV2ConnectedParams": ".listen_v2connected",
+ "ListenV2FatalErrorParams": ".listen_v2fatal_error",
+ "ListenV2TurnInfoParams": ".listen_v2turn_info",
+ "ListenV2TurnInfoWordsItemParams": ".listen_v2turn_info_words_item",
+}
+
+
+def __getattr__(attr_name: str) -> typing.Any:
+ module_name = _dynamic_imports.get(attr_name)
+ if module_name is None:
+ raise AttributeError(f"No {attr_name} found in _dynamic_imports for module name -> {__name__}")
+ try:
+ module = import_module(module_name, __package__)
+ if module_name == f".{attr_name}":
+ return module
+ else:
+ return getattr(module, attr_name)
+ except ImportError as e:
+ raise ImportError(f"Failed to import {attr_name} from {module_name}: {e}") from e
+ except AttributeError as e:
+ raise AttributeError(f"Failed to get {attr_name} from {module_name}: {e}") from e
+
+
+def __dir__():
+ lazy_attrs = list(_dynamic_imports.keys())
+ return sorted(lazy_attrs)
+
+
+__all__ = [
+ "ListenV2CloseStreamParams",
+ "ListenV2ConnectedParams",
+ "ListenV2FatalErrorParams",
+ "ListenV2TurnInfoParams",
+ "ListenV2TurnInfoWordsItemParams",
+]
diff --git a/src/deepgram/listen/v2/requests/listen_v2close_stream.py b/src/deepgram/listen/v2/requests/listen_v2close_stream.py
new file mode 100644
index 00000000..70e4f760
--- /dev/null
+++ b/src/deepgram/listen/v2/requests/listen_v2close_stream.py
@@ -0,0 +1,11 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+from ..types.listen_v2close_stream_type import ListenV2CloseStreamType
+
+
+class ListenV2CloseStreamParams(typing_extensions.TypedDict):
+ type: ListenV2CloseStreamType
+ """
+ Message type identifier
+ """
diff --git a/src/deepgram/listen/v2/requests/listen_v2connected.py b/src/deepgram/listen/v2/requests/listen_v2connected.py
new file mode 100644
index 00000000..c931eec2
--- /dev/null
+++ b/src/deepgram/listen/v2/requests/listen_v2connected.py
@@ -0,0 +1,24 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import typing_extensions
+
+
+class ListenV2ConnectedParams(typing_extensions.TypedDict):
+ type: typing.Literal["Connected"]
+ """
+ Message type identifier
+ """
+
+ request_id: str
+ """
+ The unique identifier of the request
+ """
+
+ sequence_id: float
+ """
+ Starts at `0` and increments for each message the server sends
+ to the client. This includes messages of other types, like
+ `TurnInfo` messages.
+ """
diff --git a/src/deepgram/listen/v2/requests/listen_v2fatal_error.py b/src/deepgram/listen/v2/requests/listen_v2fatal_error.py
new file mode 100644
index 00000000..05cb3041
--- /dev/null
+++ b/src/deepgram/listen/v2/requests/listen_v2fatal_error.py
@@ -0,0 +1,29 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import typing_extensions
+
+
+class ListenV2FatalErrorParams(typing_extensions.TypedDict):
+ type: typing.Literal["Error"]
+ """
+ Message type identifier
+ """
+
+ sequence_id: float
+ """
+ Starts at `0` and increments for each message the server sends
+ to the client. This includes messages of other types, like
+ `Connected` messages.
+ """
+
+ code: str
+ """
+ A string code describing the error, e.g. `INTERNAL_SERVER_ERROR`
+ """
+
+ description: str
+ """
+ Prose description of the error
+ """
diff --git a/src/deepgram/listen/v2/requests/listen_v2turn_info.py b/src/deepgram/listen/v2/requests/listen_v2turn_info.py
new file mode 100644
index 00000000..d1a15fec
--- /dev/null
+++ b/src/deepgram/listen/v2/requests/listen_v2turn_info.py
@@ -0,0 +1,65 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import typing_extensions
+from ..types.listen_v2turn_info_event import ListenV2TurnInfoEvent
+from .listen_v2turn_info_words_item import ListenV2TurnInfoWordsItemParams
+
+
+class ListenV2TurnInfoParams(typing_extensions.TypedDict):
+ """
+ Describes the current turn and latest state of the turn
+ """
+
+ type: typing.Literal["TurnInfo"]
+ request_id: str
+ """
+ The unique identifier of the request
+ """
+
+ sequence_id: float
+ """
+ Starts at `0` and increments for each message the server sends to the client. This includes messages of other types, like `Connected` messages.
+ """
+
+ event: ListenV2TurnInfoEvent
+ """
+ The type of event being reported.
+
+ - **Update** - Additional audio has been transcribed, but the turn state hasn't changed
+ - **StartOfTurn** - The user has begun speaking for the first time in the turn
+ - **EagerEndOfTurn** - The system has moderate confidence that the user has finished speaking for the turn. This is an opportunity to begin preparing an agent reply
+ - **TurnResumed** - The system detected that speech had ended and therefore sent an **EagerEndOfTurn** event, but speech is actually continuing for this turn
+ - **EndOfTurn** - The user has finished speaking for the turn
+ """
+
+ turn_index: float
+ """
+ The index of the current turn
+ """
+
+ audio_window_start: float
+ """
+ Start time in seconds of the audio range that was transcribed
+ """
+
+ audio_window_end: float
+ """
+ End time in seconds of the audio range that was transcribed
+ """
+
+ transcript: str
+ """
+ Text that was said over the course of the current turn
+ """
+
+ words: typing.Sequence[ListenV2TurnInfoWordsItemParams]
+ """
+ The words in the `transcript`
+ """
+
+ end_of_turn_confidence: float
+ """
+ Confidence that no more speech is coming in this turn
+ """
diff --git a/src/deepgram/listen/v2/requests/listen_v2turn_info_words_item.py b/src/deepgram/listen/v2/requests/listen_v2turn_info_words_item.py
new file mode 100644
index 00000000..397157f5
--- /dev/null
+++ b/src/deepgram/listen/v2/requests/listen_v2turn_info_words_item.py
@@ -0,0 +1,15 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+
+
+class ListenV2TurnInfoWordsItemParams(typing_extensions.TypedDict):
+ word: str
+ """
+ The individual punctuated, properly-cased word from the transcript
+ """
+
+ confidence: float
+ """
+ Confidence that this word was transcribed correctly
+ """
diff --git a/src/deepgram/listen/v2/types/__init__.py b/src/deepgram/listen/v2/types/__init__.py
new file mode 100644
index 00000000..229417bf
--- /dev/null
+++ b/src/deepgram/listen/v2/types/__init__.py
@@ -0,0 +1,56 @@
+# This file was auto-generated by Fern from our API Definition.
+
+# isort: skip_file
+
+import typing
+from importlib import import_module
+
+if typing.TYPE_CHECKING:
+ from .listen_v2close_stream import ListenV2CloseStream
+ from .listen_v2close_stream_type import ListenV2CloseStreamType
+ from .listen_v2connected import ListenV2Connected
+ from .listen_v2fatal_error import ListenV2FatalError
+ from .listen_v2turn_info import ListenV2TurnInfo
+ from .listen_v2turn_info_event import ListenV2TurnInfoEvent
+ from .listen_v2turn_info_words_item import ListenV2TurnInfoWordsItem
+_dynamic_imports: typing.Dict[str, str] = {
+ "ListenV2CloseStream": ".listen_v2close_stream",
+ "ListenV2CloseStreamType": ".listen_v2close_stream_type",
+ "ListenV2Connected": ".listen_v2connected",
+ "ListenV2FatalError": ".listen_v2fatal_error",
+ "ListenV2TurnInfo": ".listen_v2turn_info",
+ "ListenV2TurnInfoEvent": ".listen_v2turn_info_event",
+ "ListenV2TurnInfoWordsItem": ".listen_v2turn_info_words_item",
+}
+
+
+def __getattr__(attr_name: str) -> typing.Any:
+ module_name = _dynamic_imports.get(attr_name)
+ if module_name is None:
+ raise AttributeError(f"No {attr_name} found in _dynamic_imports for module name -> {__name__}")
+ try:
+ module = import_module(module_name, __package__)
+ if module_name == f".{attr_name}":
+ return module
+ else:
+ return getattr(module, attr_name)
+ except ImportError as e:
+ raise ImportError(f"Failed to import {attr_name} from {module_name}: {e}") from e
+ except AttributeError as e:
+ raise AttributeError(f"Failed to get {attr_name} from {module_name}: {e}") from e
+
+
+def __dir__():
+ lazy_attrs = list(_dynamic_imports.keys())
+ return sorted(lazy_attrs)
+
+
+__all__ = [
+ "ListenV2CloseStream",
+ "ListenV2CloseStreamType",
+ "ListenV2Connected",
+ "ListenV2FatalError",
+ "ListenV2TurnInfo",
+ "ListenV2TurnInfoEvent",
+ "ListenV2TurnInfoWordsItem",
+]
diff --git a/src/deepgram/listen/v2/types/listen_v2close_stream.py b/src/deepgram/listen/v2/types/listen_v2close_stream.py
new file mode 100644
index 00000000..00376ced
--- /dev/null
+++ b/src/deepgram/listen/v2/types/listen_v2close_stream.py
@@ -0,0 +1,23 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+from .listen_v2close_stream_type import ListenV2CloseStreamType
+
+
+class ListenV2CloseStream(UniversalBaseModel):
+ type: ListenV2CloseStreamType = pydantic.Field()
+ """
+ Message type identifier
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/listen/v2/types/listen_v2close_stream_type.py b/src/deepgram/listen/v2/types/listen_v2close_stream_type.py
new file mode 100644
index 00000000..2ac3484e
--- /dev/null
+++ b/src/deepgram/listen/v2/types/listen_v2close_stream_type.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+ListenV2CloseStreamType = typing.Union[typing.Literal["Finalize", "CloseStream", "KeepAlive"], typing.Any]
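`ListenV2CloseStreamType` uses the `Union[Literal[...], Any]` idiom the generator emits for open enums: the known values keep autocomplete and narrowing for type checkers, while an unrecognized string from a newer API version still validates instead of failing. A small sketch of narrowing on it:

```python
from deepgram.listen.v2.types.listen_v2close_stream_type import ListenV2CloseStreamType


def describe(message_type: ListenV2CloseStreamType) -> str:
    # Known literals are handled explicitly; anything else is tolerated, not rejected.
    if message_type == "CloseStream":
        return "client is done sending audio"
    if message_type == "Finalize":
        return "flush any buffered transcription"
    if message_type == "KeepAlive":
        return "keep the websocket open"
    return f"unrecognized control message: {message_type!r}"
```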
diff --git a/src/deepgram/listen/v2/types/listen_v2connected.py b/src/deepgram/listen/v2/types/listen_v2connected.py
new file mode 100644
index 00000000..29108f24
--- /dev/null
+++ b/src/deepgram/listen/v2/types/listen_v2connected.py
@@ -0,0 +1,34 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+
+
+class ListenV2Connected(UniversalBaseModel):
+ type: typing.Literal["Connected"] = pydantic.Field(default="Connected")
+ """
+ Message type identifier
+ """
+
+ request_id: str = pydantic.Field()
+ """
+ The unique identifier of the request
+ """
+
+ sequence_id: float = pydantic.Field()
+ """
+ Starts at `0` and increments for each message the server sends
+ to the client. This includes messages of other types, like
+ `TurnInfo` messages.
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/listen/v2/types/listen_v2fatal_error.py b/src/deepgram/listen/v2/types/listen_v2fatal_error.py
new file mode 100644
index 00000000..1eccfabc
--- /dev/null
+++ b/src/deepgram/listen/v2/types/listen_v2fatal_error.py
@@ -0,0 +1,39 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+
+
+class ListenV2FatalError(UniversalBaseModel):
+ type: typing.Literal["Error"] = pydantic.Field(default="Error")
+ """
+ Message type identifier
+ """
+
+ sequence_id: float = pydantic.Field()
+ """
+ Starts at `0` and increments for each message the server sends
+ to the client. This includes messages of other types, like
+ `Connected` messages.
+ """
+
+ code: str = pydantic.Field()
+ """
+ A string code describing the error, e.g. `INTERNAL_SERVER_ERROR`
+ """
+
+ description: str = pydantic.Field()
+ """
+ Prose description of the error
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/listen/v2/types/listen_v2turn_info.py b/src/deepgram/listen/v2/types/listen_v2turn_info.py
new file mode 100644
index 00000000..80006b6b
--- /dev/null
+++ b/src/deepgram/listen/v2/types/listen_v2turn_info.py
@@ -0,0 +1,75 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+from .listen_v2turn_info_event import ListenV2TurnInfoEvent
+from .listen_v2turn_info_words_item import ListenV2TurnInfoWordsItem
+
+
+class ListenV2TurnInfo(UniversalBaseModel):
+ """
+ Describes the current turn and latest state of the turn
+ """
+
+ type: typing.Literal["TurnInfo"] = "TurnInfo"
+ request_id: str = pydantic.Field()
+ """
+ The unique identifier of the request
+ """
+
+ sequence_id: float = pydantic.Field()
+ """
+ Starts at `0` and increments for each message the server sends to the client. This includes messages of other types, like `Connected` messages.
+ """
+
+ event: ListenV2TurnInfoEvent = pydantic.Field()
+ """
+ The type of event being reported.
+
+ - **Update** - Additional audio has been transcribed, but the turn state hasn't changed
+ - **StartOfTurn** - The user has begun speaking for the first time in the turn
+ - **EagerEndOfTurn** - The system has moderate confidence that the user has finished speaking for the turn. This is an opportunity to begin preparing an agent reply
+ - **TurnResumed** - The system detected that speech had ended and therefore sent an **EagerEndOfTurn** event, but speech is actually continuing for this turn
+ - **EndOfTurn** - The user has finished speaking for the turn
+ """
+
+ turn_index: float = pydantic.Field()
+ """
+ The index of the current turn
+ """
+
+ audio_window_start: float = pydantic.Field()
+ """
+ Start time in seconds of the audio range that was transcribed
+ """
+
+ audio_window_end: float = pydantic.Field()
+ """
+ End time in seconds of the audio range that was transcribed
+ """
+
+ transcript: str = pydantic.Field()
+ """
+ Text that was said over the course of the current turn
+ """
+
+ words: typing.List[ListenV2TurnInfoWordsItem] = pydantic.Field()
+ """
+ The words in the `transcript`
+ """
+
+ end_of_turn_confidence: float = pydantic.Field()
+ """
+ Confidence that no more speech is coming in this turn
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
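To make the event semantics above concrete, a hedged sketch of reacting to `TurnInfo` messages in an agent loop. The `start_reply` and `cancel_reply` callbacks are placeholders for application code, not SDK APIs.

```python
from deepgram.listen.v2.types.listen_v2turn_info import ListenV2TurnInfo


def handle_turn_info(msg: ListenV2TurnInfo, start_reply, cancel_reply) -> None:
    """start_reply/cancel_reply are hypothetical application callbacks, not part of this SDK."""
    if msg.event == "EagerEndOfTurn":
        # Moderate confidence the user is done: start preparing a reply speculatively.
        start_reply(msg.transcript, speculative=True)
    elif msg.event == "TurnResumed":
        # The user kept talking after an EagerEndOfTurn: discard the speculative reply.
        cancel_reply()
    elif msg.event == "EndOfTurn":
        # The turn is definitely over: commit to answering the final transcript.
        start_reply(msg.transcript, speculative=False)
    # "Update" and "StartOfTurn" need no action in this sketch.
```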
diff --git a/src/deepgram/listen/v2/types/listen_v2turn_info_event.py b/src/deepgram/listen/v2/types/listen_v2turn_info_event.py
new file mode 100644
index 00000000..d2a0510f
--- /dev/null
+++ b/src/deepgram/listen/v2/types/listen_v2turn_info_event.py
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+ListenV2TurnInfoEvent = typing.Union[
+ typing.Literal["Update", "StartOfTurn", "EagerEndOfTurn", "TurnResumed", "EndOfTurn"], typing.Any
+]
diff --git a/src/deepgram/listen/v2/types/listen_v2turn_info_words_item.py b/src/deepgram/listen/v2/types/listen_v2turn_info_words_item.py
new file mode 100644
index 00000000..58ae2f98
--- /dev/null
+++ b/src/deepgram/listen/v2/types/listen_v2turn_info_words_item.py
@@ -0,0 +1,27 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+
+
+class ListenV2TurnInfoWordsItem(UniversalBaseModel):
+ word: str = pydantic.Field()
+ """
+ The individual punctuated, properly-cased word from the transcript
+ """
+
+ confidence: float = pydantic.Field()
+ """
+ Confidence that this word was transcribed correctly
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/manage/v1/models/client.py b/src/deepgram/manage/v1/models/client.py
index bc16c9e1..9ae001ca 100644
--- a/src/deepgram/manage/v1/models/client.py
+++ b/src/deepgram/manage/v1/models/client.py
@@ -50,7 +50,9 @@ def list(
client = DeepgramClient(
api_key="YOUR_API_KEY",
)
- client.manage.v1.models.list()
+ client.manage.v1.models.list(
+ include_outdated=True,
+ )
"""
_response = self._raw_client.list(include_outdated=include_outdated, request_options=request_options)
return _response.data
@@ -133,7 +135,9 @@ async def list(
async def main() -> None:
- await client.manage.v1.models.list()
+ await client.manage.v1.models.list(
+ include_outdated=True,
+ )
asyncio.run(main())
diff --git a/src/deepgram/manage/v1/models/raw_client.py b/src/deepgram/manage/v1/models/raw_client.py
index d45d210d..f1300e7f 100644
--- a/src/deepgram/manage/v1/models/raw_client.py
+++ b/src/deepgram/manage/v1/models/raw_client.py
@@ -60,9 +60,9 @@ def list(
raise BadRequestError(
headers=dict(_response.headers),
body=typing.cast(
- typing.Optional[typing.Any],
+ typing.Any,
parse_obj_as(
- type_=typing.Optional[typing.Any], # type: ignore
+ type_=typing.Any, # type: ignore
object_=_response.json(),
),
),
@@ -111,9 +111,9 @@ def get(
raise BadRequestError(
headers=dict(_response.headers),
body=typing.cast(
- typing.Optional[typing.Any],
+ typing.Any,
parse_obj_as(
- type_=typing.Optional[typing.Any], # type: ignore
+ type_=typing.Any, # type: ignore
object_=_response.json(),
),
),
@@ -170,9 +170,9 @@ async def list(
raise BadRequestError(
headers=dict(_response.headers),
body=typing.cast(
- typing.Optional[typing.Any],
+ typing.Any,
parse_obj_as(
- type_=typing.Optional[typing.Any], # type: ignore
+ type_=typing.Any, # type: ignore
object_=_response.json(),
),
),
@@ -221,9 +221,9 @@ async def get(
raise BadRequestError(
headers=dict(_response.headers),
body=typing.cast(
- typing.Optional[typing.Any],
+ typing.Any,
parse_obj_as(
- type_=typing.Optional[typing.Any], # type: ignore
+ type_=typing.Any, # type: ignore
object_=_response.json(),
),
),
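The same `typing.Optional[typing.Any]` to `typing.Any` change recurs across every raw client below; it only tightens the static type of the parsed error body handed to `BadRequestError` and does not alter runtime behavior. A hedged sketch of inspecting that body from application code; the `deepgram.errors` import path and the `body` attribute name are assumptions inferred from the constructor call in this diff, not confirmed by this excerpt.

```python
from deepgram import DeepgramClient
from deepgram.errors import BadRequestError  # assumed path; not shown in this diff

client = DeepgramClient(api_key="YOUR_API_KEY")
try:
    client.manage.v1.models.list(include_outdated=True)
except BadRequestError as err:
    # `body` mirrors the keyword passed to the constructor above; attribute name assumed.
    print(err.body)
```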
diff --git a/src/deepgram/manage/v1/projects/billing/balances/raw_client.py b/src/deepgram/manage/v1/projects/billing/balances/raw_client.py
index 50850c3b..744135f4 100644
--- a/src/deepgram/manage/v1/projects/billing/balances/raw_client.py
+++ b/src/deepgram/manage/v1/projects/billing/balances/raw_client.py
@@ -57,9 +57,9 @@ def list(
raise BadRequestError(
headers=dict(_response.headers),
body=typing.cast(
- typing.Optional[typing.Any],
+ typing.Any,
parse_obj_as(
- type_=typing.Optional[typing.Any], # type: ignore
+ type_=typing.Any, # type: ignore
object_=_response.json(),
),
),
@@ -111,9 +111,9 @@ def get(
raise BadRequestError(
headers=dict(_response.headers),
body=typing.cast(
- typing.Optional[typing.Any],
+ typing.Any,
parse_obj_as(
- type_=typing.Optional[typing.Any], # type: ignore
+ type_=typing.Any, # type: ignore
object_=_response.json(),
),
),
@@ -167,9 +167,9 @@ async def list(
raise BadRequestError(
headers=dict(_response.headers),
body=typing.cast(
- typing.Optional[typing.Any],
+ typing.Any,
parse_obj_as(
- type_=typing.Optional[typing.Any], # type: ignore
+ type_=typing.Any, # type: ignore
object_=_response.json(),
),
),
@@ -221,9 +221,9 @@ async def get(
raise BadRequestError(
headers=dict(_response.headers),
body=typing.cast(
- typing.Optional[typing.Any],
+ typing.Any,
parse_obj_as(
- type_=typing.Optional[typing.Any], # type: ignore
+ type_=typing.Any, # type: ignore
object_=_response.json(),
),
),
diff --git a/src/deepgram/manage/v1/projects/billing/breakdown/client.py b/src/deepgram/manage/v1/projects/billing/breakdown/client.py
index 5ccef016..34bfbcf0 100644
--- a/src/deepgram/manage/v1/projects/billing/breakdown/client.py
+++ b/src/deepgram/manage/v1/projects/billing/breakdown/client.py
@@ -86,7 +86,10 @@ def list(
)
client.manage.v1.projects.billing.breakdown.list(
project_id="123456-7890-1234-5678-901234",
+ start="start",
+ end="end",
accessor="12345678-1234-1234-1234-123456789012",
+ deployment="hosted",
tag="tag1",
line_item="streaming::nova-3",
)
@@ -186,7 +189,10 @@ async def list(
async def main() -> None:
await client.manage.v1.projects.billing.breakdown.list(
project_id="123456-7890-1234-5678-901234",
+ start="start",
+ end="end",
accessor="12345678-1234-1234-1234-123456789012",
+ deployment="hosted",
tag="tag1",
line_item="streaming::nova-3",
)
diff --git a/src/deepgram/manage/v1/projects/billing/breakdown/raw_client.py b/src/deepgram/manage/v1/projects/billing/breakdown/raw_client.py
index 2646fe70..c34e4eb9 100644
--- a/src/deepgram/manage/v1/projects/billing/breakdown/raw_client.py
+++ b/src/deepgram/manage/v1/projects/billing/breakdown/raw_client.py
@@ -100,9 +100,9 @@ def list(
raise BadRequestError(
headers=dict(_response.headers),
body=typing.cast(
- typing.Optional[typing.Any],
+ typing.Any,
parse_obj_as(
- type_=typing.Optional[typing.Any], # type: ignore
+ type_=typing.Any, # type: ignore
object_=_response.json(),
),
),
@@ -198,9 +198,9 @@ async def list(
raise BadRequestError(
headers=dict(_response.headers),
body=typing.cast(
- typing.Optional[typing.Any],
+ typing.Any,
parse_obj_as(
- type_=typing.Optional[typing.Any], # type: ignore
+ type_=typing.Any, # type: ignore
object_=_response.json(),
),
),
diff --git a/src/deepgram/manage/v1/projects/billing/fields/client.py b/src/deepgram/manage/v1/projects/billing/fields/client.py
index 00682a0d..749103cc 100644
--- a/src/deepgram/manage/v1/projects/billing/fields/client.py
+++ b/src/deepgram/manage/v1/projects/billing/fields/client.py
@@ -62,6 +62,8 @@ def list(
)
client.manage.v1.projects.billing.fields.list(
project_id="123456-7890-1234-5678-901234",
+ start="start",
+ end="end",
)
"""
_response = self._raw_client.list(project_id, start=start, end=end, request_options=request_options)
@@ -127,6 +129,8 @@ async def list(
async def main() -> None:
await client.manage.v1.projects.billing.fields.list(
project_id="123456-7890-1234-5678-901234",
+ start="start",
+ end="end",
)
diff --git a/src/deepgram/manage/v1/projects/billing/fields/raw_client.py b/src/deepgram/manage/v1/projects/billing/fields/raw_client.py
index 6ed0b2b3..548efb07 100644
--- a/src/deepgram/manage/v1/projects/billing/fields/raw_client.py
+++ b/src/deepgram/manage/v1/projects/billing/fields/raw_client.py
@@ -71,9 +71,9 @@ def list(
raise BadRequestError(
headers=dict(_response.headers),
body=typing.cast(
- typing.Optional[typing.Any],
+ typing.Any,
parse_obj_as(
- type_=typing.Optional[typing.Any], # type: ignore
+ type_=typing.Any, # type: ignore
object_=_response.json(),
),
),
@@ -142,9 +142,9 @@ async def list(
raise BadRequestError(
headers=dict(_response.headers),
body=typing.cast(
- typing.Optional[typing.Any],
+ typing.Any,
parse_obj_as(
- type_=typing.Optional[typing.Any], # type: ignore
+ type_=typing.Any, # type: ignore
object_=_response.json(),
),
),
diff --git a/src/deepgram/manage/v1/projects/billing/purchases/client.py b/src/deepgram/manage/v1/projects/billing/purchases/client.py
index 7bed75c3..7afe4f92 100644
--- a/src/deepgram/manage/v1/projects/billing/purchases/client.py
+++ b/src/deepgram/manage/v1/projects/billing/purchases/client.py
@@ -58,6 +58,7 @@ def list(
)
client.manage.v1.projects.billing.purchases.list(
project_id="123456-7890-1234-5678-901234",
+ limit=1.1,
)
"""
_response = self._raw_client.list(project_id, limit=limit, request_options=request_options)
@@ -119,6 +120,7 @@ async def list(
async def main() -> None:
await client.manage.v1.projects.billing.purchases.list(
project_id="123456-7890-1234-5678-901234",
+ limit=1.1,
)
diff --git a/src/deepgram/manage/v1/projects/billing/purchases/raw_client.py b/src/deepgram/manage/v1/projects/billing/purchases/raw_client.py
index e5cd7b16..7839ad56 100644
--- a/src/deepgram/manage/v1/projects/billing/purchases/raw_client.py
+++ b/src/deepgram/manage/v1/projects/billing/purchases/raw_client.py
@@ -66,9 +66,9 @@ def list(
raise BadRequestError(
headers=dict(_response.headers),
body=typing.cast(
- typing.Optional[typing.Any],
+ typing.Any,
parse_obj_as(
- type_=typing.Optional[typing.Any], # type: ignore
+ type_=typing.Any, # type: ignore
object_=_response.json(),
),
),
@@ -132,9 +132,9 @@ async def list(
raise BadRequestError(
headers=dict(_response.headers),
body=typing.cast(
- typing.Optional[typing.Any],
+ typing.Any,
parse_obj_as(
- type_=typing.Optional[typing.Any], # type: ignore
+ type_=typing.Any, # type: ignore
object_=_response.json(),
),
),
diff --git a/src/deepgram/manage/v1/projects/client.py b/src/deepgram/manage/v1/projects/client.py
index 439f0220..33b0fc8c 100644
--- a/src/deepgram/manage/v1/projects/client.py
+++ b/src/deepgram/manage/v1/projects/client.py
@@ -111,6 +111,8 @@ def get(
)
client.manage.v1.projects.get(
project_id="123456-7890-1234-5678-901234",
+ limit=1.1,
+ page=1.1,
)
"""
_response = self._raw_client.get(project_id, limit=limit, page=page, request_options=request_options)
@@ -371,6 +373,8 @@ async def get(
async def main() -> None:
await client.manage.v1.projects.get(
project_id="123456-7890-1234-5678-901234",
+ limit=1.1,
+ page=1.1,
)
diff --git a/src/deepgram/manage/v1/projects/keys/client.py b/src/deepgram/manage/v1/projects/keys/client.py
index 88885f1e..698bf841 100644
--- a/src/deepgram/manage/v1/projects/keys/client.py
+++ b/src/deepgram/manage/v1/projects/keys/client.py
@@ -66,6 +66,7 @@ def list(
)
client.manage.v1.projects.keys.list(
project_id="123456-7890-1234-5678-901234",
+ status="active",
)
"""
_response = self._raw_client.list(project_id, status=status, request_options=request_options)
@@ -241,6 +242,7 @@ async def list(
async def main() -> None:
await client.manage.v1.projects.keys.list(
project_id="123456-7890-1234-5678-901234",
+ status="active",
)
diff --git a/src/deepgram/manage/v1/projects/keys/raw_client.py b/src/deepgram/manage/v1/projects/keys/raw_client.py
index df7164ab..435f70eb 100644
--- a/src/deepgram/manage/v1/projects/keys/raw_client.py
+++ b/src/deepgram/manage/v1/projects/keys/raw_client.py
@@ -74,9 +74,9 @@ def list(
raise BadRequestError(
headers=dict(_response.headers),
body=typing.cast(
- typing.Optional[typing.Any],
+ typing.Any,
parse_obj_as(
- type_=typing.Optional[typing.Any], # type: ignore
+ type_=typing.Any, # type: ignore
object_=_response.json(),
),
),
@@ -136,9 +136,9 @@ def create(
raise BadRequestError(
headers=dict(_response.headers),
body=typing.cast(
- typing.Optional[typing.Any],
+ typing.Any,
parse_obj_as(
- type_=typing.Optional[typing.Any], # type: ignore
+ type_=typing.Any, # type: ignore
object_=_response.json(),
),
),
@@ -190,9 +190,9 @@ def get(
raise BadRequestError(
headers=dict(_response.headers),
body=typing.cast(
- typing.Optional[typing.Any],
+ typing.Any,
parse_obj_as(
- type_=typing.Optional[typing.Any], # type: ignore
+ type_=typing.Any, # type: ignore
object_=_response.json(),
),
),
@@ -244,9 +244,9 @@ def delete(
raise BadRequestError(
headers=dict(_response.headers),
body=typing.cast(
- typing.Optional[typing.Any],
+ typing.Any,
parse_obj_as(
- type_=typing.Optional[typing.Any], # type: ignore
+ type_=typing.Any, # type: ignore
object_=_response.json(),
),
),
@@ -310,9 +310,9 @@ async def list(
raise BadRequestError(
headers=dict(_response.headers),
body=typing.cast(
- typing.Optional[typing.Any],
+ typing.Any,
parse_obj_as(
- type_=typing.Optional[typing.Any], # type: ignore
+ type_=typing.Any, # type: ignore
object_=_response.json(),
),
),
@@ -372,9 +372,9 @@ async def create(
raise BadRequestError(
headers=dict(_response.headers),
body=typing.cast(
- typing.Optional[typing.Any],
+ typing.Any,
parse_obj_as(
- type_=typing.Optional[typing.Any], # type: ignore
+ type_=typing.Any, # type: ignore
object_=_response.json(),
),
),
@@ -426,9 +426,9 @@ async def get(
raise BadRequestError(
headers=dict(_response.headers),
body=typing.cast(
- typing.Optional[typing.Any],
+ typing.Any,
parse_obj_as(
- type_=typing.Optional[typing.Any], # type: ignore
+ type_=typing.Any, # type: ignore
object_=_response.json(),
),
),
@@ -480,9 +480,9 @@ async def delete(
raise BadRequestError(
headers=dict(_response.headers),
body=typing.cast(
- typing.Optional[typing.Any],
+ typing.Any,
parse_obj_as(
- type_=typing.Optional[typing.Any], # type: ignore
+ type_=typing.Any, # type: ignore
object_=_response.json(),
),
),
diff --git a/src/deepgram/manage/v1/projects/members/invites/raw_client.py b/src/deepgram/manage/v1/projects/members/invites/raw_client.py
index 73d10c43..d7dce374 100644
--- a/src/deepgram/manage/v1/projects/members/invites/raw_client.py
+++ b/src/deepgram/manage/v1/projects/members/invites/raw_client.py
@@ -61,9 +61,9 @@ def list(
raise BadRequestError(
headers=dict(_response.headers),
body=typing.cast(
- typing.Optional[typing.Any],
+ typing.Any,
parse_obj_as(
- type_=typing.Optional[typing.Any], # type: ignore
+ type_=typing.Any, # type: ignore
object_=_response.json(),
),
),
@@ -126,9 +126,9 @@ def create(
raise BadRequestError(
headers=dict(_response.headers),
body=typing.cast(
- typing.Optional[typing.Any],
+ typing.Any,
parse_obj_as(
- type_=typing.Optional[typing.Any], # type: ignore
+ type_=typing.Any, # type: ignore
object_=_response.json(),
),
),
@@ -180,9 +180,9 @@ def delete(
raise BadRequestError(
headers=dict(_response.headers),
body=typing.cast(
- typing.Optional[typing.Any],
+ typing.Any,
parse_obj_as(
- type_=typing.Optional[typing.Any], # type: ignore
+ type_=typing.Any, # type: ignore
object_=_response.json(),
),
),
@@ -236,9 +236,9 @@ async def list(
raise BadRequestError(
headers=dict(_response.headers),
body=typing.cast(
- typing.Optional[typing.Any],
+ typing.Any,
parse_obj_as(
- type_=typing.Optional[typing.Any], # type: ignore
+ type_=typing.Any, # type: ignore
object_=_response.json(),
),
),
@@ -301,9 +301,9 @@ async def create(
raise BadRequestError(
headers=dict(_response.headers),
body=typing.cast(
- typing.Optional[typing.Any],
+ typing.Any,
parse_obj_as(
- type_=typing.Optional[typing.Any], # type: ignore
+ type_=typing.Any, # type: ignore
object_=_response.json(),
),
),
@@ -355,9 +355,9 @@ async def delete(
raise BadRequestError(
headers=dict(_response.headers),
body=typing.cast(
- typing.Optional[typing.Any],
+ typing.Any,
parse_obj_as(
- type_=typing.Optional[typing.Any], # type: ignore
+ type_=typing.Any, # type: ignore
object_=_response.json(),
),
),
diff --git a/src/deepgram/manage/v1/projects/members/raw_client.py b/src/deepgram/manage/v1/projects/members/raw_client.py
index 7b9982f4..fa671c6b 100644
--- a/src/deepgram/manage/v1/projects/members/raw_client.py
+++ b/src/deepgram/manage/v1/projects/members/raw_client.py
@@ -57,9 +57,9 @@ def list(
raise BadRequestError(
headers=dict(_response.headers),
body=typing.cast(
- typing.Optional[typing.Any],
+ typing.Any,
parse_obj_as(
- type_=typing.Optional[typing.Any], # type: ignore
+ type_=typing.Any, # type: ignore
object_=_response.json(),
),
),
@@ -111,9 +111,9 @@ def delete(
raise BadRequestError(
headers=dict(_response.headers),
body=typing.cast(
- typing.Optional[typing.Any],
+ typing.Any,
parse_obj_as(
- type_=typing.Optional[typing.Any], # type: ignore
+ type_=typing.Any, # type: ignore
object_=_response.json(),
),
),
@@ -167,9 +167,9 @@ async def list(
raise BadRequestError(
headers=dict(_response.headers),
body=typing.cast(
- typing.Optional[typing.Any],
+ typing.Any,
parse_obj_as(
- type_=typing.Optional[typing.Any], # type: ignore
+ type_=typing.Any, # type: ignore
object_=_response.json(),
),
),
@@ -221,9 +221,9 @@ async def delete(
raise BadRequestError(
headers=dict(_response.headers),
body=typing.cast(
- typing.Optional[typing.Any],
+ typing.Any,
parse_obj_as(
- type_=typing.Optional[typing.Any], # type: ignore
+ type_=typing.Any, # type: ignore
object_=_response.json(),
),
),
diff --git a/src/deepgram/manage/v1/projects/members/scopes/raw_client.py b/src/deepgram/manage/v1/projects/members/scopes/raw_client.py
index ecf3a872..2cbfe030 100644
--- a/src/deepgram/manage/v1/projects/members/scopes/raw_client.py
+++ b/src/deepgram/manage/v1/projects/members/scopes/raw_client.py
@@ -63,9 +63,9 @@ def list(
raise BadRequestError(
headers=dict(_response.headers),
body=typing.cast(
- typing.Optional[typing.Any],
+ typing.Any,
parse_obj_as(
- type_=typing.Optional[typing.Any], # type: ignore
+ type_=typing.Any, # type: ignore
object_=_response.json(),
),
),
@@ -127,9 +127,9 @@ def update(
raise BadRequestError(
headers=dict(_response.headers),
body=typing.cast(
- typing.Optional[typing.Any],
+ typing.Any,
parse_obj_as(
- type_=typing.Optional[typing.Any], # type: ignore
+ type_=typing.Any, # type: ignore
object_=_response.json(),
),
),
@@ -186,9 +186,9 @@ async def list(
raise BadRequestError(
headers=dict(_response.headers),
body=typing.cast(
- typing.Optional[typing.Any],
+ typing.Any,
parse_obj_as(
- type_=typing.Optional[typing.Any], # type: ignore
+ type_=typing.Any, # type: ignore
object_=_response.json(),
),
),
@@ -250,9 +250,9 @@ async def update(
raise BadRequestError(
headers=dict(_response.headers),
body=typing.cast(
- typing.Optional[typing.Any],
+ typing.Any,
parse_obj_as(
- type_=typing.Optional[typing.Any], # type: ignore
+ type_=typing.Any, # type: ignore
object_=_response.json(),
),
),
diff --git a/src/deepgram/manage/v1/projects/models/client.py b/src/deepgram/manage/v1/projects/models/client.py
index 6ff8474f..9f78ba4d 100644
--- a/src/deepgram/manage/v1/projects/models/client.py
+++ b/src/deepgram/manage/v1/projects/models/client.py
@@ -59,6 +59,7 @@ def list(
)
client.manage.v1.projects.models.list(
project_id="123456-7890-1234-5678-901234",
+ include_outdated=True,
)
"""
_response = self._raw_client.list(
@@ -159,6 +160,7 @@ async def list(
async def main() -> None:
await client.manage.v1.projects.models.list(
project_id="123456-7890-1234-5678-901234",
+ include_outdated=True,
)
diff --git a/src/deepgram/manage/v1/projects/models/raw_client.py b/src/deepgram/manage/v1/projects/models/raw_client.py
index 15313ceb..518870bf 100644
--- a/src/deepgram/manage/v1/projects/models/raw_client.py
+++ b/src/deepgram/manage/v1/projects/models/raw_client.py
@@ -67,9 +67,9 @@ def list(
raise BadRequestError(
headers=dict(_response.headers),
body=typing.cast(
- typing.Optional[typing.Any],
+ typing.Any,
parse_obj_as(
- type_=typing.Optional[typing.Any], # type: ignore
+ type_=typing.Any, # type: ignore
object_=_response.json(),
),
),
@@ -121,9 +121,9 @@ def get(
raise BadRequestError(
headers=dict(_response.headers),
body=typing.cast(
- typing.Optional[typing.Any],
+ typing.Any,
parse_obj_as(
- type_=typing.Optional[typing.Any], # type: ignore
+ type_=typing.Any, # type: ignore
object_=_response.json(),
),
),
@@ -187,9 +187,9 @@ async def list(
raise BadRequestError(
headers=dict(_response.headers),
body=typing.cast(
- typing.Optional[typing.Any],
+ typing.Any,
parse_obj_as(
- type_=typing.Optional[typing.Any], # type: ignore
+ type_=typing.Any, # type: ignore
object_=_response.json(),
),
),
@@ -241,9 +241,9 @@ async def get(
raise BadRequestError(
headers=dict(_response.headers),
body=typing.cast(
- typing.Optional[typing.Any],
+ typing.Any,
parse_obj_as(
- type_=typing.Optional[typing.Any], # type: ignore
+ type_=typing.Any, # type: ignore
object_=_response.json(),
),
),
diff --git a/src/deepgram/manage/v1/projects/raw_client.py b/src/deepgram/manage/v1/projects/raw_client.py
index 935c3d4a..bdcc7fbd 100644
--- a/src/deepgram/manage/v1/projects/raw_client.py
+++ b/src/deepgram/manage/v1/projects/raw_client.py
@@ -58,9 +58,9 @@ def list(self, *, request_options: typing.Optional[RequestOptions] = None) -> Ht
raise BadRequestError(
headers=dict(_response.headers),
body=typing.cast(
- typing.Optional[typing.Any],
+ typing.Any,
parse_obj_as(
- type_=typing.Optional[typing.Any], # type: ignore
+ type_=typing.Any, # type: ignore
object_=_response.json(),
),
),
@@ -124,9 +124,9 @@ def get(
raise BadRequestError(
headers=dict(_response.headers),
body=typing.cast(
- typing.Optional[typing.Any],
+ typing.Any,
parse_obj_as(
- type_=typing.Optional[typing.Any], # type: ignore
+ type_=typing.Any, # type: ignore
object_=_response.json(),
),
),
@@ -175,9 +175,9 @@ def delete(
raise BadRequestError(
headers=dict(_response.headers),
body=typing.cast(
- typing.Optional[typing.Any],
+ typing.Any,
parse_obj_as(
- type_=typing.Optional[typing.Any], # type: ignore
+ type_=typing.Any, # type: ignore
object_=_response.json(),
),
),
@@ -240,9 +240,9 @@ def update(
raise BadRequestError(
headers=dict(_response.headers),
body=typing.cast(
- typing.Optional[typing.Any],
+ typing.Any,
parse_obj_as(
- type_=typing.Optional[typing.Any], # type: ignore
+ type_=typing.Any, # type: ignore
object_=_response.json(),
),
),
@@ -291,9 +291,9 @@ def leave(
raise BadRequestError(
headers=dict(_response.headers),
body=typing.cast(
- typing.Optional[typing.Any],
+ typing.Any,
parse_obj_as(
- type_=typing.Optional[typing.Any], # type: ignore
+ type_=typing.Any, # type: ignore
object_=_response.json(),
),
),
@@ -344,9 +344,9 @@ async def list(
raise BadRequestError(
headers=dict(_response.headers),
body=typing.cast(
- typing.Optional[typing.Any],
+ typing.Any,
parse_obj_as(
- type_=typing.Optional[typing.Any], # type: ignore
+ type_=typing.Any, # type: ignore
object_=_response.json(),
),
),
@@ -410,9 +410,9 @@ async def get(
raise BadRequestError(
headers=dict(_response.headers),
body=typing.cast(
- typing.Optional[typing.Any],
+ typing.Any,
parse_obj_as(
- type_=typing.Optional[typing.Any], # type: ignore
+ type_=typing.Any, # type: ignore
object_=_response.json(),
),
),
@@ -461,9 +461,9 @@ async def delete(
raise BadRequestError(
headers=dict(_response.headers),
body=typing.cast(
- typing.Optional[typing.Any],
+ typing.Any,
parse_obj_as(
- type_=typing.Optional[typing.Any], # type: ignore
+ type_=typing.Any, # type: ignore
object_=_response.json(),
),
),
@@ -526,9 +526,9 @@ async def update(
raise BadRequestError(
headers=dict(_response.headers),
body=typing.cast(
- typing.Optional[typing.Any],
+ typing.Any,
parse_obj_as(
- type_=typing.Optional[typing.Any], # type: ignore
+ type_=typing.Any, # type: ignore
object_=_response.json(),
),
),
@@ -577,9 +577,9 @@ async def leave(
raise BadRequestError(
headers=dict(_response.headers),
body=typing.cast(
- typing.Optional[typing.Any],
+ typing.Any,
parse_obj_as(
- type_=typing.Optional[typing.Any], # type: ignore
+ type_=typing.Any, # type: ignore
object_=_response.json(),
),
),
diff --git a/src/deepgram/manage/v1/projects/requests/client.py b/src/deepgram/manage/v1/projects/requests/client.py
index a8e0246d..6d1435aa 100644
--- a/src/deepgram/manage/v1/projects/requests/client.py
+++ b/src/deepgram/manage/v1/projects/requests/client.py
@@ -93,6 +93,8 @@ def list(
Examples
--------
+ import datetime
+
from deepgram import DeepgramClient
client = DeepgramClient(
@@ -100,8 +102,20 @@ def list(
)
client.manage.v1.projects.requests.list(
project_id="123456-7890-1234-5678-901234",
+ start=datetime.datetime.fromisoformat(
+ "2024-01-15 09:30:00+00:00",
+ ),
+ end=datetime.datetime.fromisoformat(
+ "2024-01-15 09:30:00+00:00",
+ ),
+ limit=1.1,
+ page=1.1,
accessor="12345678-1234-1234-1234-123456789012",
request_id="12345678-1234-1234-1234-123456789012",
+ deployment="hosted",
+ endpoint="listen",
+ method="sync",
+ status="succeeded",
)
"""
_response = self._raw_client.list(
@@ -238,6 +252,7 @@ async def list(
Examples
--------
import asyncio
+ import datetime
from deepgram import AsyncDeepgramClient
@@ -249,8 +264,20 @@ async def list(
async def main() -> None:
await client.manage.v1.projects.requests.list(
project_id="123456-7890-1234-5678-901234",
+ start=datetime.datetime.fromisoformat(
+ "2024-01-15 09:30:00+00:00",
+ ),
+ end=datetime.datetime.fromisoformat(
+ "2024-01-15 09:30:00+00:00",
+ ),
+ limit=1.1,
+ page=1.1,
accessor="12345678-1234-1234-1234-123456789012",
request_id="12345678-1234-1234-1234-123456789012",
+ deployment="hosted",
+ endpoint="listen",
+ method="sync",
+ status="succeeded",
)
diff --git a/src/deepgram/manage/v1/projects/requests/raw_client.py b/src/deepgram/manage/v1/projects/requests/raw_client.py
index d1b335c6..e71b5f56 100644
--- a/src/deepgram/manage/v1/projects/requests/raw_client.py
+++ b/src/deepgram/manage/v1/projects/requests/raw_client.py
@@ -118,9 +118,9 @@ def list(
raise BadRequestError(
headers=dict(_response.headers),
body=typing.cast(
- typing.Optional[typing.Any],
+ typing.Any,
parse_obj_as(
- type_=typing.Optional[typing.Any], # type: ignore
+ type_=typing.Any, # type: ignore
object_=_response.json(),
),
),
@@ -172,9 +172,9 @@ def get(
raise BadRequestError(
headers=dict(_response.headers),
body=typing.cast(
- typing.Optional[typing.Any],
+ typing.Any,
parse_obj_as(
- type_=typing.Optional[typing.Any], # type: ignore
+ type_=typing.Any, # type: ignore
object_=_response.json(),
),
),
@@ -283,9 +283,9 @@ async def list(
raise BadRequestError(
headers=dict(_response.headers),
body=typing.cast(
- typing.Optional[typing.Any],
+ typing.Any,
parse_obj_as(
- type_=typing.Optional[typing.Any], # type: ignore
+ type_=typing.Any, # type: ignore
object_=_response.json(),
),
),
@@ -337,9 +337,9 @@ async def get(
raise BadRequestError(
headers=dict(_response.headers),
body=typing.cast(
- typing.Optional[typing.Any],
+ typing.Any,
parse_obj_as(
- type_=typing.Optional[typing.Any], # type: ignore
+ type_=typing.Any, # type: ignore
object_=_response.json(),
),
),
diff --git a/src/deepgram/manage/v1/projects/usage/breakdown/client.py b/src/deepgram/manage/v1/projects/usage/breakdown/client.py
index 57532bbd..1f0822cb 100644
--- a/src/deepgram/manage/v1/projects/usage/breakdown/client.py
+++ b/src/deepgram/manage/v1/projects/usage/breakdown/client.py
@@ -238,10 +238,51 @@ def get(
)
client.manage.v1.projects.usage.breakdown.get(
project_id="123456-7890-1234-5678-901234",
+ start="start",
+ end="end",
+ grouping="accessor",
accessor="12345678-1234-1234-1234-123456789012",
+ alternatives=True,
+ callback_method=True,
+ callback=True,
+ channels=True,
+ custom_intent_mode=True,
+ custom_intent=True,
+ custom_topic_mode=True,
+ custom_topic=True,
+ deployment="hosted",
+ detect_entities=True,
+ detect_language=True,
+ diarize=True,
+ dictation=True,
+ encoding=True,
+ endpoint="listen",
+ extra=True,
+ filler_words=True,
+ intents=True,
+ keyterm=True,
+ keywords=True,
+ language=True,
+ measurements=True,
+ method="sync",
model="6f548761-c9c0-429a-9315-11a1d28499c8",
+ multichannel=True,
+ numerals=True,
+ paragraphs=True,
+ profanity_filter=True,
+ punctuate=True,
+ redact=True,
+ replace=True,
sample_rate=True,
+ search=True,
+ sentiment=True,
+ smart_format=True,
+ summarize=True,
tag="tag1",
+ topics=True,
+ utt_split=True,
+ utterances=True,
+ version=True,
)
"""
_response = self._raw_client.get(
@@ -527,10 +568,51 @@ async def get(
async def main() -> None:
await client.manage.v1.projects.usage.breakdown.get(
project_id="123456-7890-1234-5678-901234",
+ start="start",
+ end="end",
+ grouping="accessor",
accessor="12345678-1234-1234-1234-123456789012",
+ alternatives=True,
+ callback_method=True,
+ callback=True,
+ channels=True,
+ custom_intent_mode=True,
+ custom_intent=True,
+ custom_topic_mode=True,
+ custom_topic=True,
+ deployment="hosted",
+ detect_entities=True,
+ detect_language=True,
+ diarize=True,
+ dictation=True,
+ encoding=True,
+ endpoint="listen",
+ extra=True,
+ filler_words=True,
+ intents=True,
+ keyterm=True,
+ keywords=True,
+ language=True,
+ measurements=True,
+ method="sync",
model="6f548761-c9c0-429a-9315-11a1d28499c8",
+ multichannel=True,
+ numerals=True,
+ paragraphs=True,
+ profanity_filter=True,
+ punctuate=True,
+ redact=True,
+ replace=True,
sample_rate=True,
+ search=True,
+ sentiment=True,
+ smart_format=True,
+ summarize=True,
tag="tag1",
+ topics=True,
+ utt_split=True,
+ utterances=True,
+ version=True,
)
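The regenerated docstring example now enumerates every filter the usage breakdown endpoint accepts. In practice a call only needs the filters of interest; a pared-down sketch follows, assuming the remaining keyword arguments stay optional (as the pre-existing example suggested) and that `start`/`end` take date strings rather than the `"start"`/`"end"` placeholders shown above.

```python
from deepgram import DeepgramClient

client = DeepgramClient(api_key="YOUR_API_KEY")

# Pared-down call: just a date range and one grouping filter (values illustrative).
breakdown = client.manage.v1.projects.usage.breakdown.get(
    project_id="123456-7890-1234-5678-901234",
    start="2024-01-01",
    end="2024-01-31",
    grouping="accessor",
)
```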
diff --git a/src/deepgram/manage/v1/projects/usage/breakdown/raw_client.py b/src/deepgram/manage/v1/projects/usage/breakdown/raw_client.py
index 93cfb095..13e4a1b5 100644
--- a/src/deepgram/manage/v1/projects/usage/breakdown/raw_client.py
+++ b/src/deepgram/manage/v1/projects/usage/breakdown/raw_client.py
@@ -290,9 +290,9 @@ def get(
raise BadRequestError(
headers=dict(_response.headers),
body=typing.cast(
- typing.Optional[typing.Any],
+ typing.Any,
parse_obj_as(
- type_=typing.Optional[typing.Any], # type: ignore
+ type_=typing.Any, # type: ignore
object_=_response.json(),
),
),
@@ -576,9 +576,9 @@ async def get(
raise BadRequestError(
headers=dict(_response.headers),
body=typing.cast(
- typing.Optional[typing.Any],
+ typing.Any,
parse_obj_as(
- type_=typing.Optional[typing.Any], # type: ignore
+ type_=typing.Any, # type: ignore
object_=_response.json(),
),
),
diff --git a/src/deepgram/manage/v1/projects/usage/client.py b/src/deepgram/manage/v1/projects/usage/client.py
index d2c1c7e0..6ffad7f2 100644
--- a/src/deepgram/manage/v1/projects/usage/client.py
+++ b/src/deepgram/manage/v1/projects/usage/client.py
@@ -242,10 +242,50 @@ def get(
)
client.manage.v1.projects.usage.get(
project_id="123456-7890-1234-5678-901234",
+ start="start",
+ end="end",
accessor="12345678-1234-1234-1234-123456789012",
+ alternatives=True,
+ callback_method=True,
+ callback=True,
+ channels=True,
+ custom_intent_mode=True,
+ custom_intent=True,
+ custom_topic_mode=True,
+ custom_topic=True,
+ deployment="hosted",
+ detect_entities=True,
+ detect_language=True,
+ diarize=True,
+ dictation=True,
+ encoding=True,
+ endpoint="listen",
+ extra=True,
+ filler_words=True,
+ intents=True,
+ keyterm=True,
+ keywords=True,
+ language=True,
+ measurements=True,
+ method="sync",
model="6f548761-c9c0-429a-9315-11a1d28499c8",
+ multichannel=True,
+ numerals=True,
+ paragraphs=True,
+ profanity_filter=True,
+ punctuate=True,
+ redact=True,
+ replace=True,
sample_rate=True,
+ search=True,
+ sentiment=True,
+ smart_format=True,
+ summarize=True,
tag="tag1",
+ topics=True,
+ utt_split=True,
+ utterances=True,
+ version=True,
)
"""
_response = self._raw_client.get(
@@ -545,10 +585,50 @@ async def get(
async def main() -> None:
await client.manage.v1.projects.usage.get(
project_id="123456-7890-1234-5678-901234",
+ start="start",
+ end="end",
accessor="12345678-1234-1234-1234-123456789012",
+ alternatives=True,
+ callback_method=True,
+ callback=True,
+ channels=True,
+ custom_intent_mode=True,
+ custom_intent=True,
+ custom_topic_mode=True,
+ custom_topic=True,
+ deployment="hosted",
+ detect_entities=True,
+ detect_language=True,
+ diarize=True,
+ dictation=True,
+ encoding=True,
+ endpoint="listen",
+ extra=True,
+ filler_words=True,
+ intents=True,
+ keyterm=True,
+ keywords=True,
+ language=True,
+ measurements=True,
+ method="sync",
model="6f548761-c9c0-429a-9315-11a1d28499c8",
+ multichannel=True,
+ numerals=True,
+ paragraphs=True,
+ profanity_filter=True,
+ punctuate=True,
+ redact=True,
+ replace=True,
sample_rate=True,
+ search=True,
+ sentiment=True,
+ smart_format=True,
+ summarize=True,
tag="tag1",
+ topics=True,
+ utt_split=True,
+ utterances=True,
+ version=True,
)
diff --git a/src/deepgram/manage/v1/projects/usage/fields/client.py b/src/deepgram/manage/v1/projects/usage/fields/client.py
index d810f50f..c1e144ab 100644
--- a/src/deepgram/manage/v1/projects/usage/fields/client.py
+++ b/src/deepgram/manage/v1/projects/usage/fields/client.py
@@ -62,6 +62,8 @@ def list(
)
client.manage.v1.projects.usage.fields.list(
project_id="123456-7890-1234-5678-901234",
+ start="start",
+ end="end",
)
"""
_response = self._raw_client.list(project_id, start=start, end=end, request_options=request_options)
@@ -127,6 +129,8 @@ async def list(
async def main() -> None:
await client.manage.v1.projects.usage.fields.list(
project_id="123456-7890-1234-5678-901234",
+ start="start",
+ end="end",
)
diff --git a/src/deepgram/manage/v1/projects/usage/fields/raw_client.py b/src/deepgram/manage/v1/projects/usage/fields/raw_client.py
index ad0b4131..e80b6fa0 100644
--- a/src/deepgram/manage/v1/projects/usage/fields/raw_client.py
+++ b/src/deepgram/manage/v1/projects/usage/fields/raw_client.py
@@ -71,9 +71,9 @@ def list(
raise BadRequestError(
headers=dict(_response.headers),
body=typing.cast(
- typing.Optional[typing.Any],
+ typing.Any,
parse_obj_as(
- type_=typing.Optional[typing.Any], # type: ignore
+ type_=typing.Any, # type: ignore
object_=_response.json(),
),
),
@@ -142,9 +142,9 @@ async def list(
raise BadRequestError(
headers=dict(_response.headers),
body=typing.cast(
- typing.Optional[typing.Any],
+ typing.Any,
parse_obj_as(
- type_=typing.Optional[typing.Any], # type: ignore
+ type_=typing.Any, # type: ignore
object_=_response.json(),
),
),
diff --git a/src/deepgram/manage/v1/projects/usage/raw_client.py b/src/deepgram/manage/v1/projects/usage/raw_client.py
index dc2e88ef..15077e42 100644
--- a/src/deepgram/manage/v1/projects/usage/raw_client.py
+++ b/src/deepgram/manage/v1/projects/usage/raw_client.py
@@ -284,9 +284,9 @@ def get(
raise BadRequestError(
headers=dict(_response.headers),
body=typing.cast(
- typing.Optional[typing.Any],
+ typing.Any,
parse_obj_as(
- type_=typing.Optional[typing.Any], # type: ignore
+ type_=typing.Any, # type: ignore
object_=_response.json(),
),
),
@@ -565,9 +565,9 @@ async def get(
raise BadRequestError(
headers=dict(_response.headers),
body=typing.cast(
- typing.Optional[typing.Any],
+ typing.Any,
parse_obj_as(
- type_=typing.Optional[typing.Any], # type: ignore
+ type_=typing.Any, # type: ignore
object_=_response.json(),
),
),
diff --git a/src/deepgram/read/v1/text/client.py b/src/deepgram/read/v1/text/client.py
index b906e76a..d04b8a7c 100644
--- a/src/deepgram/read/v1/text/client.py
+++ b/src/deepgram/read/v1/text/client.py
@@ -108,6 +108,18 @@ def analyze(
api_key="YOUR_API_KEY",
)
client.read.v1.text.analyze(
+ callback="callback",
+ callback_method="POST",
+ sentiment=True,
+ summarize="v2",
+ tag="tag",
+ topics=True,
+ custom_topic="custom_topic",
+ custom_topic_mode="extended",
+ intents=True,
+ custom_intent="custom_intent",
+ custom_intent_mode="extended",
+ language="language",
request={"url": "url"},
)
"""
@@ -227,6 +239,18 @@ async def analyze(
async def main() -> None:
await client.read.v1.text.analyze(
+ callback="callback",
+ callback_method="POST",
+ sentiment=True,
+ summarize="v2",
+ tag="tag",
+ topics=True,
+ custom_topic="custom_topic",
+ custom_topic_mode="extended",
+ intents=True,
+ custom_intent="custom_intent",
+ custom_intent_mode="extended",
+ language="language",
request={"url": "url"},
)
diff --git a/src/deepgram/read/v1/text/raw_client.py b/src/deepgram/read/v1/text/raw_client.py
index 349a797d..cdfc5f32 100644
--- a/src/deepgram/read/v1/text/raw_client.py
+++ b/src/deepgram/read/v1/text/raw_client.py
@@ -135,9 +135,9 @@ def analyze(
raise BadRequestError(
headers=dict(_response.headers),
body=typing.cast(
- typing.Optional[typing.Any],
+ typing.Any,
parse_obj_as(
- type_=typing.Optional[typing.Any], # type: ignore
+ type_=typing.Any, # type: ignore
object_=_response.json(),
),
),
@@ -262,9 +262,9 @@ async def analyze(
raise BadRequestError(
headers=dict(_response.headers),
body=typing.cast(
- typing.Optional[typing.Any],
+ typing.Any,
parse_obj_as(
- type_=typing.Optional[typing.Any], # type: ignore
+ type_=typing.Any, # type: ignore
object_=_response.json(),
),
),
diff --git a/src/deepgram/read/v1/text/types/text_analyze_request_summarize.py b/src/deepgram/read/v1/text/types/text_analyze_request_summarize.py
index 96cbdc28..74aa7e3b 100644
--- a/src/deepgram/read/v1/text/types/text_analyze_request_summarize.py
+++ b/src/deepgram/read/v1/text/types/text_analyze_request_summarize.py
@@ -2,4 +2,4 @@
import typing
-TextAnalyzeRequestSummarize = typing.Union[typing.Literal["v2", "v1"], typing.Any]
+TextAnalyzeRequestSummarize = typing.Union[typing.Literal["v2"], typing.Any]
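A minimal sketch of what the narrowed alias accepts after this change, assuming the module path from the diff header can be imported directly; "v2" is the only remaining literal, while the typing.Any arm still tolerates other values.

from deepgram.read.v1.text.types.text_analyze_request_summarize import TextAnalyzeRequestSummarize

# "v2" matches the remaining literal member
current: TextAnalyzeRequestSummarize = "v2"
# other strings are no longer spelled out but still pass because of the typing.Any arm
fallback: TextAnalyzeRequestSummarize = "v1"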
diff --git a/src/deepgram/requests/__init__.py b/src/deepgram/requests/__init__.py
index 42b13057..0a34f380 100644
--- a/src/deepgram/requests/__init__.py
+++ b/src/deepgram/requests/__init__.py
@@ -97,6 +97,9 @@
from .listen_v1response_results_channels_item_alternatives_item import (
ListenV1ResponseResultsChannelsItemAlternativesItemParams,
)
+ from .listen_v1response_results_channels_item_alternatives_item_entities_item import (
+ ListenV1ResponseResultsChannelsItemAlternativesItemEntitiesItemParams,
+ )
from .listen_v1response_results_channels_item_alternatives_item_paragraphs import (
ListenV1ResponseResultsChannelsItemAlternativesItemParagraphsParams,
)
@@ -240,6 +243,7 @@
"ListenV1ResponseMetadataSummaryInfoParams": ".listen_v1response_metadata_summary_info",
"ListenV1ResponseMetadataTopicsInfoParams": ".listen_v1response_metadata_topics_info",
"ListenV1ResponseParams": ".listen_v1response",
+ "ListenV1ResponseResultsChannelsItemAlternativesItemEntitiesItemParams": ".listen_v1response_results_channels_item_alternatives_item_entities_item",
"ListenV1ResponseResultsChannelsItemAlternativesItemParagraphsParagraphsItemParams": ".listen_v1response_results_channels_item_alternatives_item_paragraphs_paragraphs_item",
"ListenV1ResponseResultsChannelsItemAlternativesItemParagraphsParagraphsItemSentencesItemParams": ".listen_v1response_results_channels_item_alternatives_item_paragraphs_paragraphs_item_sentences_item",
"ListenV1ResponseResultsChannelsItemAlternativesItemParagraphsParams": ".listen_v1response_results_channels_item_alternatives_item_paragraphs",
@@ -391,6 +395,7 @@ def __dir__():
"ListenV1ResponseMetadataSummaryInfoParams",
"ListenV1ResponseMetadataTopicsInfoParams",
"ListenV1ResponseParams",
+ "ListenV1ResponseResultsChannelsItemAlternativesItemEntitiesItemParams",
"ListenV1ResponseResultsChannelsItemAlternativesItemParagraphsParagraphsItemParams",
"ListenV1ResponseResultsChannelsItemAlternativesItemParagraphsParagraphsItemSentencesItemParams",
"ListenV1ResponseResultsChannelsItemAlternativesItemParagraphsParams",
diff --git a/src/deepgram/requests/listen_v1response_metadata.py b/src/deepgram/requests/listen_v1response_metadata.py
index cb987643..7a1671ac 100644
--- a/src/deepgram/requests/listen_v1response_metadata.py
+++ b/src/deepgram/requests/listen_v1response_metadata.py
@@ -18,7 +18,7 @@ class ListenV1ResponseMetadataParams(typing_extensions.TypedDict):
duration: float
channels: float
models: typing.Sequence[str]
- model_info: typing.Dict[str, typing.Optional[typing.Any]]
+ model_info: typing.Dict[str, typing.Any]
summary_info: typing_extensions.NotRequired[ListenV1ResponseMetadataSummaryInfoParams]
sentiment_info: typing_extensions.NotRequired[ListenV1ResponseMetadataSentimentInfoParams]
topics_info: typing_extensions.NotRequired[ListenV1ResponseMetadataTopicsInfoParams]
diff --git a/src/deepgram/requests/listen_v1response_results_channels_item_alternatives_item.py b/src/deepgram/requests/listen_v1response_results_channels_item_alternatives_item.py
index 915381e8..f5c10f75 100644
--- a/src/deepgram/requests/listen_v1response_results_channels_item_alternatives_item.py
+++ b/src/deepgram/requests/listen_v1response_results_channels_item_alternatives_item.py
@@ -3,6 +3,9 @@
import typing
import typing_extensions
+from .listen_v1response_results_channels_item_alternatives_item_entities_item import (
+ ListenV1ResponseResultsChannelsItemAlternativesItemEntitiesItemParams,
+)
from .listen_v1response_results_channels_item_alternatives_item_paragraphs import (
ListenV1ResponseResultsChannelsItemAlternativesItemParagraphsParams,
)
@@ -24,6 +27,9 @@ class ListenV1ResponseResultsChannelsItemAlternativesItemParams(typing_extension
typing.Sequence[ListenV1ResponseResultsChannelsItemAlternativesItemWordsItemParams]
]
paragraphs: typing_extensions.NotRequired[ListenV1ResponseResultsChannelsItemAlternativesItemParagraphsParams]
+ entities: typing_extensions.NotRequired[
+ typing.Sequence[ListenV1ResponseResultsChannelsItemAlternativesItemEntitiesItemParams]
+ ]
summaries: typing_extensions.NotRequired[
typing.Sequence[ListenV1ResponseResultsChannelsItemAlternativesItemSummariesItemParams]
]
diff --git a/src/deepgram/requests/listen_v1response_results_channels_item_alternatives_item_entities_item.py b/src/deepgram/requests/listen_v1response_results_channels_item_alternatives_item_entities_item.py
new file mode 100644
index 00000000..ef253971
--- /dev/null
+++ b/src/deepgram/requests/listen_v1response_results_channels_item_alternatives_item_entities_item.py
@@ -0,0 +1,12 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+
+
+class ListenV1ResponseResultsChannelsItemAlternativesItemEntitiesItemParams(typing_extensions.TypedDict):
+ label: typing_extensions.NotRequired[str]
+ value: typing_extensions.NotRequired[str]
+ raw_value: typing_extensions.NotRequired[str]
+ confidence: typing_extensions.NotRequired[float]
+ start_word: typing_extensions.NotRequired[float]
+ end_word: typing_extensions.NotRequired[float]
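As a rough illustration (the field values below are invented), the new entities TypedDict is populated like any other params dict; every key is NotRequired, so partial dicts also type-check.

from deepgram.requests.listen_v1response_results_channels_item_alternatives_item_entities_item import (
    ListenV1ResponseResultsChannelsItemAlternativesItemEntitiesItemParams,
)

# all keys are optional; the values here are purely illustrative
entity: ListenV1ResponseResultsChannelsItemAlternativesItemEntitiesItemParams = {
    "label": "person",
    "value": "Ada Lovelace",
    "raw_value": "ada lovelace",
    "confidence": 0.97,
    "start_word": 3.0,
    "end_word": 4.0,
}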
diff --git a/src/deepgram/requests/project_request_response.py b/src/deepgram/requests/project_request_response.py
index fef8ae38..ab64684d 100644
--- a/src/deepgram/requests/project_request_response.py
+++ b/src/deepgram/requests/project_request_response.py
@@ -36,7 +36,7 @@ class ProjectRequestResponseParams(typing_extensions.TypedDict):
The unique identifier of the API key
"""
- response: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
+ response: typing_extensions.NotRequired[typing.Dict[str, typing.Any]]
"""
The response of the request
"""
diff --git a/src/deepgram/self_hosted/v1/distribution_credentials/raw_client.py b/src/deepgram/self_hosted/v1/distribution_credentials/raw_client.py
index 8b656d70..57b43ff8 100644
--- a/src/deepgram/self_hosted/v1/distribution_credentials/raw_client.py
+++ b/src/deepgram/self_hosted/v1/distribution_credentials/raw_client.py
@@ -62,9 +62,9 @@ def list(
raise BadRequestError(
headers=dict(_response.headers),
body=typing.cast(
- typing.Optional[typing.Any],
+ typing.Any,
parse_obj_as(
- type_=typing.Optional[typing.Any], # type: ignore
+ type_=typing.Any, # type: ignore
object_=_response.json(),
),
),
@@ -144,9 +144,9 @@ def create(
raise BadRequestError(
headers=dict(_response.headers),
body=typing.cast(
- typing.Optional[typing.Any],
+ typing.Any,
parse_obj_as(
- type_=typing.Optional[typing.Any], # type: ignore
+ type_=typing.Any, # type: ignore
object_=_response.json(),
),
),
@@ -202,9 +202,9 @@ def get(
raise BadRequestError(
headers=dict(_response.headers),
body=typing.cast(
- typing.Optional[typing.Any],
+ typing.Any,
parse_obj_as(
- type_=typing.Optional[typing.Any], # type: ignore
+ type_=typing.Any, # type: ignore
object_=_response.json(),
),
),
@@ -260,9 +260,9 @@ def delete(
raise BadRequestError(
headers=dict(_response.headers),
body=typing.cast(
- typing.Optional[typing.Any],
+ typing.Any,
parse_obj_as(
- type_=typing.Optional[typing.Any], # type: ignore
+ type_=typing.Any, # type: ignore
object_=_response.json(),
),
),
@@ -316,9 +316,9 @@ async def list(
raise BadRequestError(
headers=dict(_response.headers),
body=typing.cast(
- typing.Optional[typing.Any],
+ typing.Any,
parse_obj_as(
- type_=typing.Optional[typing.Any], # type: ignore
+ type_=typing.Any, # type: ignore
object_=_response.json(),
),
),
@@ -398,9 +398,9 @@ async def create(
raise BadRequestError(
headers=dict(_response.headers),
body=typing.cast(
- typing.Optional[typing.Any],
+ typing.Any,
parse_obj_as(
- type_=typing.Optional[typing.Any], # type: ignore
+ type_=typing.Any, # type: ignore
object_=_response.json(),
),
),
@@ -456,9 +456,9 @@ async def get(
raise BadRequestError(
headers=dict(_response.headers),
body=typing.cast(
- typing.Optional[typing.Any],
+ typing.Any,
parse_obj_as(
- type_=typing.Optional[typing.Any], # type: ignore
+ type_=typing.Any, # type: ignore
object_=_response.json(),
),
),
@@ -514,9 +514,9 @@ async def delete(
raise BadRequestError(
headers=dict(_response.headers),
body=typing.cast(
- typing.Optional[typing.Any],
+ typing.Any,
parse_obj_as(
- type_=typing.Optional[typing.Any], # type: ignore
+ type_=typing.Any, # type: ignore
object_=_response.json(),
),
),
diff --git a/src/deepgram/speak/__init__.py b/src/deepgram/speak/__init__.py
index 148ad154..73eda24c 100644
--- a/src/deepgram/speak/__init__.py
+++ b/src/deepgram/speak/__init__.py
@@ -7,7 +7,53 @@
if typing.TYPE_CHECKING:
from . import v1
-_dynamic_imports: typing.Dict[str, str] = {"v1": ".v1"}
+ from .v1 import (
+ SpeakV1Clear,
+ SpeakV1ClearParams,
+ SpeakV1ClearType,
+ SpeakV1Cleared,
+ SpeakV1ClearedParams,
+ SpeakV1ClearedType,
+ SpeakV1Close,
+ SpeakV1CloseParams,
+ SpeakV1CloseType,
+ SpeakV1Flush,
+ SpeakV1FlushParams,
+ SpeakV1FlushType,
+ SpeakV1Flushed,
+ SpeakV1FlushedParams,
+ SpeakV1FlushedType,
+ SpeakV1Metadata,
+ SpeakV1MetadataParams,
+ SpeakV1Text,
+ SpeakV1TextParams,
+ SpeakV1Warning,
+ SpeakV1WarningParams,
+ )
+_dynamic_imports: typing.Dict[str, str] = {
+ "SpeakV1Clear": ".v1",
+ "SpeakV1ClearParams": ".v1",
+ "SpeakV1ClearType": ".v1",
+ "SpeakV1Cleared": ".v1",
+ "SpeakV1ClearedParams": ".v1",
+ "SpeakV1ClearedType": ".v1",
+ "SpeakV1Close": ".v1",
+ "SpeakV1CloseParams": ".v1",
+ "SpeakV1CloseType": ".v1",
+ "SpeakV1Flush": ".v1",
+ "SpeakV1FlushParams": ".v1",
+ "SpeakV1FlushType": ".v1",
+ "SpeakV1Flushed": ".v1",
+ "SpeakV1FlushedParams": ".v1",
+ "SpeakV1FlushedType": ".v1",
+ "SpeakV1Metadata": ".v1",
+ "SpeakV1MetadataParams": ".v1",
+ "SpeakV1Text": ".v1",
+ "SpeakV1TextParams": ".v1",
+ "SpeakV1Warning": ".v1",
+ "SpeakV1WarningParams": ".v1",
+ "v1": ".v1",
+}
def __getattr__(attr_name: str) -> typing.Any:
@@ -31,4 +77,27 @@ def __dir__():
return sorted(lazy_attrs)
-__all__ = ["v1"]
+__all__ = [
+ "SpeakV1Clear",
+ "SpeakV1ClearParams",
+ "SpeakV1ClearType",
+ "SpeakV1Cleared",
+ "SpeakV1ClearedParams",
+ "SpeakV1ClearedType",
+ "SpeakV1Close",
+ "SpeakV1CloseParams",
+ "SpeakV1CloseType",
+ "SpeakV1Flush",
+ "SpeakV1FlushParams",
+ "SpeakV1FlushType",
+ "SpeakV1Flushed",
+ "SpeakV1FlushedParams",
+ "SpeakV1FlushedType",
+ "SpeakV1Metadata",
+ "SpeakV1MetadataParams",
+ "SpeakV1Text",
+ "SpeakV1TextParams",
+ "SpeakV1Warning",
+ "SpeakV1WarningParams",
+ "v1",
+]
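With the re-exports above, the speak message types appear to be importable one level up from deepgram.speak; a short sketch (values illustrative):

# resolved lazily through __getattr__ and _dynamic_imports
from deepgram.speak import SpeakV1Text, SpeakV1TextParams

message = SpeakV1Text(type="Speak", text="Hello from the re-exported path")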
diff --git a/src/deepgram/speak/v1/__init__.py b/src/deepgram/speak/v1/__init__.py
index 40fff37f..c874ef09 100644
--- a/src/deepgram/speak/v1/__init__.py
+++ b/src/deepgram/speak/v1/__init__.py
@@ -6,6 +6,21 @@
from importlib import import_module
if typing.TYPE_CHECKING:
+ from .types import (
+ SpeakV1Clear,
+ SpeakV1ClearType,
+ SpeakV1Cleared,
+ SpeakV1ClearedType,
+ SpeakV1Close,
+ SpeakV1CloseType,
+ SpeakV1Flush,
+ SpeakV1FlushType,
+ SpeakV1Flushed,
+ SpeakV1FlushedType,
+ SpeakV1Metadata,
+ SpeakV1Text,
+ SpeakV1Warning,
+ )
from . import audio
from .audio import (
AudioGenerateRequestCallbackMethod,
@@ -13,11 +28,42 @@
AudioGenerateRequestEncoding,
AudioGenerateRequestModel,
)
+ from .requests import (
+ SpeakV1ClearParams,
+ SpeakV1ClearedParams,
+ SpeakV1CloseParams,
+ SpeakV1FlushParams,
+ SpeakV1FlushedParams,
+ SpeakV1MetadataParams,
+ SpeakV1TextParams,
+ SpeakV1WarningParams,
+ )
_dynamic_imports: typing.Dict[str, str] = {
"AudioGenerateRequestCallbackMethod": ".audio",
"AudioGenerateRequestContainer": ".audio",
"AudioGenerateRequestEncoding": ".audio",
"AudioGenerateRequestModel": ".audio",
+ "SpeakV1Clear": ".types",
+ "SpeakV1ClearParams": ".requests",
+ "SpeakV1ClearType": ".types",
+ "SpeakV1Cleared": ".types",
+ "SpeakV1ClearedParams": ".requests",
+ "SpeakV1ClearedType": ".types",
+ "SpeakV1Close": ".types",
+ "SpeakV1CloseParams": ".requests",
+ "SpeakV1CloseType": ".types",
+ "SpeakV1Flush": ".types",
+ "SpeakV1FlushParams": ".requests",
+ "SpeakV1FlushType": ".types",
+ "SpeakV1Flushed": ".types",
+ "SpeakV1FlushedParams": ".requests",
+ "SpeakV1FlushedType": ".types",
+ "SpeakV1Metadata": ".types",
+ "SpeakV1MetadataParams": ".requests",
+ "SpeakV1Text": ".types",
+ "SpeakV1TextParams": ".requests",
+ "SpeakV1Warning": ".types",
+ "SpeakV1WarningParams": ".requests",
"audio": ".audio",
}
@@ -48,5 +94,26 @@ def __dir__():
"AudioGenerateRequestContainer",
"AudioGenerateRequestEncoding",
"AudioGenerateRequestModel",
+ "SpeakV1Clear",
+ "SpeakV1ClearParams",
+ "SpeakV1ClearType",
+ "SpeakV1Cleared",
+ "SpeakV1ClearedParams",
+ "SpeakV1ClearedType",
+ "SpeakV1Close",
+ "SpeakV1CloseParams",
+ "SpeakV1CloseType",
+ "SpeakV1Flush",
+ "SpeakV1FlushParams",
+ "SpeakV1FlushType",
+ "SpeakV1Flushed",
+ "SpeakV1FlushedParams",
+ "SpeakV1FlushedType",
+ "SpeakV1Metadata",
+ "SpeakV1MetadataParams",
+ "SpeakV1Text",
+ "SpeakV1TextParams",
+ "SpeakV1Warning",
+ "SpeakV1WarningParams",
"audio",
]
diff --git a/src/deepgram/speak/v1/audio/raw_client.py b/src/deepgram/speak/v1/audio/raw_client.py
index f426311c..425c2daf 100644
--- a/src/deepgram/speak/v1/audio/raw_client.py
+++ b/src/deepgram/speak/v1/audio/raw_client.py
@@ -119,9 +119,9 @@ def _stream() -> HttpResponse[typing.Iterator[bytes]]:
raise BadRequestError(
headers=dict(_response.headers),
body=typing.cast(
- typing.Optional[typing.Any],
+ typing.Any,
parse_obj_as(
- type_=typing.Optional[typing.Any], # type: ignore
+ type_=typing.Any, # type: ignore
object_=_response.json(),
),
),
@@ -237,9 +237,9 @@ async def _stream() -> AsyncHttpResponse[typing.AsyncIterator[bytes]]:
raise BadRequestError(
headers=dict(_response.headers),
body=typing.cast(
- typing.Optional[typing.Any],
+ typing.Any,
parse_obj_as(
- type_=typing.Optional[typing.Any], # type: ignore
+ type_=typing.Any, # type: ignore
object_=_response.json(),
),
),
diff --git a/src/deepgram/speak/v1/requests/__init__.py b/src/deepgram/speak/v1/requests/__init__.py
new file mode 100644
index 00000000..4e8e1826
--- /dev/null
+++ b/src/deepgram/speak/v1/requests/__init__.py
@@ -0,0 +1,59 @@
+# This file was auto-generated by Fern from our API Definition.
+
+# isort: skip_file
+
+import typing
+from importlib import import_module
+
+if typing.TYPE_CHECKING:
+ from .speak_v1clear import SpeakV1ClearParams
+ from .speak_v1cleared import SpeakV1ClearedParams
+ from .speak_v1close import SpeakV1CloseParams
+ from .speak_v1flush import SpeakV1FlushParams
+ from .speak_v1flushed import SpeakV1FlushedParams
+ from .speak_v1metadata import SpeakV1MetadataParams
+ from .speak_v1text import SpeakV1TextParams
+ from .speak_v1warning import SpeakV1WarningParams
+_dynamic_imports: typing.Dict[str, str] = {
+ "SpeakV1ClearParams": ".speak_v1clear",
+ "SpeakV1ClearedParams": ".speak_v1cleared",
+ "SpeakV1CloseParams": ".speak_v1close",
+ "SpeakV1FlushParams": ".speak_v1flush",
+ "SpeakV1FlushedParams": ".speak_v1flushed",
+ "SpeakV1MetadataParams": ".speak_v1metadata",
+ "SpeakV1TextParams": ".speak_v1text",
+ "SpeakV1WarningParams": ".speak_v1warning",
+}
+
+
+def __getattr__(attr_name: str) -> typing.Any:
+ module_name = _dynamic_imports.get(attr_name)
+ if module_name is None:
+ raise AttributeError(f"No {attr_name} found in _dynamic_imports for module name -> {__name__}")
+ try:
+ module = import_module(module_name, __package__)
+ if module_name == f".{attr_name}":
+ return module
+ else:
+ return getattr(module, attr_name)
+ except ImportError as e:
+ raise ImportError(f"Failed to import {attr_name} from {module_name}: {e}") from e
+ except AttributeError as e:
+ raise AttributeError(f"Failed to get {attr_name} from {module_name}: {e}") from e
+
+
+def __dir__():
+ lazy_attrs = list(_dynamic_imports.keys())
+ return sorted(lazy_attrs)
+
+
+__all__ = [
+ "SpeakV1ClearParams",
+ "SpeakV1ClearedParams",
+ "SpeakV1CloseParams",
+ "SpeakV1FlushParams",
+ "SpeakV1FlushedParams",
+ "SpeakV1MetadataParams",
+ "SpeakV1TextParams",
+ "SpeakV1WarningParams",
+]
diff --git a/src/deepgram/speak/v1/requests/speak_v1clear.py b/src/deepgram/speak/v1/requests/speak_v1clear.py
new file mode 100644
index 00000000..6ffc2f3e
--- /dev/null
+++ b/src/deepgram/speak/v1/requests/speak_v1clear.py
@@ -0,0 +1,11 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+from ..types.speak_v1clear_type import SpeakV1ClearType
+
+
+class SpeakV1ClearParams(typing_extensions.TypedDict):
+ type: SpeakV1ClearType
+ """
+ Message type identifier
+ """
diff --git a/src/deepgram/speak/v1/requests/speak_v1cleared.py b/src/deepgram/speak/v1/requests/speak_v1cleared.py
new file mode 100644
index 00000000..e1f1784b
--- /dev/null
+++ b/src/deepgram/speak/v1/requests/speak_v1cleared.py
@@ -0,0 +1,16 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+from ..types.speak_v1cleared_type import SpeakV1ClearedType
+
+
+class SpeakV1ClearedParams(typing_extensions.TypedDict):
+ type: SpeakV1ClearedType
+ """
+ Message type identifier
+ """
+
+ sequence_id: float
+ """
+ The sequence ID of the response
+ """
diff --git a/src/deepgram/speak/v1/requests/speak_v1close.py b/src/deepgram/speak/v1/requests/speak_v1close.py
new file mode 100644
index 00000000..7a3219c3
--- /dev/null
+++ b/src/deepgram/speak/v1/requests/speak_v1close.py
@@ -0,0 +1,11 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+from ..types.speak_v1close_type import SpeakV1CloseType
+
+
+class SpeakV1CloseParams(typing_extensions.TypedDict):
+ type: SpeakV1CloseType
+ """
+ Message type identifier
+ """
diff --git a/src/deepgram/speak/v1/requests/speak_v1flush.py b/src/deepgram/speak/v1/requests/speak_v1flush.py
new file mode 100644
index 00000000..8bafc736
--- /dev/null
+++ b/src/deepgram/speak/v1/requests/speak_v1flush.py
@@ -0,0 +1,11 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+from ..types.speak_v1flush_type import SpeakV1FlushType
+
+
+class SpeakV1FlushParams(typing_extensions.TypedDict):
+ type: SpeakV1FlushType
+ """
+ Message type identifier
+ """
diff --git a/src/deepgram/speak/v1/requests/speak_v1flushed.py b/src/deepgram/speak/v1/requests/speak_v1flushed.py
new file mode 100644
index 00000000..674cb52d
--- /dev/null
+++ b/src/deepgram/speak/v1/requests/speak_v1flushed.py
@@ -0,0 +1,16 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+from ..types.speak_v1flushed_type import SpeakV1FlushedType
+
+
+class SpeakV1FlushedParams(typing_extensions.TypedDict):
+ type: SpeakV1FlushedType
+ """
+ Message type identifier
+ """
+
+ sequence_id: float
+ """
+ The sequence ID of the response
+ """
diff --git a/src/deepgram/speak/v1/requests/speak_v1metadata.py b/src/deepgram/speak/v1/requests/speak_v1metadata.py
new file mode 100644
index 00000000..89fb6809
--- /dev/null
+++ b/src/deepgram/speak/v1/requests/speak_v1metadata.py
@@ -0,0 +1,32 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import typing_extensions
+
+
+class SpeakV1MetadataParams(typing_extensions.TypedDict):
+ type: typing.Literal["Metadata"]
+ """
+ Message type identifier
+ """
+
+ request_id: str
+ """
+ Unique identifier for the request
+ """
+
+ model_name: str
+ """
+ Name of the model being used
+ """
+
+ model_version: str
+ """
+ Version of the model being used
+ """
+
+ model_uuid: str
+ """
+ Unique identifier for the model
+ """
diff --git a/src/deepgram/speak/v1/requests/speak_v1text.py b/src/deepgram/speak/v1/requests/speak_v1text.py
new file mode 100644
index 00000000..78873194
--- /dev/null
+++ b/src/deepgram/speak/v1/requests/speak_v1text.py
@@ -0,0 +1,17 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import typing_extensions
+
+
+class SpeakV1TextParams(typing_extensions.TypedDict):
+ type: typing.Literal["Speak"]
+ """
+ Message type identifier
+ """
+
+ text: str
+ """
+ The input text to be converted to speech
+ """
diff --git a/src/deepgram/speak/v1/requests/speak_v1warning.py b/src/deepgram/speak/v1/requests/speak_v1warning.py
new file mode 100644
index 00000000..ca6c78f8
--- /dev/null
+++ b/src/deepgram/speak/v1/requests/speak_v1warning.py
@@ -0,0 +1,22 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import typing_extensions
+
+
+class SpeakV1WarningParams(typing_extensions.TypedDict):
+ type: typing.Literal["Warning"]
+ """
+ Message type identifier
+ """
+
+ description: str
+ """
+ A description of what went wrong
+ """
+
+ code: str
+ """
+ Error code identifying the type of error
+ """
diff --git a/src/deepgram/speak/v1/types/__init__.py b/src/deepgram/speak/v1/types/__init__.py
new file mode 100644
index 00000000..72a25d1b
--- /dev/null
+++ b/src/deepgram/speak/v1/types/__init__.py
@@ -0,0 +1,74 @@
+# This file was auto-generated by Fern from our API Definition.
+
+# isort: skip_file
+
+import typing
+from importlib import import_module
+
+if typing.TYPE_CHECKING:
+ from .speak_v1clear import SpeakV1Clear
+ from .speak_v1clear_type import SpeakV1ClearType
+ from .speak_v1cleared import SpeakV1Cleared
+ from .speak_v1cleared_type import SpeakV1ClearedType
+ from .speak_v1close import SpeakV1Close
+ from .speak_v1close_type import SpeakV1CloseType
+ from .speak_v1flush import SpeakV1Flush
+ from .speak_v1flush_type import SpeakV1FlushType
+ from .speak_v1flushed import SpeakV1Flushed
+ from .speak_v1flushed_type import SpeakV1FlushedType
+ from .speak_v1metadata import SpeakV1Metadata
+ from .speak_v1text import SpeakV1Text
+ from .speak_v1warning import SpeakV1Warning
+_dynamic_imports: typing.Dict[str, str] = {
+ "SpeakV1Clear": ".speak_v1clear",
+ "SpeakV1ClearType": ".speak_v1clear_type",
+ "SpeakV1Cleared": ".speak_v1cleared",
+ "SpeakV1ClearedType": ".speak_v1cleared_type",
+ "SpeakV1Close": ".speak_v1close",
+ "SpeakV1CloseType": ".speak_v1close_type",
+ "SpeakV1Flush": ".speak_v1flush",
+ "SpeakV1FlushType": ".speak_v1flush_type",
+ "SpeakV1Flushed": ".speak_v1flushed",
+ "SpeakV1FlushedType": ".speak_v1flushed_type",
+ "SpeakV1Metadata": ".speak_v1metadata",
+ "SpeakV1Text": ".speak_v1text",
+ "SpeakV1Warning": ".speak_v1warning",
+}
+
+
+def __getattr__(attr_name: str) -> typing.Any:
+ module_name = _dynamic_imports.get(attr_name)
+ if module_name is None:
+ raise AttributeError(f"No {attr_name} found in _dynamic_imports for module name -> {__name__}")
+ try:
+ module = import_module(module_name, __package__)
+ if module_name == f".{attr_name}":
+ return module
+ else:
+ return getattr(module, attr_name)
+ except ImportError as e:
+ raise ImportError(f"Failed to import {attr_name} from {module_name}: {e}") from e
+ except AttributeError as e:
+ raise AttributeError(f"Failed to get {attr_name} from {module_name}: {e}") from e
+
+
+def __dir__():
+ lazy_attrs = list(_dynamic_imports.keys())
+ return sorted(lazy_attrs)
+
+
+__all__ = [
+ "SpeakV1Clear",
+ "SpeakV1ClearType",
+ "SpeakV1Cleared",
+ "SpeakV1ClearedType",
+ "SpeakV1Close",
+ "SpeakV1CloseType",
+ "SpeakV1Flush",
+ "SpeakV1FlushType",
+ "SpeakV1Flushed",
+ "SpeakV1FlushedType",
+ "SpeakV1Metadata",
+ "SpeakV1Text",
+ "SpeakV1Warning",
+]
diff --git a/src/deepgram/speak/v1/types/speak_v1clear.py b/src/deepgram/speak/v1/types/speak_v1clear.py
new file mode 100644
index 00000000..b528050a
--- /dev/null
+++ b/src/deepgram/speak/v1/types/speak_v1clear.py
@@ -0,0 +1,23 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+from .speak_v1clear_type import SpeakV1ClearType
+
+
+class SpeakV1Clear(UniversalBaseModel):
+ type: SpeakV1ClearType = pydantic.Field()
+ """
+ Message type identifier
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/speak/v1/types/speak_v1clear_type.py b/src/deepgram/speak/v1/types/speak_v1clear_type.py
new file mode 100644
index 00000000..93317162
--- /dev/null
+++ b/src/deepgram/speak/v1/types/speak_v1clear_type.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+SpeakV1ClearType = typing.Union[typing.Literal["Flush", "Clear", "Close"], typing.Any]
diff --git a/src/deepgram/speak/v1/types/speak_v1cleared.py b/src/deepgram/speak/v1/types/speak_v1cleared.py
new file mode 100644
index 00000000..9e88c530
--- /dev/null
+++ b/src/deepgram/speak/v1/types/speak_v1cleared.py
@@ -0,0 +1,28 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+from .speak_v1cleared_type import SpeakV1ClearedType
+
+
+class SpeakV1Cleared(UniversalBaseModel):
+ type: SpeakV1ClearedType = pydantic.Field()
+ """
+ Message type identifier
+ """
+
+ sequence_id: float = pydantic.Field()
+ """
+ The sequence ID of the response
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/speak/v1/types/speak_v1cleared_type.py b/src/deepgram/speak/v1/types/speak_v1cleared_type.py
new file mode 100644
index 00000000..2e2b0158
--- /dev/null
+++ b/src/deepgram/speak/v1/types/speak_v1cleared_type.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+SpeakV1ClearedType = typing.Union[typing.Literal["Flushed", "Cleared"], typing.Any]
diff --git a/src/deepgram/speak/v1/types/speak_v1close.py b/src/deepgram/speak/v1/types/speak_v1close.py
new file mode 100644
index 00000000..f801dc92
--- /dev/null
+++ b/src/deepgram/speak/v1/types/speak_v1close.py
@@ -0,0 +1,23 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+from .speak_v1close_type import SpeakV1CloseType
+
+
+class SpeakV1Close(UniversalBaseModel):
+ type: SpeakV1CloseType = pydantic.Field()
+ """
+ Message type identifier
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/speak/v1/types/speak_v1close_type.py b/src/deepgram/speak/v1/types/speak_v1close_type.py
new file mode 100644
index 00000000..c3381c96
--- /dev/null
+++ b/src/deepgram/speak/v1/types/speak_v1close_type.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+SpeakV1CloseType = typing.Union[typing.Literal["Flush", "Clear", "Close"], typing.Any]
diff --git a/src/deepgram/speak/v1/types/speak_v1flush.py b/src/deepgram/speak/v1/types/speak_v1flush.py
new file mode 100644
index 00000000..bfa3f72c
--- /dev/null
+++ b/src/deepgram/speak/v1/types/speak_v1flush.py
@@ -0,0 +1,23 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+from .speak_v1flush_type import SpeakV1FlushType
+
+
+class SpeakV1Flush(UniversalBaseModel):
+ type: SpeakV1FlushType = pydantic.Field()
+ """
+ Message type identifier
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/speak/v1/types/speak_v1flush_type.py b/src/deepgram/speak/v1/types/speak_v1flush_type.py
new file mode 100644
index 00000000..eaf4237f
--- /dev/null
+++ b/src/deepgram/speak/v1/types/speak_v1flush_type.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+SpeakV1FlushType = typing.Union[typing.Literal["Flush", "Clear", "Close"], typing.Any]
diff --git a/src/deepgram/speak/v1/types/speak_v1flushed.py b/src/deepgram/speak/v1/types/speak_v1flushed.py
new file mode 100644
index 00000000..6a5fd2c2
--- /dev/null
+++ b/src/deepgram/speak/v1/types/speak_v1flushed.py
@@ -0,0 +1,28 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+from .speak_v1flushed_type import SpeakV1FlushedType
+
+
+class SpeakV1Flushed(UniversalBaseModel):
+ type: SpeakV1FlushedType = pydantic.Field()
+ """
+ Message type identifier
+ """
+
+ sequence_id: float = pydantic.Field()
+ """
+ The sequence ID of the response
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/speak/v1/types/speak_v1flushed_type.py b/src/deepgram/speak/v1/types/speak_v1flushed_type.py
new file mode 100644
index 00000000..4651ac44
--- /dev/null
+++ b/src/deepgram/speak/v1/types/speak_v1flushed_type.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+SpeakV1FlushedType = typing.Union[typing.Literal["Flushed", "Cleared"], typing.Any]
diff --git a/src/deepgram/speak/v1/types/speak_v1metadata.py b/src/deepgram/speak/v1/types/speak_v1metadata.py
new file mode 100644
index 00000000..4502f0c6
--- /dev/null
+++ b/src/deepgram/speak/v1/types/speak_v1metadata.py
@@ -0,0 +1,42 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+
+
+class SpeakV1Metadata(UniversalBaseModel):
+ type: typing.Literal["Metadata"] = pydantic.Field(default="Metadata")
+ """
+ Message type identifier
+ """
+
+ request_id: str = pydantic.Field()
+ """
+ Unique identifier for the request
+ """
+
+ model_name: str = pydantic.Field()
+ """
+ Name of the model being used
+ """
+
+ model_version: str = pydantic.Field()
+ """
+ Version of the model being used
+ """
+
+ model_uuid: str = pydantic.Field()
+ """
+ Unique identifier for the model
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/speak/v1/types/speak_v1text.py b/src/deepgram/speak/v1/types/speak_v1text.py
new file mode 100644
index 00000000..94ec70c8
--- /dev/null
+++ b/src/deepgram/speak/v1/types/speak_v1text.py
@@ -0,0 +1,27 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+
+
+class SpeakV1Text(UniversalBaseModel):
+ type: typing.Literal["Speak"] = pydantic.Field(default="Speak")
+ """
+ Message type identifier
+ """
+
+ text: str = pydantic.Field()
+ """
+ The input text to be converted to speech
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/deepgram/speak/v1/types/speak_v1warning.py b/src/deepgram/speak/v1/types/speak_v1warning.py
new file mode 100644
index 00000000..95815596
--- /dev/null
+++ b/src/deepgram/speak/v1/types/speak_v1warning.py
@@ -0,0 +1,32 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+
+
+class SpeakV1Warning(UniversalBaseModel):
+ type: typing.Literal["Warning"] = pydantic.Field(default="Warning")
+ """
+ Message type identifier
+ """
+
+ description: str = pydantic.Field()
+ """
+ A description of what went wrong
+ """
+
+ code: str = pydantic.Field()
+ """
+ Error code identifying the type of error
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
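On the response side, a rough sketch of constructing the corresponding pydantic models by keyword (which works under both pydantic v1 and v2 via UniversalBaseModel); all field values here are invented for the example.

from deepgram.speak.v1.types import SpeakV1Flushed, SpeakV1Metadata, SpeakV1Warning

metadata = SpeakV1Metadata(
    type="Metadata",
    request_id="req-123",  # illustrative
    model_name="example-model",  # illustrative
    model_version="1.0",  # illustrative
    model_uuid="00000000-0000-0000-0000-000000000000",  # illustrative
)
flushed = SpeakV1Flushed(type="Flushed", sequence_id=1.0)
warning = SpeakV1Warning(type="Warning", description="example warning", code="WARN_EXAMPLE")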
diff --git a/src/deepgram/types/__init__.py b/src/deepgram/types/__init__.py
index bed947cd..3ef99cd6 100644
--- a/src/deepgram/types/__init__.py
+++ b/src/deepgram/types/__init__.py
@@ -94,6 +94,7 @@
from .listen_v1callback import ListenV1Callback
from .listen_v1callback_method import ListenV1CallbackMethod
from .listen_v1channels import ListenV1Channels
+ from .listen_v1detect_entities import ListenV1DetectEntities
from .listen_v1diarize import ListenV1Diarize
from .listen_v1dictation import ListenV1Dictation
from .listen_v1encoding import ListenV1Encoding
@@ -124,6 +125,9 @@
from .listen_v1response_results_channels_item_alternatives_item import (
ListenV1ResponseResultsChannelsItemAlternativesItem,
)
+ from .listen_v1response_results_channels_item_alternatives_item_entities_item import (
+ ListenV1ResponseResultsChannelsItemAlternativesItemEntitiesItem,
+ )
from .listen_v1response_results_channels_item_alternatives_item_paragraphs import (
ListenV1ResponseResultsChannelsItemAlternativesItemParagraphs,
)
@@ -286,6 +290,7 @@
"ListenV1Callback": ".listen_v1callback",
"ListenV1CallbackMethod": ".listen_v1callback_method",
"ListenV1Channels": ".listen_v1channels",
+ "ListenV1DetectEntities": ".listen_v1detect_entities",
"ListenV1Diarize": ".listen_v1diarize",
"ListenV1Dictation": ".listen_v1dictation",
"ListenV1Encoding": ".listen_v1encoding",
@@ -314,6 +319,7 @@
"ListenV1ResponseResultsChannels": ".listen_v1response_results_channels",
"ListenV1ResponseResultsChannelsItem": ".listen_v1response_results_channels_item",
"ListenV1ResponseResultsChannelsItemAlternativesItem": ".listen_v1response_results_channels_item_alternatives_item",
+ "ListenV1ResponseResultsChannelsItemAlternativesItemEntitiesItem": ".listen_v1response_results_channels_item_alternatives_item_entities_item",
"ListenV1ResponseResultsChannelsItemAlternativesItemParagraphs": ".listen_v1response_results_channels_item_alternatives_item_paragraphs",
"ListenV1ResponseResultsChannelsItemAlternativesItemParagraphsParagraphsItem": ".listen_v1response_results_channels_item_alternatives_item_paragraphs_paragraphs_item",
"ListenV1ResponseResultsChannelsItemAlternativesItemParagraphsParagraphsItemSentencesItem": ".listen_v1response_results_channels_item_alternatives_item_paragraphs_paragraphs_item_sentences_item",
@@ -484,6 +490,7 @@ def __dir__():
"ListenV1Callback",
"ListenV1CallbackMethod",
"ListenV1Channels",
+ "ListenV1DetectEntities",
"ListenV1Diarize",
"ListenV1Dictation",
"ListenV1Encoding",
@@ -512,6 +519,7 @@ def __dir__():
"ListenV1ResponseResultsChannels",
"ListenV1ResponseResultsChannelsItem",
"ListenV1ResponseResultsChannelsItemAlternativesItem",
+ "ListenV1ResponseResultsChannelsItemAlternativesItemEntitiesItem",
"ListenV1ResponseResultsChannelsItemAlternativesItemParagraphs",
"ListenV1ResponseResultsChannelsItemAlternativesItemParagraphsParagraphsItem",
"ListenV1ResponseResultsChannelsItemAlternativesItemParagraphsParagraphsItemSentencesItem",
diff --git a/src/deepgram/types/create_key_v1request_one.py b/src/deepgram/types/create_key_v1request_one.py
index 90293b87..0a5d546a 100644
--- a/src/deepgram/types/create_key_v1request_one.py
+++ b/src/deepgram/types/create_key_v1request_one.py
@@ -2,4 +2,4 @@
import typing
-CreateKeyV1RequestOne = typing.Optional[typing.Any]
+CreateKeyV1RequestOne = typing.Any
diff --git a/src/deepgram/types/listen_v1callback.py b/src/deepgram/types/listen_v1callback.py
index 0109163b..a561c5f7 100644
--- a/src/deepgram/types/listen_v1callback.py
+++ b/src/deepgram/types/listen_v1callback.py
@@ -2,4 +2,4 @@
import typing
-ListenV1Callback = typing.Optional[typing.Any]
+ListenV1Callback = typing.Any
diff --git a/src/deepgram/types/listen_v1channels.py b/src/deepgram/types/listen_v1channels.py
index d39d19a9..6c2d1dd1 100644
--- a/src/deepgram/types/listen_v1channels.py
+++ b/src/deepgram/types/listen_v1channels.py
@@ -2,4 +2,4 @@
import typing
-ListenV1Channels = typing.Optional[typing.Any]
+ListenV1Channels = typing.Any
diff --git a/src/deepgram/types/listen_v1detect_entities.py b/src/deepgram/types/listen_v1detect_entities.py
new file mode 100644
index 00000000..107337dd
--- /dev/null
+++ b/src/deepgram/types/listen_v1detect_entities.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+ListenV1DetectEntities = typing.Union[typing.Literal["true", "false"], typing.Any]
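As with the other stringly-typed toggles in this package, the new alias accepts the spelled-out literals while the typing.Any arm keeps it permissive; a tiny sketch:

from deepgram.types.listen_v1detect_entities import ListenV1DetectEntities

detect_entities: ListenV1DetectEntities = "true"  # matches one of the literals
loose: ListenV1DetectEntities = True  # also accepted via the typing.Any arm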
diff --git a/src/deepgram/types/listen_v1endpointing.py b/src/deepgram/types/listen_v1endpointing.py
index 0163c3ab..72097702 100644
--- a/src/deepgram/types/listen_v1endpointing.py
+++ b/src/deepgram/types/listen_v1endpointing.py
@@ -2,4 +2,4 @@
import typing
-ListenV1Endpointing = typing.Optional[typing.Any]
+ListenV1Endpointing = typing.Any
diff --git a/src/deepgram/types/listen_v1extra.py b/src/deepgram/types/listen_v1extra.py
index 299c0244..e7ca9ae3 100644
--- a/src/deepgram/types/listen_v1extra.py
+++ b/src/deepgram/types/listen_v1extra.py
@@ -2,4 +2,4 @@
import typing
-ListenV1Extra = typing.Optional[typing.Any]
+ListenV1Extra = typing.Any
diff --git a/src/deepgram/types/listen_v1keyterm.py b/src/deepgram/types/listen_v1keyterm.py
index eb3b1b44..5bbffd49 100644
--- a/src/deepgram/types/listen_v1keyterm.py
+++ b/src/deepgram/types/listen_v1keyterm.py
@@ -2,4 +2,4 @@
import typing
-ListenV1Keyterm = typing.Optional[typing.Any]
+ListenV1Keyterm = typing.Any
diff --git a/src/deepgram/types/listen_v1keywords.py b/src/deepgram/types/listen_v1keywords.py
index 0be380b6..d5a5f7cf 100644
--- a/src/deepgram/types/listen_v1keywords.py
+++ b/src/deepgram/types/listen_v1keywords.py
@@ -2,4 +2,4 @@
import typing
-ListenV1Keywords = typing.Optional[typing.Any]
+ListenV1Keywords = typing.Any
diff --git a/src/deepgram/types/listen_v1language.py b/src/deepgram/types/listen_v1language.py
index cf06645d..78d7ad03 100644
--- a/src/deepgram/types/listen_v1language.py
+++ b/src/deepgram/types/listen_v1language.py
@@ -2,4 +2,4 @@
import typing
-ListenV1Language = typing.Optional[typing.Any]
+ListenV1Language = typing.Any
diff --git a/src/deepgram/types/listen_v1mip_opt_out.py b/src/deepgram/types/listen_v1mip_opt_out.py
index f3843c76..1b548f7b 100644
--- a/src/deepgram/types/listen_v1mip_opt_out.py
+++ b/src/deepgram/types/listen_v1mip_opt_out.py
@@ -2,4 +2,4 @@
import typing
-ListenV1MipOptOut = typing.Optional[typing.Any]
+ListenV1MipOptOut = typing.Any
diff --git a/src/deepgram/types/listen_v1replace.py b/src/deepgram/types/listen_v1replace.py
index 5914cbfa..a1424264 100644
--- a/src/deepgram/types/listen_v1replace.py
+++ b/src/deepgram/types/listen_v1replace.py
@@ -2,4 +2,4 @@
import typing
-ListenV1Replace = typing.Optional[typing.Any]
+ListenV1Replace = typing.Any
diff --git a/src/deepgram/types/listen_v1response_metadata.py b/src/deepgram/types/listen_v1response_metadata.py
index beb3d9d0..4a0463ee 100644
--- a/src/deepgram/types/listen_v1response_metadata.py
+++ b/src/deepgram/types/listen_v1response_metadata.py
@@ -19,7 +19,7 @@ class ListenV1ResponseMetadata(UniversalBaseModel):
duration: float
channels: float
models: typing.List[str]
- model_info: typing.Dict[str, typing.Optional[typing.Any]]
+ model_info: typing.Dict[str, typing.Any]
summary_info: typing.Optional[ListenV1ResponseMetadataSummaryInfo] = None
sentiment_info: typing.Optional[ListenV1ResponseMetadataSentimentInfo] = None
topics_info: typing.Optional[ListenV1ResponseMetadataTopicsInfo] = None
diff --git a/src/deepgram/types/listen_v1response_results_channels_item_alternatives_item.py b/src/deepgram/types/listen_v1response_results_channels_item_alternatives_item.py
index 87ca1ca3..ddf91b60 100644
--- a/src/deepgram/types/listen_v1response_results_channels_item_alternatives_item.py
+++ b/src/deepgram/types/listen_v1response_results_channels_item_alternatives_item.py
@@ -4,6 +4,9 @@
import pydantic
from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+from .listen_v1response_results_channels_item_alternatives_item_entities_item import (
+ ListenV1ResponseResultsChannelsItemAlternativesItemEntitiesItem,
+)
from .listen_v1response_results_channels_item_alternatives_item_paragraphs import (
ListenV1ResponseResultsChannelsItemAlternativesItemParagraphs,
)
@@ -23,6 +26,7 @@ class ListenV1ResponseResultsChannelsItemAlternativesItem(UniversalBaseModel):
confidence: typing.Optional[float] = None
words: typing.Optional[typing.List[ListenV1ResponseResultsChannelsItemAlternativesItemWordsItem]] = None
paragraphs: typing.Optional[ListenV1ResponseResultsChannelsItemAlternativesItemParagraphs] = None
+ entities: typing.Optional[typing.List[ListenV1ResponseResultsChannelsItemAlternativesItemEntitiesItem]] = None
summaries: typing.Optional[typing.List[ListenV1ResponseResultsChannelsItemAlternativesItemSummariesItem]] = None
topics: typing.Optional[typing.List[ListenV1ResponseResultsChannelsItemAlternativesItemTopicsItem]] = None
diff --git a/src/deepgram/types/listen_v1response_results_channels_item_alternatives_item_entities_item.py b/src/deepgram/types/listen_v1response_results_channels_item_alternatives_item_entities_item.py
new file mode 100644
index 00000000..6fb26e5a
--- /dev/null
+++ b/src/deepgram/types/listen_v1response_results_channels_item_alternatives_item_entities_item.py
@@ -0,0 +1,24 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+import pydantic
+from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+
+
+class ListenV1ResponseResultsChannelsItemAlternativesItemEntitiesItem(UniversalBaseModel):
+ label: typing.Optional[str] = None
+ value: typing.Optional[str] = None
+ raw_value: typing.Optional[str] = None
+ confidence: typing.Optional[float] = None
+ start_word: typing.Optional[float] = None
+ end_word: typing.Optional[float] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
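A small sketch of working with the new entities items on the model side; here the item is built by hand with invented values, standing in for one taken from alternative.entities on a real transcription response.

from deepgram.types.listen_v1response_results_channels_item_alternatives_item_entities_item import (
    ListenV1ResponseResultsChannelsItemAlternativesItemEntitiesItem,
)

# every field is Optional, so a sparse item is fine; values are illustrative
entities = [
    ListenV1ResponseResultsChannelsItemAlternativesItemEntitiesItem(
        label="person",
        value="Ada Lovelace",
        raw_value="ada lovelace",
        confidence=0.97,
        start_word=3.0,
        end_word=4.0,
    )
]
# in a real response these would come from alternative.entities
for entity in entities:
    print(entity.label, entity.value, entity.confidence)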
diff --git a/src/deepgram/types/listen_v1sample_rate.py b/src/deepgram/types/listen_v1sample_rate.py
index 5f503202..ab4a2a92 100644
--- a/src/deepgram/types/listen_v1sample_rate.py
+++ b/src/deepgram/types/listen_v1sample_rate.py
@@ -2,4 +2,4 @@
import typing
-ListenV1SampleRate = typing.Optional[typing.Any]
+ListenV1SampleRate = typing.Any
diff --git a/src/deepgram/types/listen_v1search.py b/src/deepgram/types/listen_v1search.py
index 6eebf50b..4f31cb4d 100644
--- a/src/deepgram/types/listen_v1search.py
+++ b/src/deepgram/types/listen_v1search.py
@@ -2,4 +2,4 @@
import typing
-ListenV1Search = typing.Optional[typing.Any]
+ListenV1Search = typing.Any
diff --git a/src/deepgram/types/listen_v1tag.py b/src/deepgram/types/listen_v1tag.py
index 75087a7e..e8871d0d 100644
--- a/src/deepgram/types/listen_v1tag.py
+++ b/src/deepgram/types/listen_v1tag.py
@@ -2,4 +2,4 @@
import typing
-ListenV1Tag = typing.Optional[typing.Any]
+ListenV1Tag = typing.Any
diff --git a/src/deepgram/types/listen_v1utterance_end_ms.py b/src/deepgram/types/listen_v1utterance_end_ms.py
index ee111eca..b774a294 100644
--- a/src/deepgram/types/listen_v1utterance_end_ms.py
+++ b/src/deepgram/types/listen_v1utterance_end_ms.py
@@ -2,4 +2,4 @@
import typing
-ListenV1UtteranceEndMs = typing.Optional[typing.Any]
+ListenV1UtteranceEndMs = typing.Any
diff --git a/src/deepgram/types/listen_v1version.py b/src/deepgram/types/listen_v1version.py
index 3000be04..2085b633 100644
--- a/src/deepgram/types/listen_v1version.py
+++ b/src/deepgram/types/listen_v1version.py
@@ -2,4 +2,4 @@
import typing
-ListenV1Version = typing.Optional[typing.Any]
+ListenV1Version = typing.Any
diff --git a/src/deepgram/types/listen_v2eager_eot_threshold.py b/src/deepgram/types/listen_v2eager_eot_threshold.py
index 9f5bd4fc..cd09e7fb 100644
--- a/src/deepgram/types/listen_v2eager_eot_threshold.py
+++ b/src/deepgram/types/listen_v2eager_eot_threshold.py
@@ -2,4 +2,4 @@
import typing
-ListenV2EagerEotThreshold = typing.Optional[typing.Any]
+ListenV2EagerEotThreshold = typing.Any
diff --git a/src/deepgram/types/listen_v2eot_threshold.py b/src/deepgram/types/listen_v2eot_threshold.py
index 1212083e..eb5d3887 100644
--- a/src/deepgram/types/listen_v2eot_threshold.py
+++ b/src/deepgram/types/listen_v2eot_threshold.py
@@ -2,4 +2,4 @@
import typing
-ListenV2EotThreshold = typing.Optional[typing.Any]
+ListenV2EotThreshold = typing.Any
diff --git a/src/deepgram/types/listen_v2eot_timeout_ms.py b/src/deepgram/types/listen_v2eot_timeout_ms.py
index 9dcc3298..19a6924d 100644
--- a/src/deepgram/types/listen_v2eot_timeout_ms.py
+++ b/src/deepgram/types/listen_v2eot_timeout_ms.py
@@ -2,4 +2,4 @@
import typing
-ListenV2EotTimeoutMs = typing.Optional[typing.Any]
+ListenV2EotTimeoutMs = typing.Any
diff --git a/src/deepgram/types/listen_v2mip_opt_out.py b/src/deepgram/types/listen_v2mip_opt_out.py
index 18caf04d..689450da 100644
--- a/src/deepgram/types/listen_v2mip_opt_out.py
+++ b/src/deepgram/types/listen_v2mip_opt_out.py
@@ -2,4 +2,4 @@
import typing
-ListenV2MipOptOut = typing.Optional[typing.Any]
+ListenV2MipOptOut = typing.Any
diff --git a/src/deepgram/types/listen_v2sample_rate.py b/src/deepgram/types/listen_v2sample_rate.py
index 78d73d0b..053a3e03 100644
--- a/src/deepgram/types/listen_v2sample_rate.py
+++ b/src/deepgram/types/listen_v2sample_rate.py
@@ -2,4 +2,4 @@
import typing
-ListenV2SampleRate = typing.Optional[typing.Any]
+ListenV2SampleRate = typing.Any
diff --git a/src/deepgram/types/listen_v2tag.py b/src/deepgram/types/listen_v2tag.py
index 7279fae9..5a535a77 100644
--- a/src/deepgram/types/listen_v2tag.py
+++ b/src/deepgram/types/listen_v2tag.py
@@ -2,4 +2,4 @@
import typing
-ListenV2Tag = typing.Optional[typing.Any]
+ListenV2Tag = typing.Any
diff --git a/src/deepgram/types/project_request_response.py b/src/deepgram/types/project_request_response.py
index 9b45765a..fde3ca2d 100644
--- a/src/deepgram/types/project_request_response.py
+++ b/src/deepgram/types/project_request_response.py
@@ -37,7 +37,7 @@ class ProjectRequestResponse(UniversalBaseModel):
The unique identifier of the API key
"""
- response: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
+ response: typing.Optional[typing.Dict[str, typing.Any]] = pydantic.Field(default=None)
"""
The response of the request
"""
diff --git a/src/deepgram/types/speak_v1mip_opt_out.py b/src/deepgram/types/speak_v1mip_opt_out.py
index d7312ba7..80089874 100644
--- a/src/deepgram/types/speak_v1mip_opt_out.py
+++ b/src/deepgram/types/speak_v1mip_opt_out.py
@@ -2,4 +2,4 @@
import typing
-SpeakV1MipOptOut = typing.Optional[typing.Any]
+SpeakV1MipOptOut = typing.Any
diff --git a/src/deepgram/types/speak_v1sample_rate.py b/src/deepgram/types/speak_v1sample_rate.py
index a0031cb1..3b8036fc 100644
--- a/src/deepgram/types/speak_v1sample_rate.py
+++ b/src/deepgram/types/speak_v1sample_rate.py
@@ -2,4 +2,4 @@
import typing
-SpeakV1SampleRate = typing.Union[typing.Literal["8000", "16000", "24000", "44100", "48000"], typing.Any]
+SpeakV1SampleRate = typing.Union[typing.Literal["8000", "16000", "24000", "32000", "48000"], typing.Any]
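For reference, a sketch of what the updated sample-rate alias admits after swapping "44100" for "32000"; the non-literal arm still lets other values through.

from deepgram.types.speak_v1sample_rate import SpeakV1SampleRate

rate: SpeakV1SampleRate = "32000"  # newly listed literal
other: SpeakV1SampleRate = 44100  # still accepted through the typing.Any arm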
diff --git a/tests/utils/test_http_client.py b/tests/utils/test_http_client.py
index 31bde0b7..8c89c234 100644
--- a/tests/utils/test_http_client.py
+++ b/tests/utils/test_http_client.py
@@ -1,6 +1,6 @@
# This file was auto-generated by Fern from our API Definition.
-from deepgram.core.http_client import get_request_body
+from deepgram.core.http_client import get_request_body, remove_none_from_dict
from deepgram.core.request_options import RequestOptions
@@ -8,6 +8,10 @@ def get_request_options() -> RequestOptions:
return {"additional_body_parameters": {"see you": "later"}}
+def get_request_options_with_none() -> RequestOptions:
+ return {"additional_body_parameters": {"see you": "later", "optional": None}}
+
+
def test_get_json_request_body() -> None:
json_body, data_body = get_request_body(json={"hello": "world"}, data=None, request_options=None, omit=None)
assert json_body == {"hello": "world"}
@@ -59,3 +63,47 @@ def test_get_empty_json_request_body() -> None:
assert json_body_extras is None
assert data_body_extras is None
+
+
+def test_json_body_preserves_none_values() -> None:
+ """Test that JSON bodies preserve None values (they become JSON null)."""
+ json_body, data_body = get_request_body(
+ json={"hello": "world", "optional": None}, data=None, request_options=None, omit=None
+ )
+ # JSON bodies should preserve None values
+ assert json_body == {"hello": "world", "optional": None}
+ assert data_body is None
+
+
+def test_data_body_preserves_none_values_without_multipart() -> None:
+ """Test that data bodies preserve None values when not using multipart.
+
+ The filtering of None values happens in HttpClient.request/stream methods,
+ not in get_request_body. This test verifies get_request_body doesn't filter None.
+ """
+ json_body, data_body = get_request_body(
+ json=None, data={"hello": "world", "optional": None}, request_options=None, omit=None
+ )
+ # get_request_body should preserve None values in data body
+ # The filtering happens later in HttpClient.request when multipart is detected
+ assert data_body == {"hello": "world", "optional": None}
+ assert json_body is None
+
+
+def test_remove_none_from_dict_filters_none_values() -> None:
+ """Test that remove_none_from_dict correctly filters out None values."""
+ original = {"hello": "world", "optional": None, "another": "value", "also_none": None}
+ filtered = remove_none_from_dict(original)
+ assert filtered == {"hello": "world", "another": "value"}
+ # Original should not be modified
+ assert original == {"hello": "world", "optional": None, "another": "value", "also_none": None}
+
+
+def test_remove_none_from_dict_empty_dict() -> None:
+ """Test that remove_none_from_dict handles empty dict."""
+ assert remove_none_from_dict({}) == {}
+
+
+def test_remove_none_from_dict_all_none() -> None:
+ """Test that remove_none_from_dict handles dict with all None values."""
+ assert remove_none_from_dict({"a": None, "b": None}) == {}