From 541a2b48adc82d840e2aa02d7f597c236012644e Mon Sep 17 00:00:00 2001 From: gaspardBT Date: Fri, 13 Sep 2024 15:04:16 +0200 Subject: [PATCH] generation with new version --- .speakeasy/gen.lock | 670 ++++++++++------- .speakeasy/gen.yaml | 2 +- .speakeasy/workflow.lock | 7 +- .speakeasy/workflow.yaml | 2 +- README.md | 145 ++-- USAGE.md | 16 +- docs/sdks/agents/README.md | 12 +- docs/sdks/chat/README.md | 2 - docs/sdks/embeddings/README.md | 3 +- docs/sdks/files/README.md | 8 +- docs/sdks/fim/README.md | 2 - docs/sdks/jobs/README.md | 5 - docs/sdks/models/README.md | 6 - packages/mistralai_azure/.speakeasy/gen.lock | 190 ++--- packages/mistralai_azure/.speakeasy/gen.yaml | 2 +- packages/mistralai_azure/poetry.lock | 222 +++--- packages/mistralai_azure/pyproject.toml | 5 +- .../mistralai_azure/scripts/prepare-readme.py | 2 +- .../src/mistralai_azure/_hooks/sdkhooks.py | 27 +- .../src/mistralai_azure/_hooks/types.py | 36 +- .../src/mistralai_azure/basesdk.py | 97 ++- .../src/mistralai_azure/chat.py | 396 ++++++---- .../src/mistralai_azure/models/__init__.py | 159 +++- .../models/assistantmessage.py | 25 +- .../models/chatcompletionchoice.py | 10 +- .../models/chatcompletionrequest.py | 63 +- .../models/chatcompletionresponse.py | 8 +- .../models/chatcompletionstreamrequest.py | 67 +- .../mistralai_azure/models/completionchunk.py | 13 +- .../mistralai_azure/models/completionevent.py | 3 +- .../models/completionresponsestreamchoice.py | 14 +- .../mistralai_azure/models/contentchunk.py | 1 - .../mistralai_azure/models/deltamessage.py | 21 +- .../src/mistralai_azure/models/function.py | 5 +- .../mistralai_azure/models/functioncall.py | 4 +- .../mistralai_azure/models/functionname.py | 7 +- .../models/httpvalidationerror.py | 4 +- .../mistralai_azure/models/responseformat.py | 3 +- .../src/mistralai_azure/models/security.py | 15 +- .../mistralai_azure/models/systemmessage.py | 6 +- .../src/mistralai_azure/models/textchunk.py | 7 +- .../src/mistralai_azure/models/tool.py | 8 +- .../src/mistralai_azure/models/toolcall.py | 9 +- .../src/mistralai_azure/models/toolchoice.py | 12 +- .../src/mistralai_azure/models/toolmessage.py | 25 +- .../src/mistralai_azure/models/usageinfo.py | 5 +- .../src/mistralai_azure/models/usermessage.py | 6 +- .../mistralai_azure/models/validationerror.py | 5 +- .../src/mistralai_azure/sdkconfiguration.py | 14 +- .../src/mistralai_azure/utils/__init__.py | 7 +- .../src/mistralai_azure/utils/logger.py | 5 +- .../src/mistralai_azure/utils/retries.py | 3 +- .../src/mistralai_azure/utils/security.py | 18 +- packages/mistralai_gcp/.speakeasy/gen.lock | 220 +++--- packages/mistralai_gcp/.speakeasy/gen.yaml | 2 +- packages/mistralai_gcp/poetry.lock | 224 +++--- packages/mistralai_gcp/pyproject.toml | 5 +- .../mistralai_gcp/scripts/prepare-readme.py | 2 +- .../src/mistralai_gcp/_hooks/sdkhooks.py | 27 +- .../src/mistralai_gcp/_hooks/types.py | 36 +- .../src/mistralai_gcp/basesdk.py | 97 ++- .../mistralai_gcp/src/mistralai_gcp/chat.py | 396 ++++++---- .../mistralai_gcp/src/mistralai_gcp/fim.py | 276 ++++--- .../src/mistralai_gcp/models/__init__.py | 183 ++++- .../mistralai_gcp/models/assistantmessage.py | 25 +- .../models/chatcompletionchoice.py | 10 +- .../models/chatcompletionrequest.py | 60 +- .../models/chatcompletionresponse.py | 8 +- .../models/chatcompletionstreamrequest.py | 64 +- .../mistralai_gcp/models/completionchunk.py | 13 +- .../mistralai_gcp/models/completionevent.py | 3 +- .../models/completionresponsestreamchoice.py | 14 +- 
.../src/mistralai_gcp/models/contentchunk.py | 1 - .../src/mistralai_gcp/models/deltamessage.py | 21 +- .../models/fimcompletionrequest.py | 39 +- .../models/fimcompletionresponse.py | 8 +- .../models/fimcompletionstreamrequest.py | 39 +- .../src/mistralai_gcp/models/function.py | 5 +- .../src/mistralai_gcp/models/functioncall.py | 4 +- .../src/mistralai_gcp/models/functionname.py | 7 +- .../models/httpvalidationerror.py | 4 +- .../mistralai_gcp/models/responseformat.py | 3 +- .../src/mistralai_gcp/models/security.py | 15 +- .../src/mistralai_gcp/models/systemmessage.py | 6 +- .../src/mistralai_gcp/models/textchunk.py | 7 +- .../src/mistralai_gcp/models/tool.py | 8 +- .../src/mistralai_gcp/models/toolcall.py | 9 +- .../src/mistralai_gcp/models/toolchoice.py | 12 +- .../src/mistralai_gcp/models/toolmessage.py | 25 +- .../src/mistralai_gcp/models/usageinfo.py | 5 +- .../src/mistralai_gcp/models/usermessage.py | 6 +- .../mistralai_gcp/models/validationerror.py | 5 +- .../src/mistralai_gcp/sdkconfiguration.py | 14 +- .../src/mistralai_gcp/utils/__init__.py | 7 +- .../src/mistralai_gcp/utils/logger.py | 5 +- .../src/mistralai_gcp/utils/retries.py | 3 +- .../src/mistralai_gcp/utils/security.py | 18 +- pyproject.toml | 2 +- src/mistralai/_hooks/sdkhooks.py | 27 +- src/mistralai/_hooks/types.py | 36 +- src/mistralai/agents.py | 436 +++++++---- src/mistralai/basesdk.py | 95 ++- src/mistralai/chat.py | 404 +++++++---- src/mistralai/embeddings.py | 126 ++-- src/mistralai/files.py | 444 +++++++----- src/mistralai/fim.py | 284 +++++--- src/mistralai/fine_tuning.py | 5 +- src/mistralai/jobs.py | 635 +++++++++------- src/mistralai/models/__init__.py | 527 ++++++++++++-- .../models/agentscompletionrequest.py | 44 +- .../models/agentscompletionstreamrequest.py | 48 +- src/mistralai/models/archiveftmodelout.py | 8 +- src/mistralai/models/assistantmessage.py | 17 +- src/mistralai/models/basemodelcard.py | 35 +- src/mistralai/models/chatcompletionchoice.py | 6 +- src/mistralai/models/chatcompletionrequest.py | 54 +- .../models/chatcompletionresponse.py | 8 +- .../models/chatcompletionstreamrequest.py | 58 +- src/mistralai/models/checkpointout.py | 5 +- src/mistralai/models/completionchunk.py | 13 +- src/mistralai/models/completionevent.py | 3 +- .../models/completionresponsestreamchoice.py | 18 +- src/mistralai/models/contentchunk.py | 8 +- ...elete_model_v1_models_model_id_deleteop.py | 7 +- src/mistralai/models/deletefileout.py | 5 +- src/mistralai/models/deletemodelout.py | 5 +- src/mistralai/models/deltamessage.py | 13 +- src/mistralai/models/detailedjobout.py | 66 +- src/mistralai/models/embeddingrequest.py | 13 +- src/mistralai/models/embeddingresponse.py | 7 +- src/mistralai/models/embeddingresponsedata.py | 5 +- src/mistralai/models/eventout.py | 13 +- .../models/files_api_routes_delete_fileop.py | 7 +- .../files_api_routes_retrieve_fileop.py | 7 +- .../models/files_api_routes_upload_fileop.py | 35 +- src/mistralai/models/fileschema.py | 31 +- src/mistralai/models/fimcompletionrequest.py | 31 +- src/mistralai/models/fimcompletionresponse.py | 8 +- .../models/fimcompletionstreamrequest.py | 31 +- src/mistralai/models/finetuneablemodel.py | 8 +- .../models/ftmodelcapabilitiesout.py | 6 +- src/mistralai/models/ftmodelcard.py | 43 +- src/mistralai/models/ftmodelout.py | 38 +- src/mistralai/models/function.py | 5 +- src/mistralai/models/functioncall.py | 4 +- src/mistralai/models/functionname.py | 7 +- src/mistralai/models/githubrepositoryin.py | 19 +- src/mistralai/models/githubrepositoryout.py | 19 +- 
src/mistralai/models/httpvalidationerror.py | 4 +- src/mistralai/models/imageurl.py | 12 +- src/mistralai/models/imageurlchunk.py | 10 +- src/mistralai/models/jobin.py | 27 +- src/mistralai/models/jobmetadataout.py | 37 +- src/mistralai/models/jobout.py | 62 +- ..._fine_tuning_archive_fine_tuned_modelop.py | 7 +- ...es_fine_tuning_cancel_fine_tuning_jobop.py | 7 +- ...es_fine_tuning_create_fine_tuning_jobop.py | 5 +- ...outes_fine_tuning_get_fine_tuning_jobop.py | 7 +- ...utes_fine_tuning_get_fine_tuning_jobsop.py | 99 ++- ...tes_fine_tuning_start_fine_tuning_jobop.py | 7 +- ...ine_tuning_unarchive_fine_tuned_modelop.py | 7 +- ...s_fine_tuning_update_fine_tuned_modelop.py | 13 +- src/mistralai/models/jobsout.py | 8 +- src/mistralai/models/legacyjobmetadataout.py | 51 +- src/mistralai/models/listfilesout.py | 4 +- src/mistralai/models/metricout.py | 17 +- src/mistralai/models/modelcapabilities.py | 7 +- src/mistralai/models/modellist.py | 11 +- src/mistralai/models/responseformat.py | 3 +- ...retrieve_model_v1_models_model_id_getop.py | 20 +- src/mistralai/models/retrievefileout.py | 31 +- src/mistralai/models/security.py | 15 +- src/mistralai/models/systemmessage.py | 6 +- src/mistralai/models/textchunk.py | 7 +- src/mistralai/models/tool.py | 8 +- src/mistralai/models/toolcall.py | 9 +- src/mistralai/models/toolchoice.py | 12 +- src/mistralai/models/toolmessage.py | 17 +- src/mistralai/models/trainingfile.py | 4 +- src/mistralai/models/trainingparameters.py | 33 +- src/mistralai/models/trainingparametersin.py | 37 +- src/mistralai/models/unarchiveftmodelout.py | 8 +- src/mistralai/models/updateftmodelin.py | 12 +- src/mistralai/models/uploadfileout.py | 31 +- src/mistralai/models/usageinfo.py | 5 +- src/mistralai/models/usermessage.py | 6 +- src/mistralai/models/validationerror.py | 5 +- src/mistralai/models/wandbintegration.py | 18 +- src/mistralai/models/wandbintegrationout.py | 17 +- src/mistralai/models_.py | 686 ++++++++++-------- src/mistralai/sdk.py | 43 +- src/mistralai/sdkconfiguration.py | 14 +- src/mistralai/utils/__init__.py | 7 +- src/mistralai/utils/logger.py | 5 +- src/mistralai/utils/retries.py | 3 +- src/mistralai/utils/security.py | 19 +- 196 files changed, 6515 insertions(+), 3097 deletions(-) diff --git a/.speakeasy/gen.lock b/.speakeasy/gen.lock index 11e1048..f27ec5d 100644 --- a/.speakeasy/gen.lock +++ b/.speakeasy/gen.lock @@ -3,10 +3,10 @@ id: 2d045ec7-2ebb-4f4d-ad25-40953b132161 management: docChecksum: b504694f524d70325c81c4bd7542c5cf docVersion: 0.0.2 - speakeasyVersion: 1.382.0 - generationVersion: 2.404.11 - releaseVersion: 1.0.4 - configChecksum: 713a7028fbef398b9a9d8e2d529f1e9a + speakeasyVersion: 1.396.6 + generationVersion: 2.415.6 + releaseVersion: 1.1.0 + configChecksum: 49094e0f156d020bd164f8b4bd41e97b repoURL: https://github.com/mistralai/client-python.git installationURL: https://github.com/mistralai/client-python.git published: true @@ -14,8 +14,9 @@ features: python: additionalDependencies: 1.0.0 constsAndDefaults: 1.0.2 - core: 5.4.1 + core: 5.5.3 defaultEnabledRetries: 0.2.0 + enumUnions: 0.1.0 envVarSecurityUsage: 0.3.1 examples: 3.0.0 flatRequests: 1.0.1 @@ -29,7 +30,7 @@ features: nullables: 1.0.0 openEnums: 1.0.0 responseFormat: 1.0.0 - retries: 3.0.0 + retries: 3.0.2 sdkHooks: 1.0.0 serverEvents: 1.0.2 serverEventsSentinels: 0.1.0 @@ -37,17 +38,159 @@ features: unions: 3.0.2 uploadStreams: 1.0.0 generatedFiles: - - src/mistralai/sdkconfiguration.py - - src/mistralai/models_.py - - src/mistralai/files.py - - src/mistralai/jobs.py - - 
src/mistralai/fine_tuning.py - - src/mistralai/chat.py - - src/mistralai/fim.py - - src/mistralai/agents.py - - src/mistralai/embeddings.py - - src/mistralai/sdk.py + - .gitattributes - .vscode/settings.json + - USAGE.md + - docs/models/agentscompletionrequest.md + - docs/models/agentscompletionrequestmessages.md + - docs/models/agentscompletionrequeststop.md + - docs/models/agentscompletionrequesttoolchoice.md + - docs/models/agentscompletionstreamrequest.md + - docs/models/agentscompletionstreamrequestmessages.md + - docs/models/agentscompletionstreamrequeststop.md + - docs/models/agentscompletionstreamrequesttoolchoice.md + - docs/models/archiveftmodelout.md + - docs/models/archiveftmodeloutobject.md + - docs/models/arguments.md + - docs/models/assistantmessage.md + - docs/models/assistantmessagerole.md + - docs/models/basemodelcard.md + - docs/models/chatcompletionchoice.md + - docs/models/chatcompletionrequest.md + - docs/models/chatcompletionrequesttoolchoice.md + - docs/models/chatcompletionresponse.md + - docs/models/chatcompletionstreamrequest.md + - docs/models/chatcompletionstreamrequestmessages.md + - docs/models/chatcompletionstreamrequeststop.md + - docs/models/chatcompletionstreamrequesttoolchoice.md + - docs/models/checkpointout.md + - docs/models/completionchunk.md + - docs/models/completionevent.md + - docs/models/completionresponsestreamchoice.md + - docs/models/completionresponsestreamchoicefinishreason.md + - docs/models/content.md + - docs/models/contentchunk.md + - docs/models/data.md + - docs/models/deletefileout.md + - docs/models/deletemodelout.md + - docs/models/deletemodelv1modelsmodeliddeleterequest.md + - docs/models/deltamessage.md + - docs/models/detailedjobout.md + - docs/models/detailedjoboutintegrations.md + - docs/models/detailedjoboutobject.md + - docs/models/detailedjoboutrepositories.md + - docs/models/detailedjoboutstatus.md + - docs/models/embeddingrequest.md + - docs/models/embeddingresponse.md + - docs/models/embeddingresponsedata.md + - docs/models/eventout.md + - docs/models/file.md + - docs/models/filesapiroutesdeletefilerequest.md + - docs/models/filesapiroutesretrievefilerequest.md + - docs/models/filesapiroutesuploadfilemultipartbodyparams.md + - docs/models/filesapiroutesuploadfilepurpose.md + - docs/models/fileschema.md + - docs/models/fileschemapurpose.md + - docs/models/fimcompletionrequest.md + - docs/models/fimcompletionrequeststop.md + - docs/models/fimcompletionresponse.md + - docs/models/fimcompletionstreamrequest.md + - docs/models/fimcompletionstreamrequeststop.md + - docs/models/finetuneablemodel.md + - docs/models/finishreason.md + - docs/models/ftmodelcapabilitiesout.md + - docs/models/ftmodelcard.md + - docs/models/ftmodelout.md + - docs/models/ftmodeloutobject.md + - docs/models/function.md + - docs/models/functioncall.md + - docs/models/functionname.md + - docs/models/githubrepositoryin.md + - docs/models/githubrepositoryintype.md + - docs/models/githubrepositoryout.md + - docs/models/githubrepositoryouttype.md + - docs/models/httpvalidationerror.md + - docs/models/imageurl.md + - docs/models/imageurlchunk.md + - docs/models/imageurlchunkimageurl.md + - docs/models/imageurlchunktype.md + - docs/models/inputs.md + - docs/models/integrations.md + - docs/models/jobin.md + - docs/models/jobinintegrations.md + - docs/models/jobinrepositories.md + - docs/models/jobmetadataout.md + - docs/models/jobout.md + - docs/models/jobsapiroutesfinetuningarchivefinetunedmodelrequest.md + - 
docs/models/jobsapiroutesfinetuningcancelfinetuningjobrequest.md + - docs/models/jobsapiroutesfinetuningcreatefinetuningjobresponse.md + - docs/models/jobsapiroutesfinetuninggetfinetuningjobrequest.md + - docs/models/jobsapiroutesfinetuninggetfinetuningjobsrequest.md + - docs/models/jobsapiroutesfinetuningstartfinetuningjobrequest.md + - docs/models/jobsapiroutesfinetuningunarchivefinetunedmodelrequest.md + - docs/models/jobsapiroutesfinetuningupdatefinetunedmodelrequest.md + - docs/models/jobsout.md + - docs/models/jobsoutobject.md + - docs/models/legacyjobmetadataout.md + - docs/models/legacyjobmetadataoutobject.md + - docs/models/listfilesout.md + - docs/models/loc.md + - docs/models/messages.md + - docs/models/metricout.md + - docs/models/modelcapabilities.md + - docs/models/modellist.md + - docs/models/object.md + - docs/models/purpose.md + - docs/models/queryparamstatus.md + - docs/models/repositories.md + - docs/models/responseformat.md + - docs/models/responseformats.md + - docs/models/retrievefileout.md + - docs/models/retrievefileoutpurpose.md + - docs/models/retrievemodelv1modelsmodelidgetrequest.md + - docs/models/retrievemodelv1modelsmodelidgetresponseretrievemodelv1modelsmodelidget.md + - docs/models/role.md + - docs/models/sampletype.md + - docs/models/security.md + - docs/models/source.md + - docs/models/status.md + - docs/models/stop.md + - docs/models/systemmessage.md + - docs/models/textchunk.md + - docs/models/textchunktype.md + - docs/models/tool.md + - docs/models/toolcall.md + - docs/models/toolchoice.md + - docs/models/toolchoiceenum.md + - docs/models/toolmessage.md + - docs/models/toolmessagerole.md + - docs/models/tooltypes.md + - docs/models/trainingfile.md + - docs/models/trainingparameters.md + - docs/models/trainingparametersin.md + - docs/models/type.md + - docs/models/unarchiveftmodelout.md + - docs/models/unarchiveftmodeloutobject.md + - docs/models/updateftmodelin.md + - docs/models/uploadfileout.md + - docs/models/usageinfo.md + - docs/models/usermessage.md + - docs/models/usermessagecontent.md + - docs/models/usermessagerole.md + - docs/models/utils/retryconfig.md + - docs/models/validationerror.md + - docs/models/wandbintegration.md + - docs/models/wandbintegrationout.md + - docs/models/wandbintegrationtype.md + - docs/sdks/agents/README.md + - docs/sdks/chat/README.md + - docs/sdks/embeddings/README.md + - docs/sdks/files/README.md + - docs/sdks/fim/README.md + - docs/sdks/finetuning/README.md + - docs/sdks/jobs/README.md + - docs/sdks/mistral/README.md + - docs/sdks/models/README.md - poetry.toml - py.typed - pylintrc @@ -55,9 +198,109 @@ generatedFiles: - scripts/prepare-readme.py - scripts/publish.sh - src/mistralai/__init__.py + - src/mistralai/_hooks/__init__.py + - src/mistralai/_hooks/sdkhooks.py + - src/mistralai/_hooks/types.py + - src/mistralai/agents.py - src/mistralai/basesdk.py + - src/mistralai/chat.py + - src/mistralai/embeddings.py + - src/mistralai/files.py + - src/mistralai/fim.py + - src/mistralai/fine_tuning.py - src/mistralai/httpclient.py + - src/mistralai/jobs.py + - src/mistralai/models/__init__.py + - src/mistralai/models/agentscompletionrequest.py + - src/mistralai/models/agentscompletionstreamrequest.py + - src/mistralai/models/archiveftmodelout.py + - src/mistralai/models/assistantmessage.py + - src/mistralai/models/basemodelcard.py + - src/mistralai/models/chatcompletionchoice.py + - src/mistralai/models/chatcompletionrequest.py + - src/mistralai/models/chatcompletionresponse.py + - 
src/mistralai/models/chatcompletionstreamrequest.py + - src/mistralai/models/checkpointout.py + - src/mistralai/models/completionchunk.py + - src/mistralai/models/completionevent.py + - src/mistralai/models/completionresponsestreamchoice.py + - src/mistralai/models/contentchunk.py + - src/mistralai/models/delete_model_v1_models_model_id_deleteop.py + - src/mistralai/models/deletefileout.py + - src/mistralai/models/deletemodelout.py + - src/mistralai/models/deltamessage.py + - src/mistralai/models/detailedjobout.py + - src/mistralai/models/embeddingrequest.py + - src/mistralai/models/embeddingresponse.py + - src/mistralai/models/embeddingresponsedata.py + - src/mistralai/models/eventout.py + - src/mistralai/models/files_api_routes_delete_fileop.py + - src/mistralai/models/files_api_routes_retrieve_fileop.py + - src/mistralai/models/files_api_routes_upload_fileop.py + - src/mistralai/models/fileschema.py + - src/mistralai/models/fimcompletionrequest.py + - src/mistralai/models/fimcompletionresponse.py + - src/mistralai/models/fimcompletionstreamrequest.py + - src/mistralai/models/finetuneablemodel.py + - src/mistralai/models/ftmodelcapabilitiesout.py + - src/mistralai/models/ftmodelcard.py + - src/mistralai/models/ftmodelout.py + - src/mistralai/models/function.py + - src/mistralai/models/functioncall.py + - src/mistralai/models/functionname.py + - src/mistralai/models/githubrepositoryin.py + - src/mistralai/models/githubrepositoryout.py + - src/mistralai/models/httpvalidationerror.py + - src/mistralai/models/imageurl.py + - src/mistralai/models/imageurlchunk.py + - src/mistralai/models/jobin.py + - src/mistralai/models/jobmetadataout.py + - src/mistralai/models/jobout.py + - src/mistralai/models/jobs_api_routes_fine_tuning_archive_fine_tuned_modelop.py + - src/mistralai/models/jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop.py + - src/mistralai/models/jobs_api_routes_fine_tuning_create_fine_tuning_jobop.py + - src/mistralai/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobop.py + - src/mistralai/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobsop.py + - src/mistralai/models/jobs_api_routes_fine_tuning_start_fine_tuning_jobop.py + - src/mistralai/models/jobs_api_routes_fine_tuning_unarchive_fine_tuned_modelop.py + - src/mistralai/models/jobs_api_routes_fine_tuning_update_fine_tuned_modelop.py + - src/mistralai/models/jobsout.py + - src/mistralai/models/legacyjobmetadataout.py + - src/mistralai/models/listfilesout.py + - src/mistralai/models/metricout.py + - src/mistralai/models/modelcapabilities.py + - src/mistralai/models/modellist.py + - src/mistralai/models/responseformat.py + - src/mistralai/models/responseformats.py + - src/mistralai/models/retrieve_model_v1_models_model_id_getop.py + - src/mistralai/models/retrievefileout.py + - src/mistralai/models/sampletype.py + - src/mistralai/models/sdkerror.py + - src/mistralai/models/security.py + - src/mistralai/models/source.py + - src/mistralai/models/systemmessage.py + - src/mistralai/models/textchunk.py + - src/mistralai/models/tool.py + - src/mistralai/models/toolcall.py + - src/mistralai/models/toolchoice.py + - src/mistralai/models/toolchoiceenum.py + - src/mistralai/models/toolmessage.py + - src/mistralai/models/tooltypes.py + - src/mistralai/models/trainingfile.py + - src/mistralai/models/trainingparameters.py + - src/mistralai/models/trainingparametersin.py + - src/mistralai/models/unarchiveftmodelout.py + - src/mistralai/models/updateftmodelin.py + - src/mistralai/models/uploadfileout.py + - 
src/mistralai/models/usageinfo.py + - src/mistralai/models/usermessage.py + - src/mistralai/models/validationerror.py + - src/mistralai/models/wandbintegration.py + - src/mistralai/models/wandbintegrationout.py + - src/mistralai/models_.py - src/mistralai/py.typed + - src/mistralai/sdk.py + - src/mistralai/sdkconfiguration.py - src/mistralai/types/__init__.py - src/mistralai/types/basemodel.py - src/mistralai/utils/__init__.py @@ -75,245 +318,156 @@ generatedFiles: - src/mistralai/utils/serializers.py - src/mistralai/utils/url.py - src/mistralai/utils/values.py - - src/mistralai/models/sdkerror.py - - src/mistralai/models/modellist.py - - src/mistralai/models/basemodelcard.py - - src/mistralai/models/modelcapabilities.py - - src/mistralai/models/ftmodelcard.py - - src/mistralai/models/httpvalidationerror.py - - src/mistralai/models/validationerror.py - - src/mistralai/models/retrieve_model_v1_models_model_id_getop.py - - src/mistralai/models/deletemodelout.py - - src/mistralai/models/delete_model_v1_models_model_id_deleteop.py - - src/mistralai/models/ftmodelout.py - - src/mistralai/models/ftmodelcapabilitiesout.py - - src/mistralai/models/jobs_api_routes_fine_tuning_update_fine_tuned_modelop.py - - src/mistralai/models/updateftmodelin.py - - src/mistralai/models/archiveftmodelout.py - - src/mistralai/models/jobs_api_routes_fine_tuning_archive_fine_tuned_modelop.py - - src/mistralai/models/unarchiveftmodelout.py - - src/mistralai/models/jobs_api_routes_fine_tuning_unarchive_fine_tuned_modelop.py - - src/mistralai/models/uploadfileout.py - - src/mistralai/models/source.py - - src/mistralai/models/sampletype.py - - src/mistralai/models/files_api_routes_upload_fileop.py - - src/mistralai/models/listfilesout.py - - src/mistralai/models/fileschema.py - - src/mistralai/models/retrievefileout.py - - src/mistralai/models/files_api_routes_retrieve_fileop.py - - src/mistralai/models/deletefileout.py - - src/mistralai/models/files_api_routes_delete_fileop.py - - src/mistralai/models/jobsout.py - - src/mistralai/models/jobout.py - - src/mistralai/models/jobmetadataout.py - - src/mistralai/models/githubrepositoryout.py - - src/mistralai/models/wandbintegrationout.py - - src/mistralai/models/finetuneablemodel.py - - src/mistralai/models/trainingparameters.py - - src/mistralai/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobsop.py - - src/mistralai/models/jobs_api_routes_fine_tuning_create_fine_tuning_jobop.py - - src/mistralai/models/legacyjobmetadataout.py - - src/mistralai/models/jobin.py - - src/mistralai/models/githubrepositoryin.py - - src/mistralai/models/wandbintegration.py - - src/mistralai/models/trainingparametersin.py - - src/mistralai/models/trainingfile.py - - src/mistralai/models/detailedjobout.py - - src/mistralai/models/checkpointout.py - - src/mistralai/models/metricout.py - - src/mistralai/models/eventout.py - - src/mistralai/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobop.py - - src/mistralai/models/jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop.py - - src/mistralai/models/jobs_api_routes_fine_tuning_start_fine_tuning_jobop.py - - src/mistralai/models/chatcompletionresponse.py - - src/mistralai/models/chatcompletionchoice.py - - src/mistralai/models/assistantmessage.py - - src/mistralai/models/toolcall.py - - src/mistralai/models/functioncall.py - - src/mistralai/models/tooltypes.py - - src/mistralai/models/usageinfo.py - - src/mistralai/models/chatcompletionrequest.py - - src/mistralai/models/toolchoice.py - - src/mistralai/models/functionname.py - - 
src/mistralai/models/toolchoiceenum.py - - src/mistralai/models/tool.py - - src/mistralai/models/function.py - - src/mistralai/models/responseformat.py - - src/mistralai/models/responseformats.py - - src/mistralai/models/systemmessage.py - - src/mistralai/models/textchunk.py - - src/mistralai/models/usermessage.py - - src/mistralai/models/contentchunk.py - - src/mistralai/models/imageurlchunk.py - - src/mistralai/models/imageurl.py - - src/mistralai/models/toolmessage.py - - src/mistralai/models/completionevent.py - - src/mistralai/models/completionchunk.py - - src/mistralai/models/completionresponsestreamchoice.py - - src/mistralai/models/deltamessage.py - - src/mistralai/models/chatcompletionstreamrequest.py - - src/mistralai/models/fimcompletionresponse.py - - src/mistralai/models/fimcompletionrequest.py - - src/mistralai/models/fimcompletionstreamrequest.py - - src/mistralai/models/agentscompletionrequest.py - - src/mistralai/models/agentscompletionstreamrequest.py - - src/mistralai/models/embeddingresponse.py - - src/mistralai/models/embeddingresponsedata.py - - src/mistralai/models/embeddingrequest.py - - src/mistralai/models/security.py - - src/mistralai/models/__init__.py - - docs/models/data.md - - docs/models/modellist.md - - docs/models/basemodelcard.md - - docs/models/modelcapabilities.md - - docs/models/ftmodelcard.md - - docs/models/httpvalidationerror.md - - docs/models/loc.md - - docs/models/validationerror.md - - docs/models/retrievemodelv1modelsmodelidgetrequest.md - - docs/models/retrievemodelv1modelsmodelidgetresponseretrievemodelv1modelsmodelidget.md - - docs/models/deletemodelout.md - - docs/models/deletemodelv1modelsmodeliddeleterequest.md - - docs/models/ftmodeloutobject.md - - docs/models/ftmodelout.md - - docs/models/ftmodelcapabilitiesout.md - - docs/models/jobsapiroutesfinetuningupdatefinetunedmodelrequest.md - - docs/models/updateftmodelin.md - - docs/models/archiveftmodeloutobject.md - - docs/models/archiveftmodelout.md - - docs/models/jobsapiroutesfinetuningarchivefinetunedmodelrequest.md - - docs/models/unarchiveftmodeloutobject.md - - docs/models/unarchiveftmodelout.md - - docs/models/jobsapiroutesfinetuningunarchivefinetunedmodelrequest.md - - docs/models/purpose.md - - docs/models/uploadfileout.md - - docs/models/source.md - - docs/models/sampletype.md - - docs/models/filesapiroutesuploadfilepurpose.md - - docs/models/file.md - - docs/models/filesapiroutesuploadfilemultipartbodyparams.md - - docs/models/listfilesout.md - - docs/models/fileschemapurpose.md - - docs/models/fileschema.md - - docs/models/retrievefileoutpurpose.md - - docs/models/retrievefileout.md - - docs/models/filesapiroutesretrievefilerequest.md - - docs/models/deletefileout.md - - docs/models/filesapiroutesdeletefilerequest.md - - docs/models/jobsoutobject.md - - docs/models/jobsout.md - - docs/models/status.md - - docs/models/object.md - - docs/models/integrations.md - - docs/models/repositories.md - - docs/models/jobout.md - - docs/models/jobmetadataout.md - - docs/models/githubrepositoryouttype.md - - docs/models/githubrepositoryout.md - - docs/models/type.md - - docs/models/wandbintegrationout.md - - docs/models/finetuneablemodel.md - - docs/models/trainingparameters.md - - docs/models/queryparamstatus.md - - docs/models/jobsapiroutesfinetuninggetfinetuningjobsrequest.md - - docs/models/jobsapiroutesfinetuningcreatefinetuningjobresponse.md - - docs/models/legacyjobmetadataoutobject.md - - docs/models/legacyjobmetadataout.md - - docs/models/jobinintegrations.md - - 
docs/models/jobinrepositories.md - - docs/models/jobin.md - - docs/models/githubrepositoryintype.md - - docs/models/githubrepositoryin.md - - docs/models/wandbintegrationtype.md - - docs/models/wandbintegration.md - - docs/models/trainingparametersin.md - - docs/models/trainingfile.md - - docs/models/detailedjoboutstatus.md - - docs/models/detailedjoboutobject.md - - docs/models/detailedjoboutintegrations.md - - docs/models/detailedjoboutrepositories.md - - docs/models/detailedjobout.md - - docs/models/checkpointout.md - - docs/models/metricout.md - - docs/models/eventout.md - - docs/models/jobsapiroutesfinetuninggetfinetuningjobrequest.md - - docs/models/jobsapiroutesfinetuningcancelfinetuningjobrequest.md - - docs/models/jobsapiroutesfinetuningstartfinetuningjobrequest.md - - docs/models/chatcompletionresponse.md - - docs/models/finishreason.md - - docs/models/chatcompletionchoice.md - - docs/models/assistantmessagerole.md - - docs/models/assistantmessage.md - - docs/models/toolcall.md - - docs/models/arguments.md - - docs/models/functioncall.md - - docs/models/tooltypes.md - - docs/models/usageinfo.md - - docs/models/stop.md - - docs/models/messages.md - - docs/models/chatcompletionrequesttoolchoice.md - - docs/models/chatcompletionrequest.md - - docs/models/toolchoice.md - - docs/models/functionname.md - - docs/models/toolchoiceenum.md - - docs/models/tool.md - - docs/models/function.md - - docs/models/responseformat.md - - docs/models/responseformats.md - - docs/models/role.md - - docs/models/content.md - - docs/models/systemmessage.md - - docs/models/textchunktype.md - - docs/models/textchunk.md - - docs/models/usermessagerole.md - - docs/models/usermessagecontent.md - - docs/models/usermessage.md - - docs/models/contentchunk.md - - docs/models/imageurlchunktype.md - - docs/models/imageurlchunkimageurl.md - - docs/models/imageurlchunk.md - - docs/models/imageurl.md - - docs/models/toolmessagerole.md - - docs/models/toolmessage.md - - docs/models/completionevent.md - - docs/models/completionchunk.md - - docs/models/completionresponsestreamchoicefinishreason.md - - docs/models/completionresponsestreamchoice.md - - docs/models/deltamessage.md - - docs/models/chatcompletionstreamrequeststop.md - - docs/models/chatcompletionstreamrequestmessages.md - - docs/models/chatcompletionstreamrequesttoolchoice.md - - docs/models/chatcompletionstreamrequest.md - - docs/models/fimcompletionresponse.md - - docs/models/fimcompletionrequeststop.md - - docs/models/fimcompletionrequest.md - - docs/models/fimcompletionstreamrequeststop.md - - docs/models/fimcompletionstreamrequest.md - - docs/models/agentscompletionrequeststop.md - - docs/models/agentscompletionrequestmessages.md - - docs/models/agentscompletionrequesttoolchoice.md - - docs/models/agentscompletionrequest.md - - docs/models/agentscompletionstreamrequeststop.md - - docs/models/agentscompletionstreamrequestmessages.md - - docs/models/agentscompletionstreamrequesttoolchoice.md - - docs/models/agentscompletionstreamrequest.md - - docs/models/embeddingresponse.md - - docs/models/embeddingresponsedata.md - - docs/models/inputs.md - - docs/models/embeddingrequest.md - - docs/models/security.md - - docs/sdks/mistral/README.md - - docs/models/utils/retryconfig.md - - docs/sdks/models/README.md - - docs/sdks/files/README.md - - docs/sdks/finetuning/README.md - - docs/sdks/jobs/README.md - - docs/sdks/chat/README.md - - docs/sdks/fim/README.md - - docs/sdks/agents/README.md - - docs/sdks/embeddings/README.md - - USAGE.md - - .gitattributes - - 
src/mistralai/_hooks/sdkhooks.py - - src/mistralai/_hooks/types.py - - src/mistralai/_hooks/__init__.py +examples: + list_models_v1_models_get: + speakeasy-default-list-models-v1-models-get: {} + retrieve_model_v1_models__model_id__get: + "": + parameters: + path: + model_id: "ft:open-mistral-7b:587a6b29:20240514:7e773925" + responses: + "200": + application/json: {"id": ""} + "422": {} + delete_model_v1_models__model_id__delete: + "": + parameters: + path: + model_id: "ft:open-mistral-7b:587a6b29:20240514:7e773925" + responses: + "200": + application/json: {"id": "ft:open-mistral-7b:587a6b29:20240514:7e773925", "deleted": true} + "422": {} + jobs_api_routes_fine_tuning_update_fine_tuned_model: + "": + parameters: + path: + model_id: "ft:open-mistral-7b:587a6b29:20240514:7e773925" + responses: + "200": + application/json: {"id": "", "created": 857478, "owned_by": "", "root": "", "archived": true, "job": "905bf4aa-77f2-404e-b754-c352acfe5407"} + jobs_api_routes_fine_tuning_archive_fine_tuned_model: + "": + parameters: + path: + model_id: "ft:open-mistral-7b:587a6b29:20240514:7e773925" + responses: + "200": + application/json: {"id": ""} + jobs_api_routes_fine_tuning_unarchive_fine_tuned_model: + "": + parameters: + path: + model_id: "ft:open-mistral-7b:587a6b29:20240514:7e773925" + responses: + "200": + application/json: {"id": ""} + files_api_routes_upload_file: + speakeasy-default-files-api-routes-upload-file: + requestBody: + multipart/form-data: {"file": {}} + responses: + "200": + application/json: {"id": "497f6eca-6276-4993-bfeb-53cbbbba6f09", "object": "file", "bytes": 13000, "created_at": 1716963433, "filename": "files_upload.jsonl", "sample_type": "pretrain", "source": "upload"} + files_api_routes_list_files: + speakeasy-default-files-api-routes-list-files: + responses: + "200": + application/json: {"data": [], "object": ""} + files_api_routes_retrieve_file: + speakeasy-default-files-api-routes-retrieve-file: + parameters: + path: + file_id: "" + responses: + "200": + application/json: {"id": "497f6eca-6276-4993-bfeb-53cbbbba6f09", "object": "file", "bytes": 13000, "created_at": 1716963433, "filename": "files_upload.jsonl", "sample_type": "pretrain", "source": "repository"} + files_api_routes_delete_file: + speakeasy-default-files-api-routes-delete-file: + parameters: + path: + file_id: "" + responses: + "200": + application/json: {"id": "497f6eca-6276-4993-bfeb-53cbbbba6f09", "object": "file", "deleted": false} + jobs_api_routes_fine_tuning_get_fine_tuning_jobs: + speakeasy-default-jobs-api-routes-fine-tuning-get-fine-tuning-jobs: + responses: + "200": + application/json: {"total": 768578} + jobs_api_routes_fine_tuning_create_fine_tuning_job: + speakeasy-default-jobs-api-routes-fine-tuning-create-fine-tuning-job: + requestBody: + application/json: {"model": "codestral-latest"} + responses: + "200": + application/json: {"id": "ad642c1f-c6fe-4072-81bc-dd89dc7fa504", "auto_start": false, "model": "open-mistral-7b", "status": "FAILED_VALIDATION", "job_type": "", "created_at": 230313, "modified_at": 207565, "training_files": []} + jobs_api_routes_fine_tuning_get_fine_tuning_job: + speakeasy-default-jobs-api-routes-fine-tuning-get-fine-tuning-job: + parameters: + path: + job_id: "b18d8d81-fd7b-4764-a31e-475cb1f36591" + responses: + "200": + application/json: {"id": "58ccc65b-c928-4154-952e-30c048b8c2b5", "auto_start": false, "model": "open-mistral-nemo", "status": "VALIDATED", "job_type": "", "created_at": 968091, "modified_at": 32069, "training_files": [], "checkpoints": []} + 
jobs_api_routes_fine_tuning_cancel_fine_tuning_job: + speakeasy-default-jobs-api-routes-fine-tuning-cancel-fine-tuning-job: + parameters: + path: + job_id: "03fa7112-315a-4072-a9f2-43f3f1ec962e" + responses: + "200": + application/json: {"id": "fb7dec95-f740-47b2-b8ee-d9b046936a67", "auto_start": true, "model": "mistral-large-latest", "status": "VALIDATED", "job_type": "", "created_at": 252151, "modified_at": 56775, "training_files": [], "checkpoints": []} + jobs_api_routes_fine_tuning_start_fine_tuning_job: + speakeasy-default-jobs-api-routes-fine-tuning-start-fine-tuning-job: + parameters: + path: + job_id: "0eb0f807-fb9f-4e46-9c13-4e257df6e1ba" + responses: + "200": + application/json: {"id": "bc3810ce-43e6-4fde-85a4-cd01d1f9cf8f", "auto_start": true, "model": "codestral-latest", "status": "RUNNING", "job_type": "", "created_at": 186591, "modified_at": 451468, "training_files": [], "checkpoints": []} + chat_completion_v1_chat_completions_post: + speakeasy-default-chat-completion-v1-chat-completions-post: + requestBody: + application/json: {"model": "mistral-small-latest", "messages": [{"content": "Who is the best French painter? Answer in one short sentence."}]} + responses: + "200": + application/json: {"id": "cmpl-e5cc70bb28c444948073e77776eb30ef", "object": "chat.completion", "model": "mistral-small-latest", "usage": {"prompt_tokens": 16, "completion_tokens": 34, "total_tokens": 50}, "created": 1702256327, "choices": []} + "422": {} + stream_chat: + speakeasy-default-stream-chat: + requestBody: + application/json: {"model": "mistral-small-latest", "messages": [{"content": "Who is the best French painter? Answer in one short sentence."}]} + fim_completion_v1_fim_completions_post: + speakeasy-default-fim-completion-v1-fim-completions-post: + requestBody: + application/json: {"model": "codestral-2405", "prompt": "def", "suffix": "return a+b"} + responses: + "200": + application/json: {"id": "cmpl-e5cc70bb28c444948073e77776eb30ef", "object": "chat.completion", "model": "codestral-latest", "usage": {"prompt_tokens": 16, "completion_tokens": 34, "total_tokens": 50}, "created": 1702256327, "choices": []} + "422": {} + stream_fim: + speakeasy-default-stream-fim: + requestBody: + application/json: {"model": "codestral-2405", "prompt": "def", "suffix": "return a+b"} + agents_completion_v1_agents_completions_post: + speakeasy-default-agents-completion-v1-agents-completions-post: + requestBody: + application/json: {"messages": [{"content": "Who is the best French painter? Answer in one short sentence."}], "agent_id": ""} + responses: + "200": + application/json: {"id": "cmpl-e5cc70bb28c444948073e77776eb30ef", "object": "chat.completion", "model": "mistral-small-latest", "usage": {"prompt_tokens": 16, "completion_tokens": 34, "total_tokens": 50}, "created": 1702256327, "choices": []} + "422": {} + stream_agents: + speakeasy-default-stream-agents: + requestBody: + application/json: {"messages": [{"content": "Who is the best French painter? 
Answer in one short sentence."}], "agent_id": ""} + embeddings_v1_embeddings_post: + speakeasy-default-embeddings-v1-embeddings-post: + requestBody: + application/json: {"input": "", "model": "Wrangler"} + responses: + "200": + application/json: {"id": "cmpl-e5cc70bb28c444948073e77776eb30ef", "object": "chat.completion", "model": "mistral-small-latest", "usage": {"prompt_tokens": 16, "completion_tokens": 34, "total_tokens": 50}, "data": [{"object": "embedding", "embedding": [0.1, 0.2, 0.3], "index": 0}]} + "422": {} diff --git a/.speakeasy/gen.yaml b/.speakeasy/gen.yaml index e9edc58..8fd69ab 100644 --- a/.speakeasy/gen.yaml +++ b/.speakeasy/gen.yaml @@ -12,7 +12,7 @@ generation: auth: oAuth2ClientCredentialsEnabled: true python: - version: 1.0.4 + version: 1.1.0 additionalDependencies: dev: pytest: ^8.2.2 diff --git a/.speakeasy/workflow.lock b/.speakeasy/workflow.lock index a268967..5beb5f4 100644 --- a/.speakeasy/workflow.lock +++ b/.speakeasy/workflow.lock @@ -1,4 +1,4 @@ -speakeasyVersion: 1.382.0 +speakeasyVersion: 1.396.6 sources: mistral-azure-source: sourceNamespace: mistral-openapi-azure @@ -24,22 +24,19 @@ targets: sourceNamespace: mistral-openapi-azure sourceRevisionDigest: sha256:3dec9e900243dab5f6fecb4780a74e5cd26bf5660d1db0268964689cb4da043a sourceBlobDigest: sha256:867fabbb7c8662a2f10861eb9505990aea59e4677631291989d06afb3cf497bc - outLocation: ./packages/mistralai_azure mistralai-gcp-sdk: source: mistral-google-cloud-source sourceNamespace: mistral-openapi-google-cloud sourceRevisionDigest: sha256:caf6467696dddae2736fef96d8967b8a02a1e10e405d22d2901d0459172b739c sourceBlobDigest: sha256:d6650e668064690efa947a29ec1712566252e3283940b67a3c602323df461cf6 - outLocation: ./packages/mistralai_gcp mistralai-sdk: source: mistral-openapi sourceNamespace: mistral-openapi sourceRevisionDigest: sha256:4b17326b6b91a95870383242c971996d6c6671d180b40c96f75b09c7cb1cc9c0 sourceBlobDigest: sha256:6e345b52897fc37bf0cae3ff6ddd58a09484cc0f2130387ddf41af6a76eda19f - outLocation: /Users/gaspard/public-mistral/client-python workflow: workflowVersion: 1.0.0 - speakeasyVersion: 1.382.0 + speakeasyVersion: latest sources: mistral-azure-source: inputs: diff --git a/.speakeasy/workflow.yaml b/.speakeasy/workflow.yaml index e7491b4..4076ff3 100644 --- a/.speakeasy/workflow.yaml +++ b/.speakeasy/workflow.yaml @@ -1,5 +1,5 @@ workflowVersion: 1.0.0 -speakeasyVersion: 1.382.0 +speakeasyVersion: latest sources: mistral-azure-source: inputs: diff --git a/README.md b/README.md index 54b0577..f4017d3 100644 --- a/README.md +++ b/README.md @@ -19,15 +19,46 @@ $ echo 'export MISTRAL_API_KEY=[your_key_here]' >> ~/.zshenv $ source ~/.zshenv ``` + +## Summary + +Mistral AI API: Our Chat Completion and Embeddings APIs specification. Create your account on [La Plateforme](https://console.mistral.ai) to get access and read the [docs](https://docs.mistral.ai) to learn how to use it. + + + +## Table of Contents + +* [SDK Installation](#sdk-installation) +* [IDE Support](#ide-support) +* [SDK Example Usage](#sdk-example-usage) +* [Available Resources and Operations](#available-resources-and-operations) +* [Server-sent event streaming](#server-sent-event-streaming) +* [File uploads](#file-uploads) +* [Retries](#retries) +* [Error Handling](#error-handling) +* [Server Selection](#server-selection) +* [Custom HTTP Client](#custom-http-client) +* [Authentication](#authentication) +* [Debugging](#debugging) + + ## SDK Installation -PIP +The SDK can be installed with either *pip* or *poetry* package managers. 
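Both install routes below fetch the same `mistralai` distribution whose version this patch bumps from 1.0.4 to 1.1.0 (see the `gen.yaml` hunk above). As a quick sanity check after installing — a standard-library-only sketch, assuming the distribution name `mistralai` used in the install commands — you can read the installed version back:

```python
# Confirm the installed release matches the 1.1.0 bump in this patch.
# Standard library only; "mistralai" is the distribution name assumed
# from the install commands in this README section.
from importlib.metadata import PackageNotFoundError, version

try:
    print(version("mistralai"))  # expected to print "1.1.0" for this release
except PackageNotFoundError:
    print("mistralai is not installed in this environment")
```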
+ +### PIP + +*PIP* is the default package installer for Python, enabling easy installation and management of packages from PyPI via the command line. + ```bash pip install mistralai ``` -Poetry +### Poetry + +*Poetry* is a modern tool that simplifies dependency management and package publishing by using a single `pyproject.toml` file to handle project metadata and dependencies. + ```bash poetry add mistralai ``` @@ -49,7 +80,6 @@ s = Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) - res = s.chat.complete(model="mistral-small-latest", messages=[ { "content": "Who is the best French painter? Answer in one short sentence.", @@ -101,10 +131,9 @@ s = Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) - res = s.files.upload(file={ - "file_name": "your_file_here", - "content": open("", "rb"), + "file_name": "example.file", + "content": open("example.file", "rb"), }) if res is not None: @@ -126,8 +155,8 @@ async def main(): api_key=os.getenv("MISTRAL_API_KEY", ""), ) res = await s.files.upload_async(file={ - "file_name": "your_file_here", - "content": open("", "rb"), + "file_name": "example.file", + "content": open("example.file", "rb"), }) if res is not None: # handle response @@ -149,11 +178,9 @@ s = Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) - res = s.agents.complete(messages=[ { - "content": "Who is the best French painter? Answer in one short sentence.", - "role": "user", + "content": "", }, ], agent_id="") @@ -287,14 +314,22 @@ The documentation for the GCP SDK is available [here](packages/mistralai_gcp/REA ## Available Resources and Operations -### [models](docs/sdks/models/README.md) +
+Available methods -* [list](docs/sdks/models/README.md#list) - List Models -* [retrieve](docs/sdks/models/README.md#retrieve) - Retrieve Model -* [delete](docs/sdks/models/README.md#delete) - Delete Model -* [update](docs/sdks/models/README.md#update) - Update Fine Tuned Model -* [archive](docs/sdks/models/README.md#archive) - Archive Fine Tuned Model -* [unarchive](docs/sdks/models/README.md#unarchive) - Unarchive Fine Tuned Model +### [agents](docs/sdks/agents/README.md) + +* [complete](docs/sdks/agents/README.md#complete) - Agents Completion +* [stream](docs/sdks/agents/README.md#stream) - Stream Agents completion + +### [chat](docs/sdks/chat/README.md) + +* [complete](docs/sdks/chat/README.md#complete) - Chat Completion +* [stream](docs/sdks/chat/README.md#stream) - Stream chat completion + +### [embeddings](docs/sdks/embeddings/README.md) + +* [create](docs/sdks/embeddings/README.md#create) - Embeddings ### [files](docs/sdks/files/README.md) @@ -303,8 +338,15 @@ The documentation for the GCP SDK is available [here](packages/mistralai_gcp/REA * [retrieve](docs/sdks/files/README.md#retrieve) - Retrieve File * [delete](docs/sdks/files/README.md#delete) - Delete File +### [fim](docs/sdks/fim/README.md) -### [fine_tuning.jobs](docs/sdks/jobs/README.md) +* [complete](docs/sdks/fim/README.md#complete) - Fim Completion +* [stream](docs/sdks/fim/README.md#stream) - Stream fim completion + +### [fine_tuning](docs/sdks/finetuning/README.md) + + +#### [fine_tuning.jobs](docs/sdks/jobs/README.md) * [list](docs/sdks/jobs/README.md#list) - Get Fine Tuning Jobs * [create](docs/sdks/jobs/README.md#create) - Create Fine Tuning Job @@ -312,24 +354,17 @@ The documentation for the GCP SDK is available [here](packages/mistralai_gcp/REA * [cancel](docs/sdks/jobs/README.md#cancel) - Cancel Fine Tuning Job * [start](docs/sdks/jobs/README.md#start) - Start Fine Tuning Job -### [chat](docs/sdks/chat/README.md) - -* [complete](docs/sdks/chat/README.md#complete) - Chat Completion -* [stream](docs/sdks/chat/README.md#stream) - Stream chat completion - -### [fim](docs/sdks/fim/README.md) - -* [complete](docs/sdks/fim/README.md#complete) - Fim Completion -* [stream](docs/sdks/fim/README.md#stream) - Stream fim completion - -### [agents](docs/sdks/agents/README.md) -* [complete](docs/sdks/agents/README.md#complete) - Agents Completion -* [stream](docs/sdks/agents/README.md#stream) - Stream Agents completion +### [models](docs/sdks/models/README.md) -### [embeddings](docs/sdks/embeddings/README.md) +* [list](docs/sdks/models/README.md#list) - List Models +* [retrieve](docs/sdks/models/README.md#retrieve) - Retrieve Model +* [delete](docs/sdks/models/README.md#delete) - Delete Model +* [update](docs/sdks/models/README.md#update) - Update Fine Tuned Model +* [archive](docs/sdks/models/README.md#archive) - Archive Fine Tuned Model +* [unarchive](docs/sdks/models/README.md#unarchive) - Unarchive Fine Tuned Model -* [create](docs/sdks/embeddings/README.md#create) - Embeddings +
@@ -349,7 +384,6 @@ s = Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) - res = s.chat.stream(model="mistral-small-latest", messages=[ { "content": "Who is the best French painter? Answer in one short sentence.", @@ -386,10 +420,9 @@ s = Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) - res = s.files.upload(file={ - "file_name": "your_file_here", - "content": open("", "rb"), + "file_name": "example.file", + "content": open("example.file", "rb"), }) if res is not None: @@ -414,7 +447,6 @@ s = Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) - res = s.models.list(, RetryConfig("backoff", BackoffStrategy(1, 50, 1.1, 100), False)) @@ -435,7 +467,6 @@ s = Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) - res = s.models.list() if res is not None: @@ -450,10 +481,10 @@ if res is not None: Handling errors in this SDK should largely match your expectations. All operations return a response object or raise an error. If Error objects are specified in your OpenAPI Spec, the SDK will raise the appropriate Error type. -| Error Object | Status Code | Content Type | -| -------------------------- | ----------- | ---------------- | -| models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4xx-5xx | */* | +| Error Object | Status Code | Content Type | +| -------------------------- | -------------------------- | -------------------------- | +| models.HTTPValidationError | 422 | application/json | +| models.SDKError | 4xx-5xx | */* | ### Example @@ -469,17 +500,16 @@ res = None try: res = s.models.list() + if res is not None: + # handle response + pass + except models.HTTPValidationError as e: - # handle exception + # handle e.data: models.HTTPValidationErrorData raise(e) except models.SDKError as e: # handle exception raise(e) - -if res is not None: - # handle response - pass - ``` @@ -490,9 +520,9 @@ if res is not None: You can override the default server globally by passing a server name to the `server: str` optional parameter when initializing the SDK client instance. The selected server will then be used as the default on the operations that use it. This table lists the names associated with the available servers: -| Name | Server | Variables | -| ------ | ------------------------ | --------- | -| `prod` | `https://api.mistral.ai` | None | +| Name | Server | Variables | +| ----- | ------ | --------- | +| `prod` | `https://api.mistral.ai` | None | #### Example @@ -505,7 +535,6 @@ s = Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) - res = s.models.list() if res is not None: @@ -527,7 +556,6 @@ s = Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) - res = s.models.list() if res is not None: @@ -625,9 +653,9 @@ s = Mistral(async_client=CustomClient(httpx.AsyncClient())) This SDK supports the following security scheme globally: -| Name | Type | Scheme | Environment Variable | -| --------- | ---- | ----------- | -------------------- | -| `api_key` | http | HTTP Bearer | `MISTRAL_API_KEY` | +| Name | Type | Scheme | Environment Variable | +| -------------------- | -------------------- | -------------------- | -------------------- | +| `api_key` | http | HTTP Bearer | `MISTRAL_API_KEY` | To authenticate with the API the `api_key` parameter must be set when initializing the SDK client instance. 
For example: ```python @@ -638,7 +666,6 @@ s = Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) - res = s.models.list() if res is not None: diff --git a/USAGE.md b/USAGE.md index fb2ca31..124126d 100644 --- a/USAGE.md +++ b/USAGE.md @@ -12,7 +12,6 @@ s = Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) - res = s.chat.complete(model="mistral-small-latest", messages=[ ]) @@ -36,7 +35,10 @@ async def main(): api_key=os.getenv("MISTRAL_API_KEY", ""), ) res = await s.chat.complete_async(model="mistral-small-latest", messages=[ - + { + "content": "Who is the best French painter? Answer in one short sentence.", + "role": "user", + }, ]) if res is not None: # handle response @@ -58,10 +60,9 @@ s = Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) - res = s.files.upload(file={ - "file_name": "your_file_here", - "content": open("", "rb"), + "file_name": "example.file", + "content": open("example.file", "rb"), }) if res is not None: @@ -83,8 +84,8 @@ async def main(): api_key=os.getenv("MISTRAL_API_KEY", ""), ) res = await s.files.upload_async(file={ - "file_name": "your_file_here", - "content": open("", "rb"), + "file_name": "example.file", + "content": open("example.file", "rb"), }) if res is not None: # handle response @@ -106,7 +107,6 @@ s = Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) - res = s.agents.complete(messages=[ { "content": "Who is the best French painter? Answer in one short sentence.", diff --git a/docs/sdks/agents/README.md b/docs/sdks/agents/README.md index 36a7107..279a13f 100644 --- a/docs/sdks/agents/README.md +++ b/docs/sdks/agents/README.md @@ -24,10 +24,9 @@ s = Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) - res = s.agents.complete(messages=[ { - "content": "Who is the best French painter? Answer in one short sentence.", + "content": "", }, ], agent_id="") @@ -79,10 +78,15 @@ s = Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) - res = s.agents.stream(messages=[ { - "content": "Who is the best French painter? 
Answer in one short sentence.", + "content": [ + { + "image_url": { + "url": "http://possible-veal.org", + }, + }, + ], }, ], agent_id="") diff --git a/docs/sdks/chat/README.md b/docs/sdks/chat/README.md index 81e2e22..d5e85cc 100644 --- a/docs/sdks/chat/README.md +++ b/docs/sdks/chat/README.md @@ -24,7 +24,6 @@ s = Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) - res = s.chat.complete(model="mistral-small-latest", messages=[ ]) @@ -80,7 +79,6 @@ s = Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) - res = s.chat.stream(model="mistral-small-latest", messages=[ ]) diff --git a/docs/sdks/embeddings/README.md b/docs/sdks/embeddings/README.md index 38b8b53..ae27086 100644 --- a/docs/sdks/embeddings/README.md +++ b/docs/sdks/embeddings/README.md @@ -23,8 +23,7 @@ s = Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) - -res = s.embeddings.create(inputs="", model="") +res = s.embeddings.create(inputs="", model="Wrangler") if res is not None: # handle response diff --git a/docs/sdks/files/README.md b/docs/sdks/files/README.md index c931f17..41ed961 100644 --- a/docs/sdks/files/README.md +++ b/docs/sdks/files/README.md @@ -30,10 +30,9 @@ s = Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) - res = s.files.upload(file={ - "file_name": "your_file_here", - "content": open("", "rb"), + "file_name": "example.file", + "content": open("example.file", "rb"), }) if res is not None: @@ -74,7 +73,6 @@ s = Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) - res = s.files.list() if res is not None: @@ -114,7 +112,6 @@ s = Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) - res = s.files.retrieve(file_id="") if res is not None: @@ -155,7 +152,6 @@ s = Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) - res = s.files.delete(file_id="") if res is not None: diff --git a/docs/sdks/fim/README.md b/docs/sdks/fim/README.md index 2f3d8fe..cfb3d50 100644 --- a/docs/sdks/fim/README.md +++ b/docs/sdks/fim/README.md @@ -24,7 +24,6 @@ s = Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) - res = s.fim.complete(model="codestral-2405", prompt="def", suffix="return a+b") if res is not None: @@ -75,7 +74,6 @@ s = Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) - res = s.fim.stream(model="codestral-2405", prompt="def", suffix="return a+b") if res is not None: diff --git a/docs/sdks/jobs/README.md b/docs/sdks/jobs/README.md index cecff0e..0929c78 100644 --- a/docs/sdks/jobs/README.md +++ b/docs/sdks/jobs/README.md @@ -25,7 +25,6 @@ s = Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) - res = s.fine_tuning.jobs.list() if res is not None: @@ -74,7 +73,6 @@ s = Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) - res = s.fine_tuning.jobs.create(model="codestral-latest", hyperparameters={}) if res is not None: @@ -122,7 +120,6 @@ s = Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) - res = s.fine_tuning.jobs.get(job_id="b18d8d81-fd7b-4764-a31e-475cb1f36591") if res is not None: @@ -163,7 +160,6 @@ s = Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) - res = s.fine_tuning.jobs.cancel(job_id="03fa7112-315a-4072-a9f2-43f3f1ec962e") if res is not None: @@ -204,7 +200,6 @@ s = Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) - res = s.fine_tuning.jobs.start(job_id="0eb0f807-fb9f-4e46-9c13-4e257df6e1ba") if res is not None: diff --git a/docs/sdks/models/README.md b/docs/sdks/models/README.md index 101903d..1a54bbb 100644 --- a/docs/sdks/models/README.md +++ b/docs/sdks/models/README.md @@ -28,7 +28,6 @@ s = Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) - res = s.models.list() if res 
is not None: @@ -69,7 +68,6 @@ s = Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) - res = s.models.retrieve(model_id="ft:open-mistral-7b:587a6b29:20240514:7e773925") if res is not None: @@ -111,7 +109,6 @@ s = Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) - res = s.models.delete(model_id="ft:open-mistral-7b:587a6b29:20240514:7e773925") if res is not None: @@ -153,7 +150,6 @@ s = Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) - res = s.models.update(model_id="ft:open-mistral-7b:587a6b29:20240514:7e773925") if res is not None: @@ -196,7 +192,6 @@ s = Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) - res = s.models.archive(model_id="ft:open-mistral-7b:587a6b29:20240514:7e773925") if res is not None: @@ -237,7 +232,6 @@ s = Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) - res = s.models.unarchive(model_id="ft:open-mistral-7b:587a6b29:20240514:7e773925") if res is not None: diff --git a/packages/mistralai_azure/.speakeasy/gen.lock b/packages/mistralai_azure/.speakeasy/gen.lock index e2a7b51..2388933 100644 --- a/packages/mistralai_azure/.speakeasy/gen.lock +++ b/packages/mistralai_azure/.speakeasy/gen.lock @@ -3,17 +3,18 @@ id: dc40fa48-2c4d-46ad-ac8b-270749770f34 management: docChecksum: 6060a8ebd8e32b0460521e578beac79d docVersion: 0.0.2 - speakeasyVersion: 1.382.0 - generationVersion: 2.404.11 - releaseVersion: 1.1.1 - configChecksum: 1f0c18691c9e1d661e1c13b35500246c + speakeasyVersion: 1.396.6 + generationVersion: 2.415.6 + releaseVersion: 1.2.1 + configChecksum: 85e942d8a35fb73cee2443337f7828f8 published: true features: python: additionalDependencies: 1.0.0 constsAndDefaults: 1.0.2 - core: 5.4.1 + core: 5.5.3 defaultEnabledRetries: 0.2.0 + enumUnions: 0.1.0 envVarSecurityUsage: 0.3.1 examples: 3.0.0 flatRequests: 1.0.1 @@ -25,16 +26,61 @@ features: nullables: 1.0.0 openEnums: 1.0.0 responseFormat: 1.0.0 - retries: 3.0.0 + retries: 3.0.2 sdkHooks: 1.0.0 serverEvents: 1.0.2 serverEventsSentinels: 0.1.0 serverIDs: 3.0.0 unions: 3.0.2 generatedFiles: - - src/mistralai_azure/sdkconfiguration.py - - src/mistralai_azure/chat.py + - .gitattributes - .vscode/settings.json + - docs/models/arguments.md + - docs/models/assistantmessage.md + - docs/models/assistantmessagerole.md + - docs/models/chatcompletionchoice.md + - docs/models/chatcompletionchoicefinishreason.md + - docs/models/chatcompletionrequest.md + - docs/models/chatcompletionrequestmessages.md + - docs/models/chatcompletionrequeststop.md + - docs/models/chatcompletionrequesttoolchoice.md + - docs/models/chatcompletionresponse.md + - docs/models/chatcompletionstreamrequest.md + - docs/models/chatcompletionstreamrequesttoolchoice.md + - docs/models/completionchunk.md + - docs/models/completionevent.md + - docs/models/completionresponsestreamchoice.md + - docs/models/content.md + - docs/models/contentchunk.md + - docs/models/deltamessage.md + - docs/models/finishreason.md + - docs/models/function.md + - docs/models/functioncall.md + - docs/models/functionname.md + - docs/models/httpvalidationerror.md + - docs/models/loc.md + - docs/models/messages.md + - docs/models/responseformat.md + - docs/models/responseformats.md + - docs/models/role.md + - docs/models/security.md + - docs/models/stop.md + - docs/models/systemmessage.md + - docs/models/textchunk.md + - docs/models/tool.md + - docs/models/toolcall.md + - docs/models/toolchoice.md + - docs/models/toolchoiceenum.md + - docs/models/toolmessage.md + - docs/models/toolmessagerole.md + - docs/models/tooltypes.md + - docs/models/type.md + - docs/models/usageinfo.md 
+  - docs/models/usermessage.md
+  - docs/models/usermessagecontent.md
+  - docs/models/usermessagerole.md
+  - docs/models/utils/retryconfig.md
+  - docs/models/validationerror.md
   - poetry.toml
   - py.typed
   - pylintrc
@@ -43,9 +89,44 @@ generatedFiles:
   - scripts/prepare-readme.py
   - scripts/publish.sh
   - src/mistralai_azure/__init__.py
+  - src/mistralai_azure/_hooks/__init__.py
+  - src/mistralai_azure/_hooks/sdkhooks.py
+  - src/mistralai_azure/_hooks/types.py
   - src/mistralai_azure/basesdk.py
+  - src/mistralai_azure/chat.py
   - src/mistralai_azure/httpclient.py
+  - src/mistralai_azure/models/__init__.py
+  - src/mistralai_azure/models/assistantmessage.py
+  - src/mistralai_azure/models/chatcompletionchoice.py
+  - src/mistralai_azure/models/chatcompletionrequest.py
+  - src/mistralai_azure/models/chatcompletionresponse.py
+  - src/mistralai_azure/models/chatcompletionstreamrequest.py
+  - src/mistralai_azure/models/completionchunk.py
+  - src/mistralai_azure/models/completionevent.py
+  - src/mistralai_azure/models/completionresponsestreamchoice.py
+  - src/mistralai_azure/models/contentchunk.py
+  - src/mistralai_azure/models/deltamessage.py
+  - src/mistralai_azure/models/function.py
+  - src/mistralai_azure/models/functioncall.py
+  - src/mistralai_azure/models/functionname.py
+  - src/mistralai_azure/models/httpvalidationerror.py
+  - src/mistralai_azure/models/responseformat.py
+  - src/mistralai_azure/models/responseformats.py
+  - src/mistralai_azure/models/sdkerror.py
+  - src/mistralai_azure/models/security.py
+  - src/mistralai_azure/models/systemmessage.py
+  - src/mistralai_azure/models/textchunk.py
+  - src/mistralai_azure/models/tool.py
+  - src/mistralai_azure/models/toolcall.py
+  - src/mistralai_azure/models/toolchoice.py
+  - src/mistralai_azure/models/toolchoiceenum.py
+  - src/mistralai_azure/models/toolmessage.py
+  - src/mistralai_azure/models/tooltypes.py
+  - src/mistralai_azure/models/usageinfo.py
+  - src/mistralai_azure/models/usermessage.py
+  - src/mistralai_azure/models/validationerror.py
   - src/mistralai_azure/py.typed
+  - src/mistralai_azure/sdkconfiguration.py
   - src/mistralai_azure/types/__init__.py
   - src/mistralai_azure/types/basemodel.py
   - src/mistralai_azure/utils/__init__.py
@@ -63,83 +144,16 @@ generatedFiles:
   - src/mistralai_azure/utils/serializers.py
   - src/mistralai_azure/utils/url.py
   - src/mistralai_azure/utils/values.py
-  - src/mistralai_azure/models/sdkerror.py
-  - src/mistralai_azure/models/completionevent.py
-  - src/mistralai_azure/models/completionchunk.py
-  - src/mistralai_azure/models/completionresponsestreamchoice.py
-  - src/mistralai_azure/models/deltamessage.py
-  - src/mistralai_azure/models/toolcall.py
-  - src/mistralai_azure/models/functioncall.py
-  - src/mistralai_azure/models/tooltypes.py
-  - src/mistralai_azure/models/usageinfo.py
-  - src/mistralai_azure/models/httpvalidationerror.py
-  - src/mistralai_azure/models/validationerror.py
-  - src/mistralai_azure/models/chatcompletionstreamrequest.py
-  - src/mistralai_azure/models/toolchoice.py
-  - src/mistralai_azure/models/functionname.py
-  - src/mistralai_azure/models/toolchoiceenum.py
-  - src/mistralai_azure/models/tool.py
-  - src/mistralai_azure/models/function.py
-  - src/mistralai_azure/models/responseformat.py
-  - src/mistralai_azure/models/responseformats.py
-  - src/mistralai_azure/models/systemmessage.py
-  - src/mistralai_azure/models/textchunk.py
-  - src/mistralai_azure/models/usermessage.py
-  - src/mistralai_azure/models/contentchunk.py
-  - src/mistralai_azure/models/assistantmessage.py
-  - src/mistralai_azure/models/toolmessage.py
-  - src/mistralai_azure/models/chatcompletionresponse.py
-  - src/mistralai_azure/models/chatcompletionchoice.py
-  - src/mistralai_azure/models/chatcompletionrequest.py
-  - src/mistralai_azure/models/security.py
-  - src/mistralai_azure/models/__init__.py
-  - docs/models/completionevent.md
-  - docs/models/completionchunk.md
-  - docs/models/finishreason.md
-  - docs/models/completionresponsestreamchoice.md
-  - docs/models/deltamessage.md
-  - docs/models/toolcall.md
-  - docs/models/arguments.md
-  - docs/models/functioncall.md
-  - docs/models/tooltypes.md
-  - docs/models/usageinfo.md
-  - docs/models/httpvalidationerror.md
-  - docs/models/loc.md
-  - docs/models/validationerror.md
-  - docs/models/stop.md
-  - docs/models/messages.md
-  - docs/models/chatcompletionstreamrequesttoolchoice.md
-  - docs/models/chatcompletionstreamrequest.md
-  - docs/models/toolchoice.md
-  - docs/models/functionname.md
-  - docs/models/toolchoiceenum.md
-  - docs/models/tool.md
-  - docs/models/function.md
-  - docs/models/responseformat.md
-  - docs/models/responseformats.md
-  - docs/models/role.md
-  - docs/models/content.md
-  - docs/models/systemmessage.md
-  - docs/models/type.md
-  - docs/models/textchunk.md
-  - docs/models/usermessagerole.md
-  - docs/models/usermessagecontent.md
-  - docs/models/usermessage.md
-  - docs/models/contentchunk.md
-  - docs/models/assistantmessagerole.md
-  - docs/models/assistantmessage.md
-  - docs/models/toolmessagerole.md
-  - docs/models/toolmessage.md
-  - docs/models/chatcompletionresponse.md
-  - docs/models/chatcompletionchoicefinishreason.md
-  - docs/models/chatcompletionchoice.md
-  - docs/models/chatcompletionrequeststop.md
-  - docs/models/chatcompletionrequestmessages.md
-  - docs/models/chatcompletionrequesttoolchoice.md
-  - docs/models/chatcompletionrequest.md
-  - docs/models/security.md
-  - docs/models/utils/retryconfig.md
-  - .gitattributes
-  - src/mistralai_azure/_hooks/sdkhooks.py
-  - src/mistralai_azure/_hooks/types.py
-  - src/mistralai_azure/_hooks/__init__.py
+examples:
+  stream_chat:
+    speakeasy-default-stream-chat:
+      requestBody:
+        application/json: {"model": "azureai", "messages": [{"content": []}]}
+  chat_completion_v1_chat_completions_post:
+    speakeasy-default-chat-completion-v1-chat-completions-post:
+      requestBody:
+        application/json: {"model": "azureai", "messages": [{"content": ""}]}
+      responses:
+        "200":
+          application/json: {"id": "cmpl-e5cc70bb28c444948073e77776eb30ef", "object": "chat.completion", "model": "mistral-small-latest", "usage": {"prompt_tokens": 16, "completion_tokens": 34, "total_tokens": 50}, "created": 1702256327, "choices": []}
+        "422": {}
diff --git a/packages/mistralai_azure/.speakeasy/gen.yaml b/packages/mistralai_azure/.speakeasy/gen.yaml
index 88eaa18..84cd337 100644
--- a/packages/mistralai_azure/.speakeasy/gen.yaml
+++ b/packages/mistralai_azure/.speakeasy/gen.yaml
@@ -12,7 +12,7 @@ generation:
 auth:
   oAuth2ClientCredentialsEnabled: true
 python:
-  version: 1.1.1
+  version: 1.2.1
   additionalDependencies:
     dev:
       pytest: ^8.2.2
diff --git a/packages/mistralai_azure/poetry.lock b/packages/mistralai_azure/poetry.lock
index 2e5fecf..b9a32d7 100644
--- a/packages/mistralai_azure/poetry.lock
+++ b/packages/mistralai_azure/poetry.lock
@@ -287,17 +287,6 @@ files = [
     {file = "mypy_extensions-1.0.0.tar.gz", hash = "sha256:75dbf8955dc00442a438fc4d0666508a9a97b6bd41aa2f0ffe9d2f2725af0782"},
 ]
 
-[[package]]
-name = "nodeenv"
-version = "1.9.1"
-description = "Node.js virtual environment builder"
-optional = false
-python-versions
= "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" -files = [ - {file = "nodeenv-1.9.1-py2.py3-none-any.whl", hash = "sha256:ba11c9782d29c27c70ffbdda2d7415098754709be8a7056d79a737cd901155c9"}, - {file = "nodeenv-1.9.1.tar.gz", hash = "sha256:6ec12890a2dab7946721edbfbcd91f3319c6ccc9aec47be7c7e6b7011ee6645f"}, -] - [[package]] name = "packaging" version = "24.1" @@ -342,18 +331,18 @@ testing = ["pytest", "pytest-benchmark"] [[package]] name = "pydantic" -version = "2.8.2" +version = "2.9.1" description = "Data validation using Python type hints" optional = false python-versions = ">=3.8" files = [ - {file = "pydantic-2.8.2-py3-none-any.whl", hash = "sha256:73ee9fddd406dc318b885c7a2eab8a6472b68b8fb5ba8150949fc3db939f23c8"}, - {file = "pydantic-2.8.2.tar.gz", hash = "sha256:6f62c13d067b0755ad1c21a34bdd06c0c12625a22b0fc09c6b149816604f7c2a"}, + {file = "pydantic-2.9.1-py3-none-any.whl", hash = "sha256:7aff4db5fdf3cf573d4b3c30926a510a10e19a0774d38fc4967f78beb6deb612"}, + {file = "pydantic-2.9.1.tar.gz", hash = "sha256:1363c7d975c7036df0db2b4a61f2e062fbc0aa5ab5f2772e0ffc7191a4f4bce2"}, ] [package.dependencies] -annotated-types = ">=0.4.0" -pydantic-core = "2.20.1" +annotated-types = ">=0.6.0" +pydantic-core = "2.23.3" typing-extensions = [ {version = ">=4.6.1", markers = "python_version < \"3.13\""}, {version = ">=4.12.2", markers = "python_version >= \"3.13\""}, @@ -361,103 +350,104 @@ typing-extensions = [ [package.extras] email = ["email-validator (>=2.0.0)"] +timezone = ["tzdata"] [[package]] name = "pydantic-core" -version = "2.20.1" +version = "2.23.3" description = "Core functionality for Pydantic validation and serialization" optional = false python-versions = ">=3.8" files = [ - {file = "pydantic_core-2.20.1-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:3acae97ffd19bf091c72df4d726d552c473f3576409b2a7ca36b2f535ffff4a3"}, - {file = "pydantic_core-2.20.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:41f4c96227a67a013e7de5ff8f20fb496ce573893b7f4f2707d065907bffdbd6"}, - {file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5f239eb799a2081495ea659d8d4a43a8f42cd1fe9ff2e7e436295c38a10c286a"}, - {file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:53e431da3fc53360db73eedf6f7124d1076e1b4ee4276b36fb25514544ceb4a3"}, - {file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f1f62b2413c3a0e846c3b838b2ecd6c7a19ec6793b2a522745b0869e37ab5bc1"}, - {file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5d41e6daee2813ecceea8eda38062d69e280b39df793f5a942fa515b8ed67953"}, - {file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3d482efec8b7dc6bfaedc0f166b2ce349df0011f5d2f1f25537ced4cfc34fd98"}, - {file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e93e1a4b4b33daed65d781a57a522ff153dcf748dee70b40c7258c5861e1768a"}, - {file = "pydantic_core-2.20.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:e7c4ea22b6739b162c9ecaaa41d718dfad48a244909fe7ef4b54c0b530effc5a"}, - {file = "pydantic_core-2.20.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:4f2790949cf385d985a31984907fecb3896999329103df4e4983a4a41e13e840"}, - {file = "pydantic_core-2.20.1-cp310-none-win32.whl", hash = "sha256:5e999ba8dd90e93d57410c5e67ebb67ffcaadcea0ad973240fdfd3a135506250"}, - {file = 
"pydantic_core-2.20.1-cp310-none-win_amd64.whl", hash = "sha256:512ecfbefef6dac7bc5eaaf46177b2de58cdf7acac8793fe033b24ece0b9566c"}, - {file = "pydantic_core-2.20.1-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:d2a8fa9d6d6f891f3deec72f5cc668e6f66b188ab14bb1ab52422fe8e644f312"}, - {file = "pydantic_core-2.20.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:175873691124f3d0da55aeea1d90660a6ea7a3cfea137c38afa0a5ffabe37b88"}, - {file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:37eee5b638f0e0dcd18d21f59b679686bbd18917b87db0193ae36f9c23c355fc"}, - {file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:25e9185e2d06c16ee438ed39bf62935ec436474a6ac4f9358524220f1b236e43"}, - {file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:150906b40ff188a3260cbee25380e7494ee85048584998c1e66df0c7a11c17a6"}, - {file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8ad4aeb3e9a97286573c03df758fc7627aecdd02f1da04516a86dc159bf70121"}, - {file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d3f3ed29cd9f978c604708511a1f9c2fdcb6c38b9aae36a51905b8811ee5cbf1"}, - {file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b0dae11d8f5ded51699c74d9548dcc5938e0804cc8298ec0aa0da95c21fff57b"}, - {file = "pydantic_core-2.20.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:faa6b09ee09433b87992fb5a2859efd1c264ddc37280d2dd5db502126d0e7f27"}, - {file = "pydantic_core-2.20.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:9dc1b507c12eb0481d071f3c1808f0529ad41dc415d0ca11f7ebfc666e66a18b"}, - {file = "pydantic_core-2.20.1-cp311-none-win32.whl", hash = "sha256:fa2fddcb7107e0d1808086ca306dcade7df60a13a6c347a7acf1ec139aa6789a"}, - {file = "pydantic_core-2.20.1-cp311-none-win_amd64.whl", hash = "sha256:40a783fb7ee353c50bd3853e626f15677ea527ae556429453685ae32280c19c2"}, - {file = "pydantic_core-2.20.1-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:595ba5be69b35777474fa07f80fc260ea71255656191adb22a8c53aba4479231"}, - {file = "pydantic_core-2.20.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:a4f55095ad087474999ee28d3398bae183a66be4823f753cd7d67dd0153427c9"}, - {file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f9aa05d09ecf4c75157197f27cdc9cfaeb7c5f15021c6373932bf3e124af029f"}, - {file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e97fdf088d4b31ff4ba35db26d9cc472ac7ef4a2ff2badeabf8d727b3377fc52"}, - {file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bc633a9fe1eb87e250b5c57d389cf28998e4292336926b0b6cdaee353f89a237"}, - {file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d573faf8eb7e6b1cbbcb4f5b247c60ca8be39fe2c674495df0eb4318303137fe"}, - {file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:26dc97754b57d2fd00ac2b24dfa341abffc380b823211994c4efac7f13b9e90e"}, - {file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:33499e85e739a4b60c9dac710c20a08dc73cb3240c9a0e22325e671b27b70d24"}, - {file = "pydantic_core-2.20.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = 
"sha256:bebb4d6715c814597f85297c332297c6ce81e29436125ca59d1159b07f423eb1"}, - {file = "pydantic_core-2.20.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:516d9227919612425c8ef1c9b869bbbee249bc91912c8aaffb66116c0b447ebd"}, - {file = "pydantic_core-2.20.1-cp312-none-win32.whl", hash = "sha256:469f29f9093c9d834432034d33f5fe45699e664f12a13bf38c04967ce233d688"}, - {file = "pydantic_core-2.20.1-cp312-none-win_amd64.whl", hash = "sha256:035ede2e16da7281041f0e626459bcae33ed998cca6a0a007a5ebb73414ac72d"}, - {file = "pydantic_core-2.20.1-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:0827505a5c87e8aa285dc31e9ec7f4a17c81a813d45f70b1d9164e03a813a686"}, - {file = "pydantic_core-2.20.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:19c0fa39fa154e7e0b7f82f88ef85faa2a4c23cc65aae2f5aea625e3c13c735a"}, - {file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4aa223cd1e36b642092c326d694d8bf59b71ddddc94cdb752bbbb1c5c91d833b"}, - {file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c336a6d235522a62fef872c6295a42ecb0c4e1d0f1a3e500fe949415761b8a19"}, - {file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7eb6a0587eded33aeefea9f916899d42b1799b7b14b8f8ff2753c0ac1741edac"}, - {file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:70c8daf4faca8da5a6d655f9af86faf6ec2e1768f4b8b9d0226c02f3d6209703"}, - {file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e9fa4c9bf273ca41f940bceb86922a7667cd5bf90e95dbb157cbb8441008482c"}, - {file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:11b71d67b4725e7e2a9f6e9c0ac1239bbc0c48cce3dc59f98635efc57d6dac83"}, - {file = "pydantic_core-2.20.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:270755f15174fb983890c49881e93f8f1b80f0b5e3a3cc1394a255706cabd203"}, - {file = "pydantic_core-2.20.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:c81131869240e3e568916ef4c307f8b99583efaa60a8112ef27a366eefba8ef0"}, - {file = "pydantic_core-2.20.1-cp313-none-win32.whl", hash = "sha256:b91ced227c41aa29c672814f50dbb05ec93536abf8f43cd14ec9521ea09afe4e"}, - {file = "pydantic_core-2.20.1-cp313-none-win_amd64.whl", hash = "sha256:65db0f2eefcaad1a3950f498aabb4875c8890438bc80b19362cf633b87a8ab20"}, - {file = "pydantic_core-2.20.1-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:4745f4ac52cc6686390c40eaa01d48b18997cb130833154801a442323cc78f91"}, - {file = "pydantic_core-2.20.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:a8ad4c766d3f33ba8fd692f9aa297c9058970530a32c728a2c4bfd2616d3358b"}, - {file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:41e81317dd6a0127cabce83c0c9c3fbecceae981c8391e6f1dec88a77c8a569a"}, - {file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:04024d270cf63f586ad41fff13fde4311c4fc13ea74676962c876d9577bcc78f"}, - {file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:eaad4ff2de1c3823fddf82f41121bdf453d922e9a238642b1dedb33c4e4f98ad"}, - {file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:26ab812fa0c845df815e506be30337e2df27e88399b985d0bb4e3ecfe72df31c"}, - {file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:3c5ebac750d9d5f2706654c638c041635c385596caf68f81342011ddfa1e5598"}, - {file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2aafc5a503855ea5885559eae883978c9b6d8c8993d67766ee73d82e841300dd"}, - {file = "pydantic_core-2.20.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:4868f6bd7c9d98904b748a2653031fc9c2f85b6237009d475b1008bfaeb0a5aa"}, - {file = "pydantic_core-2.20.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:aa2f457b4af386254372dfa78a2eda2563680d982422641a85f271c859df1987"}, - {file = "pydantic_core-2.20.1-cp38-none-win32.whl", hash = "sha256:225b67a1f6d602de0ce7f6c1c3ae89a4aa25d3de9be857999e9124f15dab486a"}, - {file = "pydantic_core-2.20.1-cp38-none-win_amd64.whl", hash = "sha256:6b507132dcfc0dea440cce23ee2182c0ce7aba7054576efc65634f080dbe9434"}, - {file = "pydantic_core-2.20.1-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:b03f7941783b4c4a26051846dea594628b38f6940a2fdc0df00b221aed39314c"}, - {file = "pydantic_core-2.20.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:1eedfeb6089ed3fad42e81a67755846ad4dcc14d73698c120a82e4ccf0f1f9f6"}, - {file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:635fee4e041ab9c479e31edda27fcf966ea9614fff1317e280d99eb3e5ab6fe2"}, - {file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:77bf3ac639c1ff567ae3b47f8d4cc3dc20f9966a2a6dd2311dcc055d3d04fb8a"}, - {file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7ed1b0132f24beeec5a78b67d9388656d03e6a7c837394f99257e2d55b461611"}, - {file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c6514f963b023aeee506678a1cf821fe31159b925c4b76fe2afa94cc70b3222b"}, - {file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:10d4204d8ca33146e761c79f83cc861df20e7ae9f6487ca290a97702daf56006"}, - {file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2d036c7187b9422ae5b262badb87a20a49eb6c5238b2004e96d4da1231badef1"}, - {file = "pydantic_core-2.20.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:9ebfef07dbe1d93efb94b4700f2d278494e9162565a54f124c404a5656d7ff09"}, - {file = "pydantic_core-2.20.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:6b9d9bb600328a1ce523ab4f454859e9d439150abb0906c5a1983c146580ebab"}, - {file = "pydantic_core-2.20.1-cp39-none-win32.whl", hash = "sha256:784c1214cb6dd1e3b15dd8b91b9a53852aed16671cc3fbe4786f4f1db07089e2"}, - {file = "pydantic_core-2.20.1-cp39-none-win_amd64.whl", hash = "sha256:d2fe69c5434391727efa54b47a1e7986bb0186e72a41b203df8f5b0a19a4f669"}, - {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:a45f84b09ac9c3d35dfcf6a27fd0634d30d183205230a0ebe8373a0e8cfa0906"}, - {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:d02a72df14dfdbaf228424573a07af10637bd490f0901cee872c4f434a735b94"}, - {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d2b27e6af28f07e2f195552b37d7d66b150adbaa39a6d327766ffd695799780f"}, - {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:084659fac3c83fd674596612aeff6041a18402f1e1bc19ca39e417d554468482"}, - {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = 
"sha256:242b8feb3c493ab78be289c034a1f659e8826e2233786e36f2893a950a719bb6"}, - {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:38cf1c40a921d05c5edc61a785c0ddb4bed67827069f535d794ce6bcded919fc"}, - {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:e0bbdd76ce9aa5d4209d65f2b27fc6e5ef1312ae6c5333c26db3f5ade53a1e99"}, - {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:254ec27fdb5b1ee60684f91683be95e5133c994cc54e86a0b0963afa25c8f8a6"}, - {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:407653af5617f0757261ae249d3fba09504d7a71ab36ac057c938572d1bc9331"}, - {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:c693e916709c2465b02ca0ad7b387c4f8423d1db7b4649c551f27a529181c5ad"}, - {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5b5ff4911aea936a47d9376fd3ab17e970cc543d1b68921886e7f64bd28308d1"}, - {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:177f55a886d74f1808763976ac4efd29b7ed15c69f4d838bbd74d9d09cf6fa86"}, - {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:964faa8a861d2664f0c7ab0c181af0bea66098b1919439815ca8803ef136fc4e"}, - {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:4dd484681c15e6b9a977c785a345d3e378d72678fd5f1f3c0509608da24f2ac0"}, - {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:f6d6cff3538391e8486a431569b77921adfcdef14eb18fbf19b7c0a5294d4e6a"}, - {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:a6d511cc297ff0883bc3708b465ff82d7560193169a8b93260f74ecb0a5e08a7"}, - {file = "pydantic_core-2.20.1.tar.gz", hash = "sha256:26ca695eeee5f9f1aeeb211ffc12f10bcb6f71e2989988fda61dabd65db878d4"}, + {file = "pydantic_core-2.23.3-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:7f10a5d1b9281392f1bf507d16ac720e78285dfd635b05737c3911637601bae6"}, + {file = "pydantic_core-2.23.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:3c09a7885dd33ee8c65266e5aa7fb7e2f23d49d8043f089989726391dd7350c5"}, + {file = "pydantic_core-2.23.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6470b5a1ec4d1c2e9afe928c6cb37eb33381cab99292a708b8cb9aa89e62429b"}, + {file = "pydantic_core-2.23.3-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:9172d2088e27d9a185ea0a6c8cebe227a9139fd90295221d7d495944d2367700"}, + {file = "pydantic_core-2.23.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:86fc6c762ca7ac8fbbdff80d61b2c59fb6b7d144aa46e2d54d9e1b7b0e780e01"}, + {file = "pydantic_core-2.23.3-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f0cb80fd5c2df4898693aa841425ea1727b1b6d2167448253077d2a49003e0ed"}, + {file = "pydantic_core-2.23.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:03667cec5daf43ac4995cefa8aaf58f99de036204a37b889c24a80927b629cec"}, + {file = "pydantic_core-2.23.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:047531242f8e9c2db733599f1c612925de095e93c9cc0e599e96cf536aaf56ba"}, + {file = "pydantic_core-2.23.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:5499798317fff7f25dbef9347f4451b91ac2a4330c6669821c8202fd354c7bee"}, + {file = 
"pydantic_core-2.23.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:bbb5e45eab7624440516ee3722a3044b83fff4c0372efe183fd6ba678ff681fe"}, + {file = "pydantic_core-2.23.3-cp310-none-win32.whl", hash = "sha256:8b5b3ed73abb147704a6e9f556d8c5cb078f8c095be4588e669d315e0d11893b"}, + {file = "pydantic_core-2.23.3-cp310-none-win_amd64.whl", hash = "sha256:2b603cde285322758a0279995b5796d64b63060bfbe214b50a3ca23b5cee3e83"}, + {file = "pydantic_core-2.23.3-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:c889fd87e1f1bbeb877c2ee56b63bb297de4636661cc9bbfcf4b34e5e925bc27"}, + {file = "pydantic_core-2.23.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:ea85bda3189fb27503af4c45273735bcde3dd31c1ab17d11f37b04877859ef45"}, + {file = "pydantic_core-2.23.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a7f7f72f721223f33d3dc98a791666ebc6a91fa023ce63733709f4894a7dc611"}, + {file = "pydantic_core-2.23.3-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:2b2b55b0448e9da68f56b696f313949cda1039e8ec7b5d294285335b53104b61"}, + {file = "pydantic_core-2.23.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c24574c7e92e2c56379706b9a3f07c1e0c7f2f87a41b6ee86653100c4ce343e5"}, + {file = "pydantic_core-2.23.3-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f2b05e6ccbee333a8f4b8f4d7c244fdb7a979e90977ad9c51ea31261e2085ce0"}, + {file = "pydantic_core-2.23.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e2c409ce1c219c091e47cb03feb3c4ed8c2b8e004efc940da0166aaee8f9d6c8"}, + {file = "pydantic_core-2.23.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d965e8b325f443ed3196db890d85dfebbb09f7384486a77461347f4adb1fa7f8"}, + {file = "pydantic_core-2.23.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:f56af3a420fb1ffaf43ece3ea09c2d27c444e7c40dcb7c6e7cf57aae764f2b48"}, + {file = "pydantic_core-2.23.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:5b01a078dd4f9a52494370af21aa52964e0a96d4862ac64ff7cea06e0f12d2c5"}, + {file = "pydantic_core-2.23.3-cp311-none-win32.whl", hash = "sha256:560e32f0df04ac69b3dd818f71339983f6d1f70eb99d4d1f8e9705fb6c34a5c1"}, + {file = "pydantic_core-2.23.3-cp311-none-win_amd64.whl", hash = "sha256:c744fa100fdea0d000d8bcddee95213d2de2e95b9c12be083370b2072333a0fa"}, + {file = "pydantic_core-2.23.3-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:e0ec50663feedf64d21bad0809f5857bac1ce91deded203efc4a84b31b2e4305"}, + {file = "pydantic_core-2.23.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:db6e6afcb95edbe6b357786684b71008499836e91f2a4a1e55b840955b341dbb"}, + {file = "pydantic_core-2.23.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:98ccd69edcf49f0875d86942f4418a4e83eb3047f20eb897bffa62a5d419c8fa"}, + {file = "pydantic_core-2.23.3-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:a678c1ac5c5ec5685af0133262103defb427114e62eafeda12f1357a12140162"}, + {file = "pydantic_core-2.23.3-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:01491d8b4d8db9f3391d93b0df60701e644ff0894352947f31fff3e52bd5c801"}, + {file = "pydantic_core-2.23.3-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:fcf31facf2796a2d3b7fe338fe8640aa0166e4e55b4cb108dbfd1058049bf4cb"}, + {file = "pydantic_core-2.23.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7200fd561fb3be06827340da066df4311d0b6b8eb0c2116a110be5245dceb326"}, + 
{file = "pydantic_core-2.23.3-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:dc1636770a809dee2bd44dd74b89cc80eb41172bcad8af75dd0bc182c2666d4c"}, + {file = "pydantic_core-2.23.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:67a5def279309f2e23014b608c4150b0c2d323bd7bccd27ff07b001c12c2415c"}, + {file = "pydantic_core-2.23.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:748bdf985014c6dd3e1e4cc3db90f1c3ecc7246ff5a3cd4ddab20c768b2f1dab"}, + {file = "pydantic_core-2.23.3-cp312-none-win32.whl", hash = "sha256:255ec6dcb899c115f1e2a64bc9ebc24cc0e3ab097775755244f77360d1f3c06c"}, + {file = "pydantic_core-2.23.3-cp312-none-win_amd64.whl", hash = "sha256:40b8441be16c1e940abebed83cd006ddb9e3737a279e339dbd6d31578b802f7b"}, + {file = "pydantic_core-2.23.3-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:6daaf5b1ba1369a22c8b050b643250e3e5efc6a78366d323294aee54953a4d5f"}, + {file = "pydantic_core-2.23.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:d015e63b985a78a3d4ccffd3bdf22b7c20b3bbd4b8227809b3e8e75bc37f9cb2"}, + {file = "pydantic_core-2.23.3-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a3fc572d9b5b5cfe13f8e8a6e26271d5d13f80173724b738557a8c7f3a8a3791"}, + {file = "pydantic_core-2.23.3-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:f6bd91345b5163ee7448bee201ed7dd601ca24f43f439109b0212e296eb5b423"}, + {file = "pydantic_core-2.23.3-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fc379c73fd66606628b866f661e8785088afe2adaba78e6bbe80796baf708a63"}, + {file = "pydantic_core-2.23.3-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:fbdce4b47592f9e296e19ac31667daed8753c8367ebb34b9a9bd89dacaa299c9"}, + {file = "pydantic_core-2.23.3-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fc3cf31edf405a161a0adad83246568647c54404739b614b1ff43dad2b02e6d5"}, + {file = "pydantic_core-2.23.3-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:8e22b477bf90db71c156f89a55bfe4d25177b81fce4aa09294d9e805eec13855"}, + {file = "pydantic_core-2.23.3-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:0a0137ddf462575d9bce863c4c95bac3493ba8e22f8c28ca94634b4a1d3e2bb4"}, + {file = "pydantic_core-2.23.3-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:203171e48946c3164fe7691fc349c79241ff8f28306abd4cad5f4f75ed80bc8d"}, + {file = "pydantic_core-2.23.3-cp313-none-win32.whl", hash = "sha256:76bdab0de4acb3f119c2a4bff740e0c7dc2e6de7692774620f7452ce11ca76c8"}, + {file = "pydantic_core-2.23.3-cp313-none-win_amd64.whl", hash = "sha256:37ba321ac2a46100c578a92e9a6aa33afe9ec99ffa084424291d84e456f490c1"}, + {file = "pydantic_core-2.23.3-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:d063c6b9fed7d992bcbebfc9133f4c24b7a7f215d6b102f3e082b1117cddb72c"}, + {file = "pydantic_core-2.23.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:6cb968da9a0746a0cf521b2b5ef25fc5a0bee9b9a1a8214e0a1cfaea5be7e8a4"}, + {file = "pydantic_core-2.23.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:edbefe079a520c5984e30e1f1f29325054b59534729c25b874a16a5048028d16"}, + {file = "pydantic_core-2.23.3-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:cbaaf2ef20d282659093913da9d402108203f7cb5955020bd8d1ae5a2325d1c4"}, + {file = "pydantic_core-2.23.3-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fb539d7e5dc4aac345846f290cf504d2fd3c1be26ac4e8b5e4c2b688069ff4cf"}, + {file = 
"pydantic_core-2.23.3-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7e6f33503c5495059148cc486867e1d24ca35df5fc064686e631e314d959ad5b"}, + {file = "pydantic_core-2.23.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:04b07490bc2f6f2717b10c3969e1b830f5720b632f8ae2f3b8b1542394c47a8e"}, + {file = "pydantic_core-2.23.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:03795b9e8a5d7fda05f3873efc3f59105e2dcff14231680296b87b80bb327295"}, + {file = "pydantic_core-2.23.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:c483dab0f14b8d3f0df0c6c18d70b21b086f74c87ab03c59250dbf6d3c89baba"}, + {file = "pydantic_core-2.23.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:8b2682038e255e94baf2c473dca914a7460069171ff5cdd4080be18ab8a7fd6e"}, + {file = "pydantic_core-2.23.3-cp38-none-win32.whl", hash = "sha256:f4a57db8966b3a1d1a350012839c6a0099f0898c56512dfade8a1fe5fb278710"}, + {file = "pydantic_core-2.23.3-cp38-none-win_amd64.whl", hash = "sha256:13dd45ba2561603681a2676ca56006d6dee94493f03d5cadc055d2055615c3ea"}, + {file = "pydantic_core-2.23.3-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:82da2f4703894134a9f000e24965df73cc103e31e8c31906cc1ee89fde72cbd8"}, + {file = "pydantic_core-2.23.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:dd9be0a42de08f4b58a3cc73a123f124f65c24698b95a54c1543065baca8cf0e"}, + {file = "pydantic_core-2.23.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:89b731f25c80830c76fdb13705c68fef6a2b6dc494402987c7ea9584fe189f5d"}, + {file = "pydantic_core-2.23.3-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c6de1ec30c4bb94f3a69c9f5f2182baeda5b809f806676675e9ef6b8dc936f28"}, + {file = "pydantic_core-2.23.3-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bb68b41c3fa64587412b104294b9cbb027509dc2f6958446c502638d481525ef"}, + {file = "pydantic_core-2.23.3-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1c3980f2843de5184656aab58698011b42763ccba11c4a8c35936c8dd6c7068c"}, + {file = "pydantic_core-2.23.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:94f85614f2cba13f62c3c6481716e4adeae48e1eaa7e8bac379b9d177d93947a"}, + {file = "pydantic_core-2.23.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:510b7fb0a86dc8f10a8bb43bd2f97beb63cffad1203071dc434dac26453955cd"}, + {file = "pydantic_core-2.23.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:1eba2f7ce3e30ee2170410e2171867ea73dbd692433b81a93758ab2de6c64835"}, + {file = "pydantic_core-2.23.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:4b259fd8409ab84b4041b7b3f24dcc41e4696f180b775961ca8142b5b21d0e70"}, + {file = "pydantic_core-2.23.3-cp39-none-win32.whl", hash = "sha256:40d9bd259538dba2f40963286009bf7caf18b5112b19d2b55b09c14dde6db6a7"}, + {file = "pydantic_core-2.23.3-cp39-none-win_amd64.whl", hash = "sha256:5a8cd3074a98ee70173a8633ad3c10e00dcb991ecec57263aacb4095c5efb958"}, + {file = "pydantic_core-2.23.3-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:f399e8657c67313476a121a6944311fab377085ca7f490648c9af97fc732732d"}, + {file = "pydantic_core-2.23.3-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:6b5547d098c76e1694ba85f05b595720d7c60d342f24d5aad32c3049131fa5c4"}, + {file = "pydantic_core-2.23.3-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0dda0290a6f608504882d9f7650975b4651ff91c85673341789a476b1159f211"}, + {file = 
"pydantic_core-2.23.3-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:65b6e5da855e9c55a0c67f4db8a492bf13d8d3316a59999cfbaf98cc6e401961"}, + {file = "pydantic_core-2.23.3-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:09e926397f392059ce0afdcac920df29d9c833256354d0c55f1584b0b70cf07e"}, + {file = "pydantic_core-2.23.3-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:87cfa0ed6b8c5bd6ae8b66de941cece179281239d482f363814d2b986b79cedc"}, + {file = "pydantic_core-2.23.3-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:e61328920154b6a44d98cabcb709f10e8b74276bc709c9a513a8c37a18786cc4"}, + {file = "pydantic_core-2.23.3-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:ce3317d155628301d649fe5e16a99528d5680af4ec7aa70b90b8dacd2d725c9b"}, + {file = "pydantic_core-2.23.3-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:e89513f014c6be0d17b00a9a7c81b1c426f4eb9224b15433f3d98c1a071f8433"}, + {file = "pydantic_core-2.23.3-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:4f62c1c953d7ee375df5eb2e44ad50ce2f5aff931723b398b8bc6f0ac159791a"}, + {file = "pydantic_core-2.23.3-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2718443bc671c7ac331de4eef9b673063b10af32a0bb385019ad61dcf2cc8f6c"}, + {file = "pydantic_core-2.23.3-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a0d90e08b2727c5d01af1b5ef4121d2f0c99fbee692c762f4d9d0409c9da6541"}, + {file = "pydantic_core-2.23.3-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2b676583fc459c64146debea14ba3af54e540b61762dfc0613dc4e98c3f66eeb"}, + {file = "pydantic_core-2.23.3-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:50e4661f3337977740fdbfbae084ae5693e505ca2b3130a6d4eb0f2281dc43b8"}, + {file = "pydantic_core-2.23.3-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:68f4cf373f0de6abfe599a38307f4417c1c867ca381c03df27c873a9069cda25"}, + {file = "pydantic_core-2.23.3-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:59d52cf01854cb26c46958552a21acb10dd78a52aa34c86f284e66b209db8cab"}, + {file = "pydantic_core-2.23.3.tar.gz", hash = "sha256:3cb0f65d8b4121c1b015c60104a685feb929a29d7cf204387c7f2688c7974690"}, ] [package.dependencies] @@ -493,24 +483,6 @@ typing-extensions = {version = ">=3.10.0", markers = "python_version < \"3.10\"" spelling = ["pyenchant (>=3.2,<4.0)"] testutils = ["gitpython (>3)"] -[[package]] -name = "pyright" -version = "1.1.374" -description = "Command line wrapper for pyright" -optional = false -python-versions = ">=3.7" -files = [ - {file = "pyright-1.1.374-py3-none-any.whl", hash = "sha256:55752bcf7a3646d293cd76710a983b71e16f6128aab2d42468e6eb7e46c0a70d"}, - {file = "pyright-1.1.374.tar.gz", hash = "sha256:d01b2daf864ba5e0362e56b844984865970d7204158e61eb685e2dab7804cb82"}, -] - -[package.dependencies] -nodeenv = ">=1.6.0" - -[package.extras] -all = ["twine (>=3.4.1)"] -dev = ["twine (>=3.4.1)"] - [[package]] name = "pytest" version = "8.3.2" @@ -649,4 +621,4 @@ typing-extensions = ">=3.7.4" [metadata] lock-version = "2.0" python-versions = "^3.8" -content-hash = "85499d03f45cd26302b8b267be44478c701581e8a56a3df0907bb38897fdb2e4" +content-hash = "4dfa1b4612afda308a6d0df6d282f34b7020cf4639d6668ac7c63e40807d9e0b" diff --git a/packages/mistralai_azure/pyproject.toml b/packages/mistralai_azure/pyproject.toml index 34598ba..08f615e 100644 --- a/packages/mistralai_azure/pyproject.toml +++ b/packages/mistralai_azure/pyproject.toml @@ -1,6 +1,6 @@ 
 [tool.poetry]
 name = "mistralai_azure"
-version = "1.1.1"
+version = "1.2.1"
 description = "Python Client SDK for the Mistral AI API in Azure."
 authors = ["Mistral",]
 readme = "README-PYPI.md"
@@ -20,14 +20,13 @@ python = "^3.8"
 eval-type-backport = "^0.2.0"
 httpx = "^0.27.0"
 jsonpath-python = "^1.0.6"
-pydantic = "~2.8.2"
+pydantic = "~2.9.0"
 python-dateutil = "2.8.2"
 typing-inspect = "^0.9.0"
 
 [tool.poetry.group.dev.dependencies]
 mypy = "==1.10.1"
 pylint = "==3.2.3"
-pyright = "==1.1.374"
 pytest = "^8.2.2"
 pytest-asyncio = "^0.23.7"
 types-python-dateutil = "^2.9.0.20240316"
diff --git a/packages/mistralai_azure/scripts/prepare-readme.py b/packages/mistralai_azure/scripts/prepare-readme.py
index a8ef8ea..825d9de 100644
--- a/packages/mistralai_azure/scripts/prepare-readme.py
+++ b/packages/mistralai_azure/scripts/prepare-readme.py
@@ -3,7 +3,7 @@
 import shutil
 
 try:
-    shutil.copyfile('README.md', 'README-PYPI.md')
+    shutil.copyfile("README.md", "README-PYPI.md")
 except Exception as e:
     print("Failed to copy README.md to README-PYPI.md")
     print(e)
diff --git a/packages/mistralai_azure/src/mistralai_azure/_hooks/sdkhooks.py b/packages/mistralai_azure/src/mistralai_azure/_hooks/sdkhooks.py
index c8e9631..37ff4e9 100644
--- a/packages/mistralai_azure/src/mistralai_azure/_hooks/sdkhooks.py
+++ b/packages/mistralai_azure/src/mistralai_azure/_hooks/sdkhooks.py
@@ -1,11 +1,21 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
 
 import httpx
-from .types import SDKInitHook, BeforeRequestContext, BeforeRequestHook, AfterSuccessContext, AfterSuccessHook, AfterErrorContext, AfterErrorHook, Hooks
+from .types import (
+    SDKInitHook,
+    BeforeRequestContext,
+    BeforeRequestHook,
+    AfterSuccessContext,
+    AfterSuccessHook,
+    AfterErrorContext,
+    AfterErrorHook,
+    Hooks,
+)
 from .registration import init_hooks
 from typing import List, Optional, Tuple
 from mistralai_azure.httpclient import HttpClient
 
+
 class SDKHooks(Hooks):
     def __init__(self) -> None:
         self.sdk_init_hooks: List[SDKInitHook] = []
@@ -31,7 +41,9 @@ def sdk_init(self, base_url: str, client: HttpClient) -> Tuple[str, HttpClient]:
             base_url, client = hook.sdk_init(base_url, client)
         return base_url, client
 
-    def before_request(self, hook_ctx: BeforeRequestContext, request: httpx.Request) -> httpx.Request:
+    def before_request(
+        self, hook_ctx: BeforeRequestContext, request: httpx.Request
+    ) -> httpx.Request:
         for hook in self.before_request_hooks:
             out = hook.before_request(hook_ctx, request)
             if isinstance(out, Exception):
@@ -40,7 +52,9 @@ def before_request(self, hook_ctx: BeforeRequestContext, request: httpx.Request)
 
         return request
 
-    def after_success(self, hook_ctx: AfterSuccessContext, response: httpx.Response) -> httpx.Response:
+    def after_success(
+        self, hook_ctx: AfterSuccessContext, response: httpx.Response
+    ) -> httpx.Response:
         for hook in self.after_success_hooks:
             out = hook.after_success(hook_ctx, response)
             if isinstance(out, Exception):
@@ -48,7 +62,12 @@ def after_success(self, hook_ctx: AfterSuccessContext, response: httpx.Response)
             response = out
         return response
 
-    def after_error(self, hook_ctx: AfterErrorContext, response: Optional[httpx.Response], error: Optional[Exception]) -> Tuple[Optional[httpx.Response], Optional[Exception]]:
+    def after_error(
+        self,
+        hook_ctx: AfterErrorContext,
+        response: Optional[httpx.Response],
+        error: Optional[Exception],
+    ) -> Tuple[Optional[httpx.Response], Optional[Exception]]:
         for hook in self.after_error_hooks:
             result = hook.after_error(hook_ctx, response, error)
             if isinstance(result, Exception):
diff --git a/packages/mistralai_azure/src/mistralai_azure/_hooks/types.py b/packages/mistralai_azure/src/mistralai_azure/_hooks/types.py
index 3076b41..5e34da2 100644
--- a/packages/mistralai_azure/src/mistralai_azure/_hooks/types.py
+++ b/packages/mistralai_azure/src/mistralai_azure/_hooks/types.py
@@ -1,6 +1,5 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
 
-
 from abc import ABC, abstractmethod
 import httpx
 from mistralai_azure.httpclient import HttpClient
@@ -12,7 +11,12 @@ class HookContext:
     oauth2_scopes: Optional[List[str]] = None
     security_source: Optional[Union[Any, Callable[[], Any]]] = None
 
-    def __init__(self, operation_id: str, oauth2_scopes: Optional[List[str]], security_source: Optional[Union[Any, Callable[[], Any]]]):
+    def __init__(
+        self,
+        operation_id: str,
+        oauth2_scopes: Optional[List[str]],
+        security_source: Optional[Union[Any, Callable[[], Any]]],
+    ):
         self.operation_id = operation_id
         self.oauth2_scopes = oauth2_scopes
         self.security_source = security_source
@@ -20,18 +24,23 @@ def __init__(self, operation_id: str, oauth2_scopes: Optional[List[str]], securi
 
 class BeforeRequestContext(HookContext):
     def __init__(self, hook_ctx: HookContext):
-        super().__init__(hook_ctx.operation_id, hook_ctx.oauth2_scopes, hook_ctx.security_source)
+        super().__init__(
+            hook_ctx.operation_id, hook_ctx.oauth2_scopes, hook_ctx.security_source
+        )
 
 
 class AfterSuccessContext(HookContext):
     def __init__(self, hook_ctx: HookContext):
-        super().__init__(hook_ctx.operation_id, hook_ctx.oauth2_scopes, hook_ctx.security_source)
-
+        super().__init__(
+            hook_ctx.operation_id, hook_ctx.oauth2_scopes, hook_ctx.security_source
+        )
 
 class AfterErrorContext(HookContext):
     def __init__(self, hook_ctx: HookContext):
-        super().__init__(hook_ctx.operation_id, hook_ctx.oauth2_scopes, hook_ctx.security_source)
+        super().__init__(
+            hook_ctx.operation_id, hook_ctx.oauth2_scopes, hook_ctx.security_source
+        )
 
 
 class SDKInitHook(ABC):
@@ -42,19 +51,28 @@ def sdk_init(self, base_url: str, client: HttpClient) -> Tuple[str, HttpClient]:
 
 class BeforeRequestHook(ABC):
     @abstractmethod
-    def before_request(self, hook_ctx: BeforeRequestContext, request: httpx.Request) -> Union[httpx.Request, Exception]:
+    def before_request(
+        self, hook_ctx: BeforeRequestContext, request: httpx.Request
+    ) -> Union[httpx.Request, Exception]:
         pass
 
 
 class AfterSuccessHook(ABC):
     @abstractmethod
-    def after_success(self, hook_ctx: AfterSuccessContext, response: httpx.Response) -> Union[httpx.Response, Exception]:
+    def after_success(
+        self, hook_ctx: AfterSuccessContext, response: httpx.Response
+    ) -> Union[httpx.Response, Exception]:
         pass
 
 
 class AfterErrorHook(ABC):
     @abstractmethod
-    def after_error(self, hook_ctx: AfterErrorContext, response: Optional[httpx.Response], error: Optional[Exception]) -> Union[Tuple[Optional[httpx.Response], Optional[Exception]], Exception]:
+    def after_error(
+        self,
+        hook_ctx: AfterErrorContext,
+        response: Optional[httpx.Response],
+        error: Optional[Exception],
+    ) -> Union[Tuple[Optional[httpx.Response], Optional[Exception]], Exception]:
         pass
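The abstract hook classes above are the SDK's extension surface. A minimal sketch of a custom hook written against these interfaces follows; the hook class name and header are illustrative, and the wiring into the SDK happens in the user-editable _hooks/registration.py, whose init_hooks entry point is imported by SDKHooks above but is not part of this diff.

    import httpx
    from typing import Union

    from mistralai_azure._hooks.types import BeforeRequestContext, BeforeRequestHook


    class RequestTaggingHook(BeforeRequestHook):
        def before_request(
            self, hook_ctx: BeforeRequestContext, request: httpx.Request
        ) -> Union[httpx.Request, Exception]:
            # Tag every outgoing request with the operation being invoked.
            # Returning an Exception instead of the request would abort the call,
            # per the Union return type declared on the ABC above.
            request.headers["x-operation-id"] = hook_ctx.operation_id
            return request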
diff --git a/packages/mistralai_azure/src/mistralai_azure/basesdk.py b/packages/mistralai_azure/src/mistralai_azure/basesdk.py
index 1f22dbc..772b44c 100644
--- a/packages/mistralai_azure/src/mistralai_azure/basesdk.py
+++ b/packages/mistralai_azure/src/mistralai_azure/basesdk.py
@@ -3,10 +3,15 @@
 from .sdkconfiguration import SDKConfiguration
 import httpx
 from mistralai_azure import models, utils
-from mistralai_azure._hooks import AfterErrorContext, AfterSuccessContext, BeforeRequestContext
+from mistralai_azure._hooks import (
+    AfterErrorContext,
+    AfterSuccessContext,
+    BeforeRequestContext,
+)
 from mistralai_azure.utils import RetryConfig, SerializedRequestBody, get_body_content
 from typing import Callable, List, Optional, Tuple
 
+
 class BaseSDK:
     sdk_configuration: SDKConfiguration
 
@@ -24,6 +29,46 @@ def get_url(self, base_url, url_variables):
         return utils.template_url(base_url, url_variables)
 
+    def build_request_async(
+        self,
+        method,
+        path,
+        base_url,
+        url_variables,
+        request,
+        request_body_required,
+        request_has_path_params,
+        request_has_query_params,
+        user_agent_header,
+        accept_header_value,
+        _globals=None,
+        security=None,
+        timeout_ms: Optional[int] = None,
+        get_serialized_body: Optional[
+            Callable[[], Optional[SerializedRequestBody]]
+        ] = None,
+        url_override: Optional[str] = None,
+    ) -> httpx.Request:
+        client = self.sdk_configuration.async_client
+        return self.build_request_with_client(
+            client,
+            method,
+            path,
+            base_url,
+            url_variables,
+            request,
+            request_body_required,
+            request_has_path_params,
+            request_has_query_params,
+            user_agent_header,
+            accept_header_value,
+            _globals,
+            security,
+            timeout_ms,
+            get_serialized_body,
+            url_override,
+        )
+
     def build_request(
         self,
         method,
@@ -45,7 +90,46 @@
         url_override: Optional[str] = None,
     ) -> httpx.Request:
         client = self.sdk_configuration.client
+        return self.build_request_with_client(
+            client,
+            method,
+            path,
+            base_url,
+            url_variables,
+            request,
+            request_body_required,
+            request_has_path_params,
+            request_has_query_params,
+            user_agent_header,
+            accept_header_value,
+            _globals,
+            security,
+            timeout_ms,
+            get_serialized_body,
+            url_override,
+        )
 
+    def build_request_with_client(
+        self,
+        client,
+        method,
+        path,
+        base_url,
+        url_variables,
+        request,
+        request_body_required,
+        request_has_path_params,
+        request_has_query_params,
+        user_agent_header,
+        accept_header_value,
+        _globals=None,
+        security=None,
+        timeout_ms: Optional[int] = None,
+        get_serialized_body: Optional[
+            Callable[[], Optional[SerializedRequestBody]]
+        ] = None,
+        url_override: Optional[str] = None,
+    ) -> httpx.Request:
         query_params = {}
 
         url = url_override
@@ -69,7 +153,7 @@
         if security is not None:
             if callable(security):
                 security = security()
-        
+
         if security is not None:
             security_headers, security_query_params = utils.get_security(security)
             headers = {**headers, **security_headers}
@@ -129,7 +213,7 @@ def do():
                         req.method,
                         req.url,
                         req.headers,
-                        get_body_content(req)
+                        get_body_content(req),
                     )
                     http_res = client.send(req, stream=stream)
                 except Exception as e:
@@ -149,7 +233,7 @@
                     http_res.status_code,
                     http_res.url,
                     http_res.headers,
-                    "" if stream else http_res.text
+                    "" if stream else http_res.text,
                 )
 
                 if utils.match_status_codes(error_status_codes, http_res.status_code):
@@ -189,6 +273,7 @@
     ) -> httpx.Response:
         client = self.sdk_configuration.async_client
         logger = self.sdk_configuration.debug_logger
+
         async def do():
             http_res = None
             try:
@@ -200,7 +285,7 @@ async def do():
                         req.method,
                         req.url,
                         req.headers,
-                        get_body_content(req)
+                        get_body_content(req),
                     )
                     http_res = await client.send(req, stream=stream)
                 except Exception as e:
@@ -220,7 +305,7 @@
                     http_res.status_code,
                     http_res.url,
                     http_res.headers,
-                    "" if stream else http_res.text
+                    "" if stream else http_res.text,
                 )
 
                 if utils.match_status_codes(error_status_codes, http_res.status_code):
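The basesdk refactor above routes both sync and async request construction through the shared build_request_with_client, so async calls are now built against the async httpx client. The retry plumbing is unchanged: a per-call RetryConfig is paired with the retryable status codes (429/5XX, as listed in the chat.py diff that follows) and passed to do_request. A hedged sketch of overriding retries on a single call follows; RetryConfig is imported from mistralai_azure.utils as in the diff above, while the BackoffStrategy constructor, its millisecond units, and the MistralAzure client class with its azure_api_key/azure_endpoint arguments are assumed from the package README rather than shown in this hunk.

    from mistralai_azure import MistralAzure
    from mistralai_azure.utils import BackoffStrategy, RetryConfig

    sdk = MistralAzure(
        azure_api_key="<your-api-key>",
        azure_endpoint="<your-endpoint>",
    )

    # Exponential backoff from 1s up to 50s, capped at 60s overall;
    # the final True also retries connection errors.
    res = sdk.chat.stream(
        messages=[{"role": "user", "content": "Hello"}],
        retries=RetryConfig("backoff", BackoffStrategy(1000, 50000, 1.1, 60000), True),
    )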
diff --git a/packages/mistralai_azure/src/mistralai_azure/chat.py b/packages/mistralai_azure/src/mistralai_azure/chat.py
index bcb82ce..5f1e539 100644
--- a/packages/mistralai_azure/src/mistralai_azure/chat.py
+++ b/packages/mistralai_azure/src/mistralai_azure/chat.py
@@ -7,12 +7,13 @@
 from mistralai_azure.utils import eventstreaming
 from typing import Any, AsyncGenerator, Generator, List, Optional, Union
 
+
 class Chat(BaseSDK):
     r"""Chat Completion API."""
-
-
+
     def stream(
-        self, *,
+        self,
+        *,
         messages: Union[List[models.Messages], List[models.MessagesTypedDict]],
         model: OptionalNullable[str] = "azureai",
         temperature: Optional[float] = 0.7,
@@ -22,9 +23,18 @@
         stream: Optional[bool] = True,
         stop: Optional[Union[models.Stop, models.StopTypedDict]] = None,
         random_seed: OptionalNullable[int] = UNSET,
-        response_format: Optional[Union[models.ResponseFormat, models.ResponseFormatTypedDict]] = None,
-        tools: OptionalNullable[Union[List[models.Tool], List[models.ToolTypedDict]]] = UNSET,
-        tool_choice: Optional[Union[models.ChatCompletionStreamRequestToolChoice, models.ChatCompletionStreamRequestToolChoiceTypedDict]] = None,
+        response_format: Optional[
+            Union[models.ResponseFormat, models.ResponseFormatTypedDict]
+        ] = None,
+        tools: OptionalNullable[
+            Union[List[models.Tool], List[models.ToolTypedDict]]
+        ] = UNSET,
+        tool_choice: Optional[
+            Union[
+                models.ChatCompletionStreamRequestToolChoice,
+                models.ChatCompletionStreamRequestToolChoiceTypedDict,
+            ]
+        ] = None,
         safe_prompt: Optional[bool] = False,
         retries: OptionalNullable[utils.RetryConfig] = UNSET,
         server_url: Optional[str] = None,
@@ -40,12 +50,12 @@
         :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.
         :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.
         :param min_tokens: The minimum number of tokens to generate in the completion.
-        :param stream: 
+        :param stream:
         :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array
         :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results.
-        :param response_format: 
-        :param tools: 
-        :param tool_choice: 
+        :param response_format:
+        :param tools:
+        :param tool_choice:
         :param safe_prompt: Whether to inject a safety prompt before all conversations.
         :param retries: Override the default retry configuration for this method
         :param server_url: Override the default server URL for this method
@@ -55,10 +65,10 @@
         url_variables = None
         if timeout_ms is None:
             timeout_ms = self.sdk_configuration.timeout_ms
-
+
         if server_url is not None:
             base_url = server_url
-
+
         request = models.ChatCompletionStreamRequest(
             model=model,
             temperature=temperature,
@@ -69,12 +79,16 @@
             stop=stop,
             random_seed=random_seed,
             messages=utils.get_pydantic_model(messages, List[models.Messages]),
-            response_format=utils.get_pydantic_model(response_format, Optional[models.ResponseFormat]),
+            response_format=utils.get_pydantic_model(
+                response_format, Optional[models.ResponseFormat]
+            ),
             tools=utils.get_pydantic_model(tools, OptionalNullable[List[models.Tool]]),
-            tool_choice=utils.get_pydantic_model(tool_choice, Optional[models.ChatCompletionStreamRequestToolChoice]),
+            tool_choice=utils.get_pydantic_model(
+                tool_choice, Optional[models.ChatCompletionStreamRequestToolChoice]
+            ),
             safe_prompt=safe_prompt,
         )
-
+
         req = self.build_request(
             method="POST",
             path="/chat/completions#stream",
@@ -87,48 +101,58 @@
             user_agent_header="user-agent",
             accept_header_value="text/event-stream",
             security=self.sdk_configuration.security,
-            get_serialized_body=lambda: utils.serialize_request_body(request, False, False, "json", models.ChatCompletionStreamRequest),
+            get_serialized_body=lambda: utils.serialize_request_body(
+                request, False, False, "json", models.ChatCompletionStreamRequest
+            ),
             timeout_ms=timeout_ms,
         )
-
+
         if retries == UNSET:
             if self.sdk_configuration.retry_config is not UNSET:
                 retries = self.sdk_configuration.retry_config
 
         retry_config = None
         if isinstance(retries, utils.RetryConfig):
-            retry_config = (retries, [
-                "429",
-                "500",
-                "502",
-                "503",
-                "504"
-            ])
-
+            retry_config = (retries, ["429", "500", "502", "503", "504"])
+
         http_res = self.do_request(
-            hook_ctx=HookContext(operation_id="stream_chat", oauth2_scopes=[], security_source=self.sdk_configuration.security),
+            hook_ctx=HookContext(
+                operation_id="stream_chat",
+                oauth2_scopes=[],
+                security_source=self.sdk_configuration.security,
+            ),
             request=req,
-            error_status_codes=["422","4XX","5XX"],
+            error_status_codes=["422", "4XX", "5XX"],
             stream=True,
-            retry_config=retry_config
+            retry_config=retry_config,
         )
-
+
         data: Any = None
         if utils.match_response(http_res, "200", "text/event-stream"):
-            return eventstreaming.stream_events(http_res, lambda raw: utils.unmarshal_json(raw, models.CompletionEvent), sentinel="[DONE]")
+            return eventstreaming.stream_events(
+                http_res,
+                lambda raw: utils.unmarshal_json(raw, models.CompletionEvent),
+                sentinel="[DONE]",
+            )
         if utils.match_response(http_res, "422", "application/json"):
             data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData)
             raise models.HTTPValidationError(data=data)
-        if utils.match_response(http_res, ["4XX","5XX"], "*"):
-            raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res)
-
+        if utils.match_response(http_res, ["4XX", "5XX"], "*"):
+            raise models.SDKError(
+                "API error occurred", http_res.status_code, http_res.text, http_res
+            )
+
         content_type = http_res.headers.get("Content-Type")
-        raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res)
+        raise models.SDKError(
+            f"Unexpected response received (code: {http_res.status_code}, type: {content_type})",
+            http_res.status_code,
+            http_res.text,
+            http_res,
+        )
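On a 200 response, the stream() method above returns a generator of CompletionEvent objects decoded from the text/event-stream body until the [DONE] sentinel. A hedged consumption sketch follows; it assumes the MistralAzure client class and its constructor arguments from the package README, and that each event's data field carries a CompletionChunk whose choices expose delta.content, as modeled in this package's generated models (neither shown in this hunk).

    from mistralai_azure import MistralAzure

    sdk = MistralAzure(
        azure_api_key="<your-api-key>",
        azure_endpoint="<your-endpoint>",
    )

    events = sdk.chat.stream(
        messages=[{"role": "user", "content": "Write one haiku."}],
    )
    for event in events:
        # event.data is assumed to be a CompletionChunk; iteration ends
        # when the [DONE] sentinel is received.
        for choice in event.data.choices:
            print(choice.delta.content or "", end="")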
 
-
-
     async def stream_async(
-        self, *,
+        self,
+        *,
         messages: Union[List[models.Messages], List[models.MessagesTypedDict]],
         model: OptionalNullable[str] = "azureai",
         temperature: Optional[float] = 0.7,
@@ -138,9 +162,18 @@
         stream: Optional[bool] = True,
         stop: Optional[Union[models.Stop, models.StopTypedDict]] = None,
         random_seed: OptionalNullable[int] = UNSET,
-        response_format: Optional[Union[models.ResponseFormat, models.ResponseFormatTypedDict]] = None,
-        tools: OptionalNullable[Union[List[models.Tool], List[models.ToolTypedDict]]] = UNSET,
-        tool_choice: Optional[Union[models.ChatCompletionStreamRequestToolChoice, models.ChatCompletionStreamRequestToolChoiceTypedDict]] = None,
+        response_format: Optional[
+            Union[models.ResponseFormat, models.ResponseFormatTypedDict]
+        ] = None,
+        tools: OptionalNullable[
+            Union[List[models.Tool], List[models.ToolTypedDict]]
+        ] = UNSET,
+        tool_choice: Optional[
+            Union[
+                models.ChatCompletionStreamRequestToolChoice,
+                models.ChatCompletionStreamRequestToolChoiceTypedDict,
+            ]
+        ] = None,
         safe_prompt: Optional[bool] = False,
         retries: OptionalNullable[utils.RetryConfig] = UNSET,
         server_url: Optional[str] = None,
@@ -156,12 +189,12 @@
         :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.
         :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.
         :param min_tokens: The minimum number of tokens to generate in the completion.
-        :param stream: 
+        :param stream:
         :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array
         :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results.
-        :param response_format: 
-        :param tools: 
-        :param tool_choice: 
+        :param response_format:
+        :param tools:
+        :param tool_choice:
         :param safe_prompt: Whether to inject a safety prompt before all conversations.
         :param retries: Override the default retry configuration for this method
         :param server_url: Override the default server URL for this method
@@ -171,10 +204,10 @@
         url_variables = None
         if timeout_ms is None:
             timeout_ms = self.sdk_configuration.timeout_ms
-
+
         if server_url is not None:
             base_url = server_url
-
+
         request = models.ChatCompletionStreamRequest(
             model=model,
             temperature=temperature,
@@ -185,13 +218,17 @@
             stop=stop,
             random_seed=random_seed,
             messages=utils.get_pydantic_model(messages, List[models.Messages]),
-            response_format=utils.get_pydantic_model(response_format, Optional[models.ResponseFormat]),
+            response_format=utils.get_pydantic_model(
+                response_format, Optional[models.ResponseFormat]
+            ),
             tools=utils.get_pydantic_model(tools, OptionalNullable[List[models.Tool]]),
-            tool_choice=utils.get_pydantic_model(tool_choice, Optional[models.ChatCompletionStreamRequestToolChoice]),
+            tool_choice=utils.get_pydantic_model(
+                tool_choice, Optional[models.ChatCompletionStreamRequestToolChoice]
+            ),
             safe_prompt=safe_prompt,
         )
-
-        req = self.build_request(
+
+        req = self.build_request_async(
             method="POST",
             path="/chat/completions#stream",
             base_url=base_url,
@@ -203,60 +240,87 @@
             user_agent_header="user-agent",
             accept_header_value="text/event-stream",
             security=self.sdk_configuration.security,
-            get_serialized_body=lambda: utils.serialize_request_body(request, False, False, "json", models.ChatCompletionStreamRequest),
+            get_serialized_body=lambda: utils.serialize_request_body(
+                request, False, False, "json", models.ChatCompletionStreamRequest
+            ),
             timeout_ms=timeout_ms,
         )
-
+
         if retries == UNSET:
             if self.sdk_configuration.retry_config is not UNSET:
                 retries = self.sdk_configuration.retry_config
 
         retry_config = None
         if isinstance(retries, utils.RetryConfig):
-            retry_config = (retries, [
-                "429",
-                "500",
-                "502",
-                "503",
-                "504"
-            ])
-
+            retry_config = (retries, ["429", "500", "502", "503", "504"])
+
         http_res = await self.do_request_async(
-            hook_ctx=HookContext(operation_id="stream_chat", oauth2_scopes=[], security_source=self.sdk_configuration.security),
+            hook_ctx=HookContext(
+                operation_id="stream_chat",
+                oauth2_scopes=[],
+                security_source=self.sdk_configuration.security,
+            ),
             request=req,
-            error_status_codes=["422","4XX","5XX"],
+            error_status_codes=["422", "4XX", "5XX"],
             stream=True,
-            retry_config=retry_config
+            retry_config=retry_config,
         )
-
+
         data: Any = None
         if utils.match_response(http_res, "200", "text/event-stream"):
-            return eventstreaming.stream_events_async(http_res, lambda raw: utils.unmarshal_json(raw, models.CompletionEvent), sentinel="[DONE]")
+            return eventstreaming.stream_events_async(
+                http_res,
+                lambda raw: utils.unmarshal_json(raw, models.CompletionEvent),
+                sentinel="[DONE]",
+            )
         if utils.match_response(http_res, "422", "application/json"):
             data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData)
             raise models.HTTPValidationError(data=data)
-        if utils.match_response(http_res, ["4XX","5XX"], "*"):
-            raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res)
-
+        if utils.match_response(http_res, ["4XX", "5XX"], "*"):
+            raise models.SDKError(
+                "API error occurred", http_res.status_code, http_res.text, http_res
+            )
+
         content_type = http_res.headers.get("Content-Type")
-        raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res)
+        raise models.SDKError(
+ f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res.text, + http_res, + ) - - def complete( - self, *, - messages: Union[List[models.ChatCompletionRequestMessages], List[models.ChatCompletionRequestMessagesTypedDict]], + self, + *, + messages: Union[ + List[models.ChatCompletionRequestMessages], + List[models.ChatCompletionRequestMessagesTypedDict], + ], model: OptionalNullable[str] = "azureai", temperature: Optional[float] = 0.7, top_p: Optional[float] = 1, max_tokens: OptionalNullable[int] = UNSET, min_tokens: OptionalNullable[int] = UNSET, stream: Optional[bool] = False, - stop: Optional[Union[models.ChatCompletionRequestStop, models.ChatCompletionRequestStopTypedDict]] = None, + stop: Optional[ + Union[ + models.ChatCompletionRequestStop, + models.ChatCompletionRequestStopTypedDict, + ] + ] = None, random_seed: OptionalNullable[int] = UNSET, - response_format: Optional[Union[models.ResponseFormat, models.ResponseFormatTypedDict]] = None, - tools: OptionalNullable[Union[List[models.Tool], List[models.ToolTypedDict]]] = UNSET, - tool_choice: Optional[Union[models.ChatCompletionRequestToolChoice, models.ChatCompletionRequestToolChoiceTypedDict]] = None, + response_format: Optional[ + Union[models.ResponseFormat, models.ResponseFormatTypedDict] + ] = None, + tools: OptionalNullable[ + Union[List[models.Tool], List[models.ToolTypedDict]] + ] = UNSET, + tool_choice: Optional[ + Union[ + models.ChatCompletionRequestToolChoice, + models.ChatCompletionRequestToolChoiceTypedDict, + ] + ] = None, safe_prompt: Optional[bool] = False, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -273,9 +337,9 @@ def complete( :param stream: Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. - :param response_format: - :param tools: - :param tool_choice: + :param response_format: + :param tools: + :param tool_choice: :param safe_prompt: Whether to inject a safety prompt before all conversations. 
:param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method @@ -285,10 +349,10 @@ def complete( url_variables = None if timeout_ms is None: timeout_ms = self.sdk_configuration.timeout_ms - + if server_url is not None: base_url = server_url - + request = models.ChatCompletionRequest( model=model, temperature=temperature, @@ -298,13 +362,19 @@ def complete( stream=stream, stop=stop, random_seed=random_seed, - messages=utils.get_pydantic_model(messages, List[models.ChatCompletionRequestMessages]), - response_format=utils.get_pydantic_model(response_format, Optional[models.ResponseFormat]), + messages=utils.get_pydantic_model( + messages, List[models.ChatCompletionRequestMessages] + ), + response_format=utils.get_pydantic_model( + response_format, Optional[models.ResponseFormat] + ), tools=utils.get_pydantic_model(tools, OptionalNullable[List[models.Tool]]), - tool_choice=utils.get_pydantic_model(tool_choice, Optional[models.ChatCompletionRequestToolChoice]), + tool_choice=utils.get_pydantic_model( + tool_choice, Optional[models.ChatCompletionRequestToolChoice] + ), safe_prompt=safe_prompt, ) - + req = self.build_request( method="POST", path="/chat/completions", @@ -317,59 +387,84 @@ def complete( user_agent_header="user-agent", accept_header_value="application/json", security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body(request, False, False, "json", models.ChatCompletionRequest), + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.ChatCompletionRequest + ), timeout_ms=timeout_ms, ) - + if retries == UNSET: if self.sdk_configuration.retry_config is not UNSET: retries = self.sdk_configuration.retry_config retry_config = None if isinstance(retries, utils.RetryConfig): - retry_config = (retries, [ - "429", - "500", - "502", - "503", - "504" - ]) - + retry_config = (retries, ["429", "500", "502", "503", "504"]) + http_res = self.do_request( - hook_ctx=HookContext(operation_id="chat_completion_v1_chat_completions_post", oauth2_scopes=[], security_source=self.sdk_configuration.security), + hook_ctx=HookContext( + operation_id="chat_completion_v1_chat_completions_post", + oauth2_scopes=[], + security_source=self.sdk_configuration.security, + ), request=req, - error_status_codes=["422","4XX","5XX"], - retry_config=retry_config + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, ) - + data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, Optional[models.ChatCompletionResponse]) + return utils.unmarshal_json( + http_res.text, Optional[models.ChatCompletionResponse] + ) if utils.match_response(http_res, "422", "application/json"): data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) raise models.HTTPValidationError(data=data) - if utils.match_response(http_res, ["4XX","5XX"], "*"): - raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) - + if utils.match_response(http_res, ["4XX", "5XX"], "*"): + raise models.SDKError( + "API error occurred", http_res.status_code, http_res.text, http_res + ) + content_type = http_res.headers.get("Content-Type") - raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + raise models.SDKError( + f"Unexpected response received (code: 
{http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res.text, + http_res, + ) - - async def complete_async( - self, *, - messages: Union[List[models.ChatCompletionRequestMessages], List[models.ChatCompletionRequestMessagesTypedDict]], + self, + *, + messages: Union[ + List[models.ChatCompletionRequestMessages], + List[models.ChatCompletionRequestMessagesTypedDict], + ], model: OptionalNullable[str] = "azureai", temperature: Optional[float] = 0.7, top_p: Optional[float] = 1, max_tokens: OptionalNullable[int] = UNSET, min_tokens: OptionalNullable[int] = UNSET, stream: Optional[bool] = False, - stop: Optional[Union[models.ChatCompletionRequestStop, models.ChatCompletionRequestStopTypedDict]] = None, + stop: Optional[ + Union[ + models.ChatCompletionRequestStop, + models.ChatCompletionRequestStopTypedDict, + ] + ] = None, random_seed: OptionalNullable[int] = UNSET, - response_format: Optional[Union[models.ResponseFormat, models.ResponseFormatTypedDict]] = None, - tools: OptionalNullable[Union[List[models.Tool], List[models.ToolTypedDict]]] = UNSET, - tool_choice: Optional[Union[models.ChatCompletionRequestToolChoice, models.ChatCompletionRequestToolChoiceTypedDict]] = None, + response_format: Optional[ + Union[models.ResponseFormat, models.ResponseFormatTypedDict] + ] = None, + tools: OptionalNullable[ + Union[List[models.Tool], List[models.ToolTypedDict]] + ] = UNSET, + tool_choice: Optional[ + Union[ + models.ChatCompletionRequestToolChoice, + models.ChatCompletionRequestToolChoiceTypedDict, + ] + ] = None, safe_prompt: Optional[bool] = False, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -386,9 +481,9 @@ async def complete_async( :param stream: Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. - :param response_format: - :param tools: - :param tool_choice: + :param response_format: + :param tools: + :param tool_choice: :param safe_prompt: Whether to inject a safety prompt before all conversations. 
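
# --- Illustrative note (not part of the generated diff) ---
# A sketch combining the async variant with the per-call `retries` override
# documented just below. The `RetryConfig("backoff", BackoffStrategy(...),
# retry_connection_errors)` shape follows the usual Speakeasy convention and
# the timing values (milliseconds) are arbitrary — both are assumptions.
import asyncio

from mistralai_azure import MistralAzure
from mistralai_azure.utils import BackoffStrategy, RetryConfig

async def main() -> None:
    s = MistralAzure(azure_api_key="<api-key>", azure_endpoint="<endpoint>")
    res = await s.chat.complete_async(
        messages=[{"role": "user", "content": "Hello!"}],
        # Retries cover 429/500/502/503/504, matching retry_config above.
        retries=RetryConfig(
            "backoff", BackoffStrategy(500, 60_000, 1.5, 300_000), True
        ),
    )
    if res is not None and res.choices:
        print(res.choices[0].message.content)

asyncio.run(main())
# --- end illustrative note ---
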
:param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method @@ -398,10 +493,10 @@ async def complete_async( url_variables = None if timeout_ms is None: timeout_ms = self.sdk_configuration.timeout_ms - + if server_url is not None: base_url = server_url - + request = models.ChatCompletionRequest( model=model, temperature=temperature, @@ -411,14 +506,20 @@ async def complete_async( stream=stream, stop=stop, random_seed=random_seed, - messages=utils.get_pydantic_model(messages, List[models.ChatCompletionRequestMessages]), - response_format=utils.get_pydantic_model(response_format, Optional[models.ResponseFormat]), + messages=utils.get_pydantic_model( + messages, List[models.ChatCompletionRequestMessages] + ), + response_format=utils.get_pydantic_model( + response_format, Optional[models.ResponseFormat] + ), tools=utils.get_pydantic_model(tools, OptionalNullable[List[models.Tool]]), - tool_choice=utils.get_pydantic_model(tool_choice, Optional[models.ChatCompletionRequestToolChoice]), + tool_choice=utils.get_pydantic_model( + tool_choice, Optional[models.ChatCompletionRequestToolChoice] + ), safe_prompt=safe_prompt, ) - - req = self.build_request( + + req = self.build_request_async( method="POST", path="/chat/completions", base_url=base_url, @@ -430,41 +531,48 @@ async def complete_async( user_agent_header="user-agent", accept_header_value="application/json", security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body(request, False, False, "json", models.ChatCompletionRequest), + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.ChatCompletionRequest + ), timeout_ms=timeout_ms, ) - + if retries == UNSET: if self.sdk_configuration.retry_config is not UNSET: retries = self.sdk_configuration.retry_config retry_config = None if isinstance(retries, utils.RetryConfig): - retry_config = (retries, [ - "429", - "500", - "502", - "503", - "504" - ]) - + retry_config = (retries, ["429", "500", "502", "503", "504"]) + http_res = await self.do_request_async( - hook_ctx=HookContext(operation_id="chat_completion_v1_chat_completions_post", oauth2_scopes=[], security_source=self.sdk_configuration.security), + hook_ctx=HookContext( + operation_id="chat_completion_v1_chat_completions_post", + oauth2_scopes=[], + security_source=self.sdk_configuration.security, + ), request=req, - error_status_codes=["422","4XX","5XX"], - retry_config=retry_config + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, ) - + data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, Optional[models.ChatCompletionResponse]) + return utils.unmarshal_json( + http_res.text, Optional[models.ChatCompletionResponse] + ) if utils.match_response(http_res, "422", "application/json"): data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) raise models.HTTPValidationError(data=data) - if utils.match_response(http_res, ["4XX","5XX"], "*"): - raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) - - content_type = http_res.headers.get("Content-Type") - raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + if utils.match_response(http_res, ["4XX", "5XX"], "*"): + raise models.SDKError( + "API error occurred", http_res.status_code, 
http_res.text, http_res + ) - + content_type = http_res.headers.get("Content-Type") + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res.text, + http_res, + ) diff --git a/packages/mistralai_azure/src/mistralai_azure/models/__init__.py b/packages/mistralai_azure/src/mistralai_azure/models/__init__.py index b1fe6f9..70f0799 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/__init__.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/__init__.py @@ -1,24 +1,68 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" -from .assistantmessage import AssistantMessage, AssistantMessageRole, AssistantMessageTypedDict -from .chatcompletionchoice import ChatCompletionChoice, ChatCompletionChoiceFinishReason, ChatCompletionChoiceTypedDict -from .chatcompletionrequest import ChatCompletionRequest, ChatCompletionRequestMessages, ChatCompletionRequestMessagesTypedDict, ChatCompletionRequestStop, ChatCompletionRequestStopTypedDict, ChatCompletionRequestToolChoice, ChatCompletionRequestToolChoiceTypedDict, ChatCompletionRequestTypedDict -from .chatcompletionresponse import ChatCompletionResponse, ChatCompletionResponseTypedDict -from .chatcompletionstreamrequest import ChatCompletionStreamRequest, ChatCompletionStreamRequestToolChoice, ChatCompletionStreamRequestToolChoiceTypedDict, ChatCompletionStreamRequestTypedDict, Messages, MessagesTypedDict, Stop, StopTypedDict +from .assistantmessage import ( + AssistantMessage, + AssistantMessageRole, + AssistantMessageTypedDict, +) +from .chatcompletionchoice import ( + ChatCompletionChoice, + ChatCompletionChoiceFinishReason, + ChatCompletionChoiceTypedDict, +) +from .chatcompletionrequest import ( + ChatCompletionRequest, + ChatCompletionRequestMessages, + ChatCompletionRequestMessagesTypedDict, + ChatCompletionRequestStop, + ChatCompletionRequestStopTypedDict, + ChatCompletionRequestToolChoice, + ChatCompletionRequestToolChoiceTypedDict, + ChatCompletionRequestTypedDict, +) +from .chatcompletionresponse import ( + ChatCompletionResponse, + ChatCompletionResponseTypedDict, +) +from .chatcompletionstreamrequest import ( + ChatCompletionStreamRequest, + ChatCompletionStreamRequestToolChoice, + ChatCompletionStreamRequestToolChoiceTypedDict, + ChatCompletionStreamRequestTypedDict, + Messages, + MessagesTypedDict, + Stop, + StopTypedDict, +) from .completionchunk import CompletionChunk, CompletionChunkTypedDict from .completionevent import CompletionEvent, CompletionEventTypedDict -from .completionresponsestreamchoice import CompletionResponseStreamChoice, CompletionResponseStreamChoiceTypedDict, FinishReason +from .completionresponsestreamchoice import ( + CompletionResponseStreamChoice, + CompletionResponseStreamChoiceTypedDict, + FinishReason, +) from .contentchunk import ContentChunk, ContentChunkTypedDict from .deltamessage import DeltaMessage, DeltaMessageTypedDict from .function import Function, FunctionTypedDict -from .functioncall import Arguments, ArgumentsTypedDict, FunctionCall, FunctionCallTypedDict +from .functioncall import ( + Arguments, + ArgumentsTypedDict, + FunctionCall, + FunctionCallTypedDict, +) from .functionname import FunctionName, FunctionNameTypedDict from .httpvalidationerror import HTTPValidationError, HTTPValidationErrorData from .responseformat import ResponseFormat, ResponseFormatTypedDict from .responseformats import ResponseFormats from .sdkerror import SDKError from .security import Security, 
SecurityTypedDict -from .systemmessage import Content, ContentTypedDict, Role, SystemMessage, SystemMessageTypedDict +from .systemmessage import ( + Content, + ContentTypedDict, + Role, + SystemMessage, + SystemMessageTypedDict, +) from .textchunk import TextChunk, TextChunkTypedDict, Type from .tool import Tool, ToolTypedDict from .toolcall import ToolCall, ToolCallTypedDict @@ -27,7 +71,100 @@ from .toolmessage import ToolMessage, ToolMessageRole, ToolMessageTypedDict from .tooltypes import ToolTypes from .usageinfo import UsageInfo, UsageInfoTypedDict -from .usermessage import UserMessage, UserMessageContent, UserMessageContentTypedDict, UserMessageRole, UserMessageTypedDict -from .validationerror import Loc, LocTypedDict, ValidationError, ValidationErrorTypedDict +from .usermessage import ( + UserMessage, + UserMessageContent, + UserMessageContentTypedDict, + UserMessageRole, + UserMessageTypedDict, +) +from .validationerror import ( + Loc, + LocTypedDict, + ValidationError, + ValidationErrorTypedDict, +) -__all__ = ["Arguments", "ArgumentsTypedDict", "AssistantMessage", "AssistantMessageRole", "AssistantMessageTypedDict", "ChatCompletionChoice", "ChatCompletionChoiceFinishReason", "ChatCompletionChoiceTypedDict", "ChatCompletionRequest", "ChatCompletionRequestMessages", "ChatCompletionRequestMessagesTypedDict", "ChatCompletionRequestStop", "ChatCompletionRequestStopTypedDict", "ChatCompletionRequestToolChoice", "ChatCompletionRequestToolChoiceTypedDict", "ChatCompletionRequestTypedDict", "ChatCompletionResponse", "ChatCompletionResponseTypedDict", "ChatCompletionStreamRequest", "ChatCompletionStreamRequestToolChoice", "ChatCompletionStreamRequestToolChoiceTypedDict", "ChatCompletionStreamRequestTypedDict", "CompletionChunk", "CompletionChunkTypedDict", "CompletionEvent", "CompletionEventTypedDict", "CompletionResponseStreamChoice", "CompletionResponseStreamChoiceTypedDict", "Content", "ContentChunk", "ContentChunkTypedDict", "ContentTypedDict", "DeltaMessage", "DeltaMessageTypedDict", "FinishReason", "Function", "FunctionCall", "FunctionCallTypedDict", "FunctionName", "FunctionNameTypedDict", "FunctionTypedDict", "HTTPValidationError", "HTTPValidationErrorData", "Loc", "LocTypedDict", "Messages", "MessagesTypedDict", "ResponseFormat", "ResponseFormatTypedDict", "ResponseFormats", "Role", "SDKError", "Security", "SecurityTypedDict", "Stop", "StopTypedDict", "SystemMessage", "SystemMessageTypedDict", "TextChunk", "TextChunkTypedDict", "Tool", "ToolCall", "ToolCallTypedDict", "ToolChoice", "ToolChoiceEnum", "ToolChoiceTypedDict", "ToolMessage", "ToolMessageRole", "ToolMessageTypedDict", "ToolTypedDict", "ToolTypes", "Type", "UsageInfo", "UsageInfoTypedDict", "UserMessage", "UserMessageContent", "UserMessageContentTypedDict", "UserMessageRole", "UserMessageTypedDict", "ValidationError", "ValidationErrorTypedDict"] +__all__ = [ + "Arguments", + "ArgumentsTypedDict", + "AssistantMessage", + "AssistantMessageRole", + "AssistantMessageTypedDict", + "ChatCompletionChoice", + "ChatCompletionChoiceFinishReason", + "ChatCompletionChoiceTypedDict", + "ChatCompletionRequest", + "ChatCompletionRequestMessages", + "ChatCompletionRequestMessagesTypedDict", + "ChatCompletionRequestStop", + "ChatCompletionRequestStopTypedDict", + "ChatCompletionRequestToolChoice", + "ChatCompletionRequestToolChoiceTypedDict", + "ChatCompletionRequestTypedDict", + "ChatCompletionResponse", + "ChatCompletionResponseTypedDict", + "ChatCompletionStreamRequest", + "ChatCompletionStreamRequestToolChoice", + 
"ChatCompletionStreamRequestToolChoiceTypedDict", + "ChatCompletionStreamRequestTypedDict", + "CompletionChunk", + "CompletionChunkTypedDict", + "CompletionEvent", + "CompletionEventTypedDict", + "CompletionResponseStreamChoice", + "CompletionResponseStreamChoiceTypedDict", + "Content", + "ContentChunk", + "ContentChunkTypedDict", + "ContentTypedDict", + "DeltaMessage", + "DeltaMessageTypedDict", + "FinishReason", + "Function", + "FunctionCall", + "FunctionCallTypedDict", + "FunctionName", + "FunctionNameTypedDict", + "FunctionTypedDict", + "HTTPValidationError", + "HTTPValidationErrorData", + "Loc", + "LocTypedDict", + "Messages", + "MessagesTypedDict", + "ResponseFormat", + "ResponseFormatTypedDict", + "ResponseFormats", + "Role", + "SDKError", + "Security", + "SecurityTypedDict", + "Stop", + "StopTypedDict", + "SystemMessage", + "SystemMessageTypedDict", + "TextChunk", + "TextChunkTypedDict", + "Tool", + "ToolCall", + "ToolCallTypedDict", + "ToolChoice", + "ToolChoiceEnum", + "ToolChoiceTypedDict", + "ToolMessage", + "ToolMessageRole", + "ToolMessageTypedDict", + "ToolTypedDict", + "ToolTypes", + "Type", + "UsageInfo", + "UsageInfoTypedDict", + "UserMessage", + "UserMessageContent", + "UserMessageContentTypedDict", + "UserMessageRole", + "UserMessageTypedDict", + "ValidationError", + "ValidationErrorTypedDict", +] diff --git a/packages/mistralai_azure/src/mistralai_azure/models/assistantmessage.py b/packages/mistralai_azure/src/mistralai_azure/models/assistantmessage.py index bca7c14..06c5ae9 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/assistantmessage.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/assistantmessage.py @@ -2,7 +2,13 @@ from __future__ import annotations from .toolcall import ToolCall, ToolCallTypedDict -from mistralai_azure.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from mistralai_azure.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) import pydantic from pydantic import model_serializer from typing import Final, List, Literal, Optional, TypedDict @@ -11,20 +17,26 @@ AssistantMessageRole = Literal["assistant"] + class AssistantMessageTypedDict(TypedDict): content: NotRequired[Nullable[str]] tool_calls: NotRequired[Nullable[List[ToolCallTypedDict]]] prefix: NotRequired[bool] r"""Set this to `true` when adding an assistant message as prefix to condition the model response. The role of the prefix message is to force the model to start its answer by the content of the message.""" - + class AssistantMessage(BaseModel): + # fmt: off ROLE: Annotated[Final[Optional[AssistantMessageRole]], pydantic.Field(alias="role")] = "assistant" # type: ignore + # fmt: on + content: OptionalNullable[str] = UNSET + tool_calls: OptionalNullable[List[ToolCall]] = UNSET + prefix: Optional[bool] = False r"""Set this to `true` when adding an assistant message as prefix to condition the model response. 
The role of the prefix message is to force the model to start its answer by the content of the message.""" - + @model_serializer(mode="wrap") def serialize_model(self, handler): optional_fields = ["role", "content", "tool_calls", "prefix"] @@ -38,9 +50,13 @@ def serialize_model(self, handler): for n, f in self.model_fields.items(): k = f.alias or n val = serialized.get(k) + serialized.pop(k, None) optional_nullable = k in optional_fields and k in nullable_fields - is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member if val is not None and val != UNSET_SENTINEL: m[k] = val @@ -50,4 +66,3 @@ def serialize_model(self, handler): m[k] = val return m - diff --git a/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionchoice.py b/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionchoice.py index 9199545..a71cd08 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionchoice.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionchoice.py @@ -6,16 +6,20 @@ from typing import Literal, TypedDict -ChatCompletionChoiceFinishReason = Literal["stop", "length", "model_length", "error", "tool_calls"] +ChatCompletionChoiceFinishReason = Literal[ + "stop", "length", "model_length", "error", "tool_calls" +] + class ChatCompletionChoiceTypedDict(TypedDict): index: int message: AssistantMessageTypedDict finish_reason: ChatCompletionChoiceFinishReason - + class ChatCompletionChoice(BaseModel): index: int + message: AssistantMessage + finish_reason: ChatCompletionChoiceFinishReason - diff --git a/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionrequest.py b/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionrequest.py index 08a8254..f2ba234 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionrequest.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionrequest.py @@ -9,7 +9,13 @@ from .toolchoiceenum import ToolChoiceEnum from .toolmessage import ToolMessage, ToolMessageTypedDict from .usermessage import UserMessage, UserMessageTypedDict -from mistralai_azure.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from mistralai_azure.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) from mistralai_azure.utils import get_discriminator from pydantic import Discriminator, Tag, model_serializer from typing import List, Optional, TypedDict, Union @@ -24,10 +30,23 @@ r"""Stop generation if this token is detected. 
Or if one of these tokens is detected when providing an array""" -ChatCompletionRequestMessagesTypedDict = Union[SystemMessageTypedDict, UserMessageTypedDict, AssistantMessageTypedDict, ToolMessageTypedDict] +ChatCompletionRequestMessagesTypedDict = Union[ + SystemMessageTypedDict, + UserMessageTypedDict, + AssistantMessageTypedDict, + ToolMessageTypedDict, +] -ChatCompletionRequestMessages = Annotated[Union[Annotated[AssistantMessage, Tag("assistant")], Annotated[SystemMessage, Tag("system")], Annotated[ToolMessage, Tag("tool")], Annotated[UserMessage, Tag("user")]], Discriminator(lambda m: get_discriminator(m, "role", "role"))] +ChatCompletionRequestMessages = Annotated[ + Union[ + Annotated[AssistantMessage, Tag("assistant")], + Annotated[SystemMessage, Tag("system")], + Annotated[ToolMessage, Tag("tool")], + Annotated[UserMessage, Tag("user")], + ], + Discriminator(lambda m: get_discriminator(m, "role", "role")), +] ChatCompletionRequestToolChoiceTypedDict = Union[ToolChoiceTypedDict, ToolChoiceEnum] @@ -60,36 +79,61 @@ class ChatCompletionRequestTypedDict(TypedDict): tool_choice: NotRequired[ChatCompletionRequestToolChoiceTypedDict] safe_prompt: NotRequired[bool] r"""Whether to inject a safety prompt before all conversations.""" - + class ChatCompletionRequest(BaseModel): messages: List[ChatCompletionRequestMessages] r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" + model: OptionalNullable[str] = "azureai" r"""The ID of the model to use for this request.""" + temperature: Optional[float] = 0.7 r"""What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.""" + top_p: Optional[float] = 1 r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" + max_tokens: OptionalNullable[int] = UNSET r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" + min_tokens: OptionalNullable[int] = UNSET r"""The minimum number of tokens to generate in the completion.""" + stream: Optional[bool] = False r"""Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.""" + stop: Optional[ChatCompletionRequestStop] = None r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + random_seed: OptionalNullable[int] = UNSET r"""The seed to use for random sampling. 
If set, different calls will generate deterministic results.""" + response_format: Optional[ResponseFormat] = None + tools: OptionalNullable[List[Tool]] = UNSET + tool_choice: Optional[ChatCompletionRequestToolChoice] = None + safe_prompt: Optional[bool] = False r"""Whether to inject a safety prompt before all conversations.""" - + @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["model", "temperature", "top_p", "max_tokens", "min_tokens", "stream", "stop", "random_seed", "response_format", "tools", "tool_choice", "safe_prompt"] + optional_fields = [ + "model", + "temperature", + "top_p", + "max_tokens", + "min_tokens", + "stream", + "stop", + "random_seed", + "response_format", + "tools", + "tool_choice", + "safe_prompt", + ] nullable_fields = ["model", "max_tokens", "min_tokens", "random_seed", "tools"] null_default_fields = [] @@ -100,9 +144,13 @@ def serialize_model(self, handler): for n, f in self.model_fields.items(): k = f.alias or n val = serialized.get(k) + serialized.pop(k, None) optional_nullable = k in optional_fields and k in nullable_fields - is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member if val is not None and val != UNSET_SENTINEL: m[k] = val @@ -112,4 +160,3 @@ def serialize_model(self, handler): m[k] = val return m - diff --git a/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionresponse.py b/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionresponse.py index 8859121..0a02e46 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionresponse.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionresponse.py @@ -15,13 +15,17 @@ class ChatCompletionResponseTypedDict(TypedDict): usage: UsageInfoTypedDict created: NotRequired[int] choices: NotRequired[List[ChatCompletionChoiceTypedDict]] - + class ChatCompletionResponse(BaseModel): id: str + object: str + model: str + usage: UsageInfo + created: Optional[int] = None + choices: Optional[List[ChatCompletionChoice]] = None - diff --git a/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionstreamrequest.py b/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionstreamrequest.py index f5e4c55..28abddb 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionstreamrequest.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionstreamrequest.py @@ -9,7 +9,13 @@ from .toolchoiceenum import ToolChoiceEnum from .toolmessage import ToolMessage, ToolMessageTypedDict from .usermessage import UserMessage, UserMessageTypedDict -from mistralai_azure.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from mistralai_azure.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) from mistralai_azure.utils import get_discriminator from pydantic import Discriminator, Tag, model_serializer from typing import List, Optional, TypedDict, Union @@ -24,13 +30,28 @@ r"""Stop generation if this token is detected. 
Or if one of these tokens is detected when providing an array""" -MessagesTypedDict = Union[SystemMessageTypedDict, UserMessageTypedDict, AssistantMessageTypedDict, ToolMessageTypedDict] +MessagesTypedDict = Union[ + SystemMessageTypedDict, + UserMessageTypedDict, + AssistantMessageTypedDict, + ToolMessageTypedDict, +] -Messages = Annotated[Union[Annotated[AssistantMessage, Tag("assistant")], Annotated[SystemMessage, Tag("system")], Annotated[ToolMessage, Tag("tool")], Annotated[UserMessage, Tag("user")]], Discriminator(lambda m: get_discriminator(m, "role", "role"))] +Messages = Annotated[ + Union[ + Annotated[AssistantMessage, Tag("assistant")], + Annotated[SystemMessage, Tag("system")], + Annotated[ToolMessage, Tag("tool")], + Annotated[UserMessage, Tag("user")], + ], + Discriminator(lambda m: get_discriminator(m, "role", "role")), +] -ChatCompletionStreamRequestToolChoiceTypedDict = Union[ToolChoiceTypedDict, ToolChoiceEnum] +ChatCompletionStreamRequestToolChoiceTypedDict = Union[ + ToolChoiceTypedDict, ToolChoiceEnum +] ChatCompletionStreamRequestToolChoice = Union[ToolChoice, ToolChoiceEnum] @@ -59,35 +80,60 @@ class ChatCompletionStreamRequestTypedDict(TypedDict): tool_choice: NotRequired[ChatCompletionStreamRequestToolChoiceTypedDict] safe_prompt: NotRequired[bool] r"""Whether to inject a safety prompt before all conversations.""" - + class ChatCompletionStreamRequest(BaseModel): messages: List[Messages] r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" + model: OptionalNullable[str] = "azureai" r"""The ID of the model to use for this request.""" + temperature: Optional[float] = 0.7 r"""What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.""" + top_p: Optional[float] = 1 r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" + max_tokens: OptionalNullable[int] = UNSET r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" + min_tokens: OptionalNullable[int] = UNSET r"""The minimum number of tokens to generate in the completion.""" + stream: Optional[bool] = True + stop: Optional[Stop] = None r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + random_seed: OptionalNullable[int] = UNSET r"""The seed to use for random sampling. 
If set, different calls will generate deterministic results.""" + response_format: Optional[ResponseFormat] = None + tools: OptionalNullable[List[Tool]] = UNSET + tool_choice: Optional[ChatCompletionStreamRequestToolChoice] = None + safe_prompt: Optional[bool] = False r"""Whether to inject a safety prompt before all conversations.""" - + @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["model", "temperature", "top_p", "max_tokens", "min_tokens", "stream", "stop", "random_seed", "response_format", "tools", "tool_choice", "safe_prompt"] + optional_fields = [ + "model", + "temperature", + "top_p", + "max_tokens", + "min_tokens", + "stream", + "stop", + "random_seed", + "response_format", + "tools", + "tool_choice", + "safe_prompt", + ] nullable_fields = ["model", "max_tokens", "min_tokens", "random_seed", "tools"] null_default_fields = [] @@ -98,9 +144,13 @@ def serialize_model(self, handler): for n, f in self.model_fields.items(): k = f.alias or n val = serialized.get(k) + serialized.pop(k, None) optional_nullable = k in optional_fields and k in nullable_fields - is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member if val is not None and val != UNSET_SENTINEL: m[k] = val @@ -110,4 +160,3 @@ def serialize_model(self, handler): m[k] = val return m - diff --git a/packages/mistralai_azure/src/mistralai_azure/models/completionchunk.py b/packages/mistralai_azure/src/mistralai_azure/models/completionchunk.py index f51aca3..d2f334d 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/completionchunk.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/completionchunk.py @@ -1,7 +1,10 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" from __future__ import annotations -from .completionresponsestreamchoice import CompletionResponseStreamChoice, CompletionResponseStreamChoiceTypedDict +from .completionresponsestreamchoice import ( + CompletionResponseStreamChoice, + CompletionResponseStreamChoiceTypedDict, +) from .usageinfo import UsageInfo, UsageInfoTypedDict from mistralai_azure.types import BaseModel from typing import List, Optional, TypedDict @@ -15,13 +18,17 @@ class CompletionChunkTypedDict(TypedDict): object: NotRequired[str] created: NotRequired[int] usage: NotRequired[UsageInfoTypedDict] - + class CompletionChunk(BaseModel): id: str + model: str + choices: List[CompletionResponseStreamChoice] + object: Optional[str] = None + created: Optional[int] = None + usage: Optional[UsageInfo] = None - diff --git a/packages/mistralai_azure/src/mistralai_azure/models/completionevent.py b/packages/mistralai_azure/src/mistralai_azure/models/completionevent.py index 2f8f4b9..b9b68db 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/completionevent.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/completionevent.py @@ -8,8 +8,7 @@ class CompletionEventTypedDict(TypedDict): data: CompletionChunkTypedDict - + class CompletionEvent(BaseModel): data: CompletionChunk - diff --git a/packages/mistralai_azure/src/mistralai_azure/models/completionresponsestreamchoice.py b/packages/mistralai_azure/src/mistralai_azure/models/completionresponsestreamchoice.py index 76f7fce..c220a51 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/completionresponsestreamchoice.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/completionresponsestreamchoice.py @@ -9,17 +9,20 @@ FinishReason = Literal["stop", "length", "error", "tool_calls"] + class CompletionResponseStreamChoiceTypedDict(TypedDict): index: int delta: DeltaMessageTypedDict finish_reason: Nullable[FinishReason] - + class CompletionResponseStreamChoice(BaseModel): index: int + delta: DeltaMessage + finish_reason: Nullable[FinishReason] - + @model_serializer(mode="wrap") def serialize_model(self, handler): optional_fields = [] @@ -33,9 +36,13 @@ def serialize_model(self, handler): for n, f in self.model_fields.items(): k = f.alias or n val = serialized.get(k) + serialized.pop(k, None) optional_nullable = k in optional_fields and k in nullable_fields - is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member if val is not None and val != UNSET_SENTINEL: m[k] = val @@ -45,4 +52,3 @@ def serialize_model(self, handler): m[k] = val return m - diff --git a/packages/mistralai_azure/src/mistralai_azure/models/contentchunk.py b/packages/mistralai_azure/src/mistralai_azure/models/contentchunk.py index 2fb781e..49aeba4 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/contentchunk.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/contentchunk.py @@ -8,4 +8,3 @@ ContentChunk = TextChunk - diff --git a/packages/mistralai_azure/src/mistralai_azure/models/deltamessage.py b/packages/mistralai_azure/src/mistralai_azure/models/deltamessage.py index 4f9f395..5e8011d 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/deltamessage.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/deltamessage.py @@ -2,7 +2,13 @@ from __future__ import annotations from .toolcall import ToolCall, ToolCallTypedDict -from 
mistralai_azure.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from mistralai_azure.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) from pydantic import model_serializer from typing import List, Optional, TypedDict from typing_extensions import NotRequired @@ -12,13 +18,15 @@ class DeltaMessageTypedDict(TypedDict): role: NotRequired[str] content: NotRequired[Nullable[str]] tool_calls: NotRequired[Nullable[List[ToolCallTypedDict]]] - + class DeltaMessage(BaseModel): role: Optional[str] = None + content: OptionalNullable[str] = UNSET + tool_calls: OptionalNullable[List[ToolCall]] = UNSET - + @model_serializer(mode="wrap") def serialize_model(self, handler): optional_fields = ["role", "content", "tool_calls"] @@ -32,9 +40,13 @@ def serialize_model(self, handler): for n, f in self.model_fields.items(): k = f.alias or n val = serialized.get(k) + serialized.pop(k, None) optional_nullable = k in optional_fields and k in nullable_fields - is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member if val is not None and val != UNSET_SENTINEL: m[k] = val @@ -44,4 +56,3 @@ def serialize_model(self, handler): m[k] = val return m - diff --git a/packages/mistralai_azure/src/mistralai_azure/models/function.py b/packages/mistralai_azure/src/mistralai_azure/models/function.py index 6ffcacf..081ce1d 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/function.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/function.py @@ -10,10 +10,11 @@ class FunctionTypedDict(TypedDict): name: str parameters: Dict[str, Any] description: NotRequired[str] - + class Function(BaseModel): name: str + parameters: Dict[str, Any] + description: Optional[str] = "" - diff --git a/packages/mistralai_azure/src/mistralai_azure/models/functioncall.py b/packages/mistralai_azure/src/mistralai_azure/models/functioncall.py index 3259ad9..0afa590 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/functioncall.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/functioncall.py @@ -14,9 +14,9 @@ class FunctionCallTypedDict(TypedDict): name: str arguments: ArgumentsTypedDict - + class FunctionCall(BaseModel): name: str + arguments: Arguments - diff --git a/packages/mistralai_azure/src/mistralai_azure/models/functionname.py b/packages/mistralai_azure/src/mistralai_azure/models/functionname.py index b3af46e..c825a5a 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/functionname.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/functionname.py @@ -7,12 +7,11 @@ class FunctionNameTypedDict(TypedDict): r"""this restriction of `Function` is used to select a specific function to call""" - + name: str - + class FunctionName(BaseModel): r"""this restriction of `Function` is used to select a specific function to call""" - + name: str - diff --git a/packages/mistralai_azure/src/mistralai_azure/models/httpvalidationerror.py b/packages/mistralai_azure/src/mistralai_azure/models/httpvalidationerror.py index de07a3d..28f9b4e 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/httpvalidationerror.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/httpvalidationerror.py @@ -6,13 +6,14 @@ from mistralai_azure.types import BaseModel from typing import List, Optional + class HTTPValidationErrorData(BaseModel): detail: 
Optional[List[ValidationError]] = None - class HTTPValidationError(Exception): r"""Validation Error""" + data: HTTPValidationErrorData def __init__(self, data: HTTPValidationErrorData): @@ -20,4 +21,3 @@ def __init__(self, data: HTTPValidationErrorData): def __str__(self) -> str: return utils.marshal_json(self.data, HTTPValidationErrorData) - diff --git a/packages/mistralai_azure/src/mistralai_azure/models/responseformat.py b/packages/mistralai_azure/src/mistralai_azure/models/responseformat.py index d159947..c692033 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/responseformat.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/responseformat.py @@ -10,9 +10,8 @@ class ResponseFormatTypedDict(TypedDict): type: NotRequired[ResponseFormats] r"""An object specifying the format that the model must output. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message.""" - + class ResponseFormat(BaseModel): type: Optional[ResponseFormats] = None r"""An object specifying the format that the model must output. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message.""" - diff --git a/packages/mistralai_azure/src/mistralai_azure/models/security.py b/packages/mistralai_azure/src/mistralai_azure/models/security.py index 94d9e64..1245881 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/security.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/security.py @@ -9,8 +9,17 @@ class SecurityTypedDict(TypedDict): api_key: str - + class Security(BaseModel): - api_key: Annotated[str, FieldMetadata(security=SecurityMetadata(scheme=True, scheme_type="http", sub_type="bearer", field_name="Authorization"))] - + api_key: Annotated[ + str, + FieldMetadata( + security=SecurityMetadata( + scheme=True, + scheme_type="http", + sub_type="bearer", + field_name="Authorization", + ) + ), + ] diff --git a/packages/mistralai_azure/src/mistralai_azure/models/systemmessage.py b/packages/mistralai_azure/src/mistralai_azure/models/systemmessage.py index 228b6e1..cfef227 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/systemmessage.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/systemmessage.py @@ -18,9 +18,11 @@ class SystemMessageTypedDict(TypedDict): content: ContentTypedDict - + class SystemMessage(BaseModel): content: Content + + # fmt: off ROLE: Annotated[Final[Optional[Role]], pydantic.Field(alias="role")] = "system" # type: ignore - + # fmt: on diff --git a/packages/mistralai_azure/src/mistralai_azure/models/textchunk.py b/packages/mistralai_azure/src/mistralai_azure/models/textchunk.py index 501e0b1..75cc949 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/textchunk.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/textchunk.py @@ -9,11 +9,14 @@ Type = Literal["text"] + class TextChunkTypedDict(TypedDict): text: str - + class TextChunk(BaseModel): text: str + + # fmt: off TYPE: Annotated[Final[Optional[Type]], pydantic.Field(alias="type")] = "text" # type: ignore - + # fmt: on diff --git a/packages/mistralai_azure/src/mistralai_azure/models/tool.py b/packages/mistralai_azure/src/mistralai_azure/models/tool.py index 7afb91d..3a02ed7 100644 --- 
a/packages/mistralai_azure/src/mistralai_azure/models/tool.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/tool.py @@ -13,9 +13,11 @@ class ToolTypedDict(TypedDict): function: FunctionTypedDict type: NotRequired[ToolTypes] - + class Tool(BaseModel): function: Function - type: Annotated[Optional[ToolTypes], PlainValidator(validate_open_enum(False))] = None - + + type: Annotated[Optional[ToolTypes], PlainValidator(validate_open_enum(False))] = ( + None + ) diff --git a/packages/mistralai_azure/src/mistralai_azure/models/toolcall.py b/packages/mistralai_azure/src/mistralai_azure/models/toolcall.py index 7e50f88..2a768a2 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/toolcall.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/toolcall.py @@ -14,10 +14,13 @@ class ToolCallTypedDict(TypedDict): function: FunctionCallTypedDict id: NotRequired[str] type: NotRequired[ToolTypes] - + class ToolCall(BaseModel): function: FunctionCall + id: Optional[str] = "null" - type: Annotated[Optional[ToolTypes], PlainValidator(validate_open_enum(False))] = None - + + type: Annotated[Optional[ToolTypes], PlainValidator(validate_open_enum(False))] = ( + None + ) diff --git a/packages/mistralai_azure/src/mistralai_azure/models/toolchoice.py b/packages/mistralai_azure/src/mistralai_azure/models/toolchoice.py index c956c95..2d3d87f 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/toolchoice.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/toolchoice.py @@ -12,16 +12,18 @@ class ToolChoiceTypedDict(TypedDict): r"""ToolChoice is either a ToolChoiceEnum or a ToolChoice""" - + function: FunctionNameTypedDict r"""this restriction of `Function` is used to select a specific function to call""" type: NotRequired[ToolTypes] - + class ToolChoice(BaseModel): r"""ToolChoice is either a ToolChoiceEnum or a ToolChoice""" - + function: FunctionName r"""this restriction of `Function` is used to select a specific function to call""" - type: Annotated[Optional[ToolTypes], PlainValidator(validate_open_enum(False))] = None - + + type: Annotated[Optional[ToolTypes], PlainValidator(validate_open_enum(False))] = ( + None + ) diff --git a/packages/mistralai_azure/src/mistralai_azure/models/toolmessage.py b/packages/mistralai_azure/src/mistralai_azure/models/toolmessage.py index 0a83235..cc524c1 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/toolmessage.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/toolmessage.py @@ -1,7 +1,13 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" from __future__ import annotations -from mistralai_azure.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from mistralai_azure.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) import pydantic from pydantic import model_serializer from typing import Final, Literal, Optional, TypedDict @@ -10,18 +16,24 @@ ToolMessageRole = Literal["tool"] + class ToolMessageTypedDict(TypedDict): content: str tool_call_id: NotRequired[Nullable[str]] name: NotRequired[Nullable[str]] - + class ToolMessage(BaseModel): content: str + + # fmt: off ROLE: Annotated[Final[Optional[ToolMessageRole]], pydantic.Field(alias="role")] = "tool" # type: ignore + # fmt: on + tool_call_id: OptionalNullable[str] = UNSET + name: OptionalNullable[str] = UNSET - + @model_serializer(mode="wrap") def serialize_model(self, handler): optional_fields = ["role", "tool_call_id", "name"] @@ -35,9 +47,13 @@ def serialize_model(self, handler): for n, f in self.model_fields.items(): k = f.alias or n val = serialized.get(k) + serialized.pop(k, None) optional_nullable = k in optional_fields and k in nullable_fields - is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member if val is not None and val != UNSET_SENTINEL: m[k] = val @@ -47,4 +63,3 @@ def serialize_model(self, handler): m[k] = val return m - diff --git a/packages/mistralai_azure/src/mistralai_azure/models/usageinfo.py b/packages/mistralai_azure/src/mistralai_azure/models/usageinfo.py index f30c1eb..2a92648 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/usageinfo.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/usageinfo.py @@ -9,10 +9,11 @@ class UsageInfoTypedDict(TypedDict): prompt_tokens: int completion_tokens: int total_tokens: int - + class UsageInfo(BaseModel): prompt_tokens: int + completion_tokens: int + total_tokens: int - diff --git a/packages/mistralai_azure/src/mistralai_azure/models/usermessage.py b/packages/mistralai_azure/src/mistralai_azure/models/usermessage.py index defabb2..9065230 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/usermessage.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/usermessage.py @@ -18,9 +18,11 @@ class UserMessageTypedDict(TypedDict): content: UserMessageContentTypedDict - + class UserMessage(BaseModel): content: UserMessageContent + + # fmt: off ROLE: Annotated[Final[Optional[UserMessageRole]], pydantic.Field(alias="role")] = "user" # type: ignore - + # fmt: on diff --git a/packages/mistralai_azure/src/mistralai_azure/models/validationerror.py b/packages/mistralai_azure/src/mistralai_azure/models/validationerror.py index 9b7b9a9..6ab66a1 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/validationerror.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/validationerror.py @@ -15,10 +15,11 @@ class ValidationErrorTypedDict(TypedDict): loc: List[LocTypedDict] msg: str type: str - + class ValidationError(BaseModel): loc: List[Loc] + msg: str + type: str - diff --git a/packages/mistralai_azure/src/mistralai_azure/sdkconfiguration.py b/packages/mistralai_azure/src/mistralai_azure/sdkconfiguration.py index 0a9bbf8..44d379e 100644 --- a/packages/mistralai_azure/src/mistralai_azure/sdkconfiguration.py +++ b/packages/mistralai_azure/src/mistralai_azure/sdkconfiguration.py @@ -1,6 +1,5 @@ """Code generated 
by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - from ._hooks import SDKHooks from .httpclient import AsyncHttpClient, HttpClient from .utils import Logger, RetryConfig, remove_suffix @@ -14,7 +13,7 @@ SERVER_PROD = "prod" r"""Production server""" SERVERS = { - SERVER_PROD: "https://api.mistral.ai", + SERVER_PROD: "https://api.mistral.ai", } """Contains the list of servers available to the SDK""" @@ -24,14 +23,14 @@ class SDKConfiguration: client: HttpClient async_client: AsyncHttpClient debug_logger: Logger - security: Optional[Union[models.Security,Callable[[], models.Security]]] = None + security: Optional[Union[models.Security, Callable[[], models.Security]]] = None server_url: Optional[str] = "" server: Optional[str] = "" language: str = "python" openapi_doc_version: str = "0.0.2" - sdk_version: str = "1.1.1" - gen_version: str = "2.404.11" - user_agent: str = "speakeasy-sdk/python 1.1.1 2.404.11 0.0.2 mistralai_azure" + sdk_version: str = "1.2.1" + gen_version: str = "2.415.6" + user_agent: str = "speakeasy-sdk/python 1.2.1 2.415.6 0.0.2 mistralai_azure" retry_config: OptionalNullable[RetryConfig] = Field(default_factory=lambda: UNSET) timeout_ms: Optional[int] = None @@ -45,10 +44,9 @@ def get_server_details(self) -> Tuple[str, Dict[str, str]]: self.server = SERVER_PROD if self.server not in SERVERS: - raise ValueError(f"Invalid server \"{self.server}\"") + raise ValueError(f'Invalid server "{self.server}"') return SERVERS[self.server], {} - def get_hooks(self) -> SDKHooks: return self._hooks diff --git a/packages/mistralai_azure/src/mistralai_azure/utils/__init__.py b/packages/mistralai_azure/src/mistralai_azure/utils/__init__.py index 507a600..6c26aeb 100644 --- a/packages/mistralai_azure/src/mistralai_azure/utils/__init__.py +++ b/packages/mistralai_azure/src/mistralai_azure/utils/__init__.py @@ -33,7 +33,12 @@ validate_open_enum, ) from .url import generate_url, template_url, remove_suffix -from .values import get_global_from_env, match_content_type, match_status_codes, match_response +from .values import ( + get_global_from_env, + match_content_type, + match_status_codes, + match_response, +) from .logger import Logger, get_body_content, get_default_logger __all__ = [ diff --git a/packages/mistralai_azure/src/mistralai_azure/utils/logger.py b/packages/mistralai_azure/src/mistralai_azure/utils/logger.py index 15d12ac..b661aff 100644 --- a/packages/mistralai_azure/src/mistralai_azure/utils/logger.py +++ b/packages/mistralai_azure/src/mistralai_azure/utils/logger.py @@ -3,17 +3,20 @@ import httpx from typing import Any, Protocol + class Logger(Protocol): def debug(self, msg: str, *args: Any, **kwargs: Any) -> None: pass + class NoOpLogger: def debug(self, msg: str, *args: Any, **kwargs: Any) -> None: pass + def get_body_content(req: httpx.Request) -> str: return "" if not hasattr(req, "_content") else str(req.content) + def get_default_logger() -> Logger: return NoOpLogger() - diff --git a/packages/mistralai_azure/src/mistralai_azure/utils/retries.py b/packages/mistralai_azure/src/mistralai_azure/utils/retries.py index a06f927..4d60867 100644 --- a/packages/mistralai_azure/src/mistralai_azure/utils/retries.py +++ b/packages/mistralai_azure/src/mistralai_azure/utils/retries.py @@ -1,5 +1,6 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +import asyncio import random import time from typing import List @@ -212,5 +213,5 @@ async def retry_with_backoff_async( raise sleep = (initial_interval / 1000) * exponent**retries + random.uniform(0, 1) sleep = min(sleep, max_interval / 1000) - time.sleep(sleep) + await asyncio.sleep(sleep) retries += 1 diff --git a/packages/mistralai_azure/src/mistralai_azure/utils/security.py b/packages/mistralai_azure/src/mistralai_azure/utils/security.py index aab4cb6..295a3f4 100644 --- a/packages/mistralai_azure/src/mistralai_azure/utils/security.py +++ b/packages/mistralai_azure/src/mistralai_azure/utils/security.py @@ -16,7 +16,6 @@ ) - def get_security(security: Any) -> Tuple[Dict[str, str], Dict[str, List[str]]]: headers: Dict[str, str] = {} query_params: Dict[str, List[str]] = {} @@ -42,8 +41,10 @@ def get_security(security: Any) -> Tuple[Dict[str, str], Dict[str, List[str]]]: _parse_security_option(headers, query_params, value) return headers, query_params if metadata.scheme: - # Special case for basic auth which could be a flattened model - if metadata.sub_type == "basic" and not isinstance(value, BaseModel): + # Special case for basic auth or custom auth which could be a flattened model + if metadata.sub_type in ["basic", "custom"] and not isinstance( + value, BaseModel + ): _parse_security_scheme(headers, query_params, metadata, name, security) else: _parse_security_scheme(headers, query_params, metadata, name, value) @@ -80,9 +81,12 @@ def _parse_security_scheme( sub_type = scheme_metadata.sub_type if isinstance(scheme, BaseModel): - if scheme_type == "http" and sub_type == "basic": - _parse_basic_auth_scheme(headers, scheme) - return + if scheme_type == "http": + if sub_type == "basic": + _parse_basic_auth_scheme(headers, scheme) + return + if sub_type == "custom": + return scheme_fields: Dict[str, FieldInfo] = scheme.__class__.model_fields for name in scheme_fields: @@ -131,6 +135,8 @@ def _parse_security_scheme_value( elif scheme_type == "http": if sub_type == "bearer": headers[header_name] = _apply_bearer(value) + elif sub_type == "custom": + return else: raise ValueError("sub type {sub_type} not supported") else: diff --git a/packages/mistralai_gcp/.speakeasy/gen.lock b/packages/mistralai_gcp/.speakeasy/gen.lock index 9dcb52c..ac69c9d 100644 --- a/packages/mistralai_gcp/.speakeasy/gen.lock +++ b/packages/mistralai_gcp/.speakeasy/gen.lock @@ -3,17 +3,18 @@ id: ec60f2d8-7869-45c1-918e-773d41a8cf74 management: docChecksum: 4a618945cf92a8df093d014f459ac2e3 docVersion: 0.0.2 - speakeasyVersion: 1.382.0 - generationVersion: 2.404.11 - releaseVersion: 1.1.1 - configChecksum: 3eeb8fe3e93b38f0d1fbdc2d5ca848d7 + speakeasyVersion: 1.396.6 + generationVersion: 2.415.6 + releaseVersion: 1.2.1 + configChecksum: f6bc2b1d34a982b070eb0e242c37b2af published: true features: python: additionalDependencies: 1.0.0 constsAndDefaults: 1.0.2 - core: 5.4.1 + core: 5.5.3 defaultEnabledRetries: 0.2.0 + enumUnions: 0.1.0 envVarSecurityUsage: 0.3.1 examples: 3.0.0 flatRequests: 1.0.1 @@ -25,17 +26,66 @@ features: nullables: 1.0.0 openEnums: 1.0.0 responseFormat: 1.0.0 - retries: 3.0.0 + retries: 3.0.2 sdkHooks: 1.0.0 serverEvents: 1.0.2 serverEventsSentinels: 0.1.0 serverIDs: 3.0.0 unions: 3.0.2 generatedFiles: - - src/mistralai_gcp/sdkconfiguration.py - - src/mistralai_gcp/chat.py - - src/mistralai_gcp/fim.py + - .gitattributes - .vscode/settings.json + - docs/models/arguments.md + - docs/models/assistantmessage.md + - docs/models/assistantmessagerole.md + - docs/models/chatcompletionchoice.md + 
- docs/models/chatcompletionchoicefinishreason.md + - docs/models/chatcompletionrequest.md + - docs/models/chatcompletionrequestmessages.md + - docs/models/chatcompletionrequeststop.md + - docs/models/chatcompletionrequesttoolchoice.md + - docs/models/chatcompletionresponse.md + - docs/models/chatcompletionstreamrequest.md + - docs/models/chatcompletionstreamrequesttoolchoice.md + - docs/models/completionchunk.md + - docs/models/completionevent.md + - docs/models/completionresponsestreamchoice.md + - docs/models/content.md + - docs/models/contentchunk.md + - docs/models/deltamessage.md + - docs/models/fimcompletionrequest.md + - docs/models/fimcompletionrequeststop.md + - docs/models/fimcompletionresponse.md + - docs/models/fimcompletionstreamrequest.md + - docs/models/fimcompletionstreamrequeststop.md + - docs/models/finishreason.md + - docs/models/function.md + - docs/models/functioncall.md + - docs/models/functionname.md + - docs/models/httpvalidationerror.md + - docs/models/loc.md + - docs/models/messages.md + - docs/models/responseformat.md + - docs/models/responseformats.md + - docs/models/role.md + - docs/models/security.md + - docs/models/stop.md + - docs/models/systemmessage.md + - docs/models/textchunk.md + - docs/models/tool.md + - docs/models/toolcall.md + - docs/models/toolchoice.md + - docs/models/toolchoiceenum.md + - docs/models/toolmessage.md + - docs/models/toolmessagerole.md + - docs/models/tooltypes.md + - docs/models/type.md + - docs/models/usageinfo.md + - docs/models/usermessage.md + - docs/models/usermessagecontent.md + - docs/models/usermessagerole.md + - docs/models/utils/retryconfig.md + - docs/models/validationerror.md - poetry.toml - py.typed - pylintrc @@ -44,9 +94,48 @@ generatedFiles: - scripts/prepare-readme.py - scripts/publish.sh - src/mistralai_gcp/__init__.py + - src/mistralai_gcp/_hooks/__init__.py + - src/mistralai_gcp/_hooks/sdkhooks.py + - src/mistralai_gcp/_hooks/types.py - src/mistralai_gcp/basesdk.py + - src/mistralai_gcp/chat.py + - src/mistralai_gcp/fim.py - src/mistralai_gcp/httpclient.py + - src/mistralai_gcp/models/__init__.py + - src/mistralai_gcp/models/assistantmessage.py + - src/mistralai_gcp/models/chatcompletionchoice.py + - src/mistralai_gcp/models/chatcompletionrequest.py + - src/mistralai_gcp/models/chatcompletionresponse.py + - src/mistralai_gcp/models/chatcompletionstreamrequest.py + - src/mistralai_gcp/models/completionchunk.py + - src/mistralai_gcp/models/completionevent.py + - src/mistralai_gcp/models/completionresponsestreamchoice.py + - src/mistralai_gcp/models/contentchunk.py + - src/mistralai_gcp/models/deltamessage.py + - src/mistralai_gcp/models/fimcompletionrequest.py + - src/mistralai_gcp/models/fimcompletionresponse.py + - src/mistralai_gcp/models/fimcompletionstreamrequest.py + - src/mistralai_gcp/models/function.py + - src/mistralai_gcp/models/functioncall.py + - src/mistralai_gcp/models/functionname.py + - src/mistralai_gcp/models/httpvalidationerror.py + - src/mistralai_gcp/models/responseformat.py + - src/mistralai_gcp/models/responseformats.py + - src/mistralai_gcp/models/sdkerror.py + - src/mistralai_gcp/models/security.py + - src/mistralai_gcp/models/systemmessage.py + - src/mistralai_gcp/models/textchunk.py + - src/mistralai_gcp/models/tool.py + - src/mistralai_gcp/models/toolcall.py + - src/mistralai_gcp/models/toolchoice.py + - src/mistralai_gcp/models/toolchoiceenum.py + - src/mistralai_gcp/models/toolmessage.py + - src/mistralai_gcp/models/tooltypes.py + - src/mistralai_gcp/models/usageinfo.py + - 
src/mistralai_gcp/models/usermessage.py + - src/mistralai_gcp/models/validationerror.py - src/mistralai_gcp/py.typed + - src/mistralai_gcp/sdkconfiguration.py - src/mistralai_gcp/types/__init__.py - src/mistralai_gcp/types/basemodel.py - src/mistralai_gcp/utils/__init__.py @@ -64,91 +153,28 @@ generatedFiles: - src/mistralai_gcp/utils/serializers.py - src/mistralai_gcp/utils/url.py - src/mistralai_gcp/utils/values.py - - src/mistralai_gcp/models/sdkerror.py - - src/mistralai_gcp/models/completionevent.py - - src/mistralai_gcp/models/completionchunk.py - - src/mistralai_gcp/models/completionresponsestreamchoice.py - - src/mistralai_gcp/models/deltamessage.py - - src/mistralai_gcp/models/toolcall.py - - src/mistralai_gcp/models/functioncall.py - - src/mistralai_gcp/models/tooltypes.py - - src/mistralai_gcp/models/usageinfo.py - - src/mistralai_gcp/models/httpvalidationerror.py - - src/mistralai_gcp/models/validationerror.py - - src/mistralai_gcp/models/chatcompletionstreamrequest.py - - src/mistralai_gcp/models/toolchoice.py - - src/mistralai_gcp/models/functionname.py - - src/mistralai_gcp/models/toolchoiceenum.py - - src/mistralai_gcp/models/tool.py - - src/mistralai_gcp/models/function.py - - src/mistralai_gcp/models/responseformat.py - - src/mistralai_gcp/models/responseformats.py - - src/mistralai_gcp/models/systemmessage.py - - src/mistralai_gcp/models/textchunk.py - - src/mistralai_gcp/models/usermessage.py - - src/mistralai_gcp/models/contentchunk.py - - src/mistralai_gcp/models/assistantmessage.py - - src/mistralai_gcp/models/toolmessage.py - - src/mistralai_gcp/models/chatcompletionresponse.py - - src/mistralai_gcp/models/chatcompletionchoice.py - - src/mistralai_gcp/models/chatcompletionrequest.py - - src/mistralai_gcp/models/fimcompletionstreamrequest.py - - src/mistralai_gcp/models/fimcompletionresponse.py - - src/mistralai_gcp/models/fimcompletionrequest.py - - src/mistralai_gcp/models/security.py - - src/mistralai_gcp/models/__init__.py - - docs/models/completionevent.md - - docs/models/completionchunk.md - - docs/models/finishreason.md - - docs/models/completionresponsestreamchoice.md - - docs/models/deltamessage.md - - docs/models/toolcall.md - - docs/models/arguments.md - - docs/models/functioncall.md - - docs/models/tooltypes.md - - docs/models/usageinfo.md - - docs/models/httpvalidationerror.md - - docs/models/loc.md - - docs/models/validationerror.md - - docs/models/stop.md - - docs/models/messages.md - - docs/models/chatcompletionstreamrequesttoolchoice.md - - docs/models/chatcompletionstreamrequest.md - - docs/models/toolchoice.md - - docs/models/functionname.md - - docs/models/toolchoiceenum.md - - docs/models/tool.md - - docs/models/function.md - - docs/models/responseformat.md - - docs/models/responseformats.md - - docs/models/role.md - - docs/models/content.md - - docs/models/systemmessage.md - - docs/models/type.md - - docs/models/textchunk.md - - docs/models/usermessagerole.md - - docs/models/usermessagecontent.md - - docs/models/usermessage.md - - docs/models/contentchunk.md - - docs/models/assistantmessagerole.md - - docs/models/assistantmessage.md - - docs/models/toolmessagerole.md - - docs/models/toolmessage.md - - docs/models/chatcompletionresponse.md - - docs/models/chatcompletionchoicefinishreason.md - - docs/models/chatcompletionchoice.md - - docs/models/chatcompletionrequeststop.md - - docs/models/chatcompletionrequestmessages.md - - docs/models/chatcompletionrequesttoolchoice.md - - docs/models/chatcompletionrequest.md - - 
docs/models/fimcompletionstreamrequeststop.md
-  - docs/models/fimcompletionstreamrequest.md
-  - docs/models/fimcompletionresponse.md
-  - docs/models/fimcompletionrequeststop.md
-  - docs/models/fimcompletionrequest.md
-  - docs/models/security.md
-  - docs/models/utils/retryconfig.md
-  - .gitattributes
-  - src/mistralai_gcp/_hooks/sdkhooks.py
-  - src/mistralai_gcp/_hooks/types.py
-  - src/mistralai_gcp/_hooks/__init__.py
+examples:
+  stream_chat:
+    speakeasy-default-stream-chat:
+      requestBody:
+        application/json: {"model": "mistral-small-latest", "messages": [{"content": []}]}
+  chat_completion_v1_chat_completions_post:
+    speakeasy-default-chat-completion-v1-chat-completions-post:
+      requestBody:
+        application/json: {"model": "mistral-small-latest", "messages": [{"content": ""}]}
+      responses:
+        "200":
+          application/json: {"id": "cmpl-e5cc70bb28c444948073e77776eb30ef", "object": "chat.completion", "model": "mistral-small-latest", "usage": {"prompt_tokens": 16, "completion_tokens": 34, "total_tokens": 50}, "created": 1702256327, "choices": []}
+        "422": {}
+  stream_fim:
+    speakeasy-default-stream-fim:
+      requestBody:
+        application/json: {"model": "codestral-2405", "prompt": "def", "suffix": "return a+b"}
+  fim_completion_v1_fim_completions_post:
+    speakeasy-default-fim-completion-v1-fim-completions-post:
+      requestBody:
+        application/json: {"model": "codestral-2405", "prompt": "def", "suffix": "return a+b"}
+      responses:
+        "200":
+          application/json: {"id": "cmpl-e5cc70bb28c444948073e77776eb30ef", "object": "chat.completion", "model": "codestral-latest", "usage": {"prompt_tokens": 16, "completion_tokens": 34, "total_tokens": 50}, "created": 1702256327, "choices": []}
+        "422": {}
diff --git a/packages/mistralai_gcp/.speakeasy/gen.yaml b/packages/mistralai_gcp/.speakeasy/gen.yaml
index 8cc5fa9..fdca4c7 100644
--- a/packages/mistralai_gcp/.speakeasy/gen.yaml
+++ b/packages/mistralai_gcp/.speakeasy/gen.yaml
@@ -12,7 +12,7 @@ generation:
   auth:
     oAuth2ClientCredentialsEnabled: true
 python:
-  version: 1.1.1
+  version: 1.2.1
   additionalDependencies:
     dev:
       pytest: ^8.2.2
diff --git a/packages/mistralai_gcp/poetry.lock b/packages/mistralai_gcp/poetry.lock
index 67c9cec..5483cc8 100644
--- a/packages/mistralai_gcp/poetry.lock
+++ b/packages/mistralai_gcp/poetry.lock
@@ -1,4 +1,4 @@
-# This file is automatically @generated by Poetry 1.8.1 and should not be changed by hand.
+# This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand.
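
The examples: block newly recorded in gen.lock above pins canonical request and response bodies for the four generated GCP operations (streaming and non-streaming chat and FIM completion). As a rough usage sketch only of how the recorded stream_chat body maps onto the regenerated Chat.stream() surface: the MistralGoogleCloud constructor arguments and the event field names below are assumptions based on the generated models (a CompletionEvent wrapping a CompletionChunk), not something this patch itself pins down.

    from mistralai_gcp import MistralGoogleCloud

    # Hypothetical client setup; credentials come from google-auth and the exact
    # constructor parameters are an assumption, not shown in this patch.
    sdk = MistralGoogleCloud(region="europe-west4", project_id="my-project")

    # Mirrors the recorded "stream_chat" example body above.
    events = sdk.chat.stream(
        model="mistral-small-latest",
        messages=[{"role": "user", "content": "Write one sentence about diffs."}],
    )

    for event in events:
        # Each event is a models.CompletionEvent; its .data is a CompletionChunk.
        chunk = event.data
        if chunk.choices:
            print(chunk.choices[0].delta.content or "", end="")

Per the regenerated chat.py shown later in this patch, stream() raises models.HTTPValidationError on a 422 and models.SDKError for other non-2xx responses, so production callers would wrap the loop accordingly.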
[[package]] name = "annotated-types" @@ -420,17 +420,6 @@ files = [ {file = "mypy_extensions-1.0.0.tar.gz", hash = "sha256:75dbf8955dc00442a438fc4d0666508a9a97b6bd41aa2f0ffe9d2f2725af0782"}, ] -[[package]] -name = "nodeenv" -version = "1.9.1" -description = "Node.js virtual environment builder" -optional = false -python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" -files = [ - {file = "nodeenv-1.9.1-py2.py3-none-any.whl", hash = "sha256:ba11c9782d29c27c70ffbdda2d7415098754709be8a7056d79a737cd901155c9"}, - {file = "nodeenv-1.9.1.tar.gz", hash = "sha256:6ec12890a2dab7946721edbfbcd91f3319c6ccc9aec47be7c7e6b7011ee6645f"}, -] - [[package]] name = "packaging" version = "24.1" @@ -500,18 +489,18 @@ pyasn1 = ">=0.4.6,<0.7.0" [[package]] name = "pydantic" -version = "2.8.2" +version = "2.9.1" description = "Data validation using Python type hints" optional = false python-versions = ">=3.8" files = [ - {file = "pydantic-2.8.2-py3-none-any.whl", hash = "sha256:73ee9fddd406dc318b885c7a2eab8a6472b68b8fb5ba8150949fc3db939f23c8"}, - {file = "pydantic-2.8.2.tar.gz", hash = "sha256:6f62c13d067b0755ad1c21a34bdd06c0c12625a22b0fc09c6b149816604f7c2a"}, + {file = "pydantic-2.9.1-py3-none-any.whl", hash = "sha256:7aff4db5fdf3cf573d4b3c30926a510a10e19a0774d38fc4967f78beb6deb612"}, + {file = "pydantic-2.9.1.tar.gz", hash = "sha256:1363c7d975c7036df0db2b4a61f2e062fbc0aa5ab5f2772e0ffc7191a4f4bce2"}, ] [package.dependencies] -annotated-types = ">=0.4.0" -pydantic-core = "2.20.1" +annotated-types = ">=0.6.0" +pydantic-core = "2.23.3" typing-extensions = [ {version = ">=4.6.1", markers = "python_version < \"3.13\""}, {version = ">=4.12.2", markers = "python_version >= \"3.13\""}, @@ -519,103 +508,104 @@ typing-extensions = [ [package.extras] email = ["email-validator (>=2.0.0)"] +timezone = ["tzdata"] [[package]] name = "pydantic-core" -version = "2.20.1" +version = "2.23.3" description = "Core functionality for Pydantic validation and serialization" optional = false python-versions = ">=3.8" files = [ - {file = "pydantic_core-2.20.1-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:3acae97ffd19bf091c72df4d726d552c473f3576409b2a7ca36b2f535ffff4a3"}, - {file = "pydantic_core-2.20.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:41f4c96227a67a013e7de5ff8f20fb496ce573893b7f4f2707d065907bffdbd6"}, - {file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5f239eb799a2081495ea659d8d4a43a8f42cd1fe9ff2e7e436295c38a10c286a"}, - {file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:53e431da3fc53360db73eedf6f7124d1076e1b4ee4276b36fb25514544ceb4a3"}, - {file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f1f62b2413c3a0e846c3b838b2ecd6c7a19ec6793b2a522745b0869e37ab5bc1"}, - {file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5d41e6daee2813ecceea8eda38062d69e280b39df793f5a942fa515b8ed67953"}, - {file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3d482efec8b7dc6bfaedc0f166b2ce349df0011f5d2f1f25537ced4cfc34fd98"}, - {file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e93e1a4b4b33daed65d781a57a522ff153dcf748dee70b40c7258c5861e1768a"}, - {file = "pydantic_core-2.20.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:e7c4ea22b6739b162c9ecaaa41d718dfad48a244909fe7ef4b54c0b530effc5a"}, - {file 
= "pydantic_core-2.20.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:4f2790949cf385d985a31984907fecb3896999329103df4e4983a4a41e13e840"}, - {file = "pydantic_core-2.20.1-cp310-none-win32.whl", hash = "sha256:5e999ba8dd90e93d57410c5e67ebb67ffcaadcea0ad973240fdfd3a135506250"}, - {file = "pydantic_core-2.20.1-cp310-none-win_amd64.whl", hash = "sha256:512ecfbefef6dac7bc5eaaf46177b2de58cdf7acac8793fe033b24ece0b9566c"}, - {file = "pydantic_core-2.20.1-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:d2a8fa9d6d6f891f3deec72f5cc668e6f66b188ab14bb1ab52422fe8e644f312"}, - {file = "pydantic_core-2.20.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:175873691124f3d0da55aeea1d90660a6ea7a3cfea137c38afa0a5ffabe37b88"}, - {file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:37eee5b638f0e0dcd18d21f59b679686bbd18917b87db0193ae36f9c23c355fc"}, - {file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:25e9185e2d06c16ee438ed39bf62935ec436474a6ac4f9358524220f1b236e43"}, - {file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:150906b40ff188a3260cbee25380e7494ee85048584998c1e66df0c7a11c17a6"}, - {file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8ad4aeb3e9a97286573c03df758fc7627aecdd02f1da04516a86dc159bf70121"}, - {file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d3f3ed29cd9f978c604708511a1f9c2fdcb6c38b9aae36a51905b8811ee5cbf1"}, - {file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b0dae11d8f5ded51699c74d9548dcc5938e0804cc8298ec0aa0da95c21fff57b"}, - {file = "pydantic_core-2.20.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:faa6b09ee09433b87992fb5a2859efd1c264ddc37280d2dd5db502126d0e7f27"}, - {file = "pydantic_core-2.20.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:9dc1b507c12eb0481d071f3c1808f0529ad41dc415d0ca11f7ebfc666e66a18b"}, - {file = "pydantic_core-2.20.1-cp311-none-win32.whl", hash = "sha256:fa2fddcb7107e0d1808086ca306dcade7df60a13a6c347a7acf1ec139aa6789a"}, - {file = "pydantic_core-2.20.1-cp311-none-win_amd64.whl", hash = "sha256:40a783fb7ee353c50bd3853e626f15677ea527ae556429453685ae32280c19c2"}, - {file = "pydantic_core-2.20.1-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:595ba5be69b35777474fa07f80fc260ea71255656191adb22a8c53aba4479231"}, - {file = "pydantic_core-2.20.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:a4f55095ad087474999ee28d3398bae183a66be4823f753cd7d67dd0153427c9"}, - {file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f9aa05d09ecf4c75157197f27cdc9cfaeb7c5f15021c6373932bf3e124af029f"}, - {file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e97fdf088d4b31ff4ba35db26d9cc472ac7ef4a2ff2badeabf8d727b3377fc52"}, - {file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bc633a9fe1eb87e250b5c57d389cf28998e4292336926b0b6cdaee353f89a237"}, - {file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d573faf8eb7e6b1cbbcb4f5b247c60ca8be39fe2c674495df0eb4318303137fe"}, - {file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:26dc97754b57d2fd00ac2b24dfa341abffc380b823211994c4efac7f13b9e90e"}, 
- {file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:33499e85e739a4b60c9dac710c20a08dc73cb3240c9a0e22325e671b27b70d24"}, - {file = "pydantic_core-2.20.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:bebb4d6715c814597f85297c332297c6ce81e29436125ca59d1159b07f423eb1"}, - {file = "pydantic_core-2.20.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:516d9227919612425c8ef1c9b869bbbee249bc91912c8aaffb66116c0b447ebd"}, - {file = "pydantic_core-2.20.1-cp312-none-win32.whl", hash = "sha256:469f29f9093c9d834432034d33f5fe45699e664f12a13bf38c04967ce233d688"}, - {file = "pydantic_core-2.20.1-cp312-none-win_amd64.whl", hash = "sha256:035ede2e16da7281041f0e626459bcae33ed998cca6a0a007a5ebb73414ac72d"}, - {file = "pydantic_core-2.20.1-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:0827505a5c87e8aa285dc31e9ec7f4a17c81a813d45f70b1d9164e03a813a686"}, - {file = "pydantic_core-2.20.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:19c0fa39fa154e7e0b7f82f88ef85faa2a4c23cc65aae2f5aea625e3c13c735a"}, - {file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4aa223cd1e36b642092c326d694d8bf59b71ddddc94cdb752bbbb1c5c91d833b"}, - {file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c336a6d235522a62fef872c6295a42ecb0c4e1d0f1a3e500fe949415761b8a19"}, - {file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7eb6a0587eded33aeefea9f916899d42b1799b7b14b8f8ff2753c0ac1741edac"}, - {file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:70c8daf4faca8da5a6d655f9af86faf6ec2e1768f4b8b9d0226c02f3d6209703"}, - {file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e9fa4c9bf273ca41f940bceb86922a7667cd5bf90e95dbb157cbb8441008482c"}, - {file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:11b71d67b4725e7e2a9f6e9c0ac1239bbc0c48cce3dc59f98635efc57d6dac83"}, - {file = "pydantic_core-2.20.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:270755f15174fb983890c49881e93f8f1b80f0b5e3a3cc1394a255706cabd203"}, - {file = "pydantic_core-2.20.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:c81131869240e3e568916ef4c307f8b99583efaa60a8112ef27a366eefba8ef0"}, - {file = "pydantic_core-2.20.1-cp313-none-win32.whl", hash = "sha256:b91ced227c41aa29c672814f50dbb05ec93536abf8f43cd14ec9521ea09afe4e"}, - {file = "pydantic_core-2.20.1-cp313-none-win_amd64.whl", hash = "sha256:65db0f2eefcaad1a3950f498aabb4875c8890438bc80b19362cf633b87a8ab20"}, - {file = "pydantic_core-2.20.1-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:4745f4ac52cc6686390c40eaa01d48b18997cb130833154801a442323cc78f91"}, - {file = "pydantic_core-2.20.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:a8ad4c766d3f33ba8fd692f9aa297c9058970530a32c728a2c4bfd2616d3358b"}, - {file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:41e81317dd6a0127cabce83c0c9c3fbecceae981c8391e6f1dec88a77c8a569a"}, - {file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:04024d270cf63f586ad41fff13fde4311c4fc13ea74676962c876d9577bcc78f"}, - {file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:eaad4ff2de1c3823fddf82f41121bdf453d922e9a238642b1dedb33c4e4f98ad"}, - {file = 
"pydantic_core-2.20.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:26ab812fa0c845df815e506be30337e2df27e88399b985d0bb4e3ecfe72df31c"}, - {file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3c5ebac750d9d5f2706654c638c041635c385596caf68f81342011ddfa1e5598"}, - {file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2aafc5a503855ea5885559eae883978c9b6d8c8993d67766ee73d82e841300dd"}, - {file = "pydantic_core-2.20.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:4868f6bd7c9d98904b748a2653031fc9c2f85b6237009d475b1008bfaeb0a5aa"}, - {file = "pydantic_core-2.20.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:aa2f457b4af386254372dfa78a2eda2563680d982422641a85f271c859df1987"}, - {file = "pydantic_core-2.20.1-cp38-none-win32.whl", hash = "sha256:225b67a1f6d602de0ce7f6c1c3ae89a4aa25d3de9be857999e9124f15dab486a"}, - {file = "pydantic_core-2.20.1-cp38-none-win_amd64.whl", hash = "sha256:6b507132dcfc0dea440cce23ee2182c0ce7aba7054576efc65634f080dbe9434"}, - {file = "pydantic_core-2.20.1-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:b03f7941783b4c4a26051846dea594628b38f6940a2fdc0df00b221aed39314c"}, - {file = "pydantic_core-2.20.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:1eedfeb6089ed3fad42e81a67755846ad4dcc14d73698c120a82e4ccf0f1f9f6"}, - {file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:635fee4e041ab9c479e31edda27fcf966ea9614fff1317e280d99eb3e5ab6fe2"}, - {file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:77bf3ac639c1ff567ae3b47f8d4cc3dc20f9966a2a6dd2311dcc055d3d04fb8a"}, - {file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7ed1b0132f24beeec5a78b67d9388656d03e6a7c837394f99257e2d55b461611"}, - {file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c6514f963b023aeee506678a1cf821fe31159b925c4b76fe2afa94cc70b3222b"}, - {file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:10d4204d8ca33146e761c79f83cc861df20e7ae9f6487ca290a97702daf56006"}, - {file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2d036c7187b9422ae5b262badb87a20a49eb6c5238b2004e96d4da1231badef1"}, - {file = "pydantic_core-2.20.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:9ebfef07dbe1d93efb94b4700f2d278494e9162565a54f124c404a5656d7ff09"}, - {file = "pydantic_core-2.20.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:6b9d9bb600328a1ce523ab4f454859e9d439150abb0906c5a1983c146580ebab"}, - {file = "pydantic_core-2.20.1-cp39-none-win32.whl", hash = "sha256:784c1214cb6dd1e3b15dd8b91b9a53852aed16671cc3fbe4786f4f1db07089e2"}, - {file = "pydantic_core-2.20.1-cp39-none-win_amd64.whl", hash = "sha256:d2fe69c5434391727efa54b47a1e7986bb0186e72a41b203df8f5b0a19a4f669"}, - {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:a45f84b09ac9c3d35dfcf6a27fd0634d30d183205230a0ebe8373a0e8cfa0906"}, - {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:d02a72df14dfdbaf228424573a07af10637bd490f0901cee872c4f434a735b94"}, - {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d2b27e6af28f07e2f195552b37d7d66b150adbaa39a6d327766ffd695799780f"}, - {file = 
"pydantic_core-2.20.1-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:084659fac3c83fd674596612aeff6041a18402f1e1bc19ca39e417d554468482"}, - {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:242b8feb3c493ab78be289c034a1f659e8826e2233786e36f2893a950a719bb6"}, - {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:38cf1c40a921d05c5edc61a785c0ddb4bed67827069f535d794ce6bcded919fc"}, - {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:e0bbdd76ce9aa5d4209d65f2b27fc6e5ef1312ae6c5333c26db3f5ade53a1e99"}, - {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:254ec27fdb5b1ee60684f91683be95e5133c994cc54e86a0b0963afa25c8f8a6"}, - {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:407653af5617f0757261ae249d3fba09504d7a71ab36ac057c938572d1bc9331"}, - {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:c693e916709c2465b02ca0ad7b387c4f8423d1db7b4649c551f27a529181c5ad"}, - {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5b5ff4911aea936a47d9376fd3ab17e970cc543d1b68921886e7f64bd28308d1"}, - {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:177f55a886d74f1808763976ac4efd29b7ed15c69f4d838bbd74d9d09cf6fa86"}, - {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:964faa8a861d2664f0c7ab0c181af0bea66098b1919439815ca8803ef136fc4e"}, - {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:4dd484681c15e6b9a977c785a345d3e378d72678fd5f1f3c0509608da24f2ac0"}, - {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:f6d6cff3538391e8486a431569b77921adfcdef14eb18fbf19b7c0a5294d4e6a"}, - {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:a6d511cc297ff0883bc3708b465ff82d7560193169a8b93260f74ecb0a5e08a7"}, - {file = "pydantic_core-2.20.1.tar.gz", hash = "sha256:26ca695eeee5f9f1aeeb211ffc12f10bcb6f71e2989988fda61dabd65db878d4"}, + {file = "pydantic_core-2.23.3-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:7f10a5d1b9281392f1bf507d16ac720e78285dfd635b05737c3911637601bae6"}, + {file = "pydantic_core-2.23.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:3c09a7885dd33ee8c65266e5aa7fb7e2f23d49d8043f089989726391dd7350c5"}, + {file = "pydantic_core-2.23.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6470b5a1ec4d1c2e9afe928c6cb37eb33381cab99292a708b8cb9aa89e62429b"}, + {file = "pydantic_core-2.23.3-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:9172d2088e27d9a185ea0a6c8cebe227a9139fd90295221d7d495944d2367700"}, + {file = "pydantic_core-2.23.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:86fc6c762ca7ac8fbbdff80d61b2c59fb6b7d144aa46e2d54d9e1b7b0e780e01"}, + {file = "pydantic_core-2.23.3-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f0cb80fd5c2df4898693aa841425ea1727b1b6d2167448253077d2a49003e0ed"}, + {file = "pydantic_core-2.23.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:03667cec5daf43ac4995cefa8aaf58f99de036204a37b889c24a80927b629cec"}, + {file = "pydantic_core-2.23.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = 
"sha256:047531242f8e9c2db733599f1c612925de095e93c9cc0e599e96cf536aaf56ba"}, + {file = "pydantic_core-2.23.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:5499798317fff7f25dbef9347f4451b91ac2a4330c6669821c8202fd354c7bee"}, + {file = "pydantic_core-2.23.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:bbb5e45eab7624440516ee3722a3044b83fff4c0372efe183fd6ba678ff681fe"}, + {file = "pydantic_core-2.23.3-cp310-none-win32.whl", hash = "sha256:8b5b3ed73abb147704a6e9f556d8c5cb078f8c095be4588e669d315e0d11893b"}, + {file = "pydantic_core-2.23.3-cp310-none-win_amd64.whl", hash = "sha256:2b603cde285322758a0279995b5796d64b63060bfbe214b50a3ca23b5cee3e83"}, + {file = "pydantic_core-2.23.3-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:c889fd87e1f1bbeb877c2ee56b63bb297de4636661cc9bbfcf4b34e5e925bc27"}, + {file = "pydantic_core-2.23.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:ea85bda3189fb27503af4c45273735bcde3dd31c1ab17d11f37b04877859ef45"}, + {file = "pydantic_core-2.23.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a7f7f72f721223f33d3dc98a791666ebc6a91fa023ce63733709f4894a7dc611"}, + {file = "pydantic_core-2.23.3-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:2b2b55b0448e9da68f56b696f313949cda1039e8ec7b5d294285335b53104b61"}, + {file = "pydantic_core-2.23.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c24574c7e92e2c56379706b9a3f07c1e0c7f2f87a41b6ee86653100c4ce343e5"}, + {file = "pydantic_core-2.23.3-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f2b05e6ccbee333a8f4b8f4d7c244fdb7a979e90977ad9c51ea31261e2085ce0"}, + {file = "pydantic_core-2.23.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e2c409ce1c219c091e47cb03feb3c4ed8c2b8e004efc940da0166aaee8f9d6c8"}, + {file = "pydantic_core-2.23.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d965e8b325f443ed3196db890d85dfebbb09f7384486a77461347f4adb1fa7f8"}, + {file = "pydantic_core-2.23.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:f56af3a420fb1ffaf43ece3ea09c2d27c444e7c40dcb7c6e7cf57aae764f2b48"}, + {file = "pydantic_core-2.23.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:5b01a078dd4f9a52494370af21aa52964e0a96d4862ac64ff7cea06e0f12d2c5"}, + {file = "pydantic_core-2.23.3-cp311-none-win32.whl", hash = "sha256:560e32f0df04ac69b3dd818f71339983f6d1f70eb99d4d1f8e9705fb6c34a5c1"}, + {file = "pydantic_core-2.23.3-cp311-none-win_amd64.whl", hash = "sha256:c744fa100fdea0d000d8bcddee95213d2de2e95b9c12be083370b2072333a0fa"}, + {file = "pydantic_core-2.23.3-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:e0ec50663feedf64d21bad0809f5857bac1ce91deded203efc4a84b31b2e4305"}, + {file = "pydantic_core-2.23.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:db6e6afcb95edbe6b357786684b71008499836e91f2a4a1e55b840955b341dbb"}, + {file = "pydantic_core-2.23.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:98ccd69edcf49f0875d86942f4418a4e83eb3047f20eb897bffa62a5d419c8fa"}, + {file = "pydantic_core-2.23.3-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:a678c1ac5c5ec5685af0133262103defb427114e62eafeda12f1357a12140162"}, + {file = "pydantic_core-2.23.3-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:01491d8b4d8db9f3391d93b0df60701e644ff0894352947f31fff3e52bd5c801"}, + {file = "pydantic_core-2.23.3-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:fcf31facf2796a2d3b7fe338fe8640aa0166e4e55b4cb108dbfd1058049bf4cb"}, + {file = "pydantic_core-2.23.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7200fd561fb3be06827340da066df4311d0b6b8eb0c2116a110be5245dceb326"}, + {file = "pydantic_core-2.23.3-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:dc1636770a809dee2bd44dd74b89cc80eb41172bcad8af75dd0bc182c2666d4c"}, + {file = "pydantic_core-2.23.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:67a5def279309f2e23014b608c4150b0c2d323bd7bccd27ff07b001c12c2415c"}, + {file = "pydantic_core-2.23.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:748bdf985014c6dd3e1e4cc3db90f1c3ecc7246ff5a3cd4ddab20c768b2f1dab"}, + {file = "pydantic_core-2.23.3-cp312-none-win32.whl", hash = "sha256:255ec6dcb899c115f1e2a64bc9ebc24cc0e3ab097775755244f77360d1f3c06c"}, + {file = "pydantic_core-2.23.3-cp312-none-win_amd64.whl", hash = "sha256:40b8441be16c1e940abebed83cd006ddb9e3737a279e339dbd6d31578b802f7b"}, + {file = "pydantic_core-2.23.3-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:6daaf5b1ba1369a22c8b050b643250e3e5efc6a78366d323294aee54953a4d5f"}, + {file = "pydantic_core-2.23.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:d015e63b985a78a3d4ccffd3bdf22b7c20b3bbd4b8227809b3e8e75bc37f9cb2"}, + {file = "pydantic_core-2.23.3-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a3fc572d9b5b5cfe13f8e8a6e26271d5d13f80173724b738557a8c7f3a8a3791"}, + {file = "pydantic_core-2.23.3-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:f6bd91345b5163ee7448bee201ed7dd601ca24f43f439109b0212e296eb5b423"}, + {file = "pydantic_core-2.23.3-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fc379c73fd66606628b866f661e8785088afe2adaba78e6bbe80796baf708a63"}, + {file = "pydantic_core-2.23.3-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:fbdce4b47592f9e296e19ac31667daed8753c8367ebb34b9a9bd89dacaa299c9"}, + {file = "pydantic_core-2.23.3-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fc3cf31edf405a161a0adad83246568647c54404739b614b1ff43dad2b02e6d5"}, + {file = "pydantic_core-2.23.3-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:8e22b477bf90db71c156f89a55bfe4d25177b81fce4aa09294d9e805eec13855"}, + {file = "pydantic_core-2.23.3-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:0a0137ddf462575d9bce863c4c95bac3493ba8e22f8c28ca94634b4a1d3e2bb4"}, + {file = "pydantic_core-2.23.3-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:203171e48946c3164fe7691fc349c79241ff8f28306abd4cad5f4f75ed80bc8d"}, + {file = "pydantic_core-2.23.3-cp313-none-win32.whl", hash = "sha256:76bdab0de4acb3f119c2a4bff740e0c7dc2e6de7692774620f7452ce11ca76c8"}, + {file = "pydantic_core-2.23.3-cp313-none-win_amd64.whl", hash = "sha256:37ba321ac2a46100c578a92e9a6aa33afe9ec99ffa084424291d84e456f490c1"}, + {file = "pydantic_core-2.23.3-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:d063c6b9fed7d992bcbebfc9133f4c24b7a7f215d6b102f3e082b1117cddb72c"}, + {file = "pydantic_core-2.23.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:6cb968da9a0746a0cf521b2b5ef25fc5a0bee9b9a1a8214e0a1cfaea5be7e8a4"}, + {file = "pydantic_core-2.23.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:edbefe079a520c5984e30e1f1f29325054b59534729c25b874a16a5048028d16"}, + {file = "pydantic_core-2.23.3-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = 
"sha256:cbaaf2ef20d282659093913da9d402108203f7cb5955020bd8d1ae5a2325d1c4"}, + {file = "pydantic_core-2.23.3-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fb539d7e5dc4aac345846f290cf504d2fd3c1be26ac4e8b5e4c2b688069ff4cf"}, + {file = "pydantic_core-2.23.3-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7e6f33503c5495059148cc486867e1d24ca35df5fc064686e631e314d959ad5b"}, + {file = "pydantic_core-2.23.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:04b07490bc2f6f2717b10c3969e1b830f5720b632f8ae2f3b8b1542394c47a8e"}, + {file = "pydantic_core-2.23.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:03795b9e8a5d7fda05f3873efc3f59105e2dcff14231680296b87b80bb327295"}, + {file = "pydantic_core-2.23.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:c483dab0f14b8d3f0df0c6c18d70b21b086f74c87ab03c59250dbf6d3c89baba"}, + {file = "pydantic_core-2.23.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:8b2682038e255e94baf2c473dca914a7460069171ff5cdd4080be18ab8a7fd6e"}, + {file = "pydantic_core-2.23.3-cp38-none-win32.whl", hash = "sha256:f4a57db8966b3a1d1a350012839c6a0099f0898c56512dfade8a1fe5fb278710"}, + {file = "pydantic_core-2.23.3-cp38-none-win_amd64.whl", hash = "sha256:13dd45ba2561603681a2676ca56006d6dee94493f03d5cadc055d2055615c3ea"}, + {file = "pydantic_core-2.23.3-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:82da2f4703894134a9f000e24965df73cc103e31e8c31906cc1ee89fde72cbd8"}, + {file = "pydantic_core-2.23.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:dd9be0a42de08f4b58a3cc73a123f124f65c24698b95a54c1543065baca8cf0e"}, + {file = "pydantic_core-2.23.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:89b731f25c80830c76fdb13705c68fef6a2b6dc494402987c7ea9584fe189f5d"}, + {file = "pydantic_core-2.23.3-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c6de1ec30c4bb94f3a69c9f5f2182baeda5b809f806676675e9ef6b8dc936f28"}, + {file = "pydantic_core-2.23.3-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bb68b41c3fa64587412b104294b9cbb027509dc2f6958446c502638d481525ef"}, + {file = "pydantic_core-2.23.3-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1c3980f2843de5184656aab58698011b42763ccba11c4a8c35936c8dd6c7068c"}, + {file = "pydantic_core-2.23.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:94f85614f2cba13f62c3c6481716e4adeae48e1eaa7e8bac379b9d177d93947a"}, + {file = "pydantic_core-2.23.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:510b7fb0a86dc8f10a8bb43bd2f97beb63cffad1203071dc434dac26453955cd"}, + {file = "pydantic_core-2.23.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:1eba2f7ce3e30ee2170410e2171867ea73dbd692433b81a93758ab2de6c64835"}, + {file = "pydantic_core-2.23.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:4b259fd8409ab84b4041b7b3f24dcc41e4696f180b775961ca8142b5b21d0e70"}, + {file = "pydantic_core-2.23.3-cp39-none-win32.whl", hash = "sha256:40d9bd259538dba2f40963286009bf7caf18b5112b19d2b55b09c14dde6db6a7"}, + {file = "pydantic_core-2.23.3-cp39-none-win_amd64.whl", hash = "sha256:5a8cd3074a98ee70173a8633ad3c10e00dcb991ecec57263aacb4095c5efb958"}, + {file = "pydantic_core-2.23.3-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:f399e8657c67313476a121a6944311fab377085ca7f490648c9af97fc732732d"}, + {file = "pydantic_core-2.23.3-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = 
"sha256:6b5547d098c76e1694ba85f05b595720d7c60d342f24d5aad32c3049131fa5c4"}, + {file = "pydantic_core-2.23.3-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0dda0290a6f608504882d9f7650975b4651ff91c85673341789a476b1159f211"}, + {file = "pydantic_core-2.23.3-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:65b6e5da855e9c55a0c67f4db8a492bf13d8d3316a59999cfbaf98cc6e401961"}, + {file = "pydantic_core-2.23.3-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:09e926397f392059ce0afdcac920df29d9c833256354d0c55f1584b0b70cf07e"}, + {file = "pydantic_core-2.23.3-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:87cfa0ed6b8c5bd6ae8b66de941cece179281239d482f363814d2b986b79cedc"}, + {file = "pydantic_core-2.23.3-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:e61328920154b6a44d98cabcb709f10e8b74276bc709c9a513a8c37a18786cc4"}, + {file = "pydantic_core-2.23.3-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:ce3317d155628301d649fe5e16a99528d5680af4ec7aa70b90b8dacd2d725c9b"}, + {file = "pydantic_core-2.23.3-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:e89513f014c6be0d17b00a9a7c81b1c426f4eb9224b15433f3d98c1a071f8433"}, + {file = "pydantic_core-2.23.3-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:4f62c1c953d7ee375df5eb2e44ad50ce2f5aff931723b398b8bc6f0ac159791a"}, + {file = "pydantic_core-2.23.3-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2718443bc671c7ac331de4eef9b673063b10af32a0bb385019ad61dcf2cc8f6c"}, + {file = "pydantic_core-2.23.3-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a0d90e08b2727c5d01af1b5ef4121d2f0c99fbee692c762f4d9d0409c9da6541"}, + {file = "pydantic_core-2.23.3-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2b676583fc459c64146debea14ba3af54e540b61762dfc0613dc4e98c3f66eeb"}, + {file = "pydantic_core-2.23.3-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:50e4661f3337977740fdbfbae084ae5693e505ca2b3130a6d4eb0f2281dc43b8"}, + {file = "pydantic_core-2.23.3-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:68f4cf373f0de6abfe599a38307f4417c1c867ca381c03df27c873a9069cda25"}, + {file = "pydantic_core-2.23.3-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:59d52cf01854cb26c46958552a21acb10dd78a52aa34c86f284e66b209db8cab"}, + {file = "pydantic_core-2.23.3.tar.gz", hash = "sha256:3cb0f65d8b4121c1b015c60104a685feb929a29d7cf204387c7f2688c7974690"}, ] [package.dependencies] @@ -651,24 +641,6 @@ typing-extensions = {version = ">=3.10.0", markers = "python_version < \"3.10\"" spelling = ["pyenchant (>=3.2,<4.0)"] testutils = ["gitpython (>3)"] -[[package]] -name = "pyright" -version = "1.1.374" -description = "Command line wrapper for pyright" -optional = false -python-versions = ">=3.7" -files = [ - {file = "pyright-1.1.374-py3-none-any.whl", hash = "sha256:55752bcf7a3646d293cd76710a983b71e16f6128aab2d42468e6eb7e46c0a70d"}, - {file = "pyright-1.1.374.tar.gz", hash = "sha256:d01b2daf864ba5e0362e56b844984865970d7204158e61eb685e2dab7804cb82"}, -] - -[package.dependencies] -nodeenv = ">=1.6.0" - -[package.extras] -all = ["twine (>=3.4.1)"] -dev = ["twine (>=3.4.1)"] - [[package]] name = "pytest" version = "8.3.2" @@ -859,4 +831,4 @@ zstd = ["zstandard (>=0.18.0)"] [metadata] lock-version = "2.0" python-versions = "^3.8" -content-hash = "6a01b3944f3e2b62891369e56c6e0e00815d65e9a137f0558ee13fd17f674669" +content-hash = 
"fc4716156ed5774ad5090ce141d42d8081750f92e5d1e3ef3192b5f13ef8e815" diff --git a/packages/mistralai_gcp/pyproject.toml b/packages/mistralai_gcp/pyproject.toml index c16f2be..0138e6b 100644 --- a/packages/mistralai_gcp/pyproject.toml +++ b/packages/mistralai_gcp/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "mistralai-gcp" -version = "1.1.1" +version = "1.2.1" description = "Python Client SDK for the Mistral AI API in GCP." authors = ["Mistral",] readme = "README-PYPI.md" @@ -21,7 +21,7 @@ eval-type-backport = "^0.2.0" google-auth = "2.27.0" httpx = "^0.27.0" jsonpath-python = "^1.0.6" -pydantic = "~2.8.2" +pydantic = "~2.9.0" python-dateutil = "2.8.2" requests = "^2.32.3" typing-inspect = "^0.9.0" @@ -29,7 +29,6 @@ typing-inspect = "^0.9.0" [tool.poetry.group.dev.dependencies] mypy = "==1.10.1" pylint = "==3.2.3" -pyright = "==1.1.374" pytest = "^8.2.2" pytest-asyncio = "^0.23.7" types-python-dateutil = "^2.9.0.20240316" diff --git a/packages/mistralai_gcp/scripts/prepare-readme.py b/packages/mistralai_gcp/scripts/prepare-readme.py index a8ef8ea..825d9de 100644 --- a/packages/mistralai_gcp/scripts/prepare-readme.py +++ b/packages/mistralai_gcp/scripts/prepare-readme.py @@ -3,7 +3,7 @@ import shutil try: - shutil.copyfile('README.md', 'README-PYPI.md') + shutil.copyfile("README.md", "README-PYPI.md") except Exception as e: print("Failed to copy README.md to README-PYPI.md") print(e) diff --git a/packages/mistralai_gcp/src/mistralai_gcp/_hooks/sdkhooks.py b/packages/mistralai_gcp/src/mistralai_gcp/_hooks/sdkhooks.py index ca3b7b3..b81c2a2 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/_hooks/sdkhooks.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/_hooks/sdkhooks.py @@ -1,11 +1,21 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" import httpx -from .types import SDKInitHook, BeforeRequestContext, BeforeRequestHook, AfterSuccessContext, AfterSuccessHook, AfterErrorContext, AfterErrorHook, Hooks +from .types import ( + SDKInitHook, + BeforeRequestContext, + BeforeRequestHook, + AfterSuccessContext, + AfterSuccessHook, + AfterErrorContext, + AfterErrorHook, + Hooks, +) from .registration import init_hooks from typing import List, Optional, Tuple from mistralai_gcp.httpclient import HttpClient + class SDKHooks(Hooks): def __init__(self) -> None: self.sdk_init_hooks: List[SDKInitHook] = [] @@ -31,7 +41,9 @@ def sdk_init(self, base_url: str, client: HttpClient) -> Tuple[str, HttpClient]: base_url, client = hook.sdk_init(base_url, client) return base_url, client - def before_request(self, hook_ctx: BeforeRequestContext, request: httpx.Request) -> httpx.Request: + def before_request( + self, hook_ctx: BeforeRequestContext, request: httpx.Request + ) -> httpx.Request: for hook in self.before_request_hooks: out = hook.before_request(hook_ctx, request) if isinstance(out, Exception): @@ -40,7 +52,9 @@ def before_request(self, hook_ctx: BeforeRequestContext, request: httpx.Request) return request - def after_success(self, hook_ctx: AfterSuccessContext, response: httpx.Response) -> httpx.Response: + def after_success( + self, hook_ctx: AfterSuccessContext, response: httpx.Response + ) -> httpx.Response: for hook in self.after_success_hooks: out = hook.after_success(hook_ctx, response) if isinstance(out, Exception): @@ -48,7 +62,12 @@ def after_success(self, hook_ctx: AfterSuccessContext, response: httpx.Response) response = out return response - def after_error(self, hook_ctx: AfterErrorContext, response: Optional[httpx.Response], error: Optional[Exception]) -> Tuple[Optional[httpx.Response], Optional[Exception]]: + def after_error( + self, + hook_ctx: AfterErrorContext, + response: Optional[httpx.Response], + error: Optional[Exception], + ) -> Tuple[Optional[httpx.Response], Optional[Exception]]: for hook in self.after_error_hooks: result = hook.after_error(hook_ctx, response, error) if isinstance(result, Exception): diff --git a/packages/mistralai_gcp/src/mistralai_gcp/_hooks/types.py b/packages/mistralai_gcp/src/mistralai_gcp/_hooks/types.py index f4ee7f3..417126f 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/_hooks/types.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/_hooks/types.py @@ -1,6 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - from abc import ABC, abstractmethod import httpx from mistralai_gcp.httpclient import HttpClient @@ -12,7 +11,12 @@ class HookContext: oauth2_scopes: Optional[List[str]] = None security_source: Optional[Union[Any, Callable[[], Any]]] = None - def __init__(self, operation_id: str, oauth2_scopes: Optional[List[str]], security_source: Optional[Union[Any, Callable[[], Any]]]): + def __init__( + self, + operation_id: str, + oauth2_scopes: Optional[List[str]], + security_source: Optional[Union[Any, Callable[[], Any]]], + ): self.operation_id = operation_id self.oauth2_scopes = oauth2_scopes self.security_source = security_source @@ -20,18 +24,23 @@ def __init__(self, operation_id: str, oauth2_scopes: Optional[List[str]], securi class BeforeRequestContext(HookContext): def __init__(self, hook_ctx: HookContext): - super().__init__(hook_ctx.operation_id, hook_ctx.oauth2_scopes, hook_ctx.security_source) + super().__init__( + hook_ctx.operation_id, hook_ctx.oauth2_scopes, hook_ctx.security_source + ) class AfterSuccessContext(HookContext): def __init__(self, hook_ctx: HookContext): - super().__init__(hook_ctx.operation_id, hook_ctx.oauth2_scopes, hook_ctx.security_source) - + super().__init__( + hook_ctx.operation_id, hook_ctx.oauth2_scopes, hook_ctx.security_source + ) class AfterErrorContext(HookContext): def __init__(self, hook_ctx: HookContext): - super().__init__(hook_ctx.operation_id, hook_ctx.oauth2_scopes, hook_ctx.security_source) + super().__init__( + hook_ctx.operation_id, hook_ctx.oauth2_scopes, hook_ctx.security_source + ) class SDKInitHook(ABC): @@ -42,19 +51,28 @@ def sdk_init(self, base_url: str, client: HttpClient) -> Tuple[str, HttpClient]: class BeforeRequestHook(ABC): @abstractmethod - def before_request(self, hook_ctx: BeforeRequestContext, request: httpx.Request) -> Union[httpx.Request, Exception]: + def before_request( + self, hook_ctx: BeforeRequestContext, request: httpx.Request + ) -> Union[httpx.Request, Exception]: pass class AfterSuccessHook(ABC): @abstractmethod - def after_success(self, hook_ctx: AfterSuccessContext, response: httpx.Response) -> Union[httpx.Response, Exception]: + def after_success( + self, hook_ctx: AfterSuccessContext, response: httpx.Response + ) -> Union[httpx.Response, Exception]: pass class AfterErrorHook(ABC): @abstractmethod - def after_error(self, hook_ctx: AfterErrorContext, response: Optional[httpx.Response], error: Optional[Exception]) -> Union[Tuple[Optional[httpx.Response], Optional[Exception]], Exception]: + def after_error( + self, + hook_ctx: AfterErrorContext, + response: Optional[httpx.Response], + error: Optional[Exception], + ) -> Union[Tuple[Optional[httpx.Response], Optional[Exception]], Exception]: pass diff --git a/packages/mistralai_gcp/src/mistralai_gcp/basesdk.py b/packages/mistralai_gcp/src/mistralai_gcp/basesdk.py index fd4854f..c647eba 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/basesdk.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/basesdk.py @@ -3,10 +3,15 @@ from .sdkconfiguration import SDKConfiguration import httpx from mistralai_gcp import models, utils -from mistralai_gcp._hooks import AfterErrorContext, AfterSuccessContext, BeforeRequestContext +from mistralai_gcp._hooks import ( + AfterErrorContext, + AfterSuccessContext, + BeforeRequestContext, +) from mistralai_gcp.utils import RetryConfig, SerializedRequestBody, get_body_content from typing import Callable, List, Optional, Tuple + class BaseSDK: sdk_configuration: SDKConfiguration @@ -24,6 +29,46 @@ def get_url(self, base_url, 
url_variables): return utils.template_url(base_url, url_variables) + def build_request_async( + self, + method, + path, + base_url, + url_variables, + request, + request_body_required, + request_has_path_params, + request_has_query_params, + user_agent_header, + accept_header_value, + _globals=None, + security=None, + timeout_ms: Optional[int] = None, + get_serialized_body: Optional[ + Callable[[], Optional[SerializedRequestBody]] + ] = None, + url_override: Optional[str] = None, + ) -> httpx.Request: + client = self.sdk_configuration.async_client + return self.build_request_with_client( + client, + method, + path, + base_url, + url_variables, + request, + request_body_required, + request_has_path_params, + request_has_query_params, + user_agent_header, + accept_header_value, + _globals, + security, + timeout_ms, + get_serialized_body, + url_override, + ) + def build_request( self, method, @@ -45,7 +90,46 @@ def build_request( url_override: Optional[str] = None, ) -> httpx.Request: client = self.sdk_configuration.client + return self.build_request_with_client( + client, + method, + path, + base_url, + url_variables, + request, + request_body_required, + request_has_path_params, + request_has_query_params, + user_agent_header, + accept_header_value, + _globals, + security, + timeout_ms, + get_serialized_body, + url_override, + ) + def build_request_with_client( + self, + client, + method, + path, + base_url, + url_variables, + request, + request_body_required, + request_has_path_params, + request_has_query_params, + user_agent_header, + accept_header_value, + _globals=None, + security=None, + timeout_ms: Optional[int] = None, + get_serialized_body: Optional[ + Callable[[], Optional[SerializedRequestBody]] + ] = None, + url_override: Optional[str] = None, + ) -> httpx.Request: query_params = {} url = url_override @@ -69,7 +153,7 @@ def build_request( if security is not None: if callable(security): security = security() - + if security is not None: security_headers, security_query_params = utils.get_security(security) headers = {**headers, **security_headers} @@ -129,7 +213,7 @@ def do(): req.method, req.url, req.headers, - get_body_content(req) + get_body_content(req), ) http_res = client.send(req, stream=stream) except Exception as e: @@ -149,7 +233,7 @@ def do(): http_res.status_code, http_res.url, http_res.headers, - "" if stream else http_res.text + "" if stream else http_res.text, ) if utils.match_status_codes(error_status_codes, http_res.status_code): @@ -189,6 +273,7 @@ async def do_request_async( ) -> httpx.Response: client = self.sdk_configuration.async_client logger = self.sdk_configuration.debug_logger + async def do(): http_res = None try: @@ -200,7 +285,7 @@ async def do(): req.method, req.url, req.headers, - get_body_content(req) + get_body_content(req), ) http_res = await client.send(req, stream=stream) except Exception as e: @@ -220,7 +305,7 @@ async def do(): http_res.status_code, http_res.url, http_res.headers, - "" if stream else http_res.text + "" if stream else http_res.text, ) if utils.match_status_codes(error_status_codes, http_res.status_code): diff --git a/packages/mistralai_gcp/src/mistralai_gcp/chat.py b/packages/mistralai_gcp/src/mistralai_gcp/chat.py index a4be67d..044dd19 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/chat.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/chat.py @@ -7,12 +7,13 @@ from mistralai_gcp.utils import eventstreaming from typing import Any, AsyncGenerator, Generator, List, Optional, Union + class Chat(BaseSDK): r"""Chat 
Completion API.""" - - + def stream( - self, *, + self, + *, model: Nullable[str], messages: Union[List[models.Messages], List[models.MessagesTypedDict]], temperature: Optional[float] = 0.7, @@ -22,9 +23,18 @@ def stream( stream: Optional[bool] = True, stop: Optional[Union[models.Stop, models.StopTypedDict]] = None, random_seed: OptionalNullable[int] = UNSET, - response_format: Optional[Union[models.ResponseFormat, models.ResponseFormatTypedDict]] = None, - tools: OptionalNullable[Union[List[models.Tool], List[models.ToolTypedDict]]] = UNSET, - tool_choice: Optional[Union[models.ChatCompletionStreamRequestToolChoice, models.ChatCompletionStreamRequestToolChoiceTypedDict]] = None, + response_format: Optional[ + Union[models.ResponseFormat, models.ResponseFormatTypedDict] + ] = None, + tools: OptionalNullable[ + Union[List[models.Tool], List[models.ToolTypedDict]] + ] = UNSET, + tool_choice: Optional[ + Union[ + models.ChatCompletionStreamRequestToolChoice, + models.ChatCompletionStreamRequestToolChoiceTypedDict, + ] + ] = None, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -39,12 +49,12 @@ def stream( :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. :param min_tokens: The minimum number of tokens to generate in the completion. - :param stream: + :param stream: :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. 
- :param response_format: - :param tools: - :param tool_choice: + :param response_format: + :param tools: + :param tool_choice: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -53,10 +63,10 @@ def stream( url_variables = None if timeout_ms is None: timeout_ms = self.sdk_configuration.timeout_ms - + if server_url is not None: base_url = server_url - + request = models.ChatCompletionStreamRequest( model=model, temperature=temperature, @@ -67,11 +77,15 @@ def stream( stop=stop, random_seed=random_seed, messages=utils.get_pydantic_model(messages, List[models.Messages]), - response_format=utils.get_pydantic_model(response_format, Optional[models.ResponseFormat]), + response_format=utils.get_pydantic_model( + response_format, Optional[models.ResponseFormat] + ), tools=utils.get_pydantic_model(tools, OptionalNullable[List[models.Tool]]), - tool_choice=utils.get_pydantic_model(tool_choice, Optional[models.ChatCompletionStreamRequestToolChoice]), + tool_choice=utils.get_pydantic_model( + tool_choice, Optional[models.ChatCompletionStreamRequestToolChoice] + ), ) - + req = self.build_request( method="POST", path="/streamRawPredict", @@ -84,48 +98,58 @@ def stream( user_agent_header="user-agent", accept_header_value="text/event-stream", security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body(request, False, False, "json", models.ChatCompletionStreamRequest), + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.ChatCompletionStreamRequest + ), timeout_ms=timeout_ms, ) - + if retries == UNSET: if self.sdk_configuration.retry_config is not UNSET: retries = self.sdk_configuration.retry_config retry_config = None if isinstance(retries, utils.RetryConfig): - retry_config = (retries, [ - "429", - "500", - "502", - "503", - "504" - ]) - + retry_config = (retries, ["429", "500", "502", "503", "504"]) + http_res = self.do_request( - hook_ctx=HookContext(operation_id="stream_chat", oauth2_scopes=[], security_source=self.sdk_configuration.security), + hook_ctx=HookContext( + operation_id="stream_chat", + oauth2_scopes=[], + security_source=self.sdk_configuration.security, + ), request=req, - error_status_codes=["422","4XX","5XX"], + error_status_codes=["422", "4XX", "5XX"], stream=True, - retry_config=retry_config + retry_config=retry_config, ) - + data: Any = None if utils.match_response(http_res, "200", "text/event-stream"): - return eventstreaming.stream_events(http_res, lambda raw: utils.unmarshal_json(raw, models.CompletionEvent), sentinel="[DONE]") + return eventstreaming.stream_events( + http_res, + lambda raw: utils.unmarshal_json(raw, models.CompletionEvent), + sentinel="[DONE]", + ) if utils.match_response(http_res, "422", "application/json"): data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) raise models.HTTPValidationError(data=data) - if utils.match_response(http_res, ["4XX","5XX"], "*"): - raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) - + if utils.match_response(http_res, ["4XX", "5XX"], "*"): + raise models.SDKError( + "API error occurred", http_res.status_code, http_res.text, http_res + ) + content_type = http_res.headers.get("Content-Type") - raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: 
{content_type})", http_res.status_code, http_res.text, http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res.text, + http_res, + ) - - async def stream_async( - self, *, + self, + *, model: Nullable[str], messages: Union[List[models.Messages], List[models.MessagesTypedDict]], temperature: Optional[float] = 0.7, @@ -135,9 +159,18 @@ async def stream_async( stream: Optional[bool] = True, stop: Optional[Union[models.Stop, models.StopTypedDict]] = None, random_seed: OptionalNullable[int] = UNSET, - response_format: Optional[Union[models.ResponseFormat, models.ResponseFormatTypedDict]] = None, - tools: OptionalNullable[Union[List[models.Tool], List[models.ToolTypedDict]]] = UNSET, - tool_choice: Optional[Union[models.ChatCompletionStreamRequestToolChoice, models.ChatCompletionStreamRequestToolChoiceTypedDict]] = None, + response_format: Optional[ + Union[models.ResponseFormat, models.ResponseFormatTypedDict] + ] = None, + tools: OptionalNullable[ + Union[List[models.Tool], List[models.ToolTypedDict]] + ] = UNSET, + tool_choice: Optional[ + Union[ + models.ChatCompletionStreamRequestToolChoice, + models.ChatCompletionStreamRequestToolChoiceTypedDict, + ] + ] = None, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -152,12 +185,12 @@ async def stream_async( :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. :param min_tokens: The minimum number of tokens to generate in the completion. - :param stream: + :param stream: :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. 
- :param response_format: - :param tools: - :param tool_choice: + :param response_format: + :param tools: + :param tool_choice: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -166,10 +199,10 @@ async def stream_async( url_variables = None if timeout_ms is None: timeout_ms = self.sdk_configuration.timeout_ms - + if server_url is not None: base_url = server_url - + request = models.ChatCompletionStreamRequest( model=model, temperature=temperature, @@ -180,12 +213,16 @@ async def stream_async( stop=stop, random_seed=random_seed, messages=utils.get_pydantic_model(messages, List[models.Messages]), - response_format=utils.get_pydantic_model(response_format, Optional[models.ResponseFormat]), + response_format=utils.get_pydantic_model( + response_format, Optional[models.ResponseFormat] + ), tools=utils.get_pydantic_model(tools, OptionalNullable[List[models.Tool]]), - tool_choice=utils.get_pydantic_model(tool_choice, Optional[models.ChatCompletionStreamRequestToolChoice]), + tool_choice=utils.get_pydantic_model( + tool_choice, Optional[models.ChatCompletionStreamRequestToolChoice] + ), ) - - req = self.build_request( + + req = self.build_request_async( method="POST", path="/streamRawPredict", base_url=base_url, @@ -197,60 +234,87 @@ async def stream_async( user_agent_header="user-agent", accept_header_value="text/event-stream", security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body(request, False, False, "json", models.ChatCompletionStreamRequest), + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.ChatCompletionStreamRequest + ), timeout_ms=timeout_ms, ) - + if retries == UNSET: if self.sdk_configuration.retry_config is not UNSET: retries = self.sdk_configuration.retry_config retry_config = None if isinstance(retries, utils.RetryConfig): - retry_config = (retries, [ - "429", - "500", - "502", - "503", - "504" - ]) - + retry_config = (retries, ["429", "500", "502", "503", "504"]) + http_res = await self.do_request_async( - hook_ctx=HookContext(operation_id="stream_chat", oauth2_scopes=[], security_source=self.sdk_configuration.security), + hook_ctx=HookContext( + operation_id="stream_chat", + oauth2_scopes=[], + security_source=self.sdk_configuration.security, + ), request=req, - error_status_codes=["422","4XX","5XX"], + error_status_codes=["422", "4XX", "5XX"], stream=True, - retry_config=retry_config + retry_config=retry_config, ) - + data: Any = None if utils.match_response(http_res, "200", "text/event-stream"): - return eventstreaming.stream_events_async(http_res, lambda raw: utils.unmarshal_json(raw, models.CompletionEvent), sentinel="[DONE]") + return eventstreaming.stream_events_async( + http_res, + lambda raw: utils.unmarshal_json(raw, models.CompletionEvent), + sentinel="[DONE]", + ) if utils.match_response(http_res, "422", "application/json"): data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) raise models.HTTPValidationError(data=data) - if utils.match_response(http_res, ["4XX","5XX"], "*"): - raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) - + if utils.match_response(http_res, ["4XX", "5XX"], "*"): + raise models.SDKError( + "API error occurred", http_res.status_code, http_res.text, http_res + ) + content_type = 
http_res.headers.get("Content-Type") - raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res.text, + http_res, + ) - - def complete( - self, *, + self, + *, model: Nullable[str], - messages: Union[List[models.ChatCompletionRequestMessages], List[models.ChatCompletionRequestMessagesTypedDict]], + messages: Union[ + List[models.ChatCompletionRequestMessages], + List[models.ChatCompletionRequestMessagesTypedDict], + ], temperature: Optional[float] = 0.7, top_p: Optional[float] = 1, max_tokens: OptionalNullable[int] = UNSET, min_tokens: OptionalNullable[int] = UNSET, stream: Optional[bool] = False, - stop: Optional[Union[models.ChatCompletionRequestStop, models.ChatCompletionRequestStopTypedDict]] = None, + stop: Optional[ + Union[ + models.ChatCompletionRequestStop, + models.ChatCompletionRequestStopTypedDict, + ] + ] = None, random_seed: OptionalNullable[int] = UNSET, - response_format: Optional[Union[models.ResponseFormat, models.ResponseFormatTypedDict]] = None, - tools: OptionalNullable[Union[List[models.Tool], List[models.ToolTypedDict]]] = UNSET, - tool_choice: Optional[Union[models.ChatCompletionRequestToolChoice, models.ChatCompletionRequestToolChoiceTypedDict]] = None, + response_format: Optional[ + Union[models.ResponseFormat, models.ResponseFormatTypedDict] + ] = None, + tools: OptionalNullable[ + Union[List[models.Tool], List[models.ToolTypedDict]] + ] = UNSET, + tool_choice: Optional[ + Union[ + models.ChatCompletionRequestToolChoice, + models.ChatCompletionRequestToolChoiceTypedDict, + ] + ] = None, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -266,9 +330,9 @@ def complete( :param stream: Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. 
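Every method in this patch resolves retries the same way: an explicit per-call argument wins, otherwise the SDK-wide retry_config is used, and any resulting RetryConfig is paired with the retryable status codes (429 plus the common 5xx gateway errors). A reduced sketch of that resolution, with UNSET and RetryConfig as stand-ins for the SDK's own sentinel and config class:

    from typing import Optional

    UNSET = object()  # stand-in for the SDK's UNSET sentinel

    class RetryConfig:  # illustrative; the real class also carries backoff settings
        pass

    def resolve_retries(per_call, sdk_default) -> Optional[tuple]:
        # Per-call argument takes precedence; fall back to the SDK default.
        retries = per_call
        if retries is UNSET and sdk_default is not UNSET:
            retries = sdk_default
        if isinstance(retries, RetryConfig):
            # Only these statuses are treated as transient and retried.
            return (retries, ["429", "500", "502", "503", "504"])
        return None  # no retry behaviour configured

    print(resolve_retries(UNSET, RetryConfig()))  # -> (config, [status codes])
    print(resolve_retries(UNSET, UNSET))          # -> None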
- :param response_format: - :param tools: - :param tool_choice: + :param response_format: + :param tools: + :param tool_choice: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -277,10 +341,10 @@ def complete( url_variables = None if timeout_ms is None: timeout_ms = self.sdk_configuration.timeout_ms - + if server_url is not None: base_url = server_url - + request = models.ChatCompletionRequest( model=model, temperature=temperature, @@ -290,12 +354,18 @@ def complete( stream=stream, stop=stop, random_seed=random_seed, - messages=utils.get_pydantic_model(messages, List[models.ChatCompletionRequestMessages]), - response_format=utils.get_pydantic_model(response_format, Optional[models.ResponseFormat]), + messages=utils.get_pydantic_model( + messages, List[models.ChatCompletionRequestMessages] + ), + response_format=utils.get_pydantic_model( + response_format, Optional[models.ResponseFormat] + ), tools=utils.get_pydantic_model(tools, OptionalNullable[List[models.Tool]]), - tool_choice=utils.get_pydantic_model(tool_choice, Optional[models.ChatCompletionRequestToolChoice]), + tool_choice=utils.get_pydantic_model( + tool_choice, Optional[models.ChatCompletionRequestToolChoice] + ), ) - + req = self.build_request( method="POST", path="/rawPredict", @@ -308,59 +378,84 @@ def complete( user_agent_header="user-agent", accept_header_value="application/json", security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body(request, False, False, "json", models.ChatCompletionRequest), + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.ChatCompletionRequest + ), timeout_ms=timeout_ms, ) - + if retries == UNSET: if self.sdk_configuration.retry_config is not UNSET: retries = self.sdk_configuration.retry_config retry_config = None if isinstance(retries, utils.RetryConfig): - retry_config = (retries, [ - "429", - "500", - "502", - "503", - "504" - ]) - + retry_config = (retries, ["429", "500", "502", "503", "504"]) + http_res = self.do_request( - hook_ctx=HookContext(operation_id="chat_completion_v1_chat_completions_post", oauth2_scopes=[], security_source=self.sdk_configuration.security), + hook_ctx=HookContext( + operation_id="chat_completion_v1_chat_completions_post", + oauth2_scopes=[], + security_source=self.sdk_configuration.security, + ), request=req, - error_status_codes=["422","4XX","5XX"], - retry_config=retry_config + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, ) - + data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, Optional[models.ChatCompletionResponse]) + return utils.unmarshal_json( + http_res.text, Optional[models.ChatCompletionResponse] + ) if utils.match_response(http_res, "422", "application/json"): data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) raise models.HTTPValidationError(data=data) - if utils.match_response(http_res, ["4XX","5XX"], "*"): - raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) - + if utils.match_response(http_res, ["4XX", "5XX"], "*"): + raise models.SDKError( + "API error occurred", http_res.status_code, http_res.text, http_res + ) + content_type = http_res.headers.get("Content-Type") - raise models.SDKError(f"Unexpected 
response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res.text, + http_res, + ) - - async def complete_async( - self, *, + self, + *, model: Nullable[str], - messages: Union[List[models.ChatCompletionRequestMessages], List[models.ChatCompletionRequestMessagesTypedDict]], + messages: Union[ + List[models.ChatCompletionRequestMessages], + List[models.ChatCompletionRequestMessagesTypedDict], + ], temperature: Optional[float] = 0.7, top_p: Optional[float] = 1, max_tokens: OptionalNullable[int] = UNSET, min_tokens: OptionalNullable[int] = UNSET, stream: Optional[bool] = False, - stop: Optional[Union[models.ChatCompletionRequestStop, models.ChatCompletionRequestStopTypedDict]] = None, + stop: Optional[ + Union[ + models.ChatCompletionRequestStop, + models.ChatCompletionRequestStopTypedDict, + ] + ] = None, random_seed: OptionalNullable[int] = UNSET, - response_format: Optional[Union[models.ResponseFormat, models.ResponseFormatTypedDict]] = None, - tools: OptionalNullable[Union[List[models.Tool], List[models.ToolTypedDict]]] = UNSET, - tool_choice: Optional[Union[models.ChatCompletionRequestToolChoice, models.ChatCompletionRequestToolChoiceTypedDict]] = None, + response_format: Optional[ + Union[models.ResponseFormat, models.ResponseFormatTypedDict] + ] = None, + tools: OptionalNullable[ + Union[List[models.Tool], List[models.ToolTypedDict]] + ] = UNSET, + tool_choice: Optional[ + Union[ + models.ChatCompletionRequestToolChoice, + models.ChatCompletionRequestToolChoiceTypedDict, + ] + ] = None, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -376,9 +471,9 @@ async def complete_async( :param stream: Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. 
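The response handling at the end of each method follows one ladder: a 200 with the expected content type is unmarshalled, a 422 raises HTTPValidationError with parsed details, any other 4XX/5XX raises a generic SDKError, and anything else falls through to the "unexpected response" SDKError. A condensed sketch of that ladder, where match_response is a simplified stand-in for utils.match_response and plain exceptions stand in for the SDK error types:

    import fnmatch

    def match_response(status: int, content_type: str, code_pat: str, ctype_pat: str) -> bool:
        # Simplified: "4XX"-style patterns match on the leading digit.
        code_ok = fnmatch.fnmatch(str(status), code_pat.replace("X", "?"))
        ctype_ok = ctype_pat == "*" or content_type.startswith(ctype_pat)
        return code_ok and ctype_ok

    def handle(status: int, content_type: str, body: str):
        if match_response(status, content_type, "200", "application/json"):
            return body  # the SDK unmarshals this into the response model
        if match_response(status, content_type, "422", "application/json"):
            raise ValueError(f"validation error: {body}")  # HTTPValidationError in the SDK
        if match_response(status, content_type, "4XX", "*") or match_response(status, content_type, "5XX", "*"):
            raise RuntimeError(f"API error occurred ({status})")  # SDKError in the SDK
        raise RuntimeError(f"Unexpected response (code: {status}, type: {content_type})")

    print(handle(200, "application/json", "{}"))  # -> {}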
- :param response_format: - :param tools: - :param tool_choice: + :param response_format: + :param tools: + :param tool_choice: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -387,10 +482,10 @@ async def complete_async( url_variables = None if timeout_ms is None: timeout_ms = self.sdk_configuration.timeout_ms - + if server_url is not None: base_url = server_url - + request = models.ChatCompletionRequest( model=model, temperature=temperature, @@ -400,13 +495,19 @@ async def complete_async( stream=stream, stop=stop, random_seed=random_seed, - messages=utils.get_pydantic_model(messages, List[models.ChatCompletionRequestMessages]), - response_format=utils.get_pydantic_model(response_format, Optional[models.ResponseFormat]), + messages=utils.get_pydantic_model( + messages, List[models.ChatCompletionRequestMessages] + ), + response_format=utils.get_pydantic_model( + response_format, Optional[models.ResponseFormat] + ), tools=utils.get_pydantic_model(tools, OptionalNullable[List[models.Tool]]), - tool_choice=utils.get_pydantic_model(tool_choice, Optional[models.ChatCompletionRequestToolChoice]), + tool_choice=utils.get_pydantic_model( + tool_choice, Optional[models.ChatCompletionRequestToolChoice] + ), ) - - req = self.build_request( + + req = self.build_request_async( method="POST", path="/rawPredict", base_url=base_url, @@ -418,41 +519,48 @@ async def complete_async( user_agent_header="user-agent", accept_header_value="application/json", security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body(request, False, False, "json", models.ChatCompletionRequest), + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.ChatCompletionRequest + ), timeout_ms=timeout_ms, ) - + if retries == UNSET: if self.sdk_configuration.retry_config is not UNSET: retries = self.sdk_configuration.retry_config retry_config = None if isinstance(retries, utils.RetryConfig): - retry_config = (retries, [ - "429", - "500", - "502", - "503", - "504" - ]) - + retry_config = (retries, ["429", "500", "502", "503", "504"]) + http_res = await self.do_request_async( - hook_ctx=HookContext(operation_id="chat_completion_v1_chat_completions_post", oauth2_scopes=[], security_source=self.sdk_configuration.security), + hook_ctx=HookContext( + operation_id="chat_completion_v1_chat_completions_post", + oauth2_scopes=[], + security_source=self.sdk_configuration.security, + ), request=req, - error_status_codes=["422","4XX","5XX"], - retry_config=retry_config + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, ) - + data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, Optional[models.ChatCompletionResponse]) + return utils.unmarshal_json( + http_res.text, Optional[models.ChatCompletionResponse] + ) if utils.match_response(http_res, "422", "application/json"): data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) raise models.HTTPValidationError(data=data) - if utils.match_response(http_res, ["4XX","5XX"], "*"): - raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) - - content_type = http_res.headers.get("Content-Type") - raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: 
{content_type})", http_res.status_code, http_res.text, http_res) + if utils.match_response(http_res, ["4XX", "5XX"], "*"): + raise models.SDKError( + "API error occurred", http_res.status_code, http_res.text, http_res + ) - + content_type = http_res.headers.get("Content-Type") + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res.text, + http_res, + ) diff --git a/packages/mistralai_gcp/src/mistralai_gcp/fim.py b/packages/mistralai_gcp/src/mistralai_gcp/fim.py index 47d8c9a..cda380c 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/fim.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/fim.py @@ -7,12 +7,13 @@ from mistralai_gcp.utils import eventstreaming from typing import Any, AsyncGenerator, Generator, Optional, Union + class Fim(BaseSDK): r"""Fill-in-the-middle API.""" - - + def stream( - self, *, + self, + *, model: Nullable[str], prompt: str, temperature: Optional[float] = 0.7, @@ -20,7 +21,12 @@ def stream( max_tokens: OptionalNullable[int] = UNSET, min_tokens: OptionalNullable[int] = UNSET, stream: Optional[bool] = True, - stop: Optional[Union[models.FIMCompletionStreamRequestStop, models.FIMCompletionStreamRequestStopTypedDict]] = None, + stop: Optional[ + Union[ + models.FIMCompletionStreamRequestStop, + models.FIMCompletionStreamRequestStopTypedDict, + ] + ] = None, random_seed: OptionalNullable[int] = UNSET, suffix: OptionalNullable[str] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, @@ -37,7 +43,7 @@ def stream( :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. :param min_tokens: The minimum number of tokens to generate in the completion. - :param stream: + :param stream: :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. :param suffix: Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. 
@@ -49,10 +55,10 @@ def stream( url_variables = None if timeout_ms is None: timeout_ms = self.sdk_configuration.timeout_ms - + if server_url is not None: base_url = server_url - + request = models.FIMCompletionStreamRequest( model=model, temperature=temperature, @@ -65,7 +71,7 @@ def stream( prompt=prompt, suffix=suffix, ) - + req = self.build_request( method="POST", path="/streamRawPredict#fim", @@ -78,48 +84,58 @@ def stream( user_agent_header="user-agent", accept_header_value="text/event-stream", security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body(request, False, False, "json", models.FIMCompletionStreamRequest), + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.FIMCompletionStreamRequest + ), timeout_ms=timeout_ms, ) - + if retries == UNSET: if self.sdk_configuration.retry_config is not UNSET: retries = self.sdk_configuration.retry_config retry_config = None if isinstance(retries, utils.RetryConfig): - retry_config = (retries, [ - "429", - "500", - "502", - "503", - "504" - ]) - + retry_config = (retries, ["429", "500", "502", "503", "504"]) + http_res = self.do_request( - hook_ctx=HookContext(operation_id="stream_fim", oauth2_scopes=[], security_source=self.sdk_configuration.security), + hook_ctx=HookContext( + operation_id="stream_fim", + oauth2_scopes=[], + security_source=self.sdk_configuration.security, + ), request=req, - error_status_codes=["422","4XX","5XX"], + error_status_codes=["422", "4XX", "5XX"], stream=True, - retry_config=retry_config + retry_config=retry_config, ) - + data: Any = None if utils.match_response(http_res, "200", "text/event-stream"): - return eventstreaming.stream_events(http_res, lambda raw: utils.unmarshal_json(raw, models.CompletionEvent), sentinel="[DONE]") + return eventstreaming.stream_events( + http_res, + lambda raw: utils.unmarshal_json(raw, models.CompletionEvent), + sentinel="[DONE]", + ) if utils.match_response(http_res, "422", "application/json"): data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) raise models.HTTPValidationError(data=data) - if utils.match_response(http_res, ["4XX","5XX"], "*"): - raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) - + if utils.match_response(http_res, ["4XX", "5XX"], "*"): + raise models.SDKError( + "API error occurred", http_res.status_code, http_res.text, http_res + ) + content_type = http_res.headers.get("Content-Type") - raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res.text, + http_res, + ) - - async def stream_async( - self, *, + self, + *, model: Nullable[str], prompt: str, temperature: Optional[float] = 0.7, @@ -127,7 +143,12 @@ async def stream_async( max_tokens: OptionalNullable[int] = UNSET, min_tokens: OptionalNullable[int] = UNSET, stream: Optional[bool] = True, - stop: Optional[Union[models.FIMCompletionStreamRequestStop, models.FIMCompletionStreamRequestStopTypedDict]] = None, + stop: Optional[ + Union[ + models.FIMCompletionStreamRequestStop, + models.FIMCompletionStreamRequestStopTypedDict, + ] + ] = None, random_seed: OptionalNullable[int] = UNSET, suffix: OptionalNullable[str] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, @@ -144,7 +165,7 @@ async def stream_async( 
:param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. :param min_tokens: The minimum number of tokens to generate in the completion. - :param stream: + :param stream: :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. :param suffix: Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. @@ -156,10 +177,10 @@ async def stream_async( url_variables = None if timeout_ms is None: timeout_ms = self.sdk_configuration.timeout_ms - + if server_url is not None: base_url = server_url - + request = models.FIMCompletionStreamRequest( model=model, temperature=temperature, @@ -172,8 +193,8 @@ async def stream_async( prompt=prompt, suffix=suffix, ) - - req = self.build_request( + + req = self.build_request_async( method="POST", path="/streamRawPredict#fim", base_url=base_url, @@ -185,48 +206,58 @@ async def stream_async( user_agent_header="user-agent", accept_header_value="text/event-stream", security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body(request, False, False, "json", models.FIMCompletionStreamRequest), + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.FIMCompletionStreamRequest + ), timeout_ms=timeout_ms, ) - + if retries == UNSET: if self.sdk_configuration.retry_config is not UNSET: retries = self.sdk_configuration.retry_config retry_config = None if isinstance(retries, utils.RetryConfig): - retry_config = (retries, [ - "429", - "500", - "502", - "503", - "504" - ]) - + retry_config = (retries, ["429", "500", "502", "503", "504"]) + http_res = await self.do_request_async( - hook_ctx=HookContext(operation_id="stream_fim", oauth2_scopes=[], security_source=self.sdk_configuration.security), + hook_ctx=HookContext( + operation_id="stream_fim", + oauth2_scopes=[], + security_source=self.sdk_configuration.security, + ), request=req, - error_status_codes=["422","4XX","5XX"], + error_status_codes=["422", "4XX", "5XX"], stream=True, - retry_config=retry_config + retry_config=retry_config, ) - + data: Any = None if utils.match_response(http_res, "200", "text/event-stream"): - return eventstreaming.stream_events_async(http_res, lambda raw: utils.unmarshal_json(raw, models.CompletionEvent), sentinel="[DONE]") + return eventstreaming.stream_events_async( + http_res, + lambda raw: utils.unmarshal_json(raw, models.CompletionEvent), + sentinel="[DONE]", + ) if utils.match_response(http_res, "422", "application/json"): data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) raise models.HTTPValidationError(data=data) - if utils.match_response(http_res, ["4XX","5XX"], "*"): - raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) - + if utils.match_response(http_res, ["4XX", "5XX"], "*"): + raise models.SDKError( + "API error 
occurred", http_res.status_code, http_res.text, http_res + ) + content_type = http_res.headers.get("Content-Type") - raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res.text, + http_res, + ) - - def complete( - self, *, + self, + *, model: Nullable[str], prompt: str, temperature: Optional[float] = 0.7, @@ -234,7 +265,12 @@ def complete( max_tokens: OptionalNullable[int] = UNSET, min_tokens: OptionalNullable[int] = UNSET, stream: Optional[bool] = False, - stop: Optional[Union[models.FIMCompletionRequestStop, models.FIMCompletionRequestStopTypedDict]] = None, + stop: Optional[ + Union[ + models.FIMCompletionRequestStop, + models.FIMCompletionRequestStopTypedDict, + ] + ] = None, random_seed: OptionalNullable[int] = UNSET, suffix: OptionalNullable[str] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, @@ -263,10 +299,10 @@ def complete( url_variables = None if timeout_ms is None: timeout_ms = self.sdk_configuration.timeout_ms - + if server_url is not None: base_url = server_url - + request = models.FIMCompletionRequest( model=model, temperature=temperature, @@ -279,7 +315,7 @@ def complete( prompt=prompt, suffix=suffix, ) - + req = self.build_request( method="POST", path="/rawPredict#fim", @@ -292,47 +328,55 @@ def complete( user_agent_header="user-agent", accept_header_value="application/json", security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body(request, False, False, "json", models.FIMCompletionRequest), + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.FIMCompletionRequest + ), timeout_ms=timeout_ms, ) - + if retries == UNSET: if self.sdk_configuration.retry_config is not UNSET: retries = self.sdk_configuration.retry_config retry_config = None if isinstance(retries, utils.RetryConfig): - retry_config = (retries, [ - "429", - "500", - "502", - "503", - "504" - ]) - + retry_config = (retries, ["429", "500", "502", "503", "504"]) + http_res = self.do_request( - hook_ctx=HookContext(operation_id="fim_completion_v1_fim_completions_post", oauth2_scopes=[], security_source=self.sdk_configuration.security), + hook_ctx=HookContext( + operation_id="fim_completion_v1_fim_completions_post", + oauth2_scopes=[], + security_source=self.sdk_configuration.security, + ), request=req, - error_status_codes=["422","4XX","5XX"], - retry_config=retry_config + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, ) - + data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, Optional[models.FIMCompletionResponse]) + return utils.unmarshal_json( + http_res.text, Optional[models.FIMCompletionResponse] + ) if utils.match_response(http_res, "422", "application/json"): data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) raise models.HTTPValidationError(data=data) - if utils.match_response(http_res, ["4XX","5XX"], "*"): - raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) - + if utils.match_response(http_res, ["4XX", "5XX"], "*"): + raise models.SDKError( + "API error occurred", http_res.status_code, http_res.text, http_res + ) + content_type = http_res.headers.get("Content-Type") - raise models.SDKError(f"Unexpected 
response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res.text, + http_res, + ) - - async def complete_async( - self, *, + self, + *, model: Nullable[str], prompt: str, temperature: Optional[float] = 0.7, @@ -340,7 +384,12 @@ async def complete_async( max_tokens: OptionalNullable[int] = UNSET, min_tokens: OptionalNullable[int] = UNSET, stream: Optional[bool] = False, - stop: Optional[Union[models.FIMCompletionRequestStop, models.FIMCompletionRequestStopTypedDict]] = None, + stop: Optional[ + Union[ + models.FIMCompletionRequestStop, + models.FIMCompletionRequestStopTypedDict, + ] + ] = None, random_seed: OptionalNullable[int] = UNSET, suffix: OptionalNullable[str] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, @@ -369,10 +418,10 @@ async def complete_async( url_variables = None if timeout_ms is None: timeout_ms = self.sdk_configuration.timeout_ms - + if server_url is not None: base_url = server_url - + request = models.FIMCompletionRequest( model=model, temperature=temperature, @@ -385,8 +434,8 @@ async def complete_async( prompt=prompt, suffix=suffix, ) - - req = self.build_request( + + req = self.build_request_async( method="POST", path="/rawPredict#fim", base_url=base_url, @@ -398,41 +447,48 @@ async def complete_async( user_agent_header="user-agent", accept_header_value="application/json", security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body(request, False, False, "json", models.FIMCompletionRequest), + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.FIMCompletionRequest + ), timeout_ms=timeout_ms, ) - + if retries == UNSET: if self.sdk_configuration.retry_config is not UNSET: retries = self.sdk_configuration.retry_config retry_config = None if isinstance(retries, utils.RetryConfig): - retry_config = (retries, [ - "429", - "500", - "502", - "503", - "504" - ]) - + retry_config = (retries, ["429", "500", "502", "503", "504"]) + http_res = await self.do_request_async( - hook_ctx=HookContext(operation_id="fim_completion_v1_fim_completions_post", oauth2_scopes=[], security_source=self.sdk_configuration.security), + hook_ctx=HookContext( + operation_id="fim_completion_v1_fim_completions_post", + oauth2_scopes=[], + security_source=self.sdk_configuration.security, + ), request=req, - error_status_codes=["422","4XX","5XX"], - retry_config=retry_config + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, ) - + data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, Optional[models.FIMCompletionResponse]) + return utils.unmarshal_json( + http_res.text, Optional[models.FIMCompletionResponse] + ) if utils.match_response(http_res, "422", "application/json"): data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) raise models.HTTPValidationError(data=data) - if utils.match_response(http_res, ["4XX","5XX"], "*"): - raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) - - content_type = http_res.headers.get("Content-Type") - raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + if utils.match_response(http_res, ["4XX", "5XX"], "*"): + 
raise models.SDKError( + "API error occurred", http_res.status_code, http_res.text, http_res + ) - + content_type = http_res.headers.get("Content-Type") + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res.text, + http_res, + ) diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/__init__.py b/packages/mistralai_gcp/src/mistralai_gcp/models/__init__.py index 95df4e9..84acf24 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/__init__.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/__init__.py @@ -1,27 +1,81 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" -from .assistantmessage import AssistantMessage, AssistantMessageRole, AssistantMessageTypedDict -from .chatcompletionchoice import ChatCompletionChoice, ChatCompletionChoiceFinishReason, ChatCompletionChoiceTypedDict -from .chatcompletionrequest import ChatCompletionRequest, ChatCompletionRequestMessages, ChatCompletionRequestMessagesTypedDict, ChatCompletionRequestStop, ChatCompletionRequestStopTypedDict, ChatCompletionRequestToolChoice, ChatCompletionRequestToolChoiceTypedDict, ChatCompletionRequestTypedDict -from .chatcompletionresponse import ChatCompletionResponse, ChatCompletionResponseTypedDict -from .chatcompletionstreamrequest import ChatCompletionStreamRequest, ChatCompletionStreamRequestToolChoice, ChatCompletionStreamRequestToolChoiceTypedDict, ChatCompletionStreamRequestTypedDict, Messages, MessagesTypedDict, Stop, StopTypedDict +from .assistantmessage import ( + AssistantMessage, + AssistantMessageRole, + AssistantMessageTypedDict, +) +from .chatcompletionchoice import ( + ChatCompletionChoice, + ChatCompletionChoiceFinishReason, + ChatCompletionChoiceTypedDict, +) +from .chatcompletionrequest import ( + ChatCompletionRequest, + ChatCompletionRequestMessages, + ChatCompletionRequestMessagesTypedDict, + ChatCompletionRequestStop, + ChatCompletionRequestStopTypedDict, + ChatCompletionRequestToolChoice, + ChatCompletionRequestToolChoiceTypedDict, + ChatCompletionRequestTypedDict, +) +from .chatcompletionresponse import ( + ChatCompletionResponse, + ChatCompletionResponseTypedDict, +) +from .chatcompletionstreamrequest import ( + ChatCompletionStreamRequest, + ChatCompletionStreamRequestToolChoice, + ChatCompletionStreamRequestToolChoiceTypedDict, + ChatCompletionStreamRequestTypedDict, + Messages, + MessagesTypedDict, + Stop, + StopTypedDict, +) from .completionchunk import CompletionChunk, CompletionChunkTypedDict from .completionevent import CompletionEvent, CompletionEventTypedDict -from .completionresponsestreamchoice import CompletionResponseStreamChoice, CompletionResponseStreamChoiceTypedDict, FinishReason +from .completionresponsestreamchoice import ( + CompletionResponseStreamChoice, + CompletionResponseStreamChoiceTypedDict, + FinishReason, +) from .contentchunk import ContentChunk, ContentChunkTypedDict from .deltamessage import DeltaMessage, DeltaMessageTypedDict -from .fimcompletionrequest import FIMCompletionRequest, FIMCompletionRequestStop, FIMCompletionRequestStopTypedDict, FIMCompletionRequestTypedDict +from .fimcompletionrequest import ( + FIMCompletionRequest, + FIMCompletionRequestStop, + FIMCompletionRequestStopTypedDict, + FIMCompletionRequestTypedDict, +) from .fimcompletionresponse import FIMCompletionResponse, FIMCompletionResponseTypedDict -from .fimcompletionstreamrequest import FIMCompletionStreamRequest, FIMCompletionStreamRequestStop, 
FIMCompletionStreamRequestStopTypedDict, FIMCompletionStreamRequestTypedDict +from .fimcompletionstreamrequest import ( + FIMCompletionStreamRequest, + FIMCompletionStreamRequestStop, + FIMCompletionStreamRequestStopTypedDict, + FIMCompletionStreamRequestTypedDict, +) from .function import Function, FunctionTypedDict -from .functioncall import Arguments, ArgumentsTypedDict, FunctionCall, FunctionCallTypedDict +from .functioncall import ( + Arguments, + ArgumentsTypedDict, + FunctionCall, + FunctionCallTypedDict, +) from .functionname import FunctionName, FunctionNameTypedDict from .httpvalidationerror import HTTPValidationError, HTTPValidationErrorData from .responseformat import ResponseFormat, ResponseFormatTypedDict from .responseformats import ResponseFormats from .sdkerror import SDKError from .security import Security, SecurityTypedDict -from .systemmessage import Content, ContentTypedDict, Role, SystemMessage, SystemMessageTypedDict +from .systemmessage import ( + Content, + ContentTypedDict, + Role, + SystemMessage, + SystemMessageTypedDict, +) from .textchunk import TextChunk, TextChunkTypedDict, Type from .tool import Tool, ToolTypedDict from .toolcall import ToolCall, ToolCallTypedDict @@ -30,7 +84,110 @@ from .toolmessage import ToolMessage, ToolMessageRole, ToolMessageTypedDict from .tooltypes import ToolTypes from .usageinfo import UsageInfo, UsageInfoTypedDict -from .usermessage import UserMessage, UserMessageContent, UserMessageContentTypedDict, UserMessageRole, UserMessageTypedDict -from .validationerror import Loc, LocTypedDict, ValidationError, ValidationErrorTypedDict +from .usermessage import ( + UserMessage, + UserMessageContent, + UserMessageContentTypedDict, + UserMessageRole, + UserMessageTypedDict, +) +from .validationerror import ( + Loc, + LocTypedDict, + ValidationError, + ValidationErrorTypedDict, +) -__all__ = ["Arguments", "ArgumentsTypedDict", "AssistantMessage", "AssistantMessageRole", "AssistantMessageTypedDict", "ChatCompletionChoice", "ChatCompletionChoiceFinishReason", "ChatCompletionChoiceTypedDict", "ChatCompletionRequest", "ChatCompletionRequestMessages", "ChatCompletionRequestMessagesTypedDict", "ChatCompletionRequestStop", "ChatCompletionRequestStopTypedDict", "ChatCompletionRequestToolChoice", "ChatCompletionRequestToolChoiceTypedDict", "ChatCompletionRequestTypedDict", "ChatCompletionResponse", "ChatCompletionResponseTypedDict", "ChatCompletionStreamRequest", "ChatCompletionStreamRequestToolChoice", "ChatCompletionStreamRequestToolChoiceTypedDict", "ChatCompletionStreamRequestTypedDict", "CompletionChunk", "CompletionChunkTypedDict", "CompletionEvent", "CompletionEventTypedDict", "CompletionResponseStreamChoice", "CompletionResponseStreamChoiceTypedDict", "Content", "ContentChunk", "ContentChunkTypedDict", "ContentTypedDict", "DeltaMessage", "DeltaMessageTypedDict", "FIMCompletionRequest", "FIMCompletionRequestStop", "FIMCompletionRequestStopTypedDict", "FIMCompletionRequestTypedDict", "FIMCompletionResponse", "FIMCompletionResponseTypedDict", "FIMCompletionStreamRequest", "FIMCompletionStreamRequestStop", "FIMCompletionStreamRequestStopTypedDict", "FIMCompletionStreamRequestTypedDict", "FinishReason", "Function", "FunctionCall", "FunctionCallTypedDict", "FunctionName", "FunctionNameTypedDict", "FunctionTypedDict", "HTTPValidationError", "HTTPValidationErrorData", "Loc", "LocTypedDict", "Messages", "MessagesTypedDict", "ResponseFormat", "ResponseFormatTypedDict", "ResponseFormats", "Role", "SDKError", "Security", "SecurityTypedDict", "Stop", 
"StopTypedDict", "SystemMessage", "SystemMessageTypedDict", "TextChunk", "TextChunkTypedDict", "Tool", "ToolCall", "ToolCallTypedDict", "ToolChoice", "ToolChoiceEnum", "ToolChoiceTypedDict", "ToolMessage", "ToolMessageRole", "ToolMessageTypedDict", "ToolTypedDict", "ToolTypes", "Type", "UsageInfo", "UsageInfoTypedDict", "UserMessage", "UserMessageContent", "UserMessageContentTypedDict", "UserMessageRole", "UserMessageTypedDict", "ValidationError", "ValidationErrorTypedDict"] +__all__ = [ + "Arguments", + "ArgumentsTypedDict", + "AssistantMessage", + "AssistantMessageRole", + "AssistantMessageTypedDict", + "ChatCompletionChoice", + "ChatCompletionChoiceFinishReason", + "ChatCompletionChoiceTypedDict", + "ChatCompletionRequest", + "ChatCompletionRequestMessages", + "ChatCompletionRequestMessagesTypedDict", + "ChatCompletionRequestStop", + "ChatCompletionRequestStopTypedDict", + "ChatCompletionRequestToolChoice", + "ChatCompletionRequestToolChoiceTypedDict", + "ChatCompletionRequestTypedDict", + "ChatCompletionResponse", + "ChatCompletionResponseTypedDict", + "ChatCompletionStreamRequest", + "ChatCompletionStreamRequestToolChoice", + "ChatCompletionStreamRequestToolChoiceTypedDict", + "ChatCompletionStreamRequestTypedDict", + "CompletionChunk", + "CompletionChunkTypedDict", + "CompletionEvent", + "CompletionEventTypedDict", + "CompletionResponseStreamChoice", + "CompletionResponseStreamChoiceTypedDict", + "Content", + "ContentChunk", + "ContentChunkTypedDict", + "ContentTypedDict", + "DeltaMessage", + "DeltaMessageTypedDict", + "FIMCompletionRequest", + "FIMCompletionRequestStop", + "FIMCompletionRequestStopTypedDict", + "FIMCompletionRequestTypedDict", + "FIMCompletionResponse", + "FIMCompletionResponseTypedDict", + "FIMCompletionStreamRequest", + "FIMCompletionStreamRequestStop", + "FIMCompletionStreamRequestStopTypedDict", + "FIMCompletionStreamRequestTypedDict", + "FinishReason", + "Function", + "FunctionCall", + "FunctionCallTypedDict", + "FunctionName", + "FunctionNameTypedDict", + "FunctionTypedDict", + "HTTPValidationError", + "HTTPValidationErrorData", + "Loc", + "LocTypedDict", + "Messages", + "MessagesTypedDict", + "ResponseFormat", + "ResponseFormatTypedDict", + "ResponseFormats", + "Role", + "SDKError", + "Security", + "SecurityTypedDict", + "Stop", + "StopTypedDict", + "SystemMessage", + "SystemMessageTypedDict", + "TextChunk", + "TextChunkTypedDict", + "Tool", + "ToolCall", + "ToolCallTypedDict", + "ToolChoice", + "ToolChoiceEnum", + "ToolChoiceTypedDict", + "ToolMessage", + "ToolMessageRole", + "ToolMessageTypedDict", + "ToolTypedDict", + "ToolTypes", + "Type", + "UsageInfo", + "UsageInfoTypedDict", + "UserMessage", + "UserMessageContent", + "UserMessageContentTypedDict", + "UserMessageRole", + "UserMessageTypedDict", + "ValidationError", + "ValidationErrorTypedDict", +] diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/assistantmessage.py b/packages/mistralai_gcp/src/mistralai_gcp/models/assistantmessage.py index 2f37f03..8e6212b 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/assistantmessage.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/assistantmessage.py @@ -2,7 +2,13 @@ from __future__ import annotations from .toolcall import ToolCall, ToolCallTypedDict -from mistralai_gcp.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from mistralai_gcp.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) import pydantic from pydantic import model_serializer from typing import Final, 
List, Literal, Optional, TypedDict @@ -11,20 +17,26 @@ AssistantMessageRole = Literal["assistant"] + class AssistantMessageTypedDict(TypedDict): content: NotRequired[Nullable[str]] tool_calls: NotRequired[Nullable[List[ToolCallTypedDict]]] prefix: NotRequired[bool] r"""Set this to `true` when adding an assistant message as prefix to condition the model response. The role of the prefix message is to force the model to start its answer by the content of the message.""" - + class AssistantMessage(BaseModel): + # fmt: off ROLE: Annotated[Final[Optional[AssistantMessageRole]], pydantic.Field(alias="role")] = "assistant" # type: ignore + # fmt: on + content: OptionalNullable[str] = UNSET + tool_calls: OptionalNullable[List[ToolCall]] = UNSET + prefix: Optional[bool] = False r"""Set this to `true` when adding an assistant message as prefix to condition the model response. The role of the prefix message is to force the model to start its answer by the content of the message.""" - + @model_serializer(mode="wrap") def serialize_model(self, handler): optional_fields = ["role", "content", "tool_calls", "prefix"] @@ -38,9 +50,13 @@ def serialize_model(self, handler): for n, f in self.model_fields.items(): k = f.alias or n val = serialized.get(k) + serialized.pop(k, None) optional_nullable = k in optional_fields and k in nullable_fields - is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member if val is not None and val != UNSET_SENTINEL: m[k] = val @@ -50,4 +66,3 @@ def serialize_model(self, handler): m[k] = val return m - diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionchoice.py b/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionchoice.py index 67ff1f5..c585e1e 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionchoice.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionchoice.py @@ -6,16 +6,20 @@ from typing import Literal, TypedDict -ChatCompletionChoiceFinishReason = Literal["stop", "length", "model_length", "error", "tool_calls"] +ChatCompletionChoiceFinishReason = Literal[ + "stop", "length", "model_length", "error", "tool_calls" +] + class ChatCompletionChoiceTypedDict(TypedDict): index: int message: AssistantMessageTypedDict finish_reason: ChatCompletionChoiceFinishReason - + class ChatCompletionChoice(BaseModel): index: int + message: AssistantMessage + finish_reason: ChatCompletionChoiceFinishReason - diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionrequest.py b/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionrequest.py index 90b8546..dbe6f55 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionrequest.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionrequest.py @@ -9,7 +9,13 @@ from .toolchoiceenum import ToolChoiceEnum from .toolmessage import ToolMessage, ToolMessageTypedDict from .usermessage import UserMessage, UserMessageTypedDict -from mistralai_gcp.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from mistralai_gcp.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) from mistralai_gcp.utils import get_discriminator from pydantic import Discriminator, Tag, model_serializer from typing import List, Optional, TypedDict, Union @@ -24,10 +30,23 @@ r"""Stop generation if this token is 
detected. Or if one of these tokens is detected when providing an array""" -ChatCompletionRequestMessagesTypedDict = Union[SystemMessageTypedDict, UserMessageTypedDict, AssistantMessageTypedDict, ToolMessageTypedDict] +ChatCompletionRequestMessagesTypedDict = Union[ + SystemMessageTypedDict, + UserMessageTypedDict, + AssistantMessageTypedDict, + ToolMessageTypedDict, +] -ChatCompletionRequestMessages = Annotated[Union[Annotated[AssistantMessage, Tag("assistant")], Annotated[SystemMessage, Tag("system")], Annotated[ToolMessage, Tag("tool")], Annotated[UserMessage, Tag("user")]], Discriminator(lambda m: get_discriminator(m, "role", "role"))] +ChatCompletionRequestMessages = Annotated[ + Union[ + Annotated[AssistantMessage, Tag("assistant")], + Annotated[SystemMessage, Tag("system")], + Annotated[ToolMessage, Tag("tool")], + Annotated[UserMessage, Tag("user")], + ], + Discriminator(lambda m: get_discriminator(m, "role", "role")), +] ChatCompletionRequestToolChoiceTypedDict = Union[ToolChoiceTypedDict, ToolChoiceEnum] @@ -58,34 +77,56 @@ class ChatCompletionRequestTypedDict(TypedDict): response_format: NotRequired[ResponseFormatTypedDict] tools: NotRequired[Nullable[List[ToolTypedDict]]] tool_choice: NotRequired[ChatCompletionRequestToolChoiceTypedDict] - + class ChatCompletionRequest(BaseModel): model: Nullable[str] r"""ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions.""" + messages: List[ChatCompletionRequestMessages] r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" + temperature: Optional[float] = 0.7 r"""What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.""" + top_p: Optional[float] = 1 r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" + max_tokens: OptionalNullable[int] = UNSET r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" + min_tokens: OptionalNullable[int] = UNSET r"""The minimum number of tokens to generate in the completion.""" + stream: Optional[bool] = False r"""Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.""" + stop: Optional[ChatCompletionRequestStop] = None r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + random_seed: OptionalNullable[int] = UNSET r"""The seed to use for random sampling. 
If set, different calls will generate deterministic results.""" + response_format: Optional[ResponseFormat] = None + tools: OptionalNullable[List[Tool]] = UNSET + tool_choice: Optional[ChatCompletionRequestToolChoice] = None - + @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["temperature", "top_p", "max_tokens", "min_tokens", "stream", "stop", "random_seed", "response_format", "tools", "tool_choice"] + optional_fields = [ + "temperature", + "top_p", + "max_tokens", + "min_tokens", + "stream", + "stop", + "random_seed", + "response_format", + "tools", + "tool_choice", + ] nullable_fields = ["model", "max_tokens", "min_tokens", "random_seed", "tools"] null_default_fields = [] @@ -96,9 +137,13 @@ def serialize_model(self, handler): for n, f in self.model_fields.items(): k = f.alias or n val = serialized.get(k) + serialized.pop(k, None) optional_nullable = k in optional_fields and k in nullable_fields - is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member if val is not None and val != UNSET_SENTINEL: m[k] = val @@ -108,4 +153,3 @@ def serialize_model(self, handler): m[k] = val return m - diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionresponse.py b/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionresponse.py index c8ccdfc..5fb1044 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionresponse.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionresponse.py @@ -15,13 +15,17 @@ class ChatCompletionResponseTypedDict(TypedDict): usage: UsageInfoTypedDict created: NotRequired[int] choices: NotRequired[List[ChatCompletionChoiceTypedDict]] - + class ChatCompletionResponse(BaseModel): id: str + object: str + model: str + usage: UsageInfo + created: Optional[int] = None + choices: Optional[List[ChatCompletionChoice]] = None - diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionstreamrequest.py b/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionstreamrequest.py index afe95c6..5bb7059 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionstreamrequest.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionstreamrequest.py @@ -9,7 +9,13 @@ from .toolchoiceenum import ToolChoiceEnum from .toolmessage import ToolMessage, ToolMessageTypedDict from .usermessage import UserMessage, UserMessageTypedDict -from mistralai_gcp.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from mistralai_gcp.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) from mistralai_gcp.utils import get_discriminator from pydantic import Discriminator, Tag, model_serializer from typing import List, Optional, TypedDict, Union @@ -24,13 +30,28 @@ r"""Stop generation if this token is detected. 
Or if one of these tokens is detected when providing an array""" -MessagesTypedDict = Union[SystemMessageTypedDict, UserMessageTypedDict, AssistantMessageTypedDict, ToolMessageTypedDict] +MessagesTypedDict = Union[ + SystemMessageTypedDict, + UserMessageTypedDict, + AssistantMessageTypedDict, + ToolMessageTypedDict, +] -Messages = Annotated[Union[Annotated[AssistantMessage, Tag("assistant")], Annotated[SystemMessage, Tag("system")], Annotated[ToolMessage, Tag("tool")], Annotated[UserMessage, Tag("user")]], Discriminator(lambda m: get_discriminator(m, "role", "role"))] +Messages = Annotated[ + Union[ + Annotated[AssistantMessage, Tag("assistant")], + Annotated[SystemMessage, Tag("system")], + Annotated[ToolMessage, Tag("tool")], + Annotated[UserMessage, Tag("user")], + ], + Discriminator(lambda m: get_discriminator(m, "role", "role")), +] -ChatCompletionStreamRequestToolChoiceTypedDict = Union[ToolChoiceTypedDict, ToolChoiceEnum] +ChatCompletionStreamRequestToolChoiceTypedDict = Union[ + ToolChoiceTypedDict, ToolChoiceEnum +] ChatCompletionStreamRequestToolChoice = Union[ToolChoice, ToolChoiceEnum] @@ -57,33 +78,55 @@ class ChatCompletionStreamRequestTypedDict(TypedDict): response_format: NotRequired[ResponseFormatTypedDict] tools: NotRequired[Nullable[List[ToolTypedDict]]] tool_choice: NotRequired[ChatCompletionStreamRequestToolChoiceTypedDict] - + class ChatCompletionStreamRequest(BaseModel): model: Nullable[str] r"""ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions.""" + messages: List[Messages] r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" + temperature: Optional[float] = 0.7 r"""What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.""" + top_p: Optional[float] = 1 r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" + max_tokens: OptionalNullable[int] = UNSET r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" + min_tokens: OptionalNullable[int] = UNSET r"""The minimum number of tokens to generate in the completion.""" + stream: Optional[bool] = True + stop: Optional[Stop] = None r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + random_seed: OptionalNullable[int] = UNSET r"""The seed to use for random sampling. 
If set, different calls will generate deterministic results.""" + response_format: Optional[ResponseFormat] = None + tools: OptionalNullable[List[Tool]] = UNSET + tool_choice: Optional[ChatCompletionStreamRequestToolChoice] = None - + @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["temperature", "top_p", "max_tokens", "min_tokens", "stream", "stop", "random_seed", "response_format", "tools", "tool_choice"] + optional_fields = [ + "temperature", + "top_p", + "max_tokens", + "min_tokens", + "stream", + "stop", + "random_seed", + "response_format", + "tools", + "tool_choice", + ] nullable_fields = ["model", "max_tokens", "min_tokens", "random_seed", "tools"] null_default_fields = [] @@ -94,9 +137,13 @@ def serialize_model(self, handler): for n, f in self.model_fields.items(): k = f.alias or n val = serialized.get(k) + serialized.pop(k, None) optional_nullable = k in optional_fields and k in nullable_fields - is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member if val is not None and val != UNSET_SENTINEL: m[k] = val @@ -106,4 +153,3 @@ def serialize_model(self, handler): m[k] = val return m - diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/completionchunk.py b/packages/mistralai_gcp/src/mistralai_gcp/models/completionchunk.py index 52266f4..f0561ef 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/completionchunk.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/completionchunk.py @@ -1,7 +1,10 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" from __future__ import annotations -from .completionresponsestreamchoice import CompletionResponseStreamChoice, CompletionResponseStreamChoiceTypedDict +from .completionresponsestreamchoice import ( + CompletionResponseStreamChoice, + CompletionResponseStreamChoiceTypedDict, +) from .usageinfo import UsageInfo, UsageInfoTypedDict from mistralai_gcp.types import BaseModel from typing import List, Optional, TypedDict @@ -15,13 +18,17 @@ class CompletionChunkTypedDict(TypedDict): object: NotRequired[str] created: NotRequired[int] usage: NotRequired[UsageInfoTypedDict] - + class CompletionChunk(BaseModel): id: str + model: str + choices: List[CompletionResponseStreamChoice] + object: Optional[str] = None + created: Optional[int] = None + usage: Optional[UsageInfo] = None - diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/completionevent.py b/packages/mistralai_gcp/src/mistralai_gcp/models/completionevent.py index 5a6e3c2..7086fce 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/completionevent.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/completionevent.py @@ -8,8 +8,7 @@ class CompletionEventTypedDict(TypedDict): data: CompletionChunkTypedDict - + class CompletionEvent(BaseModel): data: CompletionChunk - diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/completionresponsestreamchoice.py b/packages/mistralai_gcp/src/mistralai_gcp/models/completionresponsestreamchoice.py index 83a0b02..a09f67f 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/completionresponsestreamchoice.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/completionresponsestreamchoice.py @@ -9,17 +9,20 @@ FinishReason = Literal["stop", "length", "error", "tool_calls"] + class CompletionResponseStreamChoiceTypedDict(TypedDict): index: int delta: 
DeltaMessageTypedDict
     finish_reason: Nullable[FinishReason]
-    
+

 class CompletionResponseStreamChoice(BaseModel):
     index: int
+
     delta: DeltaMessage
+
     finish_reason: Nullable[FinishReason]
-    
+
     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
         optional_fields = []
@@ -33,9 +36,13 @@ def serialize_model(self, handler):
         for n, f in self.model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
+            serialized.pop(k, None)
             optional_nullable = k in optional_fields and k in nullable_fields
-            is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member
+            is_set = (
+                self.__pydantic_fields_set__.intersection({n})
+                or k in null_default_fields
+            )  # pylint: disable=no-member
             if val is not None and val != UNSET_SENTINEL:
                 m[k] = val
@@ -45,4 +52,3 @@ def serialize_model(self, handler):
                 m[k] = val
         return m
-
diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/contentchunk.py b/packages/mistralai_gcp/src/mistralai_gcp/models/contentchunk.py
index 2fb781e..49aeba4 100644
--- a/packages/mistralai_gcp/src/mistralai_gcp/models/contentchunk.py
+++ b/packages/mistralai_gcp/src/mistralai_gcp/models/contentchunk.py
@@ -8,4 +8,3 @@
 ContentChunk = TextChunk
-
diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/deltamessage.py b/packages/mistralai_gcp/src/mistralai_gcp/models/deltamessage.py
index 763b48e..314e52a 100644
--- a/packages/mistralai_gcp/src/mistralai_gcp/models/deltamessage.py
+++ b/packages/mistralai_gcp/src/mistralai_gcp/models/deltamessage.py
@@ -2,7 +2,13 @@
 from __future__ import annotations
 from .toolcall import ToolCall, ToolCallTypedDict
-from mistralai_gcp.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL
+from mistralai_gcp.types import (
+    BaseModel,
+    Nullable,
+    OptionalNullable,
+    UNSET,
+    UNSET_SENTINEL,
+)
 from pydantic import model_serializer
 from typing import List, Optional, TypedDict
 from typing_extensions import NotRequired
@@ -12,13 +18,15 @@ class DeltaMessageTypedDict(TypedDict):
     role: NotRequired[str]
     content: NotRequired[Nullable[str]]
     tool_calls: NotRequired[Nullable[List[ToolCallTypedDict]]]
-    
+

 class DeltaMessage(BaseModel):
     role: Optional[str] = None
+
     content: OptionalNullable[str] = UNSET
+
     tool_calls: OptionalNullable[List[ToolCall]] = UNSET
-    
+
     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
         optional_fields = ["role", "content", "tool_calls"]
@@ -32,9 +40,13 @@ def serialize_model(self, handler):
         for n, f in self.model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
+            serialized.pop(k, None)
             optional_nullable = k in optional_fields and k in nullable_fields
-            is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member
+            is_set = (
+                self.__pydantic_fields_set__.intersection({n})
+                or k in null_default_fields
+            )  # pylint: disable=no-member
             if val is not None and val != UNSET_SENTINEL:
                 m[k] = val
@@ -44,4 +56,3 @@ def serialize_model(self, handler):
                 m[k] = val
         return m
-
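Editorial note: the streaming models touched above (CompletionChunk, CompletionEvent, CompletionResponseStreamChoice, DeltaMessage) are what each server-sent event deserializes into: every event carries a chunk whose choices hold a DeltaMessage with the incremental content. A minimal consumption sketch; the client construction and model name are illustrative assumptions, not part of this patch:

    # Sketch only: assumes the top-level client from this repo; model name is illustrative.
    import os
    from mistralai import Mistral

    client = Mistral(api_key=os.environ["MISTRAL_API_KEY"])
    res = client.chat.stream(
        model="mistral-small-latest",
        messages=[{"role": "user", "content": "Say hello"}],
    )
    if res is not None:
        for event in res:  # each event.data is a CompletionChunk
            delta = event.data.choices[0].delta  # a DeltaMessage
            if delta.content:
                print(delta.content, end="", flush=True)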
diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionrequest.py b/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionrequest.py
index 7e0e1b5..8693e34 100644
--- a/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionrequest.py
+++ b/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionrequest.py
@@ -1,7 +1,13 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
 from __future__ import annotations
-from mistralai_gcp.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL
+from mistralai_gcp.types import (
+    BaseModel,
+    Nullable,
+    OptionalNullable,
+    UNSET,
+    UNSET_SENTINEL,
+)
 from pydantic import model_serializer
 from typing import List, Optional, TypedDict, Union
 from typing_extensions import NotRequired
@@ -39,7 +45,7 @@ class FIMCompletionRequestTypedDict(TypedDict):
     r"""The seed to use for random sampling. If set, different calls will generate deterministic results."""
     suffix: NotRequired[Nullable[str]]
     r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`."""
-    
+

 class FIMCompletionRequest(BaseModel):
     model: Nullable[str]
@@ -47,28 +53,46 @@ class FIMCompletionRequest(BaseModel):
     - `codestral-2405`
     - `codestral-latest`
     """
+
     prompt: str
     r"""The text/code to complete."""
+
     temperature: Optional[float] = 0.7
     r"""What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both."""
+
     top_p: Optional[float] = 1
     r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both."""
+
     max_tokens: OptionalNullable[int] = UNSET
     r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length."""
+
     min_tokens: OptionalNullable[int] = UNSET
     r"""The minimum number of tokens to generate in the completion."""
+
     stream: Optional[bool] = False
     r"""Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON."""
+
     stop: Optional[FIMCompletionRequestStop] = None
     r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array"""
+
     random_seed: OptionalNullable[int] = UNSET
     r"""The seed to use for random sampling. If set, different calls will generate deterministic results."""
+
     suffix: OptionalNullable[str] = UNSET
     r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. 
When `suffix` is not provided, the model will simply execute completion starting with `prompt`.""" - + @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["temperature", "top_p", "max_tokens", "min_tokens", "stream", "stop", "random_seed", "suffix"] + optional_fields = [ + "temperature", + "top_p", + "max_tokens", + "min_tokens", + "stream", + "stop", + "random_seed", + "suffix", + ] nullable_fields = ["model", "max_tokens", "min_tokens", "random_seed", "suffix"] null_default_fields = [] @@ -79,9 +103,13 @@ def serialize_model(self, handler): for n, f in self.model_fields.items(): k = f.alias or n val = serialized.get(k) + serialized.pop(k, None) optional_nullable = k in optional_fields and k in nullable_fields - is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member if val is not None and val != UNSET_SENTINEL: m[k] = val @@ -91,4 +119,3 @@ def serialize_model(self, handler): m[k] = val return m - diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionresponse.py b/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionresponse.py index 27fcc4f..ad28515 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionresponse.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionresponse.py @@ -15,13 +15,17 @@ class FIMCompletionResponseTypedDict(TypedDict): usage: UsageInfoTypedDict created: NotRequired[int] choices: NotRequired[List[ChatCompletionChoiceTypedDict]] - + class FIMCompletionResponse(BaseModel): id: str + object: str + model: str + usage: UsageInfo + created: Optional[int] = None + choices: Optional[List[ChatCompletionChoice]] = None - diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionstreamrequest.py b/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionstreamrequest.py index 3f2dc80..d05918c 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionstreamrequest.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionstreamrequest.py @@ -1,7 +1,13 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" from __future__ import annotations -from mistralai_gcp.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from mistralai_gcp.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) from pydantic import model_serializer from typing import List, Optional, TypedDict, Union from typing_extensions import NotRequired @@ -38,7 +44,7 @@ class FIMCompletionStreamRequestTypedDict(TypedDict): r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" suffix: NotRequired[Nullable[str]] r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`.""" - + class FIMCompletionStreamRequest(BaseModel): model: Nullable[str] @@ -46,27 +52,45 @@ class FIMCompletionStreamRequest(BaseModel): - `codestral-2405` - `codestral-latest` """ + prompt: str r"""The text/code to complete.""" + temperature: Optional[float] = 0.7 r"""What sampling temperature to use, between 0.0 and 1.0. 
Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both."""
+
     top_p: Optional[float] = 1
     r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both."""
+
     max_tokens: OptionalNullable[int] = UNSET
     r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length."""
+
     min_tokens: OptionalNullable[int] = UNSET
     r"""The minimum number of tokens to generate in the completion."""
+
     stream: Optional[bool] = True
+
     stop: Optional[FIMCompletionStreamRequestStop] = None
     r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array"""
+
     random_seed: OptionalNullable[int] = UNSET
     r"""The seed to use for random sampling. If set, different calls will generate deterministic results."""
+
     suffix: OptionalNullable[str] = UNSET
     r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`."""
-    
+
     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
-        optional_fields = ["temperature", "top_p", "max_tokens", "min_tokens", "stream", "stop", "random_seed", "suffix"]
+        optional_fields = [
+            "temperature",
+            "top_p",
+            "max_tokens",
+            "min_tokens",
+            "stream",
+            "stop",
+            "random_seed",
+            "suffix",
+        ]
         nullable_fields = ["model", "max_tokens", "min_tokens", "random_seed", "suffix"]
         null_default_fields = []
@@ -77,9 +101,13 @@ def serialize_model(self, handler):
         for n, f in self.model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
+            serialized.pop(k, None)
             optional_nullable = k in optional_fields and k in nullable_fields
-            is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member
+            is_set = (
+                self.__pydantic_fields_set__.intersection({n})
+                or k in null_default_fields
+            )  # pylint: disable=no-member
             if val is not None and val != UNSET_SENTINEL:
                 m[k] = val
@@ -89,4 +117,3 @@ def serialize_model(self, handler):
                 m[k] = val
         return m
-
diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/function.py b/packages/mistralai_gcp/src/mistralai_gcp/models/function.py
index 235eb34..533c3de 100644
--- a/packages/mistralai_gcp/src/mistralai_gcp/models/function.py
+++ b/packages/mistralai_gcp/src/mistralai_gcp/models/function.py
@@ -10,10 +10,11 @@ class FunctionTypedDict(TypedDict):
     name: str
     parameters: Dict[str, Any]
     description: NotRequired[str]
-    
+

 class Function(BaseModel):
     name: str
+
     parameters: Dict[str, Any]
+
     description: Optional[str] = ""
-
diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/functioncall.py b/packages/mistralai_gcp/src/mistralai_gcp/models/functioncall.py
index a036ad7..d8daaef 100644
--- a/packages/mistralai_gcp/src/mistralai_gcp/models/functioncall.py
+++ b/packages/mistralai_gcp/src/mistralai_gcp/models/functioncall.py
@@ -14,9 +14,9 @@ class FunctionCallTypedDict(TypedDict):
     name: str
     arguments: ArgumentsTypedDict
-    
+

 class FunctionCall(BaseModel):
     name: str
+
     arguments: Arguments
-
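Editorial note: Function and FunctionCall are the two halves of tool use: Function carries the JSON-schema parameters the model is allowed to call, and FunctionCall comes back with the chosen name plus its arguments. A hedged sketch of wiring them through a Tool, reusing the client from the sketch above; the weather tool and model name are made-up examples:

    # Sketch only: tool definition and model name are illustrative.
    import json

    tool = {
        "type": "function",
        "function": {
            "name": "get_weather",
            "description": "Look up current weather for a city",
            "parameters": {
                "type": "object",
                "properties": {"city": {"type": "string"}},
                "required": ["city"],
            },
        },
    }
    res = client.chat.complete(
        model="mistral-small-latest",
        messages=[{"role": "user", "content": "Weather in Paris?"}],
        tools=[tool],
    )
    call = res.choices[0].message.tool_calls[0]  # a ToolCall
    # FunctionCall.arguments is a union; it may arrive as a JSON string.
    args = call.function.arguments
    if isinstance(args, str):
        args = json.loads(args)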
diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/functionname.py b/packages/mistralai_gcp/src/mistralai_gcp/models/functionname.py
index c7b8c55..47af74a 100644
--- a/packages/mistralai_gcp/src/mistralai_gcp/models/functionname.py
+++ b/packages/mistralai_gcp/src/mistralai_gcp/models/functionname.py
@@ -7,12 +7,11 @@
 class FunctionNameTypedDict(TypedDict):
     r"""this restriction of `Function` is used to select a specific function to call"""
-    
+
     name: str
-    
+

 class FunctionName(BaseModel):
     r"""this restriction of `Function` is used to select a specific function to call"""
-    
+
     name: str
-
diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/httpvalidationerror.py b/packages/mistralai_gcp/src/mistralai_gcp/models/httpvalidationerror.py
index 0347dc1..68b1f78 100644
--- a/packages/mistralai_gcp/src/mistralai_gcp/models/httpvalidationerror.py
+++ b/packages/mistralai_gcp/src/mistralai_gcp/models/httpvalidationerror.py
@@ -6,13 +6,14 @@
 from mistralai_gcp.types import BaseModel
 from typing import List, Optional

+
 class HTTPValidationErrorData(BaseModel):
     detail: Optional[List[ValidationError]] = None

-
 class HTTPValidationError(Exception):
     r"""Validation Error"""
+
     data: HTTPValidationErrorData

     def __init__(self, data: HTTPValidationErrorData):
@@ -20,4 +21,3 @@ def __init__(self, data: HTTPValidationErrorData):
     def __str__(self) -> str:
         return utils.marshal_json(self.data, HTTPValidationErrorData)
-
diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/responseformat.py b/packages/mistralai_gcp/src/mistralai_gcp/models/responseformat.py
index 06d9261..0398e9b 100644
--- a/packages/mistralai_gcp/src/mistralai_gcp/models/responseformat.py
+++ b/packages/mistralai_gcp/src/mistralai_gcp/models/responseformat.py
@@ -10,9 +10,8 @@
 class ResponseFormatTypedDict(TypedDict):
     type: NotRequired[ResponseFormats]
     r"""An object specifying the format that the model must output. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message."""
-    
+

 class ResponseFormat(BaseModel):
     type: Optional[ResponseFormats] = None
     r"""An object specifying the format that the model must output. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. 
When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message."""
-
diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/security.py b/packages/mistralai_gcp/src/mistralai_gcp/models/security.py
index cd4d8f3..c9c0e0f 100644
--- a/packages/mistralai_gcp/src/mistralai_gcp/models/security.py
+++ b/packages/mistralai_gcp/src/mistralai_gcp/models/security.py
@@ -9,8 +9,17 @@
 class SecurityTypedDict(TypedDict):
     api_key: str
-    
+

 class Security(BaseModel):
-    api_key: Annotated[str, FieldMetadata(security=SecurityMetadata(scheme=True, scheme_type="http", sub_type="bearer", field_name="Authorization"))]
-    
+    api_key: Annotated[
+        str,
+        FieldMetadata(
+            security=SecurityMetadata(
+                scheme=True,
+                scheme_type="http",
+                sub_type="bearer",
+                field_name="Authorization",
+            )
+        ),
+    ]
diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/systemmessage.py b/packages/mistralai_gcp/src/mistralai_gcp/models/systemmessage.py
index 8aa9833..f2578a7 100644
--- a/packages/mistralai_gcp/src/mistralai_gcp/models/systemmessage.py
+++ b/packages/mistralai_gcp/src/mistralai_gcp/models/systemmessage.py
@@ -18,9 +18,11 @@
 class SystemMessageTypedDict(TypedDict):
     content: ContentTypedDict
-    
+

 class SystemMessage(BaseModel):
     content: Content
+
+    # fmt: off
     ROLE: Annotated[Final[Optional[Role]], pydantic.Field(alias="role")] = "system" # type: ignore
-    
+    # fmt: on
diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/textchunk.py b/packages/mistralai_gcp/src/mistralai_gcp/models/textchunk.py
index f77230e..5c3774c 100644
--- a/packages/mistralai_gcp/src/mistralai_gcp/models/textchunk.py
+++ b/packages/mistralai_gcp/src/mistralai_gcp/models/textchunk.py
@@ -9,11 +9,14 @@
 Type = Literal["text"]

+
 class TextChunkTypedDict(TypedDict):
     text: str
-    
+

 class TextChunk(BaseModel):
     text: str
+
+    # fmt: off
     TYPE: Annotated[Final[Optional[Type]], pydantic.Field(alias="type")] = "text" # type: ignore
-    
+    # fmt: on
diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/tool.py b/packages/mistralai_gcp/src/mistralai_gcp/models/tool.py
index ff1b774..24e1a9f 100644
--- a/packages/mistralai_gcp/src/mistralai_gcp/models/tool.py
+++ b/packages/mistralai_gcp/src/mistralai_gcp/models/tool.py
@@ -13,9 +13,11 @@
 class ToolTypedDict(TypedDict):
     function: FunctionTypedDict
     type: NotRequired[ToolTypes]
-    
+

 class Tool(BaseModel):
     function: Function
-    type: Annotated[Optional[ToolTypes], PlainValidator(validate_open_enum(False))] = None
-    
+
+    type: Annotated[Optional[ToolTypes], PlainValidator(validate_open_enum(False))] = (
+        None
+    )
diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/toolcall.py b/packages/mistralai_gcp/src/mistralai_gcp/models/toolcall.py
index c5b4c16..6374f2c 100644
--- a/packages/mistralai_gcp/src/mistralai_gcp/models/toolcall.py
+++ b/packages/mistralai_gcp/src/mistralai_gcp/models/toolcall.py
@@ -14,10 +14,13 @@
 class ToolCallTypedDict(TypedDict):
     function: FunctionCallTypedDict
     id: NotRequired[str]
     type: NotRequired[ToolTypes]
-    
+

 class ToolCall(BaseModel):
     function: FunctionCall
+
     id: Optional[str] = "null"
-    type: Annotated[Optional[ToolTypes], PlainValidator(validate_open_enum(False))] = None
-    
+
+    type: Annotated[Optional[ToolTypes], PlainValidator(validate_open_enum(False))] = (
+        None
+    )
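Editorial note: the Security model above is how the generated SDK maps a plain api_key field onto an `Authorization: Bearer ...` header via the SecurityMetadata annotation; callers never build the header themselves. Roughly, as a sketch (the key value is a placeholder, and the import path assumes the layout shown in this patch):

    # Sketch: get_security (patched later in this diff) turns the annotated
    # model into request headers; api_key becomes "Authorization: Bearer <key>".
    from mistralai_gcp.models import Security
    from mistralai_gcp.utils.security import get_security

    headers, _query_params = get_security(Security(api_key="sk-example"))
    # headers should now contain {"Authorization": "Bearer sk-example"}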
diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/toolchoice.py b/packages/mistralai_gcp/src/mistralai_gcp/models/toolchoice.py
index b6e3688..bd6dbe7 100644
--- a/packages/mistralai_gcp/src/mistralai_gcp/models/toolchoice.py
+++ b/packages/mistralai_gcp/src/mistralai_gcp/models/toolchoice.py
@@ -12,16 +12,18 @@
 class ToolChoiceTypedDict(TypedDict):
     r"""ToolChoice is either a ToolChoiceEnum or a ToolChoice"""
-    
+
     function: FunctionNameTypedDict
     r"""this restriction of `Function` is used to select a specific function to call"""
     type: NotRequired[ToolTypes]
-    
+

 class ToolChoice(BaseModel):
     r"""ToolChoice is either a ToolChoiceEnum or a ToolChoice"""
-    
+
     function: FunctionName
     r"""this restriction of `Function` is used to select a specific function to call"""
-    type: Annotated[Optional[ToolTypes], PlainValidator(validate_open_enum(False))] = None
-    
+
+    type: Annotated[Optional[ToolTypes], PlainValidator(validate_open_enum(False))] = (
+        None
+    )
diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/toolmessage.py b/packages/mistralai_gcp/src/mistralai_gcp/models/toolmessage.py
index ec231f2..27ac1db 100644
--- a/packages/mistralai_gcp/src/mistralai_gcp/models/toolmessage.py
+++ b/packages/mistralai_gcp/src/mistralai_gcp/models/toolmessage.py
@@ -1,7 +1,13 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
 from __future__ import annotations
-from mistralai_gcp.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL
+from mistralai_gcp.types import (
+    BaseModel,
+    Nullable,
+    OptionalNullable,
+    UNSET,
+    UNSET_SENTINEL,
+)
 import pydantic
 from pydantic import model_serializer
 from typing import Final, Literal, Optional, TypedDict
@@ -10,18 +16,24 @@
 ToolMessageRole = Literal["tool"]

+
 class ToolMessageTypedDict(TypedDict):
     content: str
     tool_call_id: NotRequired[Nullable[str]]
     name: NotRequired[Nullable[str]]
-    
+

 class ToolMessage(BaseModel):
     content: str
+
+    # fmt: off
     ROLE: Annotated[Final[Optional[ToolMessageRole]], pydantic.Field(alias="role")] = "tool" # type: ignore
+    # fmt: on
+
     tool_call_id: OptionalNullable[str] = UNSET
+
     name: OptionalNullable[str] = UNSET
-    
+
     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
         optional_fields = ["role", "tool_call_id", "name"]
@@ -35,9 +47,13 @@ def serialize_model(self, handler):
         for n, f in self.model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
+            serialized.pop(k, None)
             optional_nullable = k in optional_fields and k in nullable_fields
-            is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member
+            is_set = (
+                self.__pydantic_fields_set__.intersection({n})
+                or k in null_default_fields
+            )  # pylint: disable=no-member
             if val is not None and val != UNSET_SENTINEL:
                 m[k] = val
@@ -47,4 +63,3 @@ def serialize_model(self, handler):
                 m[k] = val
         return m
-
diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/usageinfo.py b/packages/mistralai_gcp/src/mistralai_gcp/models/usageinfo.py
index 43877c9..d63486b 100644
--- a/packages/mistralai_gcp/src/mistralai_gcp/models/usageinfo.py
+++ b/packages/mistralai_gcp/src/mistralai_gcp/models/usageinfo.py
@@ -9,10 +9,11 @@
 class UsageInfoTypedDict(TypedDict):
     prompt_tokens: int
     completion_tokens: int
     total_tokens: int
-    
+

 class UsageInfo(BaseModel):
     prompt_tokens: int
+
     completion_tokens: int
+
     total_tokens: int
-
diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/usermessage.py b/packages/mistralai_gcp/src/mistralai_gcp/models/usermessage.py
index 7e6ac85..d3e931b 100644
--- a/packages/mistralai_gcp/src/mistralai_gcp/models/usermessage.py
+++ b/packages/mistralai_gcp/src/mistralai_gcp/models/usermessage.py
@@ -18,9 +18,11 @@
 class UserMessageTypedDict(TypedDict):
     content: 
UserMessageContentTypedDict - + class UserMessage(BaseModel): content: UserMessageContent + + # fmt: off ROLE: Annotated[Final[Optional[UserMessageRole]], pydantic.Field(alias="role")] = "user" # type: ignore - + # fmt: on diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/validationerror.py b/packages/mistralai_gcp/src/mistralai_gcp/models/validationerror.py index 23008f4..23e9595 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/validationerror.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/validationerror.py @@ -15,10 +15,11 @@ class ValidationErrorTypedDict(TypedDict): loc: List[LocTypedDict] msg: str type: str - + class ValidationError(BaseModel): loc: List[Loc] + msg: str + type: str - diff --git a/packages/mistralai_gcp/src/mistralai_gcp/sdkconfiguration.py b/packages/mistralai_gcp/src/mistralai_gcp/sdkconfiguration.py index f0b8e35..2e1d046 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/sdkconfiguration.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/sdkconfiguration.py @@ -1,6 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - from ._hooks import SDKHooks from .httpclient import AsyncHttpClient, HttpClient from .utils import Logger, RetryConfig, remove_suffix @@ -14,7 +13,7 @@ SERVER_PROD = "prod" r"""Production server""" SERVERS = { - SERVER_PROD: "https://api.mistral.ai", + SERVER_PROD: "https://api.mistral.ai", } """Contains the list of servers available to the SDK""" @@ -24,14 +23,14 @@ class SDKConfiguration: client: HttpClient async_client: AsyncHttpClient debug_logger: Logger - security: Optional[Union[models.Security,Callable[[], models.Security]]] = None + security: Optional[Union[models.Security, Callable[[], models.Security]]] = None server_url: Optional[str] = "" server: Optional[str] = "" language: str = "python" openapi_doc_version: str = "0.0.2" - sdk_version: str = "1.1.1" - gen_version: str = "2.404.11" - user_agent: str = "speakeasy-sdk/python 1.1.1 2.404.11 0.0.2 mistralai-gcp" + sdk_version: str = "1.2.1" + gen_version: str = "2.415.6" + user_agent: str = "speakeasy-sdk/python 1.2.1 2.415.6 0.0.2 mistralai-gcp" retry_config: OptionalNullable[RetryConfig] = Field(default_factory=lambda: UNSET) timeout_ms: Optional[int] = None @@ -45,10 +44,9 @@ def get_server_details(self) -> Tuple[str, Dict[str, str]]: self.server = SERVER_PROD if self.server not in SERVERS: - raise ValueError(f"Invalid server \"{self.server}\"") + raise ValueError(f'Invalid server "{self.server}"') return SERVERS[self.server], {} - def get_hooks(self) -> SDKHooks: return self._hooks diff --git a/packages/mistralai_gcp/src/mistralai_gcp/utils/__init__.py b/packages/mistralai_gcp/src/mistralai_gcp/utils/__init__.py index 507a600..6c26aeb 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/utils/__init__.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/utils/__init__.py @@ -33,7 +33,12 @@ validate_open_enum, ) from .url import generate_url, template_url, remove_suffix -from .values import get_global_from_env, match_content_type, match_status_codes, match_response +from .values import ( + get_global_from_env, + match_content_type, + match_status_codes, + match_response, +) from .logger import Logger, get_body_content, get_default_logger __all__ = [ diff --git a/packages/mistralai_gcp/src/mistralai_gcp/utils/logger.py b/packages/mistralai_gcp/src/mistralai_gcp/utils/logger.py index 15d12ac..b661aff 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/utils/logger.py +++ 
b/packages/mistralai_gcp/src/mistralai_gcp/utils/logger.py
@@ -3,17 +3,20 @@
 import httpx
 from typing import Any, Protocol

+
 class Logger(Protocol):
     def debug(self, msg: str, *args: Any, **kwargs: Any) -> None:
         pass

+
 class NoOpLogger:
     def debug(self, msg: str, *args: Any, **kwargs: Any) -> None:
         pass

+
 def get_body_content(req: httpx.Request) -> str:
     return "" if not hasattr(req, "_content") else str(req.content)

+
 def get_default_logger() -> Logger:
     return NoOpLogger()
-
diff --git a/packages/mistralai_gcp/src/mistralai_gcp/utils/retries.py b/packages/mistralai_gcp/src/mistralai_gcp/utils/retries.py
index a06f927..4d60867 100644
--- a/packages/mistralai_gcp/src/mistralai_gcp/utils/retries.py
+++ b/packages/mistralai_gcp/src/mistralai_gcp/utils/retries.py
@@ -1,5 +1,6 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

+import asyncio
 import random
 import time
 from typing import List
@@ -212,5 +213,5 @@ async def retry_with_backoff_async(
                 raise
         sleep = (initial_interval / 1000) * exponent**retries + random.uniform(0, 1)
         sleep = min(sleep, max_interval / 1000)
-        time.sleep(sleep)
+        await asyncio.sleep(sleep)
         retries += 1
diff --git a/packages/mistralai_gcp/src/mistralai_gcp/utils/security.py b/packages/mistralai_gcp/src/mistralai_gcp/utils/security.py
index aab4cb6..295a3f4 100644
--- a/packages/mistralai_gcp/src/mistralai_gcp/utils/security.py
+++ b/packages/mistralai_gcp/src/mistralai_gcp/utils/security.py
@@ -16,7 +16,6 @@
 )

-
 def get_security(security: Any) -> Tuple[Dict[str, str], Dict[str, List[str]]]:
     headers: Dict[str, str] = {}
     query_params: Dict[str, List[str]] = {}
@@ -42,8 +41,10 @@ def get_security(security: Any) -> Tuple[Dict[str, str], Dict[str, List[str]]]:
                 _parse_security_option(headers, query_params, value)
             return headers, query_params
         if metadata.scheme:
-            # Special case for basic auth which could be a flattened model
-            if metadata.sub_type == "basic" and not isinstance(value, BaseModel):
+            # Special case for basic auth or custom auth which could be a flattened model
+            if metadata.sub_type in ["basic", "custom"] and not isinstance(
+                value, BaseModel
+            ):
                 _parse_security_scheme(headers, query_params, metadata, name, security)
             else:
                 _parse_security_scheme(headers, query_params, metadata, name, value)
@@ -80,9 +81,12 @@ def _parse_security_scheme(
     sub_type = scheme_metadata.sub_type

     if isinstance(scheme, BaseModel):
-        if scheme_type == "http" and sub_type == "basic":
-            _parse_basic_auth_scheme(headers, scheme)
-            return
+        if scheme_type == "http":
+            if sub_type == "basic":
+                _parse_basic_auth_scheme(headers, scheme)
+                return
+            if sub_type == "custom":
+                return

         scheme_fields: Dict[str, FieldInfo] = scheme.__class__.model_fields
         for name in scheme_fields:
@@ -131,6 +135,8 @@ def _parse_security_scheme_value(
     elif scheme_type == "http":
         if sub_type == "bearer":
             headers[header_name] = _apply_bearer(value)
+        elif sub_type == "custom":
+            return
         else:
             raise ValueError("sub type {sub_type} not supported")
     else:
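Editorial note: the retries.py hunk above is a real behavior fix, not formatting. `time.sleep` inside `retry_with_backoff_async` blocked the entire event loop between attempts, so every other coroutine stalled while the SDK backed off; `await asyncio.sleep` yields control instead. The shape of the loop, reduced to a sketch (names and parameters simplified from the generated helper):

    # Simplified sketch of the exponential-backoff loop being patched;
    # the real generated helper takes more parameters and retry predicates.
    import asyncio
    import random

    async def retry_async(op, initial_interval=500, exponent=1.5,
                          max_interval=60000, max_tries=5):
        retries = 0
        while True:
            try:
                return await op()
            except Exception:
                if retries >= max_tries - 1:
                    raise
            sleep = (initial_interval / 1000) * exponent**retries + random.uniform(0, 1)
            sleep = min(sleep, max_interval / 1000)
            await asyncio.sleep(sleep)  # the fix: non-blocking, unlike time.sleep
            retries += 1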
diff --git a/pyproject.toml b/pyproject.toml
index ead6358..8ec2799 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "mistralai"
-version = "1.0.4"
+version = "1.1.0"
 description = "Python Client SDK for the Mistral AI API."
 authors = ["Mistral"]
 readme = "README.md"
diff --git a/src/mistralai/_hooks/sdkhooks.py b/src/mistralai/_hooks/sdkhooks.py
index 24b0d08..1f9a931 100644
--- a/src/mistralai/_hooks/sdkhooks.py
+++ b/src/mistralai/_hooks/sdkhooks.py
@@ -1,11 +1,21 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

 import httpx
-from .types import SDKInitHook, BeforeRequestContext, BeforeRequestHook, AfterSuccessContext, AfterSuccessHook, AfterErrorContext, AfterErrorHook, Hooks
+from .types import (
+    SDKInitHook,
+    BeforeRequestContext,
+    BeforeRequestHook,
+    AfterSuccessContext,
+    AfterSuccessHook,
+    AfterErrorContext,
+    AfterErrorHook,
+    Hooks,
+)
 from .registration import init_hooks
 from typing import List, Optional, Tuple
 from mistralai.httpclient import HttpClient

+
 class SDKHooks(Hooks):
     def __init__(self) -> None:
         self.sdk_init_hooks: List[SDKInitHook] = []
@@ -31,7 +41,9 @@ def sdk_init(self, base_url: str, client: HttpClient) -> Tuple[str, HttpClient]:
             base_url, client = hook.sdk_init(base_url, client)
         return base_url, client

-    def before_request(self, hook_ctx: BeforeRequestContext, request: httpx.Request) -> httpx.Request:
+    def before_request(
+        self, hook_ctx: BeforeRequestContext, request: httpx.Request
+    ) -> httpx.Request:
         for hook in self.before_request_hooks:
             out = hook.before_request(hook_ctx, request)
             if isinstance(out, Exception):
@@ -40,7 +52,9 @@ def before_request(self, hook_ctx: BeforeRequestContext, request: httpx.Request)

         return request

-    def after_success(self, hook_ctx: AfterSuccessContext, response: httpx.Response) -> httpx.Response:
+    def after_success(
+        self, hook_ctx: AfterSuccessContext, response: httpx.Response
+    ) -> httpx.Response:
         for hook in self.after_success_hooks:
             out = hook.after_success(hook_ctx, response)
             if isinstance(out, Exception):
@@ -48,7 +62,12 @@ def after_success(self, hook_ctx: AfterSuccessContext, response: httpx.Response)
             response = out
         return response

-    def after_error(self, hook_ctx: AfterErrorContext, response: Optional[httpx.Response], error: Optional[Exception]) -> Tuple[Optional[httpx.Response], Optional[Exception]]:
+    def after_error(
+        self,
+        hook_ctx: AfterErrorContext,
+        response: Optional[httpx.Response],
+        error: Optional[Exception],
+    ) -> Tuple[Optional[httpx.Response], Optional[Exception]]:
         for hook in self.after_error_hooks:
             result = hook.after_error(hook_ctx, response, error)
             if isinstance(result, Exception):
diff --git a/src/mistralai/_hooks/types.py b/src/mistralai/_hooks/types.py
index e9391f3..fe448e9 100644
--- a/src/mistralai/_hooks/types.py
+++ b/src/mistralai/_hooks/types.py
@@ -1,6 +1,5 @@
 """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - from abc import ABC, abstractmethod import httpx from mistralai.httpclient import HttpClient @@ -12,7 +11,12 @@ class HookContext: oauth2_scopes: Optional[List[str]] = None security_source: Optional[Union[Any, Callable[[], Any]]] = None - def __init__(self, operation_id: str, oauth2_scopes: Optional[List[str]], security_source: Optional[Union[Any, Callable[[], Any]]]): + def __init__( + self, + operation_id: str, + oauth2_scopes: Optional[List[str]], + security_source: Optional[Union[Any, Callable[[], Any]]], + ): self.operation_id = operation_id self.oauth2_scopes = oauth2_scopes self.security_source = security_source @@ -20,18 +24,23 @@ def __init__(self, operation_id: str, oauth2_scopes: Optional[List[str]], securi class BeforeRequestContext(HookContext): def __init__(self, hook_ctx: HookContext): - super().__init__(hook_ctx.operation_id, hook_ctx.oauth2_scopes, hook_ctx.security_source) + super().__init__( + hook_ctx.operation_id, hook_ctx.oauth2_scopes, hook_ctx.security_source + ) class AfterSuccessContext(HookContext): def __init__(self, hook_ctx: HookContext): - super().__init__(hook_ctx.operation_id, hook_ctx.oauth2_scopes, hook_ctx.security_source) - + super().__init__( + hook_ctx.operation_id, hook_ctx.oauth2_scopes, hook_ctx.security_source + ) class AfterErrorContext(HookContext): def __init__(self, hook_ctx: HookContext): - super().__init__(hook_ctx.operation_id, hook_ctx.oauth2_scopes, hook_ctx.security_source) + super().__init__( + hook_ctx.operation_id, hook_ctx.oauth2_scopes, hook_ctx.security_source + ) class SDKInitHook(ABC): @@ -42,19 +51,28 @@ def sdk_init(self, base_url: str, client: HttpClient) -> Tuple[str, HttpClient]: class BeforeRequestHook(ABC): @abstractmethod - def before_request(self, hook_ctx: BeforeRequestContext, request: httpx.Request) -> Union[httpx.Request, Exception]: + def before_request( + self, hook_ctx: BeforeRequestContext, request: httpx.Request + ) -> Union[httpx.Request, Exception]: pass class AfterSuccessHook(ABC): @abstractmethod - def after_success(self, hook_ctx: AfterSuccessContext, response: httpx.Response) -> Union[httpx.Response, Exception]: + def after_success( + self, hook_ctx: AfterSuccessContext, response: httpx.Response + ) -> Union[httpx.Response, Exception]: pass class AfterErrorHook(ABC): @abstractmethod - def after_error(self, hook_ctx: AfterErrorContext, response: Optional[httpx.Response], error: Optional[Exception]) -> Union[Tuple[Optional[httpx.Response], Optional[Exception]], Exception]: + def after_error( + self, + hook_ctx: AfterErrorContext, + response: Optional[httpx.Response], + error: Optional[Exception], + ) -> Union[Tuple[Optional[httpx.Response], Optional[Exception]], Exception]: pass diff --git a/src/mistralai/agents.py b/src/mistralai/agents.py index 8c0ffb8..05d1775 100644 --- a/src/mistralai/agents.py +++ b/src/mistralai/agents.py @@ -7,22 +7,40 @@ from mistralai.utils import eventstreaming, get_security_from_env from typing import Any, AsyncGenerator, Generator, List, Optional, Union + class Agents(BaseSDK): r"""Agents API.""" - - + def complete( - self, *, - messages: Union[List[models.AgentsCompletionRequestMessages], List[models.AgentsCompletionRequestMessagesTypedDict]], + self, + *, + messages: Union[ + List[models.AgentsCompletionRequestMessages], + List[models.AgentsCompletionRequestMessagesTypedDict], + ], agent_id: str, max_tokens: OptionalNullable[int] = UNSET, min_tokens: OptionalNullable[int] = UNSET, stream: Optional[bool] = False, - stop: 
Optional[Union[models.AgentsCompletionRequestStop, models.AgentsCompletionRequestStopTypedDict]] = None, + stop: Optional[ + Union[ + models.AgentsCompletionRequestStop, + models.AgentsCompletionRequestStopTypedDict, + ] + ] = None, random_seed: OptionalNullable[int] = UNSET, - response_format: Optional[Union[models.ResponseFormat, models.ResponseFormatTypedDict]] = None, - tools: OptionalNullable[Union[List[models.Tool], List[models.ToolTypedDict]]] = UNSET, - tool_choice: Optional[Union[models.AgentsCompletionRequestToolChoice, models.AgentsCompletionRequestToolChoiceTypedDict]] = None, + response_format: Optional[ + Union[models.ResponseFormat, models.ResponseFormatTypedDict] + ] = None, + tools: OptionalNullable[ + Union[List[models.Tool], List[models.ToolTypedDict]] + ] = UNSET, + tool_choice: Optional[ + Union[ + models.AgentsCompletionRequestToolChoice, + models.AgentsCompletionRequestToolChoiceTypedDict, + ] + ] = None, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -36,9 +54,9 @@ def complete( :param stream: Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. - :param response_format: - :param tools: - :param tool_choice: + :param response_format: + :param tools: + :param tool_choice: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -47,23 +65,29 @@ def complete( url_variables = None if timeout_ms is None: timeout_ms = self.sdk_configuration.timeout_ms - + if server_url is not None: base_url = server_url - + request = models.AgentsCompletionRequest( max_tokens=max_tokens, min_tokens=min_tokens, stream=stream, stop=stop, random_seed=random_seed, - messages=utils.get_pydantic_model(messages, List[models.AgentsCompletionRequestMessages]), - response_format=utils.get_pydantic_model(response_format, Optional[models.ResponseFormat]), + messages=utils.get_pydantic_model( + messages, List[models.AgentsCompletionRequestMessages] + ), + response_format=utils.get_pydantic_model( + response_format, Optional[models.ResponseFormat] + ), tools=utils.get_pydantic_model(tools, OptionalNullable[List[models.Tool]]), - tool_choice=utils.get_pydantic_model(tool_choice, Optional[models.AgentsCompletionRequestToolChoice]), + tool_choice=utils.get_pydantic_model( + tool_choice, Optional[models.AgentsCompletionRequestToolChoice] + ), agent_id=agent_id, ) - + req = self.build_request( method="POST", path="/v1/agents/completions", @@ -76,57 +100,84 @@ def complete( user_agent_header="user-agent", accept_header_value="application/json", security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body(request, False, False, "json", models.AgentsCompletionRequest), + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.AgentsCompletionRequest + ), 
timeout_ms=timeout_ms,
         )
-        
+
         if retries == UNSET:
             if self.sdk_configuration.retry_config is not UNSET:
                 retries = self.sdk_configuration.retry_config

         retry_config = None
         if isinstance(retries, utils.RetryConfig):
-            retry_config = (retries, [
-                "429",
-                "500",
-                "502",
-                "503",
-                "504"
-            ])
-        
+            retry_config = (retries, ["429", "500", "502", "503", "504"])
+
         http_res = self.do_request(
-            hook_ctx=HookContext(operation_id="agents_completion_v1_agents_completions_post", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)),
+            hook_ctx=HookContext(
+                operation_id="agents_completion_v1_agents_completions_post",
+                oauth2_scopes=[],
+                security_source=get_security_from_env(
+                    self.sdk_configuration.security, models.Security
+                ),
+            ),
             request=req,
-            error_status_codes=["422","4XX","5XX"],
-            retry_config=retry_config
+            error_status_codes=["422", "4XX", "5XX"],
+            retry_config=retry_config,
         )
-        
+
         data: Any = None
         if utils.match_response(http_res, "200", "application/json"):
-            return utils.unmarshal_json(http_res.text, Optional[models.ChatCompletionResponse])
+            return utils.unmarshal_json(
+                http_res.text, Optional[models.ChatCompletionResponse]
+            )
         if utils.match_response(http_res, "422", "application/json"):
             data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData)
             raise models.HTTPValidationError(data=data)
-        if utils.match_response(http_res, ["4XX","5XX"], "*"):
-            raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res)
-        
+        if utils.match_response(http_res, ["4XX", "5XX"], "*"):
+            raise models.SDKError(
+                "API error occurred", http_res.status_code, http_res.text, http_res
+            )
+
         content_type = http_res.headers.get("Content-Type")
-        raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res)
+        raise models.SDKError(
+            f"Unexpected response received (code: {http_res.status_code}, type: {content_type})",
+            http_res.status_code,
+            http_res.text,
+            http_res,
+        )
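Editorial note: from the caller's side all of this plumbing reduces to a single method call; `complete` builds the `AgentsCompletionRequest`, applies the retry policy, and unmarshals the `ChatCompletionResponse`. A usage sketch; the agent id is a placeholder, not a value from this patch:

    # Sketch: "ag_your_agent_id" stands in for a real agent id.
    import os
    from mistralai import Mistral

    client = Mistral(api_key=os.environ["MISTRAL_API_KEY"])
    res = client.agents.complete(
        agent_id="ag_your_agent_id",
        messages=[{"role": "user", "content": "Summarize our last conversation."}],
    )
    if res is not None:
        print(res.choices[0].message.content)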
-
-
     async def complete_async(
-        self, *,
-        messages: Union[List[models.AgentsCompletionRequestMessages], List[models.AgentsCompletionRequestMessagesTypedDict]],
+        self,
+        *,
+        messages: Union[
+            List[models.AgentsCompletionRequestMessages],
+            List[models.AgentsCompletionRequestMessagesTypedDict],
+        ],
         agent_id: str,
         max_tokens: OptionalNullable[int] = UNSET,
         min_tokens: OptionalNullable[int] = UNSET,
         stream: Optional[bool] = False,
-        stop: Optional[Union[models.AgentsCompletionRequestStop, models.AgentsCompletionRequestStopTypedDict]] = None,
+        stop: Optional[
+            Union[
+                models.AgentsCompletionRequestStop,
+                models.AgentsCompletionRequestStopTypedDict,
+            ]
+        ] = None,
         random_seed: OptionalNullable[int] = UNSET,
-        response_format: Optional[Union[models.ResponseFormat, models.ResponseFormatTypedDict]] = None,
-        tools: OptionalNullable[Union[List[models.Tool], List[models.ToolTypedDict]]] = UNSET,
-        tool_choice: Optional[Union[models.AgentsCompletionRequestToolChoice, models.AgentsCompletionRequestToolChoiceTypedDict]] = None,
+        response_format: Optional[
+            Union[models.ResponseFormat, models.ResponseFormatTypedDict]
+        ] = None,
+        tools: OptionalNullable[
+            Union[List[models.Tool], List[models.ToolTypedDict]]
+        ] = UNSET,
+        tool_choice: Optional[
+            Union[
+                models.AgentsCompletionRequestToolChoice,
+                models.AgentsCompletionRequestToolChoiceTypedDict,
+            ]
+        ] = None,
         retries: OptionalNullable[utils.RetryConfig] = UNSET,
         server_url: Optional[str] = None,
         timeout_ms: Optional[int] = None,
@@ -140,9 +191,9 @@ async def complete_async(
         :param stream: Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.
         :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array
         :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results.
-        :param response_format: 
-        :param tools: 
-        :param tool_choice: 
+        :param response_format:
+        :param tools:
+        :param tool_choice:
         :param retries: Override the default retry configuration for this method
         :param server_url: Override the default server URL for this method
         :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
@@ -151,24 +202,30 @@ async def complete_async(
         url_variables = None
         if timeout_ms is None:
             timeout_ms = self.sdk_configuration.timeout_ms
-        
+
         if server_url is not None:
             base_url = server_url
-        
+
         request = models.AgentsCompletionRequest(
             max_tokens=max_tokens,
             min_tokens=min_tokens,
             stream=stream,
             stop=stop,
             random_seed=random_seed,
-            messages=utils.get_pydantic_model(messages, List[models.AgentsCompletionRequestMessages]),
-            response_format=utils.get_pydantic_model(response_format, Optional[models.ResponseFormat]),
+            messages=utils.get_pydantic_model(
+                messages, List[models.AgentsCompletionRequestMessages]
+            ),
+            response_format=utils.get_pydantic_model(
+                response_format, Optional[models.ResponseFormat]
+            ),
             tools=utils.get_pydantic_model(tools, OptionalNullable[List[models.Tool]]),
-            tool_choice=utils.get_pydantic_model(tool_choice, Optional[models.AgentsCompletionRequestToolChoice]),
+            tool_choice=utils.get_pydantic_model(
+                tool_choice, Optional[models.AgentsCompletionRequestToolChoice]
+            ),
             agent_id=agent_id,
         )
-        
-        req = self.build_request(
+
+        req = self.build_request_async(
             method="POST",
             path="/v1/agents/completions",
             base_url=base_url,
@@ -180,57 +237,84 @@ async def complete_async(
             user_agent_header="user-agent",
             accept_header_value="application/json",
             security=self.sdk_configuration.security,
-            get_serialized_body=lambda: utils.serialize_request_body(request, False, False, "json", models.AgentsCompletionRequest),
+            get_serialized_body=lambda: utils.serialize_request_body(
+                request, False, False, "json", models.AgentsCompletionRequest
+            ),
             timeout_ms=timeout_ms,
         )
-        
+
         if retries == UNSET:
             if self.sdk_configuration.retry_config is not UNSET:
                 retries = self.sdk_configuration.retry_config

         retry_config = None
         if isinstance(retries, utils.RetryConfig):
-            retry_config = (retries, [
-                "429",
-                "500",
-                "502",
-                "503",
-                "504"
-            ])
-        
+            retry_config = (retries, ["429", "500", "502", "503", "504"])
+
         http_res = await self.do_request_async(
-            hook_ctx=HookContext(operation_id="agents_completion_v1_agents_completions_post", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)),
+            hook_ctx=HookContext(
+                operation_id="agents_completion_v1_agents_completions_post",
+                oauth2_scopes=[],
+                security_source=get_security_from_env(
+                    self.sdk_configuration.security, models.Security
+                ),
+            ),
             request=req,
-            error_status_codes=["422","4XX","5XX"],
-            retry_config=retry_config
+            error_status_codes=["422", "4XX", "5XX"],
+            
retry_config=retry_config, ) - + data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, Optional[models.ChatCompletionResponse]) + return utils.unmarshal_json( + http_res.text, Optional[models.ChatCompletionResponse] + ) if utils.match_response(http_res, "422", "application/json"): data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) raise models.HTTPValidationError(data=data) - if utils.match_response(http_res, ["4XX","5XX"], "*"): - raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) - + if utils.match_response(http_res, ["4XX", "5XX"], "*"): + raise models.SDKError( + "API error occurred", http_res.status_code, http_res.text, http_res + ) + content_type = http_res.headers.get("Content-Type") - raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res.text, + http_res, + ) - - def stream( - self, *, - messages: Union[List[models.AgentsCompletionStreamRequestMessages], List[models.AgentsCompletionStreamRequestMessagesTypedDict]], + self, + *, + messages: Union[ + List[models.AgentsCompletionStreamRequestMessages], + List[models.AgentsCompletionStreamRequestMessagesTypedDict], + ], agent_id: str, max_tokens: OptionalNullable[int] = UNSET, min_tokens: OptionalNullable[int] = UNSET, stream: Optional[bool] = True, - stop: Optional[Union[models.AgentsCompletionStreamRequestStop, models.AgentsCompletionStreamRequestStopTypedDict]] = None, + stop: Optional[ + Union[ + models.AgentsCompletionStreamRequestStop, + models.AgentsCompletionStreamRequestStopTypedDict, + ] + ] = None, random_seed: OptionalNullable[int] = UNSET, - response_format: Optional[Union[models.ResponseFormat, models.ResponseFormatTypedDict]] = None, - tools: OptionalNullable[Union[List[models.Tool], List[models.ToolTypedDict]]] = UNSET, - tool_choice: Optional[Union[models.AgentsCompletionStreamRequestToolChoice, models.AgentsCompletionStreamRequestToolChoiceTypedDict]] = None, + response_format: Optional[ + Union[models.ResponseFormat, models.ResponseFormatTypedDict] + ] = None, + tools: OptionalNullable[ + Union[List[models.Tool], List[models.ToolTypedDict]] + ] = UNSET, + tool_choice: Optional[ + Union[ + models.AgentsCompletionStreamRequestToolChoice, + models.AgentsCompletionStreamRequestToolChoiceTypedDict, + ] + ] = None, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -243,12 +327,12 @@ def stream( :param agent_id: The ID of the agent to use for this completion. :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. :param min_tokens: The minimum number of tokens to generate in the completion. - :param stream: + :param stream: :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. 
- :param response_format: - :param tools: - :param tool_choice: + :param response_format: + :param tools: + :param tool_choice: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -257,23 +341,29 @@ def stream( url_variables = None if timeout_ms is None: timeout_ms = self.sdk_configuration.timeout_ms - + if server_url is not None: base_url = server_url - + request = models.AgentsCompletionStreamRequest( max_tokens=max_tokens, min_tokens=min_tokens, stream=stream, stop=stop, random_seed=random_seed, - messages=utils.get_pydantic_model(messages, List[models.AgentsCompletionStreamRequestMessages]), - response_format=utils.get_pydantic_model(response_format, Optional[models.ResponseFormat]), + messages=utils.get_pydantic_model( + messages, List[models.AgentsCompletionStreamRequestMessages] + ), + response_format=utils.get_pydantic_model( + response_format, Optional[models.ResponseFormat] + ), tools=utils.get_pydantic_model(tools, OptionalNullable[List[models.Tool]]), - tool_choice=utils.get_pydantic_model(tool_choice, Optional[models.AgentsCompletionStreamRequestToolChoice]), + tool_choice=utils.get_pydantic_model( + tool_choice, Optional[models.AgentsCompletionStreamRequestToolChoice] + ), agent_id=agent_id, ) - + req = self.build_request( method="POST", path="/v1/agents/completions#stream", @@ -286,58 +376,87 @@ def stream( user_agent_header="user-agent", accept_header_value="text/event-stream", security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body(request, False, False, "json", models.AgentsCompletionStreamRequest), + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.AgentsCompletionStreamRequest + ), timeout_ms=timeout_ms, ) - + if retries == UNSET: if self.sdk_configuration.retry_config is not UNSET: retries = self.sdk_configuration.retry_config retry_config = None if isinstance(retries, utils.RetryConfig): - retry_config = (retries, [ - "429", - "500", - "502", - "503", - "504" - ]) - + retry_config = (retries, ["429", "500", "502", "503", "504"]) + http_res = self.do_request( - hook_ctx=HookContext(operation_id="stream_agents", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + hook_ctx=HookContext( + operation_id="stream_agents", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), request=req, - error_status_codes=["422","4XX","5XX"], + error_status_codes=["422", "4XX", "5XX"], stream=True, - retry_config=retry_config + retry_config=retry_config, ) - + data: Any = None if utils.match_response(http_res, "200", "text/event-stream"): - return eventstreaming.stream_events(http_res, lambda raw: utils.unmarshal_json(raw, models.CompletionEvent), sentinel="[DONE]") + return eventstreaming.stream_events( + http_res, + lambda raw: utils.unmarshal_json(raw, models.CompletionEvent), + sentinel="[DONE]", + ) if utils.match_response(http_res, "422", "application/json"): data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) raise models.HTTPValidationError(data=data) - if utils.match_response(http_res, ["4XX","5XX"], "*"): - raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) - + if utils.match_response(http_res, ["4XX", "5XX"], 
"*"): + raise models.SDKError( + "API error occurred", http_res.status_code, http_res.text, http_res + ) + content_type = http_res.headers.get("Content-Type") - raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res.text, + http_res, + ) - - async def stream_async( - self, *, - messages: Union[List[models.AgentsCompletionStreamRequestMessages], List[models.AgentsCompletionStreamRequestMessagesTypedDict]], + self, + *, + messages: Union[ + List[models.AgentsCompletionStreamRequestMessages], + List[models.AgentsCompletionStreamRequestMessagesTypedDict], + ], agent_id: str, max_tokens: OptionalNullable[int] = UNSET, min_tokens: OptionalNullable[int] = UNSET, stream: Optional[bool] = True, - stop: Optional[Union[models.AgentsCompletionStreamRequestStop, models.AgentsCompletionStreamRequestStopTypedDict]] = None, + stop: Optional[ + Union[ + models.AgentsCompletionStreamRequestStop, + models.AgentsCompletionStreamRequestStopTypedDict, + ] + ] = None, random_seed: OptionalNullable[int] = UNSET, - response_format: Optional[Union[models.ResponseFormat, models.ResponseFormatTypedDict]] = None, - tools: OptionalNullable[Union[List[models.Tool], List[models.ToolTypedDict]]] = UNSET, - tool_choice: Optional[Union[models.AgentsCompletionStreamRequestToolChoice, models.AgentsCompletionStreamRequestToolChoiceTypedDict]] = None, + response_format: Optional[ + Union[models.ResponseFormat, models.ResponseFormatTypedDict] + ] = None, + tools: OptionalNullable[ + Union[List[models.Tool], List[models.ToolTypedDict]] + ] = UNSET, + tool_choice: Optional[ + Union[ + models.AgentsCompletionStreamRequestToolChoice, + models.AgentsCompletionStreamRequestToolChoiceTypedDict, + ] + ] = None, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -350,12 +469,12 @@ async def stream_async( :param agent_id: The ID of the agent to use for this completion. :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. :param min_tokens: The minimum number of tokens to generate in the completion. - :param stream: + :param stream: :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. 
- :param response_format: - :param tools: - :param tool_choice: + :param response_format: + :param tools: + :param tool_choice: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -364,24 +483,30 @@ async def stream_async( url_variables = None if timeout_ms is None: timeout_ms = self.sdk_configuration.timeout_ms - + if server_url is not None: base_url = server_url - + request = models.AgentsCompletionStreamRequest( max_tokens=max_tokens, min_tokens=min_tokens, stream=stream, stop=stop, random_seed=random_seed, - messages=utils.get_pydantic_model(messages, List[models.AgentsCompletionStreamRequestMessages]), - response_format=utils.get_pydantic_model(response_format, Optional[models.ResponseFormat]), + messages=utils.get_pydantic_model( + messages, List[models.AgentsCompletionStreamRequestMessages] + ), + response_format=utils.get_pydantic_model( + response_format, Optional[models.ResponseFormat] + ), tools=utils.get_pydantic_model(tools, OptionalNullable[List[models.Tool]]), - tool_choice=utils.get_pydantic_model(tool_choice, Optional[models.AgentsCompletionStreamRequestToolChoice]), + tool_choice=utils.get_pydantic_model( + tool_choice, Optional[models.AgentsCompletionStreamRequestToolChoice] + ), agent_id=agent_id, ) - - req = self.build_request( + + req = self.build_request_async( method="POST", path="/v1/agents/completions#stream", base_url=base_url, @@ -393,42 +518,53 @@ async def stream_async( user_agent_header="user-agent", accept_header_value="text/event-stream", security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body(request, False, False, "json", models.AgentsCompletionStreamRequest), + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.AgentsCompletionStreamRequest + ), timeout_ms=timeout_ms, ) - + if retries == UNSET: if self.sdk_configuration.retry_config is not UNSET: retries = self.sdk_configuration.retry_config retry_config = None if isinstance(retries, utils.RetryConfig): - retry_config = (retries, [ - "429", - "500", - "502", - "503", - "504" - ]) - + retry_config = (retries, ["429", "500", "502", "503", "504"]) + http_res = await self.do_request_async( - hook_ctx=HookContext(operation_id="stream_agents", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + hook_ctx=HookContext( + operation_id="stream_agents", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), request=req, - error_status_codes=["422","4XX","5XX"], + error_status_codes=["422", "4XX", "5XX"], stream=True, - retry_config=retry_config + retry_config=retry_config, ) - + data: Any = None if utils.match_response(http_res, "200", "text/event-stream"): - return eventstreaming.stream_events_async(http_res, lambda raw: utils.unmarshal_json(raw, models.CompletionEvent), sentinel="[DONE]") + return eventstreaming.stream_events_async( + http_res, + lambda raw: utils.unmarshal_json(raw, models.CompletionEvent), + sentinel="[DONE]", + ) if utils.match_response(http_res, "422", "application/json"): data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) raise models.HTTPValidationError(data=data) - if utils.match_response(http_res, ["4XX","5XX"], "*"): - raise models.SDKError("API error occurred", 
http_res.status_code, http_res.text, http_res) - - content_type = http_res.headers.get("Content-Type") - raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + if utils.match_response(http_res, ["4XX", "5XX"], "*"): + raise models.SDKError( + "API error occurred", http_res.status_code, http_res.text, http_res + ) - + content_type = http_res.headers.get("Content-Type") + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res.text, + http_res, + ) diff --git a/src/mistralai/basesdk.py b/src/mistralai/basesdk.py index f9e54c5..3fc2bdd 100644 --- a/src/mistralai/basesdk.py +++ b/src/mistralai/basesdk.py @@ -3,10 +3,15 @@ from .sdkconfiguration import SDKConfiguration import httpx from mistralai import models, utils -from mistralai._hooks import AfterErrorContext, AfterSuccessContext, BeforeRequestContext +from mistralai._hooks import ( + AfterErrorContext, + AfterSuccessContext, + BeforeRequestContext, +) from mistralai.utils import RetryConfig, SerializedRequestBody, get_body_content from typing import Callable, List, Optional, Tuple + class BaseSDK: sdk_configuration: SDKConfiguration @@ -24,6 +29,46 @@ def get_url(self, base_url, url_variables): return utils.template_url(base_url, url_variables) + def build_request_async( + self, + method, + path, + base_url, + url_variables, + request, + request_body_required, + request_has_path_params, + request_has_query_params, + user_agent_header, + accept_header_value, + _globals=None, + security=None, + timeout_ms: Optional[int] = None, + get_serialized_body: Optional[ + Callable[[], Optional[SerializedRequestBody]] + ] = None, + url_override: Optional[str] = None, + ) -> httpx.Request: + client = self.sdk_configuration.async_client + return self.build_request_with_client( + client, + method, + path, + base_url, + url_variables, + request, + request_body_required, + request_has_path_params, + request_has_query_params, + user_agent_header, + accept_header_value, + _globals, + security, + timeout_ms, + get_serialized_body, + url_override, + ) + def build_request( self, method, @@ -45,7 +90,46 @@ def build_request( url_override: Optional[str] = None, ) -> httpx.Request: client = self.sdk_configuration.client + return self.build_request_with_client( + client, + method, + path, + base_url, + url_variables, + request, + request_body_required, + request_has_path_params, + request_has_query_params, + user_agent_header, + accept_header_value, + _globals, + security, + timeout_ms, + get_serialized_body, + url_override, + ) + def build_request_with_client( + self, + client, + method, + path, + base_url, + url_variables, + request, + request_body_required, + request_has_path_params, + request_has_query_params, + user_agent_header, + accept_header_value, + _globals=None, + security=None, + timeout_ms: Optional[int] = None, + get_serialized_body: Optional[ + Callable[[], Optional[SerializedRequestBody]] + ] = None, + url_override: Optional[str] = None, + ) -> httpx.Request: query_params = {} url = url_override @@ -129,7 +213,7 @@ def do(): req.method, req.url, req.headers, - get_body_content(req) + get_body_content(req), ) http_res = client.send(req, stream=stream) except Exception as e: @@ -149,7 +233,7 @@ def do(): http_res.status_code, http_res.url, http_res.headers, - "" if stream else http_res.text + "" if stream else http_res.text, ) if 
utils.match_status_codes(error_status_codes, http_res.status_code): @@ -189,6 +273,7 @@ async def do_request_async( ) -> httpx.Response: client = self.sdk_configuration.async_client logger = self.sdk_configuration.debug_logger + async def do(): http_res = None try: @@ -200,7 +285,7 @@ async def do(): req.method, req.url, req.headers, - get_body_content(req) + get_body_content(req), ) http_res = await client.send(req, stream=stream) except Exception as e: @@ -220,7 +305,7 @@ async def do(): http_res.status_code, http_res.url, http_res.headers, - "" if stream else http_res.text + "" if stream else http_res.text, ) if utils.match_status_codes(error_status_codes, http_res.status_code): diff --git a/src/mistralai/chat.py b/src/mistralai/chat.py index 9cdeac1..3e770f1 100644 --- a/src/mistralai/chat.py +++ b/src/mistralai/chat.py @@ -7,12 +7,13 @@ from mistralai.utils import eventstreaming, get_security_from_env from typing import Any, AsyncGenerator, Generator, List, Optional, Union + class Chat(BaseSDK): r"""Chat Completion API.""" - - + def complete( - self, *, + self, + *, model: Nullable[str], messages: Union[List[models.Messages], List[models.MessagesTypedDict]], temperature: Optional[float] = 0.7, @@ -22,9 +23,18 @@ def complete( stream: Optional[bool] = False, stop: Optional[Union[models.Stop, models.StopTypedDict]] = None, random_seed: OptionalNullable[int] = UNSET, - response_format: Optional[Union[models.ResponseFormat, models.ResponseFormatTypedDict]] = None, - tools: OptionalNullable[Union[List[models.Tool], List[models.ToolTypedDict]]] = UNSET, - tool_choice: Optional[Union[models.ChatCompletionRequestToolChoice, models.ChatCompletionRequestToolChoiceTypedDict]] = None, + response_format: Optional[ + Union[models.ResponseFormat, models.ResponseFormatTypedDict] + ] = None, + tools: OptionalNullable[ + Union[List[models.Tool], List[models.ToolTypedDict]] + ] = UNSET, + tool_choice: Optional[ + Union[ + models.ChatCompletionRequestToolChoice, + models.ChatCompletionRequestToolChoiceTypedDict, + ] + ] = None, safe_prompt: Optional[bool] = False, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -41,9 +51,9 @@ def complete( :param stream: Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. - :param response_format: - :param tools: - :param tool_choice: + :param response_format: + :param tools: + :param tool_choice: :param safe_prompt: Whether to inject a safety prompt before all conversations. 
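The hunks above only reflow `Chat.complete` and route request construction through the shared builder; the call surface is unchanged. A minimal usage sketch, assuming an API key in the environment:

    import os
    from mistralai import Mistral

    client = Mistral(api_key=os.environ["MISTRAL_API_KEY"])

    res = client.chat.complete(
        model="mistral-small-latest",  # placeholder model name
        messages=[{"role": "user", "content": "Say hello in one word."}],
        temperature=0.7,  # matches the default in the signature above
    )
    if res is not None:
        print(res.choices[0].message.content)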
:param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method @@ -53,10 +63,10 @@ def complete( url_variables = None if timeout_ms is None: timeout_ms = self.sdk_configuration.timeout_ms - + if server_url is not None: base_url = server_url - + request = models.ChatCompletionRequest( model=model, temperature=temperature, @@ -67,12 +77,16 @@ def complete( stop=stop, random_seed=random_seed, messages=utils.get_pydantic_model(messages, List[models.Messages]), - response_format=utils.get_pydantic_model(response_format, Optional[models.ResponseFormat]), + response_format=utils.get_pydantic_model( + response_format, Optional[models.ResponseFormat] + ), tools=utils.get_pydantic_model(tools, OptionalNullable[List[models.Tool]]), - tool_choice=utils.get_pydantic_model(tool_choice, Optional[models.ChatCompletionRequestToolChoice]), + tool_choice=utils.get_pydantic_model( + tool_choice, Optional[models.ChatCompletionRequestToolChoice] + ), safe_prompt=safe_prompt, ) - + req = self.build_request( method="POST", path="/v1/chat/completions", @@ -85,47 +99,57 @@ def complete( user_agent_header="user-agent", accept_header_value="application/json", security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body(request, False, False, "json", models.ChatCompletionRequest), + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.ChatCompletionRequest + ), timeout_ms=timeout_ms, ) - + if retries == UNSET: if self.sdk_configuration.retry_config is not UNSET: retries = self.sdk_configuration.retry_config retry_config = None if isinstance(retries, utils.RetryConfig): - retry_config = (retries, [ - "429", - "500", - "502", - "503", - "504" - ]) - + retry_config = (retries, ["429", "500", "502", "503", "504"]) + http_res = self.do_request( - hook_ctx=HookContext(operation_id="chat_completion_v1_chat_completions_post", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + hook_ctx=HookContext( + operation_id="chat_completion_v1_chat_completions_post", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), request=req, - error_status_codes=["422","4XX","5XX"], - retry_config=retry_config + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, ) - + data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, Optional[models.ChatCompletionResponse]) + return utils.unmarshal_json( + http_res.text, Optional[models.ChatCompletionResponse] + ) if utils.match_response(http_res, "422", "application/json"): data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) raise models.HTTPValidationError(data=data) - if utils.match_response(http_res, ["4XX","5XX"], "*"): - raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) - + if utils.match_response(http_res, ["4XX", "5XX"], "*"): + raise models.SDKError( + "API error occurred", http_res.status_code, http_res.text, http_res + ) + content_type = http_res.headers.get("Content-Type") - raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + 
http_res.text, + http_res, + ) - - async def complete_async( - self, *, + self, + *, model: Nullable[str], messages: Union[List[models.Messages], List[models.MessagesTypedDict]], temperature: Optional[float] = 0.7, @@ -135,9 +159,18 @@ async def complete_async( stream: Optional[bool] = False, stop: Optional[Union[models.Stop, models.StopTypedDict]] = None, random_seed: OptionalNullable[int] = UNSET, - response_format: Optional[Union[models.ResponseFormat, models.ResponseFormatTypedDict]] = None, - tools: OptionalNullable[Union[List[models.Tool], List[models.ToolTypedDict]]] = UNSET, - tool_choice: Optional[Union[models.ChatCompletionRequestToolChoice, models.ChatCompletionRequestToolChoiceTypedDict]] = None, + response_format: Optional[ + Union[models.ResponseFormat, models.ResponseFormatTypedDict] + ] = None, + tools: OptionalNullable[ + Union[List[models.Tool], List[models.ToolTypedDict]] + ] = UNSET, + tool_choice: Optional[ + Union[ + models.ChatCompletionRequestToolChoice, + models.ChatCompletionRequestToolChoiceTypedDict, + ] + ] = None, safe_prompt: Optional[bool] = False, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -154,9 +187,9 @@ async def complete_async( :param stream: Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. - :param response_format: - :param tools: - :param tool_choice: + :param response_format: + :param tools: + :param tool_choice: :param safe_prompt: Whether to inject a safety prompt before all conversations. 
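`complete_async` now builds its request with `build_request_async`, so serialization runs against the SDK's shared async client before `do_request_async` awaits the response. A usage sketch:

    import asyncio
    import os
    from mistralai import Mistral

    async def main() -> None:
        client = Mistral(api_key=os.environ["MISTRAL_API_KEY"])
        res = await client.chat.complete_async(
            model="mistral-small-latest",  # placeholder model name
            messages=[{"role": "user", "content": "One fun fact, please."}],
        )
        if res is not None:
            print(res.choices[0].message.content)

    asyncio.run(main())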
:param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method @@ -166,10 +199,10 @@ async def complete_async( url_variables = None if timeout_ms is None: timeout_ms = self.sdk_configuration.timeout_ms - + if server_url is not None: base_url = server_url - + request = models.ChatCompletionRequest( model=model, temperature=temperature, @@ -180,13 +213,17 @@ async def complete_async( stop=stop, random_seed=random_seed, messages=utils.get_pydantic_model(messages, List[models.Messages]), - response_format=utils.get_pydantic_model(response_format, Optional[models.ResponseFormat]), + response_format=utils.get_pydantic_model( + response_format, Optional[models.ResponseFormat] + ), tools=utils.get_pydantic_model(tools, OptionalNullable[List[models.Tool]]), - tool_choice=utils.get_pydantic_model(tool_choice, Optional[models.ChatCompletionRequestToolChoice]), + tool_choice=utils.get_pydantic_model( + tool_choice, Optional[models.ChatCompletionRequestToolChoice] + ), safe_prompt=safe_prompt, ) - - req = self.build_request( + + req = self.build_request_async( method="POST", path="/v1/chat/completions", base_url=base_url, @@ -198,59 +235,86 @@ async def complete_async( user_agent_header="user-agent", accept_header_value="application/json", security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body(request, False, False, "json", models.ChatCompletionRequest), + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.ChatCompletionRequest + ), timeout_ms=timeout_ms, ) - + if retries == UNSET: if self.sdk_configuration.retry_config is not UNSET: retries = self.sdk_configuration.retry_config retry_config = None if isinstance(retries, utils.RetryConfig): - retry_config = (retries, [ - "429", - "500", - "502", - "503", - "504" - ]) - + retry_config = (retries, ["429", "500", "502", "503", "504"]) + http_res = await self.do_request_async( - hook_ctx=HookContext(operation_id="chat_completion_v1_chat_completions_post", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + hook_ctx=HookContext( + operation_id="chat_completion_v1_chat_completions_post", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), request=req, - error_status_codes=["422","4XX","5XX"], - retry_config=retry_config + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, ) - + data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, Optional[models.ChatCompletionResponse]) + return utils.unmarshal_json( + http_res.text, Optional[models.ChatCompletionResponse] + ) if utils.match_response(http_res, "422", "application/json"): data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) raise models.HTTPValidationError(data=data) - if utils.match_response(http_res, ["4XX","5XX"], "*"): - raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) - + if utils.match_response(http_res, ["4XX", "5XX"], "*"): + raise models.SDKError( + "API error occurred", http_res.status_code, http_res.text, http_res + ) + content_type = http_res.headers.get("Content-Type") - raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + raise models.SDKError( + 
f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res.text, + http_res, + ) - - def stream( - self, *, + self, + *, model: Nullable[str], - messages: Union[List[models.ChatCompletionStreamRequestMessages], List[models.ChatCompletionStreamRequestMessagesTypedDict]], + messages: Union[ + List[models.ChatCompletionStreamRequestMessages], + List[models.ChatCompletionStreamRequestMessagesTypedDict], + ], temperature: Optional[float] = 0.7, top_p: Optional[float] = 1, max_tokens: OptionalNullable[int] = UNSET, min_tokens: OptionalNullable[int] = UNSET, stream: Optional[bool] = True, - stop: Optional[Union[models.ChatCompletionStreamRequestStop, models.ChatCompletionStreamRequestStopTypedDict]] = None, + stop: Optional[ + Union[ + models.ChatCompletionStreamRequestStop, + models.ChatCompletionStreamRequestStopTypedDict, + ] + ] = None, random_seed: OptionalNullable[int] = UNSET, - response_format: Optional[Union[models.ResponseFormat, models.ResponseFormatTypedDict]] = None, - tools: OptionalNullable[Union[List[models.Tool], List[models.ToolTypedDict]]] = UNSET, - tool_choice: Optional[Union[models.ChatCompletionStreamRequestToolChoice, models.ChatCompletionStreamRequestToolChoiceTypedDict]] = None, + response_format: Optional[ + Union[models.ResponseFormat, models.ResponseFormatTypedDict] + ] = None, + tools: OptionalNullable[ + Union[List[models.Tool], List[models.ToolTypedDict]] + ] = UNSET, + tool_choice: Optional[ + Union[ + models.ChatCompletionStreamRequestToolChoice, + models.ChatCompletionStreamRequestToolChoiceTypedDict, + ] + ] = None, safe_prompt: Optional[bool] = False, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -266,12 +330,12 @@ def stream( :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. :param min_tokens: The minimum number of tokens to generate in the completion. - :param stream: + :param stream: :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. - :param response_format: - :param tools: - :param tool_choice: + :param response_format: + :param tools: + :param tool_choice: :param safe_prompt: Whether to inject a safety prompt before all conversations. 
:param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method @@ -281,10 +345,10 @@ def stream( url_variables = None if timeout_ms is None: timeout_ms = self.sdk_configuration.timeout_ms - + if server_url is not None: base_url = server_url - + request = models.ChatCompletionStreamRequest( model=model, temperature=temperature, @@ -294,13 +358,19 @@ def stream( stream=stream, stop=stop, random_seed=random_seed, - messages=utils.get_pydantic_model(messages, List[models.ChatCompletionStreamRequestMessages]), - response_format=utils.get_pydantic_model(response_format, Optional[models.ResponseFormat]), + messages=utils.get_pydantic_model( + messages, List[models.ChatCompletionStreamRequestMessages] + ), + response_format=utils.get_pydantic_model( + response_format, Optional[models.ResponseFormat] + ), tools=utils.get_pydantic_model(tools, OptionalNullable[List[models.Tool]]), - tool_choice=utils.get_pydantic_model(tool_choice, Optional[models.ChatCompletionStreamRequestToolChoice]), + tool_choice=utils.get_pydantic_model( + tool_choice, Optional[models.ChatCompletionStreamRequestToolChoice] + ), safe_prompt=safe_prompt, ) - + req = self.build_request( method="POST", path="/v1/chat/completions#stream", @@ -313,60 +383,89 @@ def stream( user_agent_header="user-agent", accept_header_value="text/event-stream", security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body(request, False, False, "json", models.ChatCompletionStreamRequest), + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.ChatCompletionStreamRequest + ), timeout_ms=timeout_ms, ) - + if retries == UNSET: if self.sdk_configuration.retry_config is not UNSET: retries = self.sdk_configuration.retry_config retry_config = None if isinstance(retries, utils.RetryConfig): - retry_config = (retries, [ - "429", - "500", - "502", - "503", - "504" - ]) - + retry_config = (retries, ["429", "500", "502", "503", "504"]) + http_res = self.do_request( - hook_ctx=HookContext(operation_id="stream_chat", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + hook_ctx=HookContext( + operation_id="stream_chat", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), request=req, - error_status_codes=["422","4XX","5XX"], + error_status_codes=["422", "4XX", "5XX"], stream=True, - retry_config=retry_config + retry_config=retry_config, ) - + data: Any = None if utils.match_response(http_res, "200", "text/event-stream"): - return eventstreaming.stream_events(http_res, lambda raw: utils.unmarshal_json(raw, models.CompletionEvent), sentinel="[DONE]") + return eventstreaming.stream_events( + http_res, + lambda raw: utils.unmarshal_json(raw, models.CompletionEvent), + sentinel="[DONE]", + ) if utils.match_response(http_res, "422", "application/json"): data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) raise models.HTTPValidationError(data=data) - if utils.match_response(http_res, ["4XX","5XX"], "*"): - raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) - + if utils.match_response(http_res, ["4XX", "5XX"], "*"): + raise models.SDKError( + "API error occurred", http_res.status_code, http_res.text, http_res + ) + content_type = http_res.headers.get("Content-Type") - raise models.SDKError(f"Unexpected response received (code: 
{http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res.text, + http_res, + ) - - async def stream_async( - self, *, + self, + *, model: Nullable[str], - messages: Union[List[models.ChatCompletionStreamRequestMessages], List[models.ChatCompletionStreamRequestMessagesTypedDict]], + messages: Union[ + List[models.ChatCompletionStreamRequestMessages], + List[models.ChatCompletionStreamRequestMessagesTypedDict], + ], temperature: Optional[float] = 0.7, top_p: Optional[float] = 1, max_tokens: OptionalNullable[int] = UNSET, min_tokens: OptionalNullable[int] = UNSET, stream: Optional[bool] = True, - stop: Optional[Union[models.ChatCompletionStreamRequestStop, models.ChatCompletionStreamRequestStopTypedDict]] = None, + stop: Optional[ + Union[ + models.ChatCompletionStreamRequestStop, + models.ChatCompletionStreamRequestStopTypedDict, + ] + ] = None, random_seed: OptionalNullable[int] = UNSET, - response_format: Optional[Union[models.ResponseFormat, models.ResponseFormatTypedDict]] = None, - tools: OptionalNullable[Union[List[models.Tool], List[models.ToolTypedDict]]] = UNSET, - tool_choice: Optional[Union[models.ChatCompletionStreamRequestToolChoice, models.ChatCompletionStreamRequestToolChoiceTypedDict]] = None, + response_format: Optional[ + Union[models.ResponseFormat, models.ResponseFormatTypedDict] + ] = None, + tools: OptionalNullable[ + Union[List[models.Tool], List[models.ToolTypedDict]] + ] = UNSET, + tool_choice: Optional[ + Union[ + models.ChatCompletionStreamRequestToolChoice, + models.ChatCompletionStreamRequestToolChoiceTypedDict, + ] + ] = None, safe_prompt: Optional[bool] = False, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -382,12 +481,12 @@ async def stream_async( :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. :param min_tokens: The minimum number of tokens to generate in the completion. - :param stream: + :param stream: :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. - :param response_format: - :param tools: - :param tool_choice: + :param response_format: + :param tools: + :param tool_choice: :param safe_prompt: Whether to inject a safety prompt before all conversations. 
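Every method accepts a per-call `retries` override; when a `utils.RetryConfig` is in effect, status codes 429/500/502/503/504 are retried, per the tuple assembled above. A sketch of a per-call override, assuming the `BackoffStrategy`/`RetryConfig` helpers exported by `mistralai.utils` (the numbers are illustrative):

    from mistralai.utils import BackoffStrategy, RetryConfig

    res = client.chat.complete(
        model="mistral-small-latest",  # placeholder model name
        messages=[{"role": "user", "content": "Hello"}],
        retries=RetryConfig(
            "backoff",
            # initial interval, max interval, exponent, max elapsed time
            BackoffStrategy(1, 50, 1.1, 100),
            retry_connection_errors=False,
        ),
    )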
:param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method @@ -397,10 +496,10 @@ async def stream_async( url_variables = None if timeout_ms is None: timeout_ms = self.sdk_configuration.timeout_ms - + if server_url is not None: base_url = server_url - + request = models.ChatCompletionStreamRequest( model=model, temperature=temperature, @@ -410,14 +509,20 @@ async def stream_async( stream=stream, stop=stop, random_seed=random_seed, - messages=utils.get_pydantic_model(messages, List[models.ChatCompletionStreamRequestMessages]), - response_format=utils.get_pydantic_model(response_format, Optional[models.ResponseFormat]), + messages=utils.get_pydantic_model( + messages, List[models.ChatCompletionStreamRequestMessages] + ), + response_format=utils.get_pydantic_model( + response_format, Optional[models.ResponseFormat] + ), tools=utils.get_pydantic_model(tools, OptionalNullable[List[models.Tool]]), - tool_choice=utils.get_pydantic_model(tool_choice, Optional[models.ChatCompletionStreamRequestToolChoice]), + tool_choice=utils.get_pydantic_model( + tool_choice, Optional[models.ChatCompletionStreamRequestToolChoice] + ), safe_prompt=safe_prompt, ) - - req = self.build_request( + + req = self.build_request_async( method="POST", path="/v1/chat/completions#stream", base_url=base_url, @@ -429,42 +534,53 @@ async def stream_async( user_agent_header="user-agent", accept_header_value="text/event-stream", security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body(request, False, False, "json", models.ChatCompletionStreamRequest), + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.ChatCompletionStreamRequest + ), timeout_ms=timeout_ms, ) - + if retries == UNSET: if self.sdk_configuration.retry_config is not UNSET: retries = self.sdk_configuration.retry_config retry_config = None if isinstance(retries, utils.RetryConfig): - retry_config = (retries, [ - "429", - "500", - "502", - "503", - "504" - ]) - + retry_config = (retries, ["429", "500", "502", "503", "504"]) + http_res = await self.do_request_async( - hook_ctx=HookContext(operation_id="stream_chat", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + hook_ctx=HookContext( + operation_id="stream_chat", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), request=req, - error_status_codes=["422","4XX","5XX"], + error_status_codes=["422", "4XX", "5XX"], stream=True, - retry_config=retry_config + retry_config=retry_config, ) - + data: Any = None if utils.match_response(http_res, "200", "text/event-stream"): - return eventstreaming.stream_events_async(http_res, lambda raw: utils.unmarshal_json(raw, models.CompletionEvent), sentinel="[DONE]") + return eventstreaming.stream_events_async( + http_res, + lambda raw: utils.unmarshal_json(raw, models.CompletionEvent), + sentinel="[DONE]", + ) if utils.match_response(http_res, "422", "application/json"): data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) raise models.HTTPValidationError(data=data) - if utils.match_response(http_res, ["4XX","5XX"], "*"): - raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) - - content_type = http_res.headers.get("Content-Type") - raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: 
{content_type})", http_res.status_code, http_res.text, http_res) + if utils.match_response(http_res, ["4XX", "5XX"], "*"): + raise models.SDKError( + "API error occurred", http_res.status_code, http_res.text, http_res + ) - + content_type = http_res.headers.get("Content-Type") + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res.text, + http_res, + ) diff --git a/src/mistralai/embeddings.py b/src/mistralai/embeddings.py index 193758e..c19a9e3 100644 --- a/src/mistralai/embeddings.py +++ b/src/mistralai/embeddings.py @@ -7,12 +7,13 @@ from mistralai.utils import get_security_from_env from typing import Any, Optional, Union + class Embeddings(BaseSDK): r"""Embeddings API.""" - - + def create( - self, *, + self, + *, inputs: Union[models.Inputs, models.InputsTypedDict], model: str, encoding_format: OptionalNullable[str] = UNSET, @@ -35,16 +36,16 @@ def create( url_variables = None if timeout_ms is None: timeout_ms = self.sdk_configuration.timeout_ms - + if server_url is not None: base_url = server_url - + request = models.EmbeddingRequest( inputs=inputs, model=model, encoding_format=encoding_format, ) - + req = self.build_request( method="POST", path="/v1/embeddings", @@ -57,47 +58,57 @@ def create( user_agent_header="user-agent", accept_header_value="application/json", security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body(request, False, False, "json", models.EmbeddingRequest), + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.EmbeddingRequest + ), timeout_ms=timeout_ms, ) - + if retries == UNSET: if self.sdk_configuration.retry_config is not UNSET: retries = self.sdk_configuration.retry_config retry_config = None if isinstance(retries, utils.RetryConfig): - retry_config = (retries, [ - "429", - "500", - "502", - "503", - "504" - ]) - + retry_config = (retries, ["429", "500", "502", "503", "504"]) + http_res = self.do_request( - hook_ctx=HookContext(operation_id="embeddings_v1_embeddings_post", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + hook_ctx=HookContext( + operation_id="embeddings_v1_embeddings_post", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), request=req, - error_status_codes=["422","4XX","5XX"], - retry_config=retry_config + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, ) - + data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, Optional[models.EmbeddingResponse]) + return utils.unmarshal_json( + http_res.text, Optional[models.EmbeddingResponse] + ) if utils.match_response(http_res, "422", "application/json"): data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) raise models.HTTPValidationError(data=data) - if utils.match_response(http_res, ["4XX","5XX"], "*"): - raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) - + if utils.match_response(http_res, ["4XX", "5XX"], "*"): + raise models.SDKError( + "API error occurred", http_res.status_code, http_res.text, http_res + ) + content_type = http_res.headers.get("Content-Type") - raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + raise models.SDKError( 
+ f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res.text, + http_res, + ) - - async def create_async( - self, *, + self, + *, inputs: Union[models.Inputs, models.InputsTypedDict], model: str, encoding_format: OptionalNullable[str] = UNSET, @@ -120,17 +131,17 @@ async def create_async( url_variables = None if timeout_ms is None: timeout_ms = self.sdk_configuration.timeout_ms - + if server_url is not None: base_url = server_url - + request = models.EmbeddingRequest( inputs=inputs, model=model, encoding_format=encoding_format, ) - - req = self.build_request( + + req = self.build_request_async( method="POST", path="/v1/embeddings", base_url=base_url, @@ -142,41 +153,50 @@ async def create_async( user_agent_header="user-agent", accept_header_value="application/json", security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body(request, False, False, "json", models.EmbeddingRequest), + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.EmbeddingRequest + ), timeout_ms=timeout_ms, ) - + if retries == UNSET: if self.sdk_configuration.retry_config is not UNSET: retries = self.sdk_configuration.retry_config retry_config = None if isinstance(retries, utils.RetryConfig): - retry_config = (retries, [ - "429", - "500", - "502", - "503", - "504" - ]) - + retry_config = (retries, ["429", "500", "502", "503", "504"]) + http_res = await self.do_request_async( - hook_ctx=HookContext(operation_id="embeddings_v1_embeddings_post", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + hook_ctx=HookContext( + operation_id="embeddings_v1_embeddings_post", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), request=req, - error_status_codes=["422","4XX","5XX"], - retry_config=retry_config + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, ) - + data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, Optional[models.EmbeddingResponse]) + return utils.unmarshal_json( + http_res.text, Optional[models.EmbeddingResponse] + ) if utils.match_response(http_res, "422", "application/json"): data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) raise models.HTTPValidationError(data=data) - if utils.match_response(http_res, ["4XX","5XX"], "*"): - raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) - - content_type = http_res.headers.get("Content-Type") - raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + if utils.match_response(http_res, ["4XX", "5XX"], "*"): + raise models.SDKError( + "API error occurred", http_res.status_code, http_res.text, http_res + ) - + content_type = http_res.headers.get("Content-Type") + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res.text, + http_res, + ) diff --git a/src/mistralai/files.py b/src/mistralai/files.py index 2aa3765..0672405 100644 --- a/src/mistralai/files.py +++ b/src/mistralai/files.py @@ -7,12 +7,13 @@ from mistralai.utils import get_security_from_env from typing import Optional, Union + class Files(BaseSDK): r"""Files API""" - - + def upload( - self, *, + 
self, + *, file: Union[models.File, models.FileTypedDict], retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -35,14 +36,14 @@ def upload( url_variables = None if timeout_ms is None: timeout_ms = self.sdk_configuration.timeout_ms - + if server_url is not None: base_url = server_url - + request = models.FilesAPIRoutesUploadFileMultiPartBodyParams( file=utils.get_pydantic_model(file, models.File), ) - + req = self.build_request( method="POST", path="/v1/files", @@ -55,43 +56,55 @@ def upload( user_agent_header="user-agent", accept_header_value="application/json", security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body(request, False, False, "multipart", models.FilesAPIRoutesUploadFileMultiPartBodyParams), + get_serialized_body=lambda: utils.serialize_request_body( + request, + False, + False, + "multipart", + models.FilesAPIRoutesUploadFileMultiPartBodyParams, + ), timeout_ms=timeout_ms, ) - + if retries == UNSET: if self.sdk_configuration.retry_config is not UNSET: retries = self.sdk_configuration.retry_config retry_config = None if isinstance(retries, utils.RetryConfig): - retry_config = (retries, [ - "429", - "500", - "502", - "503", - "504" - ]) - + retry_config = (retries, ["429", "500", "502", "503", "504"]) + http_res = self.do_request( - hook_ctx=HookContext(operation_id="files_api_routes_upload_file", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + hook_ctx=HookContext( + operation_id="files_api_routes_upload_file", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), request=req, - error_status_codes=["4XX","5XX"], - retry_config=retry_config + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, ) - + if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json(http_res.text, Optional[models.UploadFileOut]) - if utils.match_response(http_res, ["4XX","5XX"], "*"): - raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) - + if utils.match_response(http_res, ["4XX", "5XX"], "*"): + raise models.SDKError( + "API error occurred", http_res.status_code, http_res.text, http_res + ) + content_type = http_res.headers.get("Content-Type") - raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res.text, + http_res, + ) - - async def upload_async( - self, *, + self, + *, file: Union[models.File, models.FileTypedDict], retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -114,15 +127,15 @@ async def upload_async( url_variables = None if timeout_ms is None: timeout_ms = self.sdk_configuration.timeout_ms - + if server_url is not None: base_url = server_url - + request = models.FilesAPIRoutesUploadFileMultiPartBodyParams( file=utils.get_pydantic_model(file, models.File), ) - - req = self.build_request( + + req = self.build_request_async( method="POST", path="/v1/files", base_url=base_url, @@ -134,43 +147,55 @@ async def upload_async( user_agent_header="user-agent", accept_header_value="application/json", security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body(request, False, False, 
"multipart", models.FilesAPIRoutesUploadFileMultiPartBodyParams), + get_serialized_body=lambda: utils.serialize_request_body( + request, + False, + False, + "multipart", + models.FilesAPIRoutesUploadFileMultiPartBodyParams, + ), timeout_ms=timeout_ms, ) - + if retries == UNSET: if self.sdk_configuration.retry_config is not UNSET: retries = self.sdk_configuration.retry_config retry_config = None if isinstance(retries, utils.RetryConfig): - retry_config = (retries, [ - "429", - "500", - "502", - "503", - "504" - ]) - + retry_config = (retries, ["429", "500", "502", "503", "504"]) + http_res = await self.do_request_async( - hook_ctx=HookContext(operation_id="files_api_routes_upload_file", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + hook_ctx=HookContext( + operation_id="files_api_routes_upload_file", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), request=req, - error_status_codes=["4XX","5XX"], - retry_config=retry_config + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, ) - + if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json(http_res.text, Optional[models.UploadFileOut]) - if utils.match_response(http_res, ["4XX","5XX"], "*"): - raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) - + if utils.match_response(http_res, ["4XX", "5XX"], "*"): + raise models.SDKError( + "API error occurred", http_res.status_code, http_res.text, http_res + ) + content_type = http_res.headers.get("Content-Type") - raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res.text, + http_res, + ) - - def list( - self, *, + self, + *, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -187,7 +212,7 @@ def list( url_variables = None if timeout_ms is None: timeout_ms = self.sdk_configuration.timeout_ms - + if server_url is not None: base_url = server_url req = self.build_request( @@ -204,40 +229,46 @@ def list( security=self.sdk_configuration.security, timeout_ms=timeout_ms, ) - + if retries == UNSET: if self.sdk_configuration.retry_config is not UNSET: retries = self.sdk_configuration.retry_config retry_config = None if isinstance(retries, utils.RetryConfig): - retry_config = (retries, [ - "429", - "500", - "502", - "503", - "504" - ]) - + retry_config = (retries, ["429", "500", "502", "503", "504"]) + http_res = self.do_request( - hook_ctx=HookContext(operation_id="files_api_routes_list_files", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + hook_ctx=HookContext( + operation_id="files_api_routes_list_files", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), request=req, - error_status_codes=["4XX","5XX"], - retry_config=retry_config + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, ) - + if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json(http_res.text, Optional[models.ListFilesOut]) - if utils.match_response(http_res, ["4XX","5XX"], "*"): - raise models.SDKError("API error occurred", 
http_res.status_code, http_res.text, http_res) - + if utils.match_response(http_res, ["4XX", "5XX"], "*"): + raise models.SDKError( + "API error occurred", http_res.status_code, http_res.text, http_res + ) + content_type = http_res.headers.get("Content-Type") - raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res.text, + http_res, + ) - - async def list_async( - self, *, + self, + *, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -254,10 +285,10 @@ async def list_async( url_variables = None if timeout_ms is None: timeout_ms = self.sdk_configuration.timeout_ms - + if server_url is not None: base_url = server_url - req = self.build_request( + req = self.build_request_async( method="GET", path="/v1/files", base_url=base_url, @@ -271,40 +302,46 @@ async def list_async( security=self.sdk_configuration.security, timeout_ms=timeout_ms, ) - + if retries == UNSET: if self.sdk_configuration.retry_config is not UNSET: retries = self.sdk_configuration.retry_config retry_config = None if isinstance(retries, utils.RetryConfig): - retry_config = (retries, [ - "429", - "500", - "502", - "503", - "504" - ]) - + retry_config = (retries, ["429", "500", "502", "503", "504"]) + http_res = await self.do_request_async( - hook_ctx=HookContext(operation_id="files_api_routes_list_files", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + hook_ctx=HookContext( + operation_id="files_api_routes_list_files", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), request=req, - error_status_codes=["4XX","5XX"], - retry_config=retry_config + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, ) - + if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json(http_res.text, Optional[models.ListFilesOut]) - if utils.match_response(http_res, ["4XX","5XX"], "*"): - raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) - + if utils.match_response(http_res, ["4XX", "5XX"], "*"): + raise models.SDKError( + "API error occurred", http_res.status_code, http_res.text, http_res + ) + content_type = http_res.headers.get("Content-Type") - raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res.text, + http_res, + ) - - def retrieve( - self, *, + self, + *, file_id: str, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -314,7 +351,7 @@ def retrieve( Returns information about a specific file. 
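`upload` serializes its body as multipart form data via `FilesAPIRoutesUploadFileMultiPartBodyParams`, while `list` is a plain GET. A sketch of uploading a local JSONL file and listing stored files (the path and field names follow the `File` model in this patch; treat them as assumptions):

    uploaded = client.files.upload(
        file={
            "file_name": "training.jsonl",            # placeholder name
            "content": open("training.jsonl", "rb"),  # placeholder path
        },
    )
    if uploaded is not None:
        print("uploaded:", uploaded.id)

    listing = client.files.list()
    if listing is not None:
        for f in listing.data:
            print(f.id, f.filename)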
- :param file_id: + :param file_id: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -323,14 +360,14 @@ def retrieve( url_variables = None if timeout_ms is None: timeout_ms = self.sdk_configuration.timeout_ms - + if server_url is not None: base_url = server_url - + request = models.FilesAPIRoutesRetrieveFileRequest( file_id=file_id, ) - + req = self.build_request( method="GET", path="/v1/files/{file_id}", @@ -345,40 +382,46 @@ def retrieve( security=self.sdk_configuration.security, timeout_ms=timeout_ms, ) - + if retries == UNSET: if self.sdk_configuration.retry_config is not UNSET: retries = self.sdk_configuration.retry_config retry_config = None if isinstance(retries, utils.RetryConfig): - retry_config = (retries, [ - "429", - "500", - "502", - "503", - "504" - ]) - + retry_config = (retries, ["429", "500", "502", "503", "504"]) + http_res = self.do_request( - hook_ctx=HookContext(operation_id="files_api_routes_retrieve_file", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + hook_ctx=HookContext( + operation_id="files_api_routes_retrieve_file", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), request=req, - error_status_codes=["4XX","5XX"], - retry_config=retry_config + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, ) - + if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json(http_res.text, Optional[models.RetrieveFileOut]) - if utils.match_response(http_res, ["4XX","5XX"], "*"): - raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) - + if utils.match_response(http_res, ["4XX", "5XX"], "*"): + raise models.SDKError( + "API error occurred", http_res.status_code, http_res.text, http_res + ) + content_type = http_res.headers.get("Content-Type") - raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res.text, + http_res, + ) - - async def retrieve_async( - self, *, + self, + *, file_id: str, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -388,7 +431,7 @@ async def retrieve_async( Returns information about a specific file. 
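`retrieve` and `retrieve_async` differ only in the request builder they use; both unmarshal a `RetrieveFileOut`. Continuing the sketch above:

    info = client.files.retrieve(file_id=uploaded.id)
    if info is not None:
        print("retrieved:", info.id, info.filename)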
- :param file_id: + :param file_id: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -397,15 +440,15 @@ async def retrieve_async( url_variables = None if timeout_ms is None: timeout_ms = self.sdk_configuration.timeout_ms - + if server_url is not None: base_url = server_url - + request = models.FilesAPIRoutesRetrieveFileRequest( file_id=file_id, ) - - req = self.build_request( + + req = self.build_request_async( method="GET", path="/v1/files/{file_id}", base_url=base_url, @@ -419,40 +462,46 @@ async def retrieve_async( security=self.sdk_configuration.security, timeout_ms=timeout_ms, ) - + if retries == UNSET: if self.sdk_configuration.retry_config is not UNSET: retries = self.sdk_configuration.retry_config retry_config = None if isinstance(retries, utils.RetryConfig): - retry_config = (retries, [ - "429", - "500", - "502", - "503", - "504" - ]) - + retry_config = (retries, ["429", "500", "502", "503", "504"]) + http_res = await self.do_request_async( - hook_ctx=HookContext(operation_id="files_api_routes_retrieve_file", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + hook_ctx=HookContext( + operation_id="files_api_routes_retrieve_file", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), request=req, - error_status_codes=["4XX","5XX"], - retry_config=retry_config + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, ) - + if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json(http_res.text, Optional[models.RetrieveFileOut]) - if utils.match_response(http_res, ["4XX","5XX"], "*"): - raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) - + if utils.match_response(http_res, ["4XX", "5XX"], "*"): + raise models.SDKError( + "API error occurred", http_res.status_code, http_res.text, http_res + ) + content_type = http_res.headers.get("Content-Type") - raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res.text, + http_res, + ) - - def delete( - self, *, + self, + *, file_id: str, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -462,7 +511,7 @@ def delete( Delete a file. 
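`delete` follows the same pattern over the DELETE verb and unmarshals a `DeleteFileOut`:

    deleted = client.files.delete(file_id=uploaded.id)
    if deleted is not None:
        print("deleted:", deleted.id, deleted.deleted)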
- :param file_id: + :param file_id: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -471,14 +520,14 @@ def delete( url_variables = None if timeout_ms is None: timeout_ms = self.sdk_configuration.timeout_ms - + if server_url is not None: base_url = server_url - + request = models.FilesAPIRoutesDeleteFileRequest( file_id=file_id, ) - + req = self.build_request( method="DELETE", path="/v1/files/{file_id}", @@ -493,40 +542,46 @@ def delete( security=self.sdk_configuration.security, timeout_ms=timeout_ms, ) - + if retries == UNSET: if self.sdk_configuration.retry_config is not UNSET: retries = self.sdk_configuration.retry_config retry_config = None if isinstance(retries, utils.RetryConfig): - retry_config = (retries, [ - "429", - "500", - "502", - "503", - "504" - ]) - + retry_config = (retries, ["429", "500", "502", "503", "504"]) + http_res = self.do_request( - hook_ctx=HookContext(operation_id="files_api_routes_delete_file", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + hook_ctx=HookContext( + operation_id="files_api_routes_delete_file", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), request=req, - error_status_codes=["4XX","5XX"], - retry_config=retry_config + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, ) - + if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json(http_res.text, Optional[models.DeleteFileOut]) - if utils.match_response(http_res, ["4XX","5XX"], "*"): - raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) - + if utils.match_response(http_res, ["4XX", "5XX"], "*"): + raise models.SDKError( + "API error occurred", http_res.status_code, http_res.text, http_res + ) + content_type = http_res.headers.get("Content-Type") - raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res.text, + http_res, + ) - - async def delete_async( - self, *, + self, + *, file_id: str, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -536,7 +591,7 @@ async def delete_async( Delete a file. 
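delete follows the same request/response shape with a DELETE verb and a DeleteFileOut payload; a sketch, assuming the same client construction as above:

    import os
    from mistralai import Mistral

    s = Mistral(api_key=os.getenv("MISTRAL_API_KEY", ""))

    # Remove a previously uploaded file; the ID is a placeholder.
    res = s.files.delete(file_id="<file id>")
    if res is not None:
        print(res)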
- :param file_id: + :param file_id: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -545,15 +600,15 @@ async def delete_async( url_variables = None if timeout_ms is None: timeout_ms = self.sdk_configuration.timeout_ms - + if server_url is not None: base_url = server_url - + request = models.FilesAPIRoutesDeleteFileRequest( file_id=file_id, ) - - req = self.build_request( + + req = self.build_request_async( method="DELETE", path="/v1/files/{file_id}", base_url=base_url, @@ -567,34 +622,39 @@ async def delete_async( security=self.sdk_configuration.security, timeout_ms=timeout_ms, ) - + if retries == UNSET: if self.sdk_configuration.retry_config is not UNSET: retries = self.sdk_configuration.retry_config retry_config = None if isinstance(retries, utils.RetryConfig): - retry_config = (retries, [ - "429", - "500", - "502", - "503", - "504" - ]) - + retry_config = (retries, ["429", "500", "502", "503", "504"]) + http_res = await self.do_request_async( - hook_ctx=HookContext(operation_id="files_api_routes_delete_file", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + hook_ctx=HookContext( + operation_id="files_api_routes_delete_file", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), request=req, - error_status_codes=["4XX","5XX"], - retry_config=retry_config + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, ) - + if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json(http_res.text, Optional[models.DeleteFileOut]) - if utils.match_response(http_res, ["4XX","5XX"], "*"): - raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) - - content_type = http_res.headers.get("Content-Type") - raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + if utils.match_response(http_res, ["4XX", "5XX"], "*"): + raise models.SDKError( + "API error occurred", http_res.status_code, http_res.text, http_res + ) - + content_type = http_res.headers.get("Content-Type") + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res.text, + http_res, + ) diff --git a/src/mistralai/fim.py b/src/mistralai/fim.py index 19090d9..5239e90 100644 --- a/src/mistralai/fim.py +++ b/src/mistralai/fim.py @@ -7,12 +7,13 @@ from mistralai.utils import eventstreaming, get_security_from_env from typing import Any, AsyncGenerator, Generator, Optional, Union + class Fim(BaseSDK): r"""Fill-in-the-middle API.""" - - + def complete( - self, *, + self, + *, model: Nullable[str], prompt: str, temperature: Optional[float] = 0.7, @@ -20,7 +21,12 @@ def complete( max_tokens: OptionalNullable[int] = UNSET, min_tokens: OptionalNullable[int] = UNSET, stream: Optional[bool] = False, - stop: Optional[Union[models.FIMCompletionRequestStop, models.FIMCompletionRequestStopTypedDict]] = None, + stop: Optional[ + Union[ + models.FIMCompletionRequestStop, + models.FIMCompletionRequestStopTypedDict, + ] + ] = None, random_seed: OptionalNullable[int] = UNSET, suffix: OptionalNullable[str] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, @@ -49,10 +55,10 @@ 
def complete( url_variables = None if timeout_ms is None: timeout_ms = self.sdk_configuration.timeout_ms - + if server_url is not None: base_url = server_url - + request = models.FIMCompletionRequest( model=model, temperature=temperature, @@ -65,7 +71,7 @@ def complete( prompt=prompt, suffix=suffix, ) - + req = self.build_request( method="POST", path="/v1/fim/completions", @@ -78,47 +84,57 @@ def complete( user_agent_header="user-agent", accept_header_value="application/json", security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body(request, False, False, "json", models.FIMCompletionRequest), + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.FIMCompletionRequest + ), timeout_ms=timeout_ms, ) - + if retries == UNSET: if self.sdk_configuration.retry_config is not UNSET: retries = self.sdk_configuration.retry_config retry_config = None if isinstance(retries, utils.RetryConfig): - retry_config = (retries, [ - "429", - "500", - "502", - "503", - "504" - ]) - + retry_config = (retries, ["429", "500", "502", "503", "504"]) + http_res = self.do_request( - hook_ctx=HookContext(operation_id="fim_completion_v1_fim_completions_post", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + hook_ctx=HookContext( + operation_id="fim_completion_v1_fim_completions_post", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), request=req, - error_status_codes=["422","4XX","5XX"], - retry_config=retry_config + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, ) - + data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, Optional[models.FIMCompletionResponse]) + return utils.unmarshal_json( + http_res.text, Optional[models.FIMCompletionResponse] + ) if utils.match_response(http_res, "422", "application/json"): data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) raise models.HTTPValidationError(data=data) - if utils.match_response(http_res, ["4XX","5XX"], "*"): - raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) - + if utils.match_response(http_res, ["4XX", "5XX"], "*"): + raise models.SDKError( + "API error occurred", http_res.status_code, http_res.text, http_res + ) + content_type = http_res.headers.get("Content-Type") - raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res.text, + http_res, + ) - - async def complete_async( - self, *, + self, + *, model: Nullable[str], prompt: str, temperature: Optional[float] = 0.7, @@ -126,7 +142,12 @@ async def complete_async( max_tokens: OptionalNullable[int] = UNSET, min_tokens: OptionalNullable[int] = UNSET, stream: Optional[bool] = False, - stop: Optional[Union[models.FIMCompletionRequestStop, models.FIMCompletionRequestStopTypedDict]] = None, + stop: Optional[ + Union[ + models.FIMCompletionRequestStop, + models.FIMCompletionRequestStopTypedDict, + ] + ] = None, random_seed: OptionalNullable[int] = UNSET, suffix: OptionalNullable[str] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, @@ -155,10 +176,10 @@ async def complete_async( url_variables = 
None if timeout_ms is None: timeout_ms = self.sdk_configuration.timeout_ms - + if server_url is not None: base_url = server_url - + request = models.FIMCompletionRequest( model=model, temperature=temperature, @@ -171,8 +192,8 @@ async def complete_async( prompt=prompt, suffix=suffix, ) - - req = self.build_request( + + req = self.build_request_async( method="POST", path="/v1/fim/completions", base_url=base_url, @@ -184,47 +205,57 @@ async def complete_async( user_agent_header="user-agent", accept_header_value="application/json", security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body(request, False, False, "json", models.FIMCompletionRequest), + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.FIMCompletionRequest + ), timeout_ms=timeout_ms, ) - + if retries == UNSET: if self.sdk_configuration.retry_config is not UNSET: retries = self.sdk_configuration.retry_config retry_config = None if isinstance(retries, utils.RetryConfig): - retry_config = (retries, [ - "429", - "500", - "502", - "503", - "504" - ]) - + retry_config = (retries, ["429", "500", "502", "503", "504"]) + http_res = await self.do_request_async( - hook_ctx=HookContext(operation_id="fim_completion_v1_fim_completions_post", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + hook_ctx=HookContext( + operation_id="fim_completion_v1_fim_completions_post", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), request=req, - error_status_codes=["422","4XX","5XX"], - retry_config=retry_config + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, ) - + data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, Optional[models.FIMCompletionResponse]) + return utils.unmarshal_json( + http_res.text, Optional[models.FIMCompletionResponse] + ) if utils.match_response(http_res, "422", "application/json"): data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) raise models.HTTPValidationError(data=data) - if utils.match_response(http_res, ["4XX","5XX"], "*"): - raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) - + if utils.match_response(http_res, ["4XX", "5XX"], "*"): + raise models.SDKError( + "API error occurred", http_res.status_code, http_res.text, http_res + ) + content_type = http_res.headers.get("Content-Type") - raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res.text, + http_res, + ) - - def stream( - self, *, + self, + *, model: Nullable[str], prompt: str, temperature: Optional[float] = 0.7, @@ -232,7 +263,12 @@ def stream( max_tokens: OptionalNullable[int] = UNSET, min_tokens: OptionalNullable[int] = UNSET, stream: Optional[bool] = True, - stop: Optional[Union[models.FIMCompletionStreamRequestStop, models.FIMCompletionStreamRequestStopTypedDict]] = None, + stop: Optional[ + Union[ + models.FIMCompletionStreamRequestStop, + models.FIMCompletionStreamRequestStopTypedDict, + ] + ] = None, random_seed: OptionalNullable[int] = UNSET, suffix: OptionalNullable[str] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, @@ 
-249,7 +285,7 @@ def stream( :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. :param min_tokens: The minimum number of tokens to generate in the completion. - :param stream: + :param stream: :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. :param suffix: Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. @@ -261,10 +297,10 @@ def stream( url_variables = None if timeout_ms is None: timeout_ms = self.sdk_configuration.timeout_ms - + if server_url is not None: base_url = server_url - + request = models.FIMCompletionStreamRequest( model=model, temperature=temperature, @@ -277,7 +313,7 @@ def stream( prompt=prompt, suffix=suffix, ) - + req = self.build_request( method="POST", path="/v1/fim/completions#stream", @@ -290,48 +326,60 @@ def stream( user_agent_header="user-agent", accept_header_value="text/event-stream", security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body(request, False, False, "json", models.FIMCompletionStreamRequest), + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.FIMCompletionStreamRequest + ), timeout_ms=timeout_ms, ) - + if retries == UNSET: if self.sdk_configuration.retry_config is not UNSET: retries = self.sdk_configuration.retry_config retry_config = None if isinstance(retries, utils.RetryConfig): - retry_config = (retries, [ - "429", - "500", - "502", - "503", - "504" - ]) - + retry_config = (retries, ["429", "500", "502", "503", "504"]) + http_res = self.do_request( - hook_ctx=HookContext(operation_id="stream_fim", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + hook_ctx=HookContext( + operation_id="stream_fim", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), request=req, - error_status_codes=["422","4XX","5XX"], + error_status_codes=["422", "4XX", "5XX"], stream=True, - retry_config=retry_config + retry_config=retry_config, ) - + data: Any = None if utils.match_response(http_res, "200", "text/event-stream"): - return eventstreaming.stream_events(http_res, lambda raw: utils.unmarshal_json(raw, models.CompletionEvent), sentinel="[DONE]") + return eventstreaming.stream_events( + http_res, + lambda raw: utils.unmarshal_json(raw, models.CompletionEvent), + sentinel="[DONE]", + ) if utils.match_response(http_res, "422", "application/json"): data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) raise models.HTTPValidationError(data=data) - if utils.match_response(http_res, ["4XX","5XX"], "*"): - raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) - + if utils.match_response(http_res, ["4XX", "5XX"], "*"): + raise models.SDKError( + "API error 
occurred", http_res.status_code, http_res.text, http_res + ) + content_type = http_res.headers.get("Content-Type") - raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res.text, + http_res, + ) - - async def stream_async( - self, *, + self, + *, model: Nullable[str], prompt: str, temperature: Optional[float] = 0.7, @@ -339,7 +387,12 @@ async def stream_async( max_tokens: OptionalNullable[int] = UNSET, min_tokens: OptionalNullable[int] = UNSET, stream: Optional[bool] = True, - stop: Optional[Union[models.FIMCompletionStreamRequestStop, models.FIMCompletionStreamRequestStopTypedDict]] = None, + stop: Optional[ + Union[ + models.FIMCompletionStreamRequestStop, + models.FIMCompletionStreamRequestStopTypedDict, + ] + ] = None, random_seed: OptionalNullable[int] = UNSET, suffix: OptionalNullable[str] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, @@ -356,7 +409,7 @@ async def stream_async( :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. :param min_tokens: The minimum number of tokens to generate in the completion. - :param stream: + :param stream: :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. :param suffix: Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. 
@@ -368,10 +421,10 @@ async def stream_async( url_variables = None if timeout_ms is None: timeout_ms = self.sdk_configuration.timeout_ms - + if server_url is not None: base_url = server_url - + request = models.FIMCompletionStreamRequest( model=model, temperature=temperature, @@ -384,8 +437,8 @@ async def stream_async( prompt=prompt, suffix=suffix, ) - - req = self.build_request( + + req = self.build_request_async( method="POST", path="/v1/fim/completions#stream", base_url=base_url, @@ -397,42 +450,53 @@ async def stream_async( user_agent_header="user-agent", accept_header_value="text/event-stream", security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body(request, False, False, "json", models.FIMCompletionStreamRequest), + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.FIMCompletionStreamRequest + ), timeout_ms=timeout_ms, ) - + if retries == UNSET: if self.sdk_configuration.retry_config is not UNSET: retries = self.sdk_configuration.retry_config retry_config = None if isinstance(retries, utils.RetryConfig): - retry_config = (retries, [ - "429", - "500", - "502", - "503", - "504" - ]) - + retry_config = (retries, ["429", "500", "502", "503", "504"]) + http_res = await self.do_request_async( - hook_ctx=HookContext(operation_id="stream_fim", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + hook_ctx=HookContext( + operation_id="stream_fim", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), request=req, - error_status_codes=["422","4XX","5XX"], + error_status_codes=["422", "4XX", "5XX"], stream=True, - retry_config=retry_config + retry_config=retry_config, ) - + data: Any = None if utils.match_response(http_res, "200", "text/event-stream"): - return eventstreaming.stream_events_async(http_res, lambda raw: utils.unmarshal_json(raw, models.CompletionEvent), sentinel="[DONE]") + return eventstreaming.stream_events_async( + http_res, + lambda raw: utils.unmarshal_json(raw, models.CompletionEvent), + sentinel="[DONE]", + ) if utils.match_response(http_res, "422", "application/json"): data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) raise models.HTTPValidationError(data=data) - if utils.match_response(http_res, ["4XX","5XX"], "*"): - raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) - - content_type = http_res.headers.get("Content-Type") - raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + if utils.match_response(http_res, ["4XX", "5XX"], "*"): + raise models.SDKError( + "API error occurred", http_res.status_code, http_res.text, http_res + ) - + content_type = http_res.headers.get("Content-Type") + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res.text, + http_res, + ) diff --git a/src/mistralai/fine_tuning.py b/src/mistralai/fine_tuning.py index 998100a..ce3d138 100644 --- a/src/mistralai/fine_tuning.py +++ b/src/mistralai/fine_tuning.py @@ -4,13 +4,14 @@ from .sdkconfiguration import SDKConfiguration from mistralai.jobs import Jobs + class FineTuning(BaseSDK): jobs: Jobs + def __init__(self, sdk_config: SDKConfiguration) -> None: BaseSDK.__init__(self, sdk_config) self.sdk_configuration = sdk_config 
self._init_sdks() - + def _init_sdks(self): self.jobs = Jobs(self.sdk_configuration) - diff --git a/src/mistralai/jobs.py b/src/mistralai/jobs.py index 2ea3e4a..b6c1b24 100644 --- a/src/mistralai/jobs.py +++ b/src/mistralai/jobs.py @@ -8,11 +8,11 @@ from mistralai.utils import get_security_from_env from typing import List, Optional, Union + class Jobs(BaseSDK): - - def list( - self, *, + self, + *, page: Optional[int] = 0, page_size: Optional[int] = 100, model: OptionalNullable[str] = UNSET, @@ -47,10 +47,10 @@ def list( url_variables = None if timeout_ms is None: timeout_ms = self.sdk_configuration.timeout_ms - + if server_url is not None: base_url = server_url - + request = models.JobsAPIRoutesFineTuningGetFineTuningJobsRequest( page=page, page_size=page_size, @@ -62,7 +62,7 @@ def list( wandb_name=wandb_name, suffix=suffix, ) - + req = self.build_request( method="GET", path="/v1/fine_tuning/jobs", @@ -77,40 +77,46 @@ def list( security=self.sdk_configuration.security, timeout_ms=timeout_ms, ) - + if retries == UNSET: if self.sdk_configuration.retry_config is not UNSET: retries = self.sdk_configuration.retry_config retry_config = None if isinstance(retries, utils.RetryConfig): - retry_config = (retries, [ - "429", - "500", - "502", - "503", - "504" - ]) - + retry_config = (retries, ["429", "500", "502", "503", "504"]) + http_res = self.do_request( - hook_ctx=HookContext(operation_id="jobs_api_routes_fine_tuning_get_fine_tuning_jobs", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + hook_ctx=HookContext( + operation_id="jobs_api_routes_fine_tuning_get_fine_tuning_jobs", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), request=req, - error_status_codes=["4XX","5XX"], - retry_config=retry_config + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, ) - + if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json(http_res.text, Optional[models.JobsOut]) - if utils.match_response(http_res, ["4XX","5XX"], "*"): - raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) - + if utils.match_response(http_res, ["4XX", "5XX"], "*"): + raise models.SDKError( + "API error occurred", http_res.status_code, http_res.text, http_res + ) + content_type = http_res.headers.get("Content-Type") - raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res.text, + http_res, + ) - - async def list_async( - self, *, + self, + *, page: Optional[int] = 0, page_size: Optional[int] = 100, model: OptionalNullable[str] = UNSET, @@ -145,10 +151,10 @@ async def list_async( url_variables = None if timeout_ms is None: timeout_ms = self.sdk_configuration.timeout_ms - + if server_url is not None: base_url = server_url - + request = models.JobsAPIRoutesFineTuningGetFineTuningJobsRequest( page=page, page_size=page_size, @@ -160,8 +166,8 @@ async def list_async( wandb_name=wandb_name, suffix=suffix, ) - - req = self.build_request( + + req = self.build_request_async( method="GET", path="/v1/fine_tuning/jobs", base_url=base_url, @@ -175,47 +181,65 @@ async def list_async( security=self.sdk_configuration.security, timeout_ms=timeout_ms, ) - + if retries == UNSET: if 
self.sdk_configuration.retry_config is not UNSET: retries = self.sdk_configuration.retry_config retry_config = None if isinstance(retries, utils.RetryConfig): - retry_config = (retries, [ - "429", - "500", - "502", - "503", - "504" - ]) - + retry_config = (retries, ["429", "500", "502", "503", "504"]) + http_res = await self.do_request_async( - hook_ctx=HookContext(operation_id="jobs_api_routes_fine_tuning_get_fine_tuning_jobs", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + hook_ctx=HookContext( + operation_id="jobs_api_routes_fine_tuning_get_fine_tuning_jobs", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), request=req, - error_status_codes=["4XX","5XX"], - retry_config=retry_config + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, ) - + if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json(http_res.text, Optional[models.JobsOut]) - if utils.match_response(http_res, ["4XX","5XX"], "*"): - raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) - + if utils.match_response(http_res, ["4XX", "5XX"], "*"): + raise models.SDKError( + "API error occurred", http_res.status_code, http_res.text, http_res + ) + content_type = http_res.headers.get("Content-Type") - raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res.text, + http_res, + ) - - def create( - self, *, + self, + *, model: models.FineTuneableModel, - hyperparameters: Union[models.TrainingParametersIn, models.TrainingParametersInTypedDict], - training_files: Optional[Union[List[models.TrainingFile], List[models.TrainingFileTypedDict]]] = None, + hyperparameters: Union[ + models.TrainingParametersIn, models.TrainingParametersInTypedDict + ], + training_files: Optional[ + Union[List[models.TrainingFile], List[models.TrainingFileTypedDict]] + ] = None, validation_files: OptionalNullable[List[str]] = UNSET, suffix: OptionalNullable[str] = UNSET, - integrations: OptionalNullable[Union[List[models.JobInIntegrations], List[models.JobInIntegrationsTypedDict]]] = UNSET, - repositories: Optional[Union[List[models.JobInRepositories], List[models.JobInRepositoriesTypedDict]]] = None, + integrations: OptionalNullable[ + Union[ + List[models.JobInIntegrations], List[models.JobInIntegrationsTypedDict] + ] + ] = UNSET, + repositories: Optional[ + Union[ + List[models.JobInRepositories], List[models.JobInRepositoriesTypedDict] + ] + ] = None, auto_start: Optional[bool] = None, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -227,11 +251,11 @@ def create( :param model: The name of the model to fine-tune. :param hyperparameters: The fine-tuning hyperparameter settings used in a fine-tune job. - :param training_files: + :param training_files: :param validation_files: A list containing the IDs of uploaded files that contain validation data. If you provide these files, the data is used to generate validation metrics periodically during fine-tuning. These metrics can be viewed in `checkpoints` when getting the status of a running fine-tuning job. The same data should not be present in both train and validation files. 
:param suffix: A string that will be added to your fine-tuning model name. For example, a suffix of \"my-great-model\" would produce a model name like `ft:open-mistral-7b:my-great-model:xxx...` :param integrations: A list of integrations to enable for your fine-tuning job. - :param repositories: + :param repositories: :param auto_start: This field will be required in a future release. :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method @@ -241,21 +265,29 @@ def create( url_variables = None if timeout_ms is None: timeout_ms = self.sdk_configuration.timeout_ms - + if server_url is not None: base_url = server_url - + request = models.JobIn( model=model, - training_files=utils.get_pydantic_model(training_files, Optional[List[models.TrainingFile]]), + training_files=utils.get_pydantic_model( + training_files, Optional[List[models.TrainingFile]] + ), validation_files=validation_files, - hyperparameters=utils.get_pydantic_model(hyperparameters, models.TrainingParametersIn), + hyperparameters=utils.get_pydantic_model( + hyperparameters, models.TrainingParametersIn + ), suffix=suffix, - integrations=utils.get_pydantic_model(integrations, OptionalNullable[List[models.JobInIntegrations]]), - repositories=utils.get_pydantic_model(repositories, Optional[List[models.JobInRepositories]]), + integrations=utils.get_pydantic_model( + integrations, OptionalNullable[List[models.JobInIntegrations]] + ), + repositories=utils.get_pydantic_model( + repositories, Optional[List[models.JobInRepositories]] + ), auto_start=auto_start, ) - + req = self.build_request( method="POST", path="/v1/fine_tuning/jobs", @@ -268,50 +300,73 @@ def create( user_agent_header="user-agent", accept_header_value="application/json", security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body(request, False, False, "json", models.JobIn), + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.JobIn + ), timeout_ms=timeout_ms, ) - + if retries == UNSET: if self.sdk_configuration.retry_config is not UNSET: retries = self.sdk_configuration.retry_config retry_config = None if isinstance(retries, utils.RetryConfig): - retry_config = (retries, [ - "429", - "500", - "502", - "503", - "504" - ]) - + retry_config = (retries, ["429", "500", "502", "503", "504"]) + http_res = self.do_request( - hook_ctx=HookContext(operation_id="jobs_api_routes_fine_tuning_create_fine_tuning_job", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + hook_ctx=HookContext( + operation_id="jobs_api_routes_fine_tuning_create_fine_tuning_job", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), request=req, - error_status_codes=["4XX","5XX"], - retry_config=retry_config + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, ) - + if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, Optional[models.JobsAPIRoutesFineTuningCreateFineTuningJobResponse]) - if utils.match_response(http_res, ["4XX","5XX"], "*"): - raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) - + return utils.unmarshal_json( + http_res.text, + Optional[models.JobsAPIRoutesFineTuningCreateFineTuningJobResponse], + ) + if utils.match_response(http_res, ["4XX", "5XX"], "*"): + raise models.SDKError( + "API error 
occurred", http_res.status_code, http_res.text, http_res + ) + content_type = http_res.headers.get("Content-Type") - raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res.text, + http_res, + ) - - async def create_async( - self, *, + self, + *, model: models.FineTuneableModel, - hyperparameters: Union[models.TrainingParametersIn, models.TrainingParametersInTypedDict], - training_files: Optional[Union[List[models.TrainingFile], List[models.TrainingFileTypedDict]]] = None, + hyperparameters: Union[ + models.TrainingParametersIn, models.TrainingParametersInTypedDict + ], + training_files: Optional[ + Union[List[models.TrainingFile], List[models.TrainingFileTypedDict]] + ] = None, validation_files: OptionalNullable[List[str]] = UNSET, suffix: OptionalNullable[str] = UNSET, - integrations: OptionalNullable[Union[List[models.JobInIntegrations], List[models.JobInIntegrationsTypedDict]]] = UNSET, - repositories: Optional[Union[List[models.JobInRepositories], List[models.JobInRepositoriesTypedDict]]] = None, + integrations: OptionalNullable[ + Union[ + List[models.JobInIntegrations], List[models.JobInIntegrationsTypedDict] + ] + ] = UNSET, + repositories: Optional[ + Union[ + List[models.JobInRepositories], List[models.JobInRepositoriesTypedDict] + ] + ] = None, auto_start: Optional[bool] = None, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -323,11 +378,11 @@ async def create_async( :param model: The name of the model to fine-tune. :param hyperparameters: The fine-tuning hyperparameter settings used in a fine-tune job. - :param training_files: + :param training_files: :param validation_files: A list containing the IDs of uploaded files that contain validation data. If you provide these files, the data is used to generate validation metrics periodically during fine-tuning. These metrics can be viewed in `checkpoints` when getting the status of a running fine-tuning job. The same data should not be present in both train and validation files. :param suffix: A string that will be added to your fine-tuning model name. For example, a suffix of \"my-great-model\" would produce a model name like `ft:open-mistral-7b:my-great-model:xxx...` :param integrations: A list of integrations to enable for your fine-tuning job. - :param repositories: + :param repositories: :param auto_start: This field will be required in a future release. 
:param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method @@ -337,22 +392,30 @@ async def create_async( url_variables = None if timeout_ms is None: timeout_ms = self.sdk_configuration.timeout_ms - + if server_url is not None: base_url = server_url - + request = models.JobIn( model=model, - training_files=utils.get_pydantic_model(training_files, Optional[List[models.TrainingFile]]), + training_files=utils.get_pydantic_model( + training_files, Optional[List[models.TrainingFile]] + ), validation_files=validation_files, - hyperparameters=utils.get_pydantic_model(hyperparameters, models.TrainingParametersIn), + hyperparameters=utils.get_pydantic_model( + hyperparameters, models.TrainingParametersIn + ), suffix=suffix, - integrations=utils.get_pydantic_model(integrations, OptionalNullable[List[models.JobInIntegrations]]), - repositories=utils.get_pydantic_model(repositories, Optional[List[models.JobInRepositories]]), + integrations=utils.get_pydantic_model( + integrations, OptionalNullable[List[models.JobInIntegrations]] + ), + repositories=utils.get_pydantic_model( + repositories, Optional[List[models.JobInRepositories]] + ), auto_start=auto_start, ) - - req = self.build_request( + + req = self.build_request_async( method="POST", path="/v1/fine_tuning/jobs", base_url=base_url, @@ -364,43 +427,54 @@ async def create_async( user_agent_header="user-agent", accept_header_value="application/json", security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body(request, False, False, "json", models.JobIn), + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.JobIn + ), timeout_ms=timeout_ms, ) - + if retries == UNSET: if self.sdk_configuration.retry_config is not UNSET: retries = self.sdk_configuration.retry_config retry_config = None if isinstance(retries, utils.RetryConfig): - retry_config = (retries, [ - "429", - "500", - "502", - "503", - "504" - ]) - + retry_config = (retries, ["429", "500", "502", "503", "504"]) + http_res = await self.do_request_async( - hook_ctx=HookContext(operation_id="jobs_api_routes_fine_tuning_create_fine_tuning_job", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + hook_ctx=HookContext( + operation_id="jobs_api_routes_fine_tuning_create_fine_tuning_job", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), request=req, - error_status_codes=["4XX","5XX"], - retry_config=retry_config + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, ) - + if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, Optional[models.JobsAPIRoutesFineTuningCreateFineTuningJobResponse]) - if utils.match_response(http_res, ["4XX","5XX"], "*"): - raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) - + return utils.unmarshal_json( + http_res.text, + Optional[models.JobsAPIRoutesFineTuningCreateFineTuningJobResponse], + ) + if utils.match_response(http_res, ["4XX", "5XX"], "*"): + raise models.SDKError( + "API error occurred", http_res.status_code, http_res.text, http_res + ) + content_type = http_res.headers.get("Content-Type") - raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + raise models.SDKError( 
+ f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res.text, + http_res, + ) - - def get( - self, *, + self, + *, job_id: str, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -419,14 +493,14 @@ def get( url_variables = None if timeout_ms is None: timeout_ms = self.sdk_configuration.timeout_ms - + if server_url is not None: base_url = server_url - + request = models.JobsAPIRoutesFineTuningGetFineTuningJobRequest( job_id=job_id, ) - + req = self.build_request( method="GET", path="/v1/fine_tuning/jobs/{job_id}", @@ -441,40 +515,46 @@ def get( security=self.sdk_configuration.security, timeout_ms=timeout_ms, ) - + if retries == UNSET: if self.sdk_configuration.retry_config is not UNSET: retries = self.sdk_configuration.retry_config retry_config = None if isinstance(retries, utils.RetryConfig): - retry_config = (retries, [ - "429", - "500", - "502", - "503", - "504" - ]) - + retry_config = (retries, ["429", "500", "502", "503", "504"]) + http_res = self.do_request( - hook_ctx=HookContext(operation_id="jobs_api_routes_fine_tuning_get_fine_tuning_job", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + hook_ctx=HookContext( + operation_id="jobs_api_routes_fine_tuning_get_fine_tuning_job", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), request=req, - error_status_codes=["4XX","5XX"], - retry_config=retry_config + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, ) - + if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json(http_res.text, Optional[models.DetailedJobOut]) - if utils.match_response(http_res, ["4XX","5XX"], "*"): - raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) - + if utils.match_response(http_res, ["4XX", "5XX"], "*"): + raise models.SDKError( + "API error occurred", http_res.status_code, http_res.text, http_res + ) + content_type = http_res.headers.get("Content-Type") - raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res.text, + http_res, + ) - - async def get_async( - self, *, + self, + *, job_id: str, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -493,15 +573,15 @@ async def get_async( url_variables = None if timeout_ms is None: timeout_ms = self.sdk_configuration.timeout_ms - + if server_url is not None: base_url = server_url - + request = models.JobsAPIRoutesFineTuningGetFineTuningJobRequest( job_id=job_id, ) - - req = self.build_request( + + req = self.build_request_async( method="GET", path="/v1/fine_tuning/jobs/{job_id}", base_url=base_url, @@ -515,40 +595,46 @@ async def get_async( security=self.sdk_configuration.security, timeout_ms=timeout_ms, ) - + if retries == UNSET: if self.sdk_configuration.retry_config is not UNSET: retries = self.sdk_configuration.retry_config retry_config = None if isinstance(retries, utils.RetryConfig): - retry_config = (retries, [ - "429", - "500", - "502", - "503", - "504" - ]) - + retry_config = (retries, ["429", "500", "502", "503", "504"]) + http_res = await self.do_request_async( - 
hook_ctx=HookContext(operation_id="jobs_api_routes_fine_tuning_get_fine_tuning_job", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + hook_ctx=HookContext( + operation_id="jobs_api_routes_fine_tuning_get_fine_tuning_job", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), request=req, - error_status_codes=["4XX","5XX"], - retry_config=retry_config + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, ) - + if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json(http_res.text, Optional[models.DetailedJobOut]) - if utils.match_response(http_res, ["4XX","5XX"], "*"): - raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) - + if utils.match_response(http_res, ["4XX", "5XX"], "*"): + raise models.SDKError( + "API error occurred", http_res.status_code, http_res.text, http_res + ) + content_type = http_res.headers.get("Content-Type") - raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res.text, + http_res, + ) - - def cancel( - self, *, + self, + *, job_id: str, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -567,14 +653,14 @@ def cancel( url_variables = None if timeout_ms is None: timeout_ms = self.sdk_configuration.timeout_ms - + if server_url is not None: base_url = server_url - + request = models.JobsAPIRoutesFineTuningCancelFineTuningJobRequest( job_id=job_id, ) - + req = self.build_request( method="POST", path="/v1/fine_tuning/jobs/{job_id}/cancel", @@ -589,40 +675,46 @@ def cancel( security=self.sdk_configuration.security, timeout_ms=timeout_ms, ) - + if retries == UNSET: if self.sdk_configuration.retry_config is not UNSET: retries = self.sdk_configuration.retry_config retry_config = None if isinstance(retries, utils.RetryConfig): - retry_config = (retries, [ - "429", - "500", - "502", - "503", - "504" - ]) - + retry_config = (retries, ["429", "500", "502", "503", "504"]) + http_res = self.do_request( - hook_ctx=HookContext(operation_id="jobs_api_routes_fine_tuning_cancel_fine_tuning_job", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + hook_ctx=HookContext( + operation_id="jobs_api_routes_fine_tuning_cancel_fine_tuning_job", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), request=req, - error_status_codes=["4XX","5XX"], - retry_config=retry_config + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, ) - + if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json(http_res.text, Optional[models.DetailedJobOut]) - if utils.match_response(http_res, ["4XX","5XX"], "*"): - raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) - + if utils.match_response(http_res, ["4XX", "5XX"], "*"): + raise models.SDKError( + "API error occurred", http_res.status_code, http_res.text, http_res + ) + content_type = http_res.headers.get("Content-Type") - raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, 
http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res.text, + http_res, + ) - - async def cancel_async( - self, *, + self, + *, job_id: str, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -641,15 +733,15 @@ async def cancel_async( url_variables = None if timeout_ms is None: timeout_ms = self.sdk_configuration.timeout_ms - + if server_url is not None: base_url = server_url - + request = models.JobsAPIRoutesFineTuningCancelFineTuningJobRequest( job_id=job_id, ) - - req = self.build_request( + + req = self.build_request_async( method="POST", path="/v1/fine_tuning/jobs/{job_id}/cancel", base_url=base_url, @@ -663,40 +755,46 @@ async def cancel_async( security=self.sdk_configuration.security, timeout_ms=timeout_ms, ) - + if retries == UNSET: if self.sdk_configuration.retry_config is not UNSET: retries = self.sdk_configuration.retry_config retry_config = None if isinstance(retries, utils.RetryConfig): - retry_config = (retries, [ - "429", - "500", - "502", - "503", - "504" - ]) - + retry_config = (retries, ["429", "500", "502", "503", "504"]) + http_res = await self.do_request_async( - hook_ctx=HookContext(operation_id="jobs_api_routes_fine_tuning_cancel_fine_tuning_job", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + hook_ctx=HookContext( + operation_id="jobs_api_routes_fine_tuning_cancel_fine_tuning_job", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), request=req, - error_status_codes=["4XX","5XX"], - retry_config=retry_config + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, ) - + if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json(http_res.text, Optional[models.DetailedJobOut]) - if utils.match_response(http_res, ["4XX","5XX"], "*"): - raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) - + if utils.match_response(http_res, ["4XX", "5XX"], "*"): + raise models.SDKError( + "API error occurred", http_res.status_code, http_res.text, http_res + ) + content_type = http_res.headers.get("Content-Type") - raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res.text, + http_res, + ) - - def start( - self, *, + self, + *, job_id: str, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -706,7 +804,7 @@ def start( Request the start of a validated fine tuning job. 
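With list, create, get, and cancel all regenerated in the same style, the caller-side flow for inspecting and cancelling jobs looks like this (a sketch; the job ID is a placeholder, and jobs is reached through the FineTuning wrapper shown in fine_tuning.py above):

    import os
    from mistralai import Mistral

    s = Mistral(api_key=os.getenv("MISTRAL_API_KEY", ""))

    # Page through fine-tuning jobs; page=0 and page_size=100 are the
    # defaults in the signature above.
    jobs = s.fine_tuning.jobs.list(page=0, page_size=20)
    if jobs is not None:
        print(jobs)

    # Fetch a single job, then cancel it.
    job = s.fine_tuning.jobs.get(job_id="<job id>")
    cancelled = s.fine_tuning.jobs.cancel(job_id="<job id>")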
- :param job_id: + :param job_id: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -715,14 +813,14 @@ def start( url_variables = None if timeout_ms is None: timeout_ms = self.sdk_configuration.timeout_ms - + if server_url is not None: base_url = server_url - + request = models.JobsAPIRoutesFineTuningStartFineTuningJobRequest( job_id=job_id, ) - + req = self.build_request( method="POST", path="/v1/fine_tuning/jobs/{job_id}/start", @@ -737,40 +835,46 @@ def start( security=self.sdk_configuration.security, timeout_ms=timeout_ms, ) - + if retries == UNSET: if self.sdk_configuration.retry_config is not UNSET: retries = self.sdk_configuration.retry_config retry_config = None if isinstance(retries, utils.RetryConfig): - retry_config = (retries, [ - "429", - "500", - "502", - "503", - "504" - ]) - + retry_config = (retries, ["429", "500", "502", "503", "504"]) + http_res = self.do_request( - hook_ctx=HookContext(operation_id="jobs_api_routes_fine_tuning_start_fine_tuning_job", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + hook_ctx=HookContext( + operation_id="jobs_api_routes_fine_tuning_start_fine_tuning_job", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), request=req, - error_status_codes=["4XX","5XX"], - retry_config=retry_config + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, ) - + if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json(http_res.text, Optional[models.DetailedJobOut]) - if utils.match_response(http_res, ["4XX","5XX"], "*"): - raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) - + if utils.match_response(http_res, ["4XX", "5XX"], "*"): + raise models.SDKError( + "API error occurred", http_res.status_code, http_res.text, http_res + ) + content_type = http_res.headers.get("Content-Type") - raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res.text, + http_res, + ) - - async def start_async( - self, *, + self, + *, job_id: str, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -780,7 +884,7 @@ async def start_async( Request the start of a validated fine tuning job. 
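start pairs with create's auto_start flag: a job created with auto_start=False waits in validation until a start is requested. A sketch, assuming the dict forms of TrainingParametersIn and TrainingFile accept the keys used here (both are assumptions about the TypedDicts, which are not shown in this hunk):

    import os
    from mistralai import Mistral

    s = Mistral(api_key=os.getenv("MISTRAL_API_KEY", ""))

    created = s.fine_tuning.jobs.create(
        model="open-mistral-7b",
        hyperparameters={"learning_rate": 0.0001},  # assumed TypedDict key
        training_files=[{"file_id": "<file id>", "weight": 1}],  # assumed TypedDict keys
        auto_start=False,  # validate first; the docstring notes this field will become required
    )
    if created is not None:
        print(created)

    # Once the job is validated, request its start (placeholder ID).
    started = s.fine_tuning.jobs.start(job_id="<job id>")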
- :param job_id: + :param job_id: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -789,15 +893,15 @@ async def start_async( url_variables = None if timeout_ms is None: timeout_ms = self.sdk_configuration.timeout_ms - + if server_url is not None: base_url = server_url - + request = models.JobsAPIRoutesFineTuningStartFineTuningJobRequest( job_id=job_id, ) - - req = self.build_request( + + req = self.build_request_async( method="POST", path="/v1/fine_tuning/jobs/{job_id}/start", base_url=base_url, @@ -811,34 +915,39 @@ async def start_async( security=self.sdk_configuration.security, timeout_ms=timeout_ms, ) - + if retries == UNSET: if self.sdk_configuration.retry_config is not UNSET: retries = self.sdk_configuration.retry_config retry_config = None if isinstance(retries, utils.RetryConfig): - retry_config = (retries, [ - "429", - "500", - "502", - "503", - "504" - ]) - + retry_config = (retries, ["429", "500", "502", "503", "504"]) + http_res = await self.do_request_async( - hook_ctx=HookContext(operation_id="jobs_api_routes_fine_tuning_start_fine_tuning_job", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + hook_ctx=HookContext( + operation_id="jobs_api_routes_fine_tuning_start_fine_tuning_job", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), request=req, - error_status_codes=["4XX","5XX"], - retry_config=retry_config + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, ) - + if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json(http_res.text, Optional[models.DetailedJobOut]) - if utils.match_response(http_res, ["4XX","5XX"], "*"): - raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) - - content_type = http_res.headers.get("Content-Type") - raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + if utils.match_response(http_res, ["4XX", "5XX"], "*"): + raise models.SDKError( + "API error occurred", http_res.status_code, http_res.text, http_res + ) - + content_type = http_res.headers.get("Content-Type") + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res.text, + http_res, + ) diff --git a/src/mistralai/models/__init__.py b/src/mistralai/models/__init__.py index cefcd63..8b7f1a2 100644 --- a/src/mistralai/models/__init__.py +++ b/src/mistralai/models/__init__.py @@ -1,73 +1,249 @@ """Code generated by Speakeasy (https://speakeasy.com). 
 DO NOT EDIT."""
-from .agentscompletionrequest import AgentsCompletionRequest, AgentsCompletionRequestMessages, AgentsCompletionRequestMessagesTypedDict, AgentsCompletionRequestStop, AgentsCompletionRequestStopTypedDict, AgentsCompletionRequestToolChoice, AgentsCompletionRequestToolChoiceTypedDict, AgentsCompletionRequestTypedDict
-from .agentscompletionstreamrequest import AgentsCompletionStreamRequest, AgentsCompletionStreamRequestMessages, AgentsCompletionStreamRequestMessagesTypedDict, AgentsCompletionStreamRequestStop, AgentsCompletionStreamRequestStopTypedDict, AgentsCompletionStreamRequestToolChoice, AgentsCompletionStreamRequestToolChoiceTypedDict, AgentsCompletionStreamRequestTypedDict
-from .archiveftmodelout import ArchiveFTModelOut, ArchiveFTModelOutObject, ArchiveFTModelOutTypedDict
-from .assistantmessage import AssistantMessage, AssistantMessageRole, AssistantMessageTypedDict
+from .agentscompletionrequest import (
+    AgentsCompletionRequest,
+    AgentsCompletionRequestMessages,
+    AgentsCompletionRequestMessagesTypedDict,
+    AgentsCompletionRequestStop,
+    AgentsCompletionRequestStopTypedDict,
+    AgentsCompletionRequestToolChoice,
+    AgentsCompletionRequestToolChoiceTypedDict,
+    AgentsCompletionRequestTypedDict,
+)
+from .agentscompletionstreamrequest import (
+    AgentsCompletionStreamRequest,
+    AgentsCompletionStreamRequestMessages,
+    AgentsCompletionStreamRequestMessagesTypedDict,
+    AgentsCompletionStreamRequestStop,
+    AgentsCompletionStreamRequestStopTypedDict,
+    AgentsCompletionStreamRequestToolChoice,
+    AgentsCompletionStreamRequestToolChoiceTypedDict,
+    AgentsCompletionStreamRequestTypedDict,
+)
+from .archiveftmodelout import (
+    ArchiveFTModelOut,
+    ArchiveFTModelOutObject,
+    ArchiveFTModelOutTypedDict,
+)
+from .assistantmessage import (
+    AssistantMessage,
+    AssistantMessageRole,
+    AssistantMessageTypedDict,
+)
 from .basemodelcard import BaseModelCard, BaseModelCardTypedDict
-from .chatcompletionchoice import ChatCompletionChoice, ChatCompletionChoiceTypedDict, FinishReason
-from .chatcompletionrequest import ChatCompletionRequest, ChatCompletionRequestToolChoice, ChatCompletionRequestToolChoiceTypedDict, ChatCompletionRequestTypedDict, Messages, MessagesTypedDict, Stop, StopTypedDict
-from .chatcompletionresponse import ChatCompletionResponse, ChatCompletionResponseTypedDict
-from .chatcompletionstreamrequest import ChatCompletionStreamRequest, ChatCompletionStreamRequestMessages, ChatCompletionStreamRequestMessagesTypedDict, ChatCompletionStreamRequestStop, ChatCompletionStreamRequestStopTypedDict, ChatCompletionStreamRequestToolChoice, ChatCompletionStreamRequestToolChoiceTypedDict, ChatCompletionStreamRequestTypedDict
+from .chatcompletionchoice import (
+    ChatCompletionChoice,
+    ChatCompletionChoiceTypedDict,
+    FinishReason,
+)
+from .chatcompletionrequest import (
+    ChatCompletionRequest,
+    ChatCompletionRequestToolChoice,
+    ChatCompletionRequestToolChoiceTypedDict,
+    ChatCompletionRequestTypedDict,
+    Messages,
+    MessagesTypedDict,
+    Stop,
+    StopTypedDict,
+)
+from .chatcompletionresponse import (
+    ChatCompletionResponse,
+    ChatCompletionResponseTypedDict,
+)
+from .chatcompletionstreamrequest import (
+    ChatCompletionStreamRequest,
+    ChatCompletionStreamRequestMessages,
+    ChatCompletionStreamRequestMessagesTypedDict,
+    ChatCompletionStreamRequestStop,
+    ChatCompletionStreamRequestStopTypedDict,
+    ChatCompletionStreamRequestToolChoice,
+    ChatCompletionStreamRequestToolChoiceTypedDict,
+    ChatCompletionStreamRequestTypedDict,
+)
 from .checkpointout import CheckpointOut, CheckpointOutTypedDict
 from .completionchunk import CompletionChunk, CompletionChunkTypedDict
 from .completionevent import CompletionEvent, CompletionEventTypedDict
-from .completionresponsestreamchoice import CompletionResponseStreamChoice, CompletionResponseStreamChoiceFinishReason, CompletionResponseStreamChoiceTypedDict
+from .completionresponsestreamchoice import (
+    CompletionResponseStreamChoice,
+    CompletionResponseStreamChoiceFinishReason,
+    CompletionResponseStreamChoiceTypedDict,
+)
 from .contentchunk import ContentChunk, ContentChunkTypedDict
-from .delete_model_v1_models_model_id_deleteop import DeleteModelV1ModelsModelIDDeleteRequest, DeleteModelV1ModelsModelIDDeleteRequestTypedDict
+from .delete_model_v1_models_model_id_deleteop import (
+    DeleteModelV1ModelsModelIDDeleteRequest,
+    DeleteModelV1ModelsModelIDDeleteRequestTypedDict,
+)
 from .deletefileout import DeleteFileOut, DeleteFileOutTypedDict
 from .deletemodelout import DeleteModelOut, DeleteModelOutTypedDict
 from .deltamessage import DeltaMessage, DeltaMessageTypedDict
-from .detailedjobout import DetailedJobOut, DetailedJobOutIntegrations, DetailedJobOutIntegrationsTypedDict, DetailedJobOutObject, DetailedJobOutRepositories, DetailedJobOutRepositoriesTypedDict, DetailedJobOutStatus, DetailedJobOutTypedDict
-from .embeddingrequest import EmbeddingRequest, EmbeddingRequestTypedDict, Inputs, InputsTypedDict
+from .detailedjobout import (
+    DetailedJobOut,
+    DetailedJobOutIntegrations,
+    DetailedJobOutIntegrationsTypedDict,
+    DetailedJobOutObject,
+    DetailedJobOutRepositories,
+    DetailedJobOutRepositoriesTypedDict,
+    DetailedJobOutStatus,
+    DetailedJobOutTypedDict,
+)
+from .embeddingrequest import (
+    EmbeddingRequest,
+    EmbeddingRequestTypedDict,
+    Inputs,
+    InputsTypedDict,
+)
 from .embeddingresponse import EmbeddingResponse, EmbeddingResponseTypedDict
 from .embeddingresponsedata import EmbeddingResponseData, EmbeddingResponseDataTypedDict
 from .eventout import EventOut, EventOutTypedDict
-from .files_api_routes_delete_fileop import FilesAPIRoutesDeleteFileRequest, FilesAPIRoutesDeleteFileRequestTypedDict
-from .files_api_routes_retrieve_fileop import FilesAPIRoutesRetrieveFileRequest, FilesAPIRoutesRetrieveFileRequestTypedDict
-from .files_api_routes_upload_fileop import File, FileTypedDict, FilesAPIRoutesUploadFileMultiPartBodyParams, FilesAPIRoutesUploadFileMultiPartBodyParamsTypedDict, FilesAPIRoutesUploadFilePurpose
+from .files_api_routes_delete_fileop import (
+    FilesAPIRoutesDeleteFileRequest,
+    FilesAPIRoutesDeleteFileRequestTypedDict,
+)
+from .files_api_routes_retrieve_fileop import (
+    FilesAPIRoutesRetrieveFileRequest,
+    FilesAPIRoutesRetrieveFileRequestTypedDict,
+)
+from .files_api_routes_upload_fileop import (
+    File,
+    FileTypedDict,
+    FilesAPIRoutesUploadFileMultiPartBodyParams,
+    FilesAPIRoutesUploadFileMultiPartBodyParamsTypedDict,
+    FilesAPIRoutesUploadFilePurpose,
+)
 from .fileschema import FileSchema, FileSchemaPurpose, FileSchemaTypedDict
-from .fimcompletionrequest import FIMCompletionRequest, FIMCompletionRequestStop, FIMCompletionRequestStopTypedDict, FIMCompletionRequestTypedDict
+from .fimcompletionrequest import (
+    FIMCompletionRequest,
+    FIMCompletionRequestStop,
+    FIMCompletionRequestStopTypedDict,
+    FIMCompletionRequestTypedDict,
+)
 from .fimcompletionresponse import FIMCompletionResponse, FIMCompletionResponseTypedDict
-from .fimcompletionstreamrequest import FIMCompletionStreamRequest, FIMCompletionStreamRequestStop, FIMCompletionStreamRequestStopTypedDict, FIMCompletionStreamRequestTypedDict
+from .fimcompletionstreamrequest import (
+    FIMCompletionStreamRequest,
+    FIMCompletionStreamRequestStop,
+    FIMCompletionStreamRequestStopTypedDict,
+    FIMCompletionStreamRequestTypedDict,
+)
 from .finetuneablemodel import FineTuneableModel
-from .ftmodelcapabilitiesout import FTModelCapabilitiesOut, FTModelCapabilitiesOutTypedDict
+from .ftmodelcapabilitiesout import (
+    FTModelCapabilitiesOut,
+    FTModelCapabilitiesOutTypedDict,
+)
 from .ftmodelcard import FTModelCard, FTModelCardTypedDict
 from .ftmodelout import FTModelOut, FTModelOutObject, FTModelOutTypedDict
 from .function import Function, FunctionTypedDict
-from .functioncall import Arguments, ArgumentsTypedDict, FunctionCall, FunctionCallTypedDict
+from .functioncall import (
+    Arguments,
+    ArgumentsTypedDict,
+    FunctionCall,
+    FunctionCallTypedDict,
+)
 from .functionname import FunctionName, FunctionNameTypedDict
-from .githubrepositoryin import GithubRepositoryIn, GithubRepositoryInType, GithubRepositoryInTypedDict
-from .githubrepositoryout import GithubRepositoryOut, GithubRepositoryOutType, GithubRepositoryOutTypedDict
+from .githubrepositoryin import (
+    GithubRepositoryIn,
+    GithubRepositoryInType,
+    GithubRepositoryInTypedDict,
+)
+from .githubrepositoryout import (
+    GithubRepositoryOut,
+    GithubRepositoryOutType,
+    GithubRepositoryOutTypedDict,
+)
 from .httpvalidationerror import HTTPValidationError, HTTPValidationErrorData
 from .imageurl import ImageURL, ImageURLTypedDict
-from .imageurlchunk import ImageURLChunk, ImageURLChunkImageURL, ImageURLChunkImageURLTypedDict, ImageURLChunkType, ImageURLChunkTypedDict
-from .jobin import JobIn, JobInIntegrations, JobInIntegrationsTypedDict, JobInRepositories, JobInRepositoriesTypedDict, JobInTypedDict
+from .imageurlchunk import (
+    ImageURLChunk,
+    ImageURLChunkImageURL,
+    ImageURLChunkImageURLTypedDict,
+    ImageURLChunkType,
+    ImageURLChunkTypedDict,
+)
+from .jobin import (
+    JobIn,
+    JobInIntegrations,
+    JobInIntegrationsTypedDict,
+    JobInRepositories,
+    JobInRepositoriesTypedDict,
+    JobInTypedDict,
+)
 from .jobmetadataout import JobMetadataOut, JobMetadataOutTypedDict
-from .jobout import Integrations, IntegrationsTypedDict, JobOut, JobOutTypedDict, Object, Repositories, RepositoriesTypedDict, Status
-from .jobs_api_routes_fine_tuning_archive_fine_tuned_modelop import JobsAPIRoutesFineTuningArchiveFineTunedModelRequest, JobsAPIRoutesFineTuningArchiveFineTunedModelRequestTypedDict
-from .jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop import JobsAPIRoutesFineTuningCancelFineTuningJobRequest, JobsAPIRoutesFineTuningCancelFineTuningJobRequestTypedDict
-from .jobs_api_routes_fine_tuning_create_fine_tuning_jobop import JobsAPIRoutesFineTuningCreateFineTuningJobResponse, JobsAPIRoutesFineTuningCreateFineTuningJobResponseTypedDict
-from .jobs_api_routes_fine_tuning_get_fine_tuning_jobop import JobsAPIRoutesFineTuningGetFineTuningJobRequest, JobsAPIRoutesFineTuningGetFineTuningJobRequestTypedDict
-from .jobs_api_routes_fine_tuning_get_fine_tuning_jobsop import JobsAPIRoutesFineTuningGetFineTuningJobsRequest, JobsAPIRoutesFineTuningGetFineTuningJobsRequestTypedDict, QueryParamStatus
-from .jobs_api_routes_fine_tuning_start_fine_tuning_jobop import JobsAPIRoutesFineTuningStartFineTuningJobRequest, JobsAPIRoutesFineTuningStartFineTuningJobRequestTypedDict
-from .jobs_api_routes_fine_tuning_unarchive_fine_tuned_modelop import JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequest, JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequestTypedDict
-from
.jobs_api_routes_fine_tuning_update_fine_tuned_modelop import JobsAPIRoutesFineTuningUpdateFineTunedModelRequest, JobsAPIRoutesFineTuningUpdateFineTunedModelRequestTypedDict +from .jobout import ( + Integrations, + IntegrationsTypedDict, + JobOut, + JobOutTypedDict, + Object, + Repositories, + RepositoriesTypedDict, + Status, +) +from .jobs_api_routes_fine_tuning_archive_fine_tuned_modelop import ( + JobsAPIRoutesFineTuningArchiveFineTunedModelRequest, + JobsAPIRoutesFineTuningArchiveFineTunedModelRequestTypedDict, +) +from .jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop import ( + JobsAPIRoutesFineTuningCancelFineTuningJobRequest, + JobsAPIRoutesFineTuningCancelFineTuningJobRequestTypedDict, +) +from .jobs_api_routes_fine_tuning_create_fine_tuning_jobop import ( + JobsAPIRoutesFineTuningCreateFineTuningJobResponse, + JobsAPIRoutesFineTuningCreateFineTuningJobResponseTypedDict, +) +from .jobs_api_routes_fine_tuning_get_fine_tuning_jobop import ( + JobsAPIRoutesFineTuningGetFineTuningJobRequest, + JobsAPIRoutesFineTuningGetFineTuningJobRequestTypedDict, +) +from .jobs_api_routes_fine_tuning_get_fine_tuning_jobsop import ( + JobsAPIRoutesFineTuningGetFineTuningJobsRequest, + JobsAPIRoutesFineTuningGetFineTuningJobsRequestTypedDict, + QueryParamStatus, +) +from .jobs_api_routes_fine_tuning_start_fine_tuning_jobop import ( + JobsAPIRoutesFineTuningStartFineTuningJobRequest, + JobsAPIRoutesFineTuningStartFineTuningJobRequestTypedDict, +) +from .jobs_api_routes_fine_tuning_unarchive_fine_tuned_modelop import ( + JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequest, + JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequestTypedDict, +) +from .jobs_api_routes_fine_tuning_update_fine_tuned_modelop import ( + JobsAPIRoutesFineTuningUpdateFineTunedModelRequest, + JobsAPIRoutesFineTuningUpdateFineTunedModelRequestTypedDict, +) from .jobsout import JobsOut, JobsOutObject, JobsOutTypedDict -from .legacyjobmetadataout import LegacyJobMetadataOut, LegacyJobMetadataOutObject, LegacyJobMetadataOutTypedDict +from .legacyjobmetadataout import ( + LegacyJobMetadataOut, + LegacyJobMetadataOutObject, + LegacyJobMetadataOutTypedDict, +) from .listfilesout import ListFilesOut, ListFilesOutTypedDict from .metricout import MetricOut, MetricOutTypedDict from .modelcapabilities import ModelCapabilities, ModelCapabilitiesTypedDict from .modellist import Data, DataTypedDict, ModelList, ModelListTypedDict from .responseformat import ResponseFormat, ResponseFormatTypedDict from .responseformats import ResponseFormats -from .retrieve_model_v1_models_model_id_getop import RetrieveModelV1ModelsModelIDGetRequest, RetrieveModelV1ModelsModelIDGetRequestTypedDict, RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGet, RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGetTypedDict -from .retrievefileout import RetrieveFileOut, RetrieveFileOutPurpose, RetrieveFileOutTypedDict +from .retrieve_model_v1_models_model_id_getop import ( + RetrieveModelV1ModelsModelIDGetRequest, + RetrieveModelV1ModelsModelIDGetRequestTypedDict, + RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGet, + RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGetTypedDict, +) +from .retrievefileout import ( + RetrieveFileOut, + RetrieveFileOutPurpose, + RetrieveFileOutTypedDict, +) from .sampletype import SampleType from .sdkerror import SDKError from .security import Security, SecurityTypedDict from .source import Source -from .systemmessage import Content, ContentTypedDict, Role, 
SystemMessage, SystemMessageTypedDict +from .systemmessage import ( + Content, + ContentTypedDict, + Role, + SystemMessage, + SystemMessageTypedDict, +) from .textchunk import TextChunk, TextChunkType, TextChunkTypedDict from .tool import Tool, ToolTypedDict from .toolcall import ToolCall, ToolCallTypedDict @@ -78,13 +254,282 @@ from .trainingfile import TrainingFile, TrainingFileTypedDict from .trainingparameters import TrainingParameters, TrainingParametersTypedDict from .trainingparametersin import TrainingParametersIn, TrainingParametersInTypedDict -from .unarchiveftmodelout import UnarchiveFTModelOut, UnarchiveFTModelOutObject, UnarchiveFTModelOutTypedDict +from .unarchiveftmodelout import ( + UnarchiveFTModelOut, + UnarchiveFTModelOutObject, + UnarchiveFTModelOutTypedDict, +) from .updateftmodelin import UpdateFTModelIn, UpdateFTModelInTypedDict from .uploadfileout import Purpose, UploadFileOut, UploadFileOutTypedDict from .usageinfo import UsageInfo, UsageInfoTypedDict -from .usermessage import UserMessage, UserMessageContent, UserMessageContentTypedDict, UserMessageRole, UserMessageTypedDict -from .validationerror import Loc, LocTypedDict, ValidationError, ValidationErrorTypedDict -from .wandbintegration import WandbIntegration, WandbIntegrationType, WandbIntegrationTypedDict +from .usermessage import ( + UserMessage, + UserMessageContent, + UserMessageContentTypedDict, + UserMessageRole, + UserMessageTypedDict, +) +from .validationerror import ( + Loc, + LocTypedDict, + ValidationError, + ValidationErrorTypedDict, +) +from .wandbintegration import ( + WandbIntegration, + WandbIntegrationType, + WandbIntegrationTypedDict, +) from .wandbintegrationout import Type, WandbIntegrationOut, WandbIntegrationOutTypedDict -__all__ = ["AgentsCompletionRequest", "AgentsCompletionRequestMessages", "AgentsCompletionRequestMessagesTypedDict", "AgentsCompletionRequestStop", "AgentsCompletionRequestStopTypedDict", "AgentsCompletionRequestToolChoice", "AgentsCompletionRequestToolChoiceTypedDict", "AgentsCompletionRequestTypedDict", "AgentsCompletionStreamRequest", "AgentsCompletionStreamRequestMessages", "AgentsCompletionStreamRequestMessagesTypedDict", "AgentsCompletionStreamRequestStop", "AgentsCompletionStreamRequestStopTypedDict", "AgentsCompletionStreamRequestToolChoice", "AgentsCompletionStreamRequestToolChoiceTypedDict", "AgentsCompletionStreamRequestTypedDict", "ArchiveFTModelOut", "ArchiveFTModelOutObject", "ArchiveFTModelOutTypedDict", "Arguments", "ArgumentsTypedDict", "AssistantMessage", "AssistantMessageRole", "AssistantMessageTypedDict", "BaseModelCard", "BaseModelCardTypedDict", "ChatCompletionChoice", "ChatCompletionChoiceTypedDict", "ChatCompletionRequest", "ChatCompletionRequestToolChoice", "ChatCompletionRequestToolChoiceTypedDict", "ChatCompletionRequestTypedDict", "ChatCompletionResponse", "ChatCompletionResponseTypedDict", "ChatCompletionStreamRequest", "ChatCompletionStreamRequestMessages", "ChatCompletionStreamRequestMessagesTypedDict", "ChatCompletionStreamRequestStop", "ChatCompletionStreamRequestStopTypedDict", "ChatCompletionStreamRequestToolChoice", "ChatCompletionStreamRequestToolChoiceTypedDict", "ChatCompletionStreamRequestTypedDict", "CheckpointOut", "CheckpointOutTypedDict", "CompletionChunk", "CompletionChunkTypedDict", "CompletionEvent", "CompletionEventTypedDict", "CompletionResponseStreamChoice", "CompletionResponseStreamChoiceFinishReason", "CompletionResponseStreamChoiceTypedDict", "Content", "ContentChunk", "ContentChunkTypedDict", "ContentTypedDict", "Data", 
"DataTypedDict", "DeleteFileOut", "DeleteFileOutTypedDict", "DeleteModelOut", "DeleteModelOutTypedDict", "DeleteModelV1ModelsModelIDDeleteRequest", "DeleteModelV1ModelsModelIDDeleteRequestTypedDict", "DeltaMessage", "DeltaMessageTypedDict", "DetailedJobOut", "DetailedJobOutIntegrations", "DetailedJobOutIntegrationsTypedDict", "DetailedJobOutObject", "DetailedJobOutRepositories", "DetailedJobOutRepositoriesTypedDict", "DetailedJobOutStatus", "DetailedJobOutTypedDict", "EmbeddingRequest", "EmbeddingRequestTypedDict", "EmbeddingResponse", "EmbeddingResponseData", "EmbeddingResponseDataTypedDict", "EmbeddingResponseTypedDict", "EventOut", "EventOutTypedDict", "FIMCompletionRequest", "FIMCompletionRequestStop", "FIMCompletionRequestStopTypedDict", "FIMCompletionRequestTypedDict", "FIMCompletionResponse", "FIMCompletionResponseTypedDict", "FIMCompletionStreamRequest", "FIMCompletionStreamRequestStop", "FIMCompletionStreamRequestStopTypedDict", "FIMCompletionStreamRequestTypedDict", "FTModelCapabilitiesOut", "FTModelCapabilitiesOutTypedDict", "FTModelCard", "FTModelCardTypedDict", "FTModelOut", "FTModelOutObject", "FTModelOutTypedDict", "File", "FileSchema", "FileSchemaPurpose", "FileSchemaTypedDict", "FileTypedDict", "FilesAPIRoutesDeleteFileRequest", "FilesAPIRoutesDeleteFileRequestTypedDict", "FilesAPIRoutesRetrieveFileRequest", "FilesAPIRoutesRetrieveFileRequestTypedDict", "FilesAPIRoutesUploadFileMultiPartBodyParams", "FilesAPIRoutesUploadFileMultiPartBodyParamsTypedDict", "FilesAPIRoutesUploadFilePurpose", "FineTuneableModel", "FinishReason", "Function", "FunctionCall", "FunctionCallTypedDict", "FunctionName", "FunctionNameTypedDict", "FunctionTypedDict", "GithubRepositoryIn", "GithubRepositoryInType", "GithubRepositoryInTypedDict", "GithubRepositoryOut", "GithubRepositoryOutType", "GithubRepositoryOutTypedDict", "HTTPValidationError", "HTTPValidationErrorData", "ImageURL", "ImageURLChunk", "ImageURLChunkImageURL", "ImageURLChunkImageURLTypedDict", "ImageURLChunkType", "ImageURLChunkTypedDict", "ImageURLTypedDict", "Inputs", "InputsTypedDict", "Integrations", "IntegrationsTypedDict", "JobIn", "JobInIntegrations", "JobInIntegrationsTypedDict", "JobInRepositories", "JobInRepositoriesTypedDict", "JobInTypedDict", "JobMetadataOut", "JobMetadataOutTypedDict", "JobOut", "JobOutTypedDict", "JobsAPIRoutesFineTuningArchiveFineTunedModelRequest", "JobsAPIRoutesFineTuningArchiveFineTunedModelRequestTypedDict", "JobsAPIRoutesFineTuningCancelFineTuningJobRequest", "JobsAPIRoutesFineTuningCancelFineTuningJobRequestTypedDict", "JobsAPIRoutesFineTuningCreateFineTuningJobResponse", "JobsAPIRoutesFineTuningCreateFineTuningJobResponseTypedDict", "JobsAPIRoutesFineTuningGetFineTuningJobRequest", "JobsAPIRoutesFineTuningGetFineTuningJobRequestTypedDict", "JobsAPIRoutesFineTuningGetFineTuningJobsRequest", "JobsAPIRoutesFineTuningGetFineTuningJobsRequestTypedDict", "JobsAPIRoutesFineTuningStartFineTuningJobRequest", "JobsAPIRoutesFineTuningStartFineTuningJobRequestTypedDict", "JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequest", "JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequestTypedDict", "JobsAPIRoutesFineTuningUpdateFineTunedModelRequest", "JobsAPIRoutesFineTuningUpdateFineTunedModelRequestTypedDict", "JobsOut", "JobsOutObject", "JobsOutTypedDict", "LegacyJobMetadataOut", "LegacyJobMetadataOutObject", "LegacyJobMetadataOutTypedDict", "ListFilesOut", "ListFilesOutTypedDict", "Loc", "LocTypedDict", "Messages", "MessagesTypedDict", "MetricOut", "MetricOutTypedDict", "ModelCapabilities", 
"ModelCapabilitiesTypedDict", "ModelList", "ModelListTypedDict", "Object", "Purpose", "QueryParamStatus", "Repositories", "RepositoriesTypedDict", "ResponseFormat", "ResponseFormatTypedDict", "ResponseFormats", "RetrieveFileOut", "RetrieveFileOutPurpose", "RetrieveFileOutTypedDict", "RetrieveModelV1ModelsModelIDGetRequest", "RetrieveModelV1ModelsModelIDGetRequestTypedDict", "RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGet", "RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGetTypedDict", "Role", "SDKError", "SampleType", "Security", "SecurityTypedDict", "Source", "Status", "Stop", "StopTypedDict", "SystemMessage", "SystemMessageTypedDict", "TextChunk", "TextChunkType", "TextChunkTypedDict", "Tool", "ToolCall", "ToolCallTypedDict", "ToolChoice", "ToolChoiceEnum", "ToolChoiceTypedDict", "ToolMessage", "ToolMessageRole", "ToolMessageTypedDict", "ToolTypedDict", "ToolTypes", "TrainingFile", "TrainingFileTypedDict", "TrainingParameters", "TrainingParametersIn", "TrainingParametersInTypedDict", "TrainingParametersTypedDict", "Type", "UnarchiveFTModelOut", "UnarchiveFTModelOutObject", "UnarchiveFTModelOutTypedDict", "UpdateFTModelIn", "UpdateFTModelInTypedDict", "UploadFileOut", "UploadFileOutTypedDict", "UsageInfo", "UsageInfoTypedDict", "UserMessage", "UserMessageContent", "UserMessageContentTypedDict", "UserMessageRole", "UserMessageTypedDict", "ValidationError", "ValidationErrorTypedDict", "WandbIntegration", "WandbIntegrationOut", "WandbIntegrationOutTypedDict", "WandbIntegrationType", "WandbIntegrationTypedDict"] +__all__ = [ + "AgentsCompletionRequest", + "AgentsCompletionRequestMessages", + "AgentsCompletionRequestMessagesTypedDict", + "AgentsCompletionRequestStop", + "AgentsCompletionRequestStopTypedDict", + "AgentsCompletionRequestToolChoice", + "AgentsCompletionRequestToolChoiceTypedDict", + "AgentsCompletionRequestTypedDict", + "AgentsCompletionStreamRequest", + "AgentsCompletionStreamRequestMessages", + "AgentsCompletionStreamRequestMessagesTypedDict", + "AgentsCompletionStreamRequestStop", + "AgentsCompletionStreamRequestStopTypedDict", + "AgentsCompletionStreamRequestToolChoice", + "AgentsCompletionStreamRequestToolChoiceTypedDict", + "AgentsCompletionStreamRequestTypedDict", + "ArchiveFTModelOut", + "ArchiveFTModelOutObject", + "ArchiveFTModelOutTypedDict", + "Arguments", + "ArgumentsTypedDict", + "AssistantMessage", + "AssistantMessageRole", + "AssistantMessageTypedDict", + "BaseModelCard", + "BaseModelCardTypedDict", + "ChatCompletionChoice", + "ChatCompletionChoiceTypedDict", + "ChatCompletionRequest", + "ChatCompletionRequestToolChoice", + "ChatCompletionRequestToolChoiceTypedDict", + "ChatCompletionRequestTypedDict", + "ChatCompletionResponse", + "ChatCompletionResponseTypedDict", + "ChatCompletionStreamRequest", + "ChatCompletionStreamRequestMessages", + "ChatCompletionStreamRequestMessagesTypedDict", + "ChatCompletionStreamRequestStop", + "ChatCompletionStreamRequestStopTypedDict", + "ChatCompletionStreamRequestToolChoice", + "ChatCompletionStreamRequestToolChoiceTypedDict", + "ChatCompletionStreamRequestTypedDict", + "CheckpointOut", + "CheckpointOutTypedDict", + "CompletionChunk", + "CompletionChunkTypedDict", + "CompletionEvent", + "CompletionEventTypedDict", + "CompletionResponseStreamChoice", + "CompletionResponseStreamChoiceFinishReason", + "CompletionResponseStreamChoiceTypedDict", + "Content", + "ContentChunk", + "ContentChunkTypedDict", + "ContentTypedDict", + "Data", + "DataTypedDict", + "DeleteFileOut", + 
"DeleteFileOutTypedDict", + "DeleteModelOut", + "DeleteModelOutTypedDict", + "DeleteModelV1ModelsModelIDDeleteRequest", + "DeleteModelV1ModelsModelIDDeleteRequestTypedDict", + "DeltaMessage", + "DeltaMessageTypedDict", + "DetailedJobOut", + "DetailedJobOutIntegrations", + "DetailedJobOutIntegrationsTypedDict", + "DetailedJobOutObject", + "DetailedJobOutRepositories", + "DetailedJobOutRepositoriesTypedDict", + "DetailedJobOutStatus", + "DetailedJobOutTypedDict", + "EmbeddingRequest", + "EmbeddingRequestTypedDict", + "EmbeddingResponse", + "EmbeddingResponseData", + "EmbeddingResponseDataTypedDict", + "EmbeddingResponseTypedDict", + "EventOut", + "EventOutTypedDict", + "FIMCompletionRequest", + "FIMCompletionRequestStop", + "FIMCompletionRequestStopTypedDict", + "FIMCompletionRequestTypedDict", + "FIMCompletionResponse", + "FIMCompletionResponseTypedDict", + "FIMCompletionStreamRequest", + "FIMCompletionStreamRequestStop", + "FIMCompletionStreamRequestStopTypedDict", + "FIMCompletionStreamRequestTypedDict", + "FTModelCapabilitiesOut", + "FTModelCapabilitiesOutTypedDict", + "FTModelCard", + "FTModelCardTypedDict", + "FTModelOut", + "FTModelOutObject", + "FTModelOutTypedDict", + "File", + "FileSchema", + "FileSchemaPurpose", + "FileSchemaTypedDict", + "FileTypedDict", + "FilesAPIRoutesDeleteFileRequest", + "FilesAPIRoutesDeleteFileRequestTypedDict", + "FilesAPIRoutesRetrieveFileRequest", + "FilesAPIRoutesRetrieveFileRequestTypedDict", + "FilesAPIRoutesUploadFileMultiPartBodyParams", + "FilesAPIRoutesUploadFileMultiPartBodyParamsTypedDict", + "FilesAPIRoutesUploadFilePurpose", + "FineTuneableModel", + "FinishReason", + "Function", + "FunctionCall", + "FunctionCallTypedDict", + "FunctionName", + "FunctionNameTypedDict", + "FunctionTypedDict", + "GithubRepositoryIn", + "GithubRepositoryInType", + "GithubRepositoryInTypedDict", + "GithubRepositoryOut", + "GithubRepositoryOutType", + "GithubRepositoryOutTypedDict", + "HTTPValidationError", + "HTTPValidationErrorData", + "ImageURL", + "ImageURLChunk", + "ImageURLChunkImageURL", + "ImageURLChunkImageURLTypedDict", + "ImageURLChunkType", + "ImageURLChunkTypedDict", + "ImageURLTypedDict", + "Inputs", + "InputsTypedDict", + "Integrations", + "IntegrationsTypedDict", + "JobIn", + "JobInIntegrations", + "JobInIntegrationsTypedDict", + "JobInRepositories", + "JobInRepositoriesTypedDict", + "JobInTypedDict", + "JobMetadataOut", + "JobMetadataOutTypedDict", + "JobOut", + "JobOutTypedDict", + "JobsAPIRoutesFineTuningArchiveFineTunedModelRequest", + "JobsAPIRoutesFineTuningArchiveFineTunedModelRequestTypedDict", + "JobsAPIRoutesFineTuningCancelFineTuningJobRequest", + "JobsAPIRoutesFineTuningCancelFineTuningJobRequestTypedDict", + "JobsAPIRoutesFineTuningCreateFineTuningJobResponse", + "JobsAPIRoutesFineTuningCreateFineTuningJobResponseTypedDict", + "JobsAPIRoutesFineTuningGetFineTuningJobRequest", + "JobsAPIRoutesFineTuningGetFineTuningJobRequestTypedDict", + "JobsAPIRoutesFineTuningGetFineTuningJobsRequest", + "JobsAPIRoutesFineTuningGetFineTuningJobsRequestTypedDict", + "JobsAPIRoutesFineTuningStartFineTuningJobRequest", + "JobsAPIRoutesFineTuningStartFineTuningJobRequestTypedDict", + "JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequest", + "JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequestTypedDict", + "JobsAPIRoutesFineTuningUpdateFineTunedModelRequest", + "JobsAPIRoutesFineTuningUpdateFineTunedModelRequestTypedDict", + "JobsOut", + "JobsOutObject", + "JobsOutTypedDict", + "LegacyJobMetadataOut", + "LegacyJobMetadataOutObject", + 
"LegacyJobMetadataOutTypedDict", + "ListFilesOut", + "ListFilesOutTypedDict", + "Loc", + "LocTypedDict", + "Messages", + "MessagesTypedDict", + "MetricOut", + "MetricOutTypedDict", + "ModelCapabilities", + "ModelCapabilitiesTypedDict", + "ModelList", + "ModelListTypedDict", + "Object", + "Purpose", + "QueryParamStatus", + "Repositories", + "RepositoriesTypedDict", + "ResponseFormat", + "ResponseFormatTypedDict", + "ResponseFormats", + "RetrieveFileOut", + "RetrieveFileOutPurpose", + "RetrieveFileOutTypedDict", + "RetrieveModelV1ModelsModelIDGetRequest", + "RetrieveModelV1ModelsModelIDGetRequestTypedDict", + "RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGet", + "RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGetTypedDict", + "Role", + "SDKError", + "SampleType", + "Security", + "SecurityTypedDict", + "Source", + "Status", + "Stop", + "StopTypedDict", + "SystemMessage", + "SystemMessageTypedDict", + "TextChunk", + "TextChunkType", + "TextChunkTypedDict", + "Tool", + "ToolCall", + "ToolCallTypedDict", + "ToolChoice", + "ToolChoiceEnum", + "ToolChoiceTypedDict", + "ToolMessage", + "ToolMessageRole", + "ToolMessageTypedDict", + "ToolTypedDict", + "ToolTypes", + "TrainingFile", + "TrainingFileTypedDict", + "TrainingParameters", + "TrainingParametersIn", + "TrainingParametersInTypedDict", + "TrainingParametersTypedDict", + "Type", + "UnarchiveFTModelOut", + "UnarchiveFTModelOutObject", + "UnarchiveFTModelOutTypedDict", + "UpdateFTModelIn", + "UpdateFTModelInTypedDict", + "UploadFileOut", + "UploadFileOutTypedDict", + "UsageInfo", + "UsageInfoTypedDict", + "UserMessage", + "UserMessageContent", + "UserMessageContentTypedDict", + "UserMessageRole", + "UserMessageTypedDict", + "ValidationError", + "ValidationErrorTypedDict", + "WandbIntegration", + "WandbIntegrationOut", + "WandbIntegrationOutTypedDict", + "WandbIntegrationType", + "WandbIntegrationTypedDict", +] diff --git a/src/mistralai/models/agentscompletionrequest.py b/src/mistralai/models/agentscompletionrequest.py index a5e33bd..1f0523a 100644 --- a/src/mistralai/models/agentscompletionrequest.py +++ b/src/mistralai/models/agentscompletionrequest.py @@ -23,10 +23,19 @@ r"""Stop generation if this token is detected. 
Or if one of these tokens is detected when providing an array"""

-AgentsCompletionRequestMessagesTypedDict = Union[UserMessageTypedDict, AssistantMessageTypedDict, ToolMessageTypedDict]
+AgentsCompletionRequestMessagesTypedDict = Union[
+    UserMessageTypedDict, AssistantMessageTypedDict, ToolMessageTypedDict
+]

-AgentsCompletionRequestMessages = Annotated[Union[Annotated[AssistantMessage, Tag("assistant")], Annotated[ToolMessage, Tag("tool")], Annotated[UserMessage, Tag("user")]], Discriminator(lambda m: get_discriminator(m, "role", "role"))]
+AgentsCompletionRequestMessages = Annotated[
+    Union[
+        Annotated[AssistantMessage, Tag("assistant")],
+        Annotated[ToolMessage, Tag("tool")],
+        Annotated[UserMessage, Tag("user")],
+    ],
+    Discriminator(lambda m: get_discriminator(m, "role", "role")),
+]

 AgentsCompletionRequestToolChoiceTypedDict = Union[ToolChoiceTypedDict, ToolChoiceEnum]

@@ -53,30 +62,48 @@ class AgentsCompletionRequestTypedDict(TypedDict):
     response_format: NotRequired[ResponseFormatTypedDict]
     tools: NotRequired[Nullable[List[ToolTypedDict]]]
     tool_choice: NotRequired[AgentsCompletionRequestToolChoiceTypedDict]
-    
+

 class AgentsCompletionRequest(BaseModel):
     messages: List[AgentsCompletionRequestMessages]
     r"""The prompt(s) to generate completions for, encoded as a list of dicts with role and content."""
+
     agent_id: str
     r"""The ID of the agent to use for this completion."""
+
     max_tokens: OptionalNullable[int] = UNSET
     r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length."""
+
     min_tokens: OptionalNullable[int] = UNSET
     r"""The minimum number of tokens to generate in the completion."""
+
     stream: Optional[bool] = False
     r"""Whether to stream back partial progress. If set, tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON."""
+
     stop: Optional[AgentsCompletionRequestStop] = None
     r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array"""
+
     random_seed: OptionalNullable[int] = UNSET
     r"""The seed to use for random sampling.
If set, different calls will generate deterministic results."""
+
     response_format: Optional[ResponseFormat] = None
+
     tools: OptionalNullable[List[Tool]] = UNSET
+
     tool_choice: Optional[AgentsCompletionRequestToolChoice] = None
-    
+
     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
-        optional_fields = ["max_tokens", "min_tokens", "stream", "stop", "random_seed", "response_format", "tools", "tool_choice"]
+        optional_fields = [
+            "max_tokens",
+            "min_tokens",
+            "stream",
+            "stop",
+            "random_seed",
+            "response_format",
+            "tools",
+            "tool_choice",
+        ]
         nullable_fields = ["max_tokens", "min_tokens", "random_seed", "tools"]
         null_default_fields = []
@@ -87,9 +114,13 @@ def serialize_model(self, handler):
         for n, f in self.model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
+            serialized.pop(k, None)
             optional_nullable = k in optional_fields and k in nullable_fields
-            is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member
+            is_set = (
+                self.__pydantic_fields_set__.intersection({n})
+                or k in null_default_fields
+            ) # pylint: disable=no-member
             if val is not None and val != UNSET_SENTINEL:
                 m[k] = val
@@ -97,4 +130,3 @@ def serialize_model(self, handler):
                 m[k] = val
         return m
-
diff --git a/src/mistralai/models/agentscompletionstreamrequest.py b/src/mistralai/models/agentscompletionstreamrequest.py
index 19ee472..57d1177 100644
--- a/src/mistralai/models/agentscompletionstreamrequest.py
+++ b/src/mistralai/models/agentscompletionstreamrequest.py
@@ -23,13 +23,24 @@ r"""Stop generation if this token is detected.
Or if one of these tokens is detected when providing an array"""

-AgentsCompletionStreamRequestMessagesTypedDict = Union[UserMessageTypedDict, AssistantMessageTypedDict, ToolMessageTypedDict]
+AgentsCompletionStreamRequestMessagesTypedDict = Union[
+    UserMessageTypedDict, AssistantMessageTypedDict, ToolMessageTypedDict
+]

-AgentsCompletionStreamRequestMessages = Annotated[Union[Annotated[AssistantMessage, Tag("assistant")], Annotated[ToolMessage, Tag("tool")], Annotated[UserMessage, Tag("user")]], Discriminator(lambda m: get_discriminator(m, "role", "role"))]
+AgentsCompletionStreamRequestMessages = Annotated[
+    Union[
+        Annotated[AssistantMessage, Tag("assistant")],
+        Annotated[ToolMessage, Tag("tool")],
+        Annotated[UserMessage, Tag("user")],
+    ],
+    Discriminator(lambda m: get_discriminator(m, "role", "role")),
+]

-AgentsCompletionStreamRequestToolChoiceTypedDict = Union[ToolChoiceTypedDict, ToolChoiceEnum]
+AgentsCompletionStreamRequestToolChoiceTypedDict = Union[
+    ToolChoiceTypedDict, ToolChoiceEnum
+]

 AgentsCompletionStreamRequestToolChoice = Union[ToolChoice, ToolChoiceEnum]

@@ -52,29 +63,47 @@ class AgentsCompletionStreamRequestTypedDict(TypedDict):
     response_format: NotRequired[ResponseFormatTypedDict]
     tools: NotRequired[Nullable[List[ToolTypedDict]]]
     tool_choice: NotRequired[AgentsCompletionStreamRequestToolChoiceTypedDict]
-    
+

 class AgentsCompletionStreamRequest(BaseModel):
     messages: List[AgentsCompletionStreamRequestMessages]
     r"""The prompt(s) to generate completions for, encoded as a list of dicts with role and content."""
+
     agent_id: str
     r"""The ID of the agent to use for this completion."""
+
     max_tokens: OptionalNullable[int] = UNSET
     r"""The maximum number of tokens to generate in the completion.
The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" + min_tokens: OptionalNullable[int] = UNSET r"""The minimum number of tokens to generate in the completion.""" + stream: Optional[bool] = True + stop: Optional[AgentsCompletionStreamRequestStop] = None r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + random_seed: OptionalNullable[int] = UNSET r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" + response_format: Optional[ResponseFormat] = None + tools: OptionalNullable[List[Tool]] = UNSET + tool_choice: Optional[AgentsCompletionStreamRequestToolChoice] = None - + @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["max_tokens", "min_tokens", "stream", "stop", "random_seed", "response_format", "tools", "tool_choice"] + optional_fields = [ + "max_tokens", + "min_tokens", + "stream", + "stop", + "random_seed", + "response_format", + "tools", + "tool_choice", + ] nullable_fields = ["max_tokens", "min_tokens", "random_seed", "tools"] null_default_fields = [] @@ -85,9 +114,13 @@ def serialize_model(self, handler): for n, f in self.model_fields.items(): k = f.alias or n val = serialized.get(k) + serialized.pop(k, None) optional_nullable = k in optional_fields and k in nullable_fields - is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member if val is not None and val != UNSET_SENTINEL: m[k] = val @@ -97,4 +130,3 @@ def serialize_model(self, handler): m[k] = val return m - diff --git a/src/mistralai/models/archiveftmodelout.py b/src/mistralai/models/archiveftmodelout.py index be2e904..eeffa5d 100644 --- a/src/mistralai/models/archiveftmodelout.py +++ b/src/mistralai/models/archiveftmodelout.py @@ -9,13 +9,17 @@ ArchiveFTModelOutObject = Literal["model"] + class ArchiveFTModelOutTypedDict(TypedDict): id: str archived: NotRequired[bool] - + class ArchiveFTModelOut(BaseModel): id: str + + # fmt: off OBJECT: Annotated[Final[Optional[ArchiveFTModelOutObject]], pydantic.Field(alias="object")] = "model" # type: ignore + # fmt: on + archived: Optional[bool] = True - diff --git a/src/mistralai/models/assistantmessage.py b/src/mistralai/models/assistantmessage.py index ff98f17..812685d 100644 --- a/src/mistralai/models/assistantmessage.py +++ b/src/mistralai/models/assistantmessage.py @@ -11,20 +11,26 @@ AssistantMessageRole = Literal["assistant"] + class AssistantMessageTypedDict(TypedDict): content: NotRequired[Nullable[str]] tool_calls: NotRequired[Nullable[List[ToolCallTypedDict]]] prefix: NotRequired[bool] r"""Set this to `true` when adding an assistant message as prefix to condition the model response. The role of the prefix message is to force the model to start its answer by the content of the message.""" - + class AssistantMessage(BaseModel): + # fmt: off ROLE: Annotated[Final[Optional[AssistantMessageRole]], pydantic.Field(alias="role")] = "assistant" # type: ignore + # fmt: on + content: OptionalNullable[str] = UNSET + tool_calls: OptionalNullable[List[ToolCall]] = UNSET + prefix: Optional[bool] = False r"""Set this to `true` when adding an assistant message as prefix to condition the model response. 
The role of the prefix message is to force the model to start its answer by the content of the message.""" - + @model_serializer(mode="wrap") def serialize_model(self, handler): optional_fields = ["role", "content", "tool_calls", "prefix"] @@ -38,9 +44,13 @@ def serialize_model(self, handler): for n, f in self.model_fields.items(): k = f.alias or n val = serialized.get(k) + serialized.pop(k, None) optional_nullable = k in optional_fields and k in nullable_fields - is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member if val is not None and val != UNSET_SENTINEL: m[k] = val @@ -50,4 +60,3 @@ def serialize_model(self, handler): m[k] = val return m - diff --git a/src/mistralai/models/basemodelcard.py b/src/mistralai/models/basemodelcard.py index d215184..85af1f1 100644 --- a/src/mistralai/models/basemodelcard.py +++ b/src/mistralai/models/basemodelcard.py @@ -21,24 +21,46 @@ class BaseModelCardTypedDict(TypedDict): max_context_length: NotRequired[int] aliases: NotRequired[List[str]] deprecation: NotRequired[Nullable[datetime]] - + class BaseModelCard(BaseModel): id: str + capabilities: ModelCapabilities + object: Optional[str] = "model" + created: Optional[int] = None + owned_by: Optional[str] = "mistralai" + name: OptionalNullable[str] = UNSET + description: OptionalNullable[str] = UNSET + max_context_length: Optional[int] = 32768 + aliases: Optional[List[str]] = None + deprecation: OptionalNullable[datetime] = UNSET + + # fmt: off TYPE: Annotated[Final[Optional[str]], pydantic.Field(alias="type")] = "base" # type: ignore - + # fmt: on + @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["object", "created", "owned_by", "name", "description", "max_context_length", "aliases", "deprecation", "type"] + optional_fields = [ + "object", + "created", + "owned_by", + "name", + "description", + "max_context_length", + "aliases", + "deprecation", + "type", + ] nullable_fields = ["name", "description", "deprecation"] null_default_fields = [] @@ -49,9 +71,13 @@ def serialize_model(self, handler): for n, f in self.model_fields.items(): k = f.alias or n val = serialized.get(k) + serialized.pop(k, None) optional_nullable = k in optional_fields and k in nullable_fields - is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member if val is not None and val != UNSET_SENTINEL: m[k] = val @@ -61,4 +87,3 @@ def serialize_model(self, handler): m[k] = val return m - diff --git a/src/mistralai/models/chatcompletionchoice.py b/src/mistralai/models/chatcompletionchoice.py index 748dbc1..20d674b 100644 --- a/src/mistralai/models/chatcompletionchoice.py +++ b/src/mistralai/models/chatcompletionchoice.py @@ -8,14 +8,16 @@ FinishReason = Literal["stop", "length", "model_length", "error", "tool_calls"] + class ChatCompletionChoiceTypedDict(TypedDict): index: int message: AssistantMessageTypedDict finish_reason: FinishReason - + class ChatCompletionChoice(BaseModel): index: int + message: AssistantMessage + finish_reason: FinishReason - diff --git a/src/mistralai/models/chatcompletionrequest.py b/src/mistralai/models/chatcompletionrequest.py index 94e3559..7872216 100644 --- a/src/mistralai/models/chatcompletionrequest.py +++ 
b/src/mistralai/models/chatcompletionrequest.py
@@ -24,10 +24,23 @@ r"""Stop generation if this token is detected.
Or if one of these tokens is detected when providing an array"""

-MessagesTypedDict = Union[SystemMessageTypedDict, UserMessageTypedDict, AssistantMessageTypedDict, ToolMessageTypedDict]
+MessagesTypedDict = Union[
+    SystemMessageTypedDict,
+    UserMessageTypedDict,
+    AssistantMessageTypedDict,
+    ToolMessageTypedDict,
+]

-Messages = Annotated[Union[Annotated[AssistantMessage, Tag("assistant")], Annotated[SystemMessage, Tag("system")], Annotated[ToolMessage, Tag("tool")], Annotated[UserMessage, Tag("user")]], Discriminator(lambda m: get_discriminator(m, "role", "role"))]
+Messages = Annotated[
+    Union[
+        Annotated[AssistantMessage, Tag("assistant")],
+        Annotated[SystemMessage, Tag("system")],
+        Annotated[ToolMessage, Tag("tool")],
+        Annotated[UserMessage, Tag("user")],
+    ],
+    Discriminator(lambda m: get_discriminator(m, "role", "role")),
+]

 ChatCompletionRequestToolChoiceTypedDict = Union[ToolChoiceTypedDict, ToolChoiceEnum]

@@ -60,36 +73,60 @@ class ChatCompletionRequestTypedDict(TypedDict):
     tool_choice: NotRequired[ChatCompletionRequestToolChoiceTypedDict]
     safe_prompt: NotRequired[bool]
     r"""Whether to inject a safety prompt before all conversations."""
-    
+

 class ChatCompletionRequest(BaseModel):
     model: Nullable[str]
     r"""ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions."""
+
     messages: List[Messages]
     r"""The prompt(s) to generate completions for, encoded as a list of dicts with role and content."""
+
     temperature: Optional[float] = 0.7
     r"""What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both."""
+
     top_p: Optional[float] = 1
     r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both."""
+
     max_tokens: OptionalNullable[int] = UNSET
     r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length."""
+
     min_tokens: OptionalNullable[int] = UNSET
     r"""The minimum number of tokens to generate in the completion."""
+
     stream: Optional[bool] = False
     r"""Whether to stream back partial progress. If set, tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON."""
+
     stop: Optional[Stop] = None
     r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array"""
+
     random_seed: OptionalNullable[int] = UNSET
     r"""The seed to use for random sampling.
If set, different calls will generate deterministic results.""" + response_format: Optional[ResponseFormat] = None + tools: OptionalNullable[List[Tool]] = UNSET + tool_choice: Optional[ChatCompletionRequestToolChoice] = None + safe_prompt: Optional[bool] = False r"""Whether to inject a safety prompt before all conversations.""" - + @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["temperature", "top_p", "max_tokens", "min_tokens", "stream", "stop", "random_seed", "response_format", "tools", "tool_choice", "safe_prompt"] + optional_fields = [ + "temperature", + "top_p", + "max_tokens", + "min_tokens", + "stream", + "stop", + "random_seed", + "response_format", + "tools", + "tool_choice", + "safe_prompt", + ] nullable_fields = ["model", "max_tokens", "min_tokens", "random_seed", "tools"] null_default_fields = [] @@ -100,9 +137,13 @@ def serialize_model(self, handler): for n, f in self.model_fields.items(): k = f.alias or n val = serialized.get(k) + serialized.pop(k, None) optional_nullable = k in optional_fields and k in nullable_fields - is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member if val is not None and val != UNSET_SENTINEL: m[k] = val @@ -112,4 +153,3 @@ def serialize_model(self, handler): m[k] = val return m - diff --git a/src/mistralai/models/chatcompletionresponse.py b/src/mistralai/models/chatcompletionresponse.py index dacb0ac..20c9010 100644 --- a/src/mistralai/models/chatcompletionresponse.py +++ b/src/mistralai/models/chatcompletionresponse.py @@ -15,13 +15,17 @@ class ChatCompletionResponseTypedDict(TypedDict): usage: UsageInfoTypedDict created: NotRequired[int] choices: NotRequired[List[ChatCompletionChoiceTypedDict]] - + class ChatCompletionResponse(BaseModel): id: str + object: str + model: str + usage: UsageInfo + created: Optional[int] = None + choices: Optional[List[ChatCompletionChoice]] = None - diff --git a/src/mistralai/models/chatcompletionstreamrequest.py b/src/mistralai/models/chatcompletionstreamrequest.py index 5bf1e0f..ccba04a 100644 --- a/src/mistralai/models/chatcompletionstreamrequest.py +++ b/src/mistralai/models/chatcompletionstreamrequest.py @@ -24,13 +24,28 @@ r"""Stop generation if this token is detected. 
Or if one of these tokens is detected when providing an array"""

-ChatCompletionStreamRequestMessagesTypedDict = Union[SystemMessageTypedDict, UserMessageTypedDict, AssistantMessageTypedDict, ToolMessageTypedDict]
+ChatCompletionStreamRequestMessagesTypedDict = Union[
+    SystemMessageTypedDict,
+    UserMessageTypedDict,
+    AssistantMessageTypedDict,
+    ToolMessageTypedDict,
+]

-ChatCompletionStreamRequestMessages = Annotated[Union[Annotated[AssistantMessage, Tag("assistant")], Annotated[SystemMessage, Tag("system")], Annotated[ToolMessage, Tag("tool")], Annotated[UserMessage, Tag("user")]], Discriminator(lambda m: get_discriminator(m, "role", "role"))]
+ChatCompletionStreamRequestMessages = Annotated[
+    Union[
+        Annotated[AssistantMessage, Tag("assistant")],
+        Annotated[SystemMessage, Tag("system")],
+        Annotated[ToolMessage, Tag("tool")],
+        Annotated[UserMessage, Tag("user")],
+    ],
+    Discriminator(lambda m: get_discriminator(m, "role", "role")),
+]

-ChatCompletionStreamRequestToolChoiceTypedDict = Union[ToolChoiceTypedDict, ToolChoiceEnum]
+ChatCompletionStreamRequestToolChoiceTypedDict = Union[
+    ToolChoiceTypedDict, ToolChoiceEnum
+]

 ChatCompletionStreamRequestToolChoice = Union[ToolChoice, ToolChoiceEnum]

@@ -59,35 +74,59 @@ class ChatCompletionStreamRequestTypedDict(TypedDict):
     tool_choice: NotRequired[ChatCompletionStreamRequestToolChoiceTypedDict]
     safe_prompt: NotRequired[bool]
     r"""Whether to inject a safety prompt before all conversations."""
-    
+

 class ChatCompletionStreamRequest(BaseModel):
     model: Nullable[str]
     r"""ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions."""
+
     messages: List[ChatCompletionStreamRequestMessages]
     r"""The prompt(s) to generate completions for, encoded as a list of dicts with role and content."""
+
     temperature: Optional[float] = 0.7
     r"""What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both."""
+
     top_p: Optional[float] = 1
     r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both."""
+
     max_tokens: OptionalNullable[int] = UNSET
     r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length."""
+
     min_tokens: OptionalNullable[int] = UNSET
     r"""The minimum number of tokens to generate in the completion."""
+
     stream: Optional[bool] = True
+
     stop: Optional[ChatCompletionStreamRequestStop] = None
     r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array"""
+
     random_seed: OptionalNullable[int] = UNSET
     r"""The seed to use for random sampling.
If set, different calls will generate deterministic results.""" + response_format: Optional[ResponseFormat] = None + tools: OptionalNullable[List[Tool]] = UNSET + tool_choice: Optional[ChatCompletionStreamRequestToolChoice] = None + safe_prompt: Optional[bool] = False r"""Whether to inject a safety prompt before all conversations.""" - + @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["temperature", "top_p", "max_tokens", "min_tokens", "stream", "stop", "random_seed", "response_format", "tools", "tool_choice", "safe_prompt"] + optional_fields = [ + "temperature", + "top_p", + "max_tokens", + "min_tokens", + "stream", + "stop", + "random_seed", + "response_format", + "tools", + "tool_choice", + "safe_prompt", + ] nullable_fields = ["model", "max_tokens", "min_tokens", "random_seed", "tools"] null_default_fields = [] @@ -98,9 +137,13 @@ def serialize_model(self, handler): for n, f in self.model_fields.items(): k = f.alias or n val = serialized.get(k) + serialized.pop(k, None) optional_nullable = k in optional_fields and k in nullable_fields - is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member if val is not None and val != UNSET_SENTINEL: m[k] = val @@ -110,4 +153,3 @@ def serialize_model(self, handler): m[k] = val return m - diff --git a/src/mistralai/models/checkpointout.py b/src/mistralai/models/checkpointout.py index 108356c..f818dae 100644 --- a/src/mistralai/models/checkpointout.py +++ b/src/mistralai/models/checkpointout.py @@ -13,13 +13,14 @@ class CheckpointOutTypedDict(TypedDict): r"""The step number that the checkpoint was created at.""" created_at: int r"""The UNIX timestamp (in seconds) for when the checkpoint was created.""" - + class CheckpointOut(BaseModel): metrics: MetricOut r"""Metrics at the step number during the fine-tuning job. Use these metrics to assess if the training is going smoothly (loss should decrease, token accuracy should increase).""" + step_number: int r"""The step number that the checkpoint was created at.""" + created_at: int r"""The UNIX timestamp (in seconds) for when the checkpoint was created.""" - diff --git a/src/mistralai/models/completionchunk.py b/src/mistralai/models/completionchunk.py index f3a12c1..8859d22 100644 --- a/src/mistralai/models/completionchunk.py +++ b/src/mistralai/models/completionchunk.py @@ -1,7 +1,10 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" from __future__ import annotations -from .completionresponsestreamchoice import CompletionResponseStreamChoice, CompletionResponseStreamChoiceTypedDict +from .completionresponsestreamchoice import ( + CompletionResponseStreamChoice, + CompletionResponseStreamChoiceTypedDict, +) from .usageinfo import UsageInfo, UsageInfoTypedDict from mistralai.types import BaseModel from typing import List, Optional, TypedDict @@ -15,13 +18,17 @@ class CompletionChunkTypedDict(TypedDict): object: NotRequired[str] created: NotRequired[int] usage: NotRequired[UsageInfoTypedDict] - + class CompletionChunk(BaseModel): id: str + model: str + choices: List[CompletionResponseStreamChoice] + object: Optional[str] = None + created: Optional[int] = None + usage: Optional[UsageInfo] = None - diff --git a/src/mistralai/models/completionevent.py b/src/mistralai/models/completionevent.py index 9b75f73..b0286fd 100644 --- a/src/mistralai/models/completionevent.py +++ b/src/mistralai/models/completionevent.py @@ -8,8 +8,7 @@ class CompletionEventTypedDict(TypedDict): data: CompletionChunkTypedDict - + class CompletionEvent(BaseModel): data: CompletionChunk - diff --git a/src/mistralai/models/completionresponsestreamchoice.py b/src/mistralai/models/completionresponsestreamchoice.py index bd3cf9b..227a2f7 100644 --- a/src/mistralai/models/completionresponsestreamchoice.py +++ b/src/mistralai/models/completionresponsestreamchoice.py @@ -7,19 +7,24 @@ from typing import Literal, TypedDict -CompletionResponseStreamChoiceFinishReason = Literal["stop", "length", "error", "tool_calls"] +CompletionResponseStreamChoiceFinishReason = Literal[ + "stop", "length", "error", "tool_calls" +] + class CompletionResponseStreamChoiceTypedDict(TypedDict): index: int delta: DeltaMessageTypedDict finish_reason: Nullable[CompletionResponseStreamChoiceFinishReason] - + class CompletionResponseStreamChoice(BaseModel): index: int + delta: DeltaMessage + finish_reason: Nullable[CompletionResponseStreamChoiceFinishReason] - + @model_serializer(mode="wrap") def serialize_model(self, handler): optional_fields = [] @@ -33,9 +38,13 @@ def serialize_model(self, handler): for n, f in self.model_fields.items(): k = f.alias or n val = serialized.get(k) + serialized.pop(k, None) optional_nullable = k in optional_fields and k in nullable_fields - is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member if val is not None and val != UNSET_SENTINEL: m[k] = val @@ -45,4 +54,3 @@ def serialize_model(self, handler): m[k] = val return m - diff --git a/src/mistralai/models/contentchunk.py b/src/mistralai/models/contentchunk.py index 2ce6ee0..9b9db09 100644 --- a/src/mistralai/models/contentchunk.py +++ b/src/mistralai/models/contentchunk.py @@ -12,5 +12,9 @@ ContentChunkTypedDict = Union[TextChunkTypedDict, ImageURLChunkTypedDict] -ContentChunk = Annotated[Union[Annotated[ImageURLChunk, Tag("image_url")], Annotated[TextChunk, Tag("text")]], Discriminator(lambda m: get_discriminator(m, "type", "type"))] - +ContentChunk = Annotated[ + Union[ + Annotated[ImageURLChunk, Tag("image_url")], Annotated[TextChunk, Tag("text")] + ], + Discriminator(lambda m: get_discriminator(m, "type", "type")), +] diff --git a/src/mistralai/models/delete_model_v1_models_model_id_deleteop.py b/src/mistralai/models/delete_model_v1_models_model_id_deleteop.py index 8935acb..2093245 100644 --- 
a/src/mistralai/models/delete_model_v1_models_model_id_deleteop.py +++ b/src/mistralai/models/delete_model_v1_models_model_id_deleteop.py @@ -10,9 +10,10 @@ class DeleteModelV1ModelsModelIDDeleteRequestTypedDict(TypedDict): model_id: str r"""The ID of the model to delete.""" - + class DeleteModelV1ModelsModelIDDeleteRequest(BaseModel): - model_id: Annotated[str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False))] + model_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] r"""The ID of the model to delete.""" - diff --git a/src/mistralai/models/deletefileout.py b/src/mistralai/models/deletefileout.py index 90c60ff..dc1a87f 100644 --- a/src/mistralai/models/deletefileout.py +++ b/src/mistralai/models/deletefileout.py @@ -12,13 +12,14 @@ class DeleteFileOutTypedDict(TypedDict): r"""The object type that was deleted""" deleted: bool r"""The deletion status.""" - + class DeleteFileOut(BaseModel): id: str r"""The ID of the deleted file.""" + object: str r"""The object type that was deleted""" + deleted: bool r"""The deletion status.""" - diff --git a/src/mistralai/models/deletemodelout.py b/src/mistralai/models/deletemodelout.py index bab96e0..96dbeb1 100644 --- a/src/mistralai/models/deletemodelout.py +++ b/src/mistralai/models/deletemodelout.py @@ -13,13 +13,14 @@ class DeleteModelOutTypedDict(TypedDict): r"""The object type that was deleted""" deleted: NotRequired[bool] r"""The deletion status""" - + class DeleteModelOut(BaseModel): id: str r"""The ID of the deleted model.""" + object: Optional[str] = "model" r"""The object type that was deleted""" + deleted: Optional[bool] = True r"""The deletion status""" - diff --git a/src/mistralai/models/deltamessage.py b/src/mistralai/models/deltamessage.py index 97bef0e..7b7fe79 100644 --- a/src/mistralai/models/deltamessage.py +++ b/src/mistralai/models/deltamessage.py @@ -12,13 +12,15 @@ class DeltaMessageTypedDict(TypedDict): role: NotRequired[str] content: NotRequired[Nullable[str]] tool_calls: NotRequired[Nullable[List[ToolCallTypedDict]]] - + class DeltaMessage(BaseModel): role: Optional[str] = None + content: OptionalNullable[str] = UNSET + tool_calls: OptionalNullable[List[ToolCall]] = UNSET - + @model_serializer(mode="wrap") def serialize_model(self, handler): optional_fields = ["role", "content", "tool_calls"] @@ -32,9 +34,13 @@ def serialize_model(self, handler): for n, f in self.model_fields.items(): k = f.alias or n val = serialized.get(k) + serialized.pop(k, None) optional_nullable = k in optional_fields and k in nullable_fields - is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member if val is not None and val != UNSET_SENTINEL: m[k] = val @@ -44,4 +50,3 @@ def serialize_model(self, handler): m[k] = val return m - diff --git a/src/mistralai/models/detailedjobout.py b/src/mistralai/models/detailedjobout.py index b33b6e3..336190c 100644 --- a/src/mistralai/models/detailedjobout.py +++ b/src/mistralai/models/detailedjobout.py @@ -15,7 +15,18 @@ from typing_extensions import Annotated, NotRequired -DetailedJobOutStatus = Literal["QUEUED", "STARTED", "VALIDATING", "VALIDATED", "RUNNING", "FAILED_VALIDATION", "FAILED", "SUCCESS", "CANCELLED", "CANCELLATION_REQUESTED"] +DetailedJobOutStatus = Literal[ + "QUEUED", + "STARTED", + "VALIDATING", + "VALIDATED", + "RUNNING", + "FAILED_VALIDATION", + "FAILED", + 
"SUCCESS", + "CANCELLED", + "CANCELLATION_REQUESTED", +] DetailedJobOutObject = Literal["job"] @@ -52,35 +63,73 @@ class DetailedJobOutTypedDict(TypedDict): events: NotRequired[List[EventOutTypedDict]] r"""Event items are created every time the status of a fine-tuning job changes. The timestamped list of all events is accessible here.""" checkpoints: NotRequired[List[CheckpointOutTypedDict]] - + class DetailedJobOut(BaseModel): id: str + auto_start: bool + hyperparameters: TrainingParameters + model: FineTuneableModel r"""The name of the model to fine-tune.""" + status: DetailedJobOutStatus + job_type: str + created_at: int + modified_at: int + training_files: List[str] + validation_files: OptionalNullable[List[str]] = UNSET + + # fmt: off OBJECT: Annotated[Final[Optional[DetailedJobOutObject]], pydantic.Field(alias="object")] = "job" # type: ignore + # fmt: on + fine_tuned_model: OptionalNullable[str] = UNSET + suffix: OptionalNullable[str] = UNSET + integrations: OptionalNullable[List[DetailedJobOutIntegrations]] = UNSET + trained_tokens: OptionalNullable[int] = UNSET + repositories: Optional[List[DetailedJobOutRepositories]] = None + metadata: OptionalNullable[JobMetadataOut] = UNSET + events: Optional[List[EventOut]] = None r"""Event items are created every time the status of a fine-tuning job changes. The timestamped list of all events is accessible here.""" + checkpoints: Optional[List[CheckpointOut]] = None - + @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["validation_files", "object", "fine_tuned_model", "suffix", "integrations", "trained_tokens", "repositories", "metadata", "events", "checkpoints"] - nullable_fields = ["validation_files", "fine_tuned_model", "suffix", "integrations", "trained_tokens", "metadata"] + optional_fields = [ + "validation_files", + "object", + "fine_tuned_model", + "suffix", + "integrations", + "trained_tokens", + "repositories", + "metadata", + "events", + "checkpoints", + ] + nullable_fields = [ + "validation_files", + "fine_tuned_model", + "suffix", + "integrations", + "trained_tokens", + "metadata", + ] null_default_fields = [] serialized = handler(self) @@ -90,9 +139,13 @@ def serialize_model(self, handler): for n, f in self.model_fields.items(): k = f.alias or n val = serialized.get(k) + serialized.pop(k, None) optional_nullable = k in optional_fields and k in nullable_fields - is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member if val is not None and val != UNSET_SENTINEL: m[k] = val @@ -102,4 +155,3 @@ def serialize_model(self, handler): m[k] = val return m - diff --git a/src/mistralai/models/embeddingrequest.py b/src/mistralai/models/embeddingrequest.py index 6d70cc8..5655472 100644 --- a/src/mistralai/models/embeddingrequest.py +++ b/src/mistralai/models/embeddingrequest.py @@ -23,16 +23,18 @@ class EmbeddingRequestTypedDict(TypedDict): r"""ID of the model to use.""" encoding_format: NotRequired[Nullable[str]] r"""The format to return the embeddings in.""" - + class EmbeddingRequest(BaseModel): inputs: Annotated[Inputs, pydantic.Field(alias="input")] r"""Text to embed.""" + model: str r"""ID of the model to use.""" + encoding_format: OptionalNullable[str] = UNSET r"""The format to return the embeddings in.""" - + @model_serializer(mode="wrap") def serialize_model(self, handler): optional_fields = ["encoding_format"] @@ 
-46,9 +48,13 @@ def serialize_model(self, handler): for n, f in self.model_fields.items(): k = f.alias or n val = serialized.get(k) + serialized.pop(k, None) optional_nullable = k in optional_fields and k in nullable_fields - is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member if val is not None and val != UNSET_SENTINEL: m[k] = val @@ -58,4 +64,3 @@ def serialize_model(self, handler): m[k] = val return m - diff --git a/src/mistralai/models/embeddingresponse.py b/src/mistralai/models/embeddingresponse.py index 040c42d..d85ceec 100644 --- a/src/mistralai/models/embeddingresponse.py +++ b/src/mistralai/models/embeddingresponse.py @@ -13,12 +13,15 @@ class EmbeddingResponseTypedDict(TypedDict): model: str usage: UsageInfoTypedDict data: List[EmbeddingResponseDataTypedDict] - + class EmbeddingResponse(BaseModel): id: str + object: str + model: str + usage: UsageInfo + data: List[EmbeddingResponseData] - diff --git a/src/mistralai/models/embeddingresponsedata.py b/src/mistralai/models/embeddingresponsedata.py index 07a061b..f37995e 100644 --- a/src/mistralai/models/embeddingresponsedata.py +++ b/src/mistralai/models/embeddingresponsedata.py @@ -10,10 +10,11 @@ class EmbeddingResponseDataTypedDict(TypedDict): object: NotRequired[str] embedding: NotRequired[List[float]] index: NotRequired[int] - + class EmbeddingResponseData(BaseModel): object: Optional[str] = None + embedding: Optional[List[float]] = None + index: Optional[int] = None - diff --git a/src/mistralai/models/eventout.py b/src/mistralai/models/eventout.py index d522abe..fa427f1 100644 --- a/src/mistralai/models/eventout.py +++ b/src/mistralai/models/eventout.py @@ -13,15 +13,17 @@ class EventOutTypedDict(TypedDict): created_at: int r"""The UNIX timestamp (in seconds) of the event.""" data: NotRequired[Nullable[Dict[str, Any]]] - + class EventOut(BaseModel): name: str r"""The name of the event.""" + created_at: int r"""The UNIX timestamp (in seconds) of the event.""" + data: OptionalNullable[Dict[str, Any]] = UNSET - + @model_serializer(mode="wrap") def serialize_model(self, handler): optional_fields = ["data"] @@ -35,9 +37,13 @@ def serialize_model(self, handler): for n, f in self.model_fields.items(): k = f.alias or n val = serialized.get(k) + serialized.pop(k, None) optional_nullable = k in optional_fields and k in nullable_fields - is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member if val is not None and val != UNSET_SENTINEL: m[k] = val @@ -47,4 +53,3 @@ def serialize_model(self, handler): m[k] = val return m - diff --git a/src/mistralai/models/files_api_routes_delete_fileop.py b/src/mistralai/models/files_api_routes_delete_fileop.py index 8571483..def6791 100644 --- a/src/mistralai/models/files_api_routes_delete_fileop.py +++ b/src/mistralai/models/files_api_routes_delete_fileop.py @@ -9,8 +9,9 @@ class FilesAPIRoutesDeleteFileRequestTypedDict(TypedDict): file_id: str - + class FilesAPIRoutesDeleteFileRequest(BaseModel): - file_id: Annotated[str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False))] - + file_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] diff --git 
a/src/mistralai/models/files_api_routes_retrieve_fileop.py b/src/mistralai/models/files_api_routes_retrieve_fileop.py index 76063be..bfbad27 100644 --- a/src/mistralai/models/files_api_routes_retrieve_fileop.py +++ b/src/mistralai/models/files_api_routes_retrieve_fileop.py @@ -9,8 +9,9 @@ class FilesAPIRoutesRetrieveFileRequestTypedDict(TypedDict): file_id: str - + class FilesAPIRoutesRetrieveFileRequest(BaseModel): - file_id: Annotated[str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False))] - + file_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] diff --git a/src/mistralai/models/files_api_routes_upload_fileop.py b/src/mistralai/models/files_api_routes_upload_fileop.py index 74720d6..8eae7af 100644 --- a/src/mistralai/models/files_api_routes_upload_fileop.py +++ b/src/mistralai/models/files_api_routes_upload_fileop.py @@ -12,17 +12,30 @@ FilesAPIRoutesUploadFilePurpose = Union[Literal["fine-tune"], UnrecognizedStr] + class FileTypedDict(TypedDict): file_name: str content: Union[bytes, IO[bytes], io.BufferedReader] content_type: NotRequired[str] - + class File(BaseModel): - file_name: Annotated[str, pydantic.Field(alias="file"), FieldMetadata(multipart=True)] - content: Annotated[Union[bytes, IO[bytes], io.BufferedReader], pydantic.Field(alias=""), FieldMetadata(multipart=MultipartFormMetadata(content=True))] - content_type: Annotated[Optional[str], pydantic.Field(alias="Content-Type"), FieldMetadata(multipart=True)] = None - + file_name: Annotated[ + str, pydantic.Field(alias="file"), FieldMetadata(multipart=True) + ] + + content: Annotated[ + Union[bytes, IO[bytes], io.BufferedReader], + pydantic.Field(alias=""), + FieldMetadata(multipart=MultipartFormMetadata(content=True)), + ] + + content_type: Annotated[ + Optional[str], + pydantic.Field(alias="Content-Type"), + FieldMetadata(multipart=True), + ] = None + class FilesAPIRoutesUploadFileMultiPartBodyParamsTypedDict(TypedDict): file: FileTypedDict @@ -36,10 +49,14 @@ class FilesAPIRoutesUploadFileMultiPartBodyParamsTypedDict(TypedDict): file=@path/to/your/file.jsonl ``` """ - + class FilesAPIRoutesUploadFileMultiPartBodyParams(BaseModel): - file: Annotated[File, pydantic.Field(alias=""), FieldMetadata(multipart=MultipartFormMetadata(file=True))] + file: Annotated[ + File, + pydantic.Field(alias=""), + FieldMetadata(multipart=MultipartFormMetadata(file=True)), + ] r"""The File object (not file name) to be uploaded. 
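Note: the `File` model in the upload hunk below maps a multipart request: `file_name` serializes under the `file` alias and `content` carries the raw bytes. A minimal usage sketch, assuming the `Mistral` client and the `files.upload` accessor generated elsewhere in this patch (the path and API-key variable are placeholders):

```python
import os

from mistralai import Mistral

client = Mistral(api_key=os.environ["MISTRAL_API_KEY"])

# Upload a JSONL training file; the dict keys mirror FileTypedDict below.
with open("training_data.jsonl", "rb") as fh:
    uploaded = client.files.upload(
        file={
            "file_name": "training_data.jsonl",  # sent under the `file` alias
            "content": fh,  # bytes or a binary file handle
        }
    )
print(uploaded.id)
```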
To upload a file and specify a custom file name you should format your request as such: ```bash @@ -50,5 +67,7 @@ class FilesAPIRoutesUploadFileMultiPartBodyParams(BaseModel): file=@path/to/your/file.jsonl ``` """ + + # fmt: off PURPOSE: Annotated[Final[Annotated[Optional[FilesAPIRoutesUploadFilePurpose], PlainValidator(validate_open_enum(False))]], pydantic.Field(alias="purpose"), FieldMetadata(multipart=True)] = "fine-tune" # type: ignore - + # fmt: on diff --git a/src/mistralai/models/fileschema.py b/src/mistralai/models/fileschema.py index b852dcb..1ace0fa 100644 --- a/src/mistralai/models/fileschema.py +++ b/src/mistralai/models/fileschema.py @@ -3,7 +3,14 @@ from __future__ import annotations from .sampletype import SampleType from .source import Source -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL, UnrecognizedStr +from mistralai.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, + UnrecognizedStr, +) from mistralai.utils import validate_open_enum import pydantic from pydantic import model_serializer @@ -15,6 +22,7 @@ FileSchemaPurpose = Union[Literal["fine-tune"], UnrecognizedStr] r"""The intended purpose of the uploaded file. Only accepts fine-tuning (`fine-tune`) for now.""" + class FileSchemaTypedDict(TypedDict): id: str r"""The unique identifier of the file.""" @@ -29,25 +37,35 @@ class FileSchemaTypedDict(TypedDict): sample_type: SampleType source: Source num_lines: NotRequired[Nullable[int]] - + class FileSchema(BaseModel): id: str r"""The unique identifier of the file.""" + object: str r"""The object type, which is always \"file\".""" + bytes: int r"""The size of the file, in bytes.""" + created_at: int r"""The UNIX timestamp (in seconds) of the event.""" + filename: str r"""The name of the uploaded file.""" + sample_type: SampleType + source: Source + + # fmt: off PURPOSE: Annotated[Final[Annotated[FileSchemaPurpose, PlainValidator(validate_open_enum(False))]], pydantic.Field(alias="purpose")] = "fine-tune" # type: ignore + # fmt: on r"""The intended purpose of the uploaded file. Only accepts fine-tuning (`fine-tune`) for now.""" + num_lines: OptionalNullable[int] = UNSET - + @model_serializer(mode="wrap") def serialize_model(self, handler): optional_fields = ["num_lines"] @@ -61,9 +79,13 @@ def serialize_model(self, handler): for n, f in self.model_fields.items(): k = f.alias or n val = serialized.get(k) + serialized.pop(k, None) optional_nullable = k in optional_fields and k in nullable_fields - is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member if val is not None and val != UNSET_SENTINEL: m[k] = val @@ -73,4 +95,3 @@ def serialize_model(self, handler): m[k] = val return m - diff --git a/src/mistralai/models/fimcompletionrequest.py b/src/mistralai/models/fimcompletionrequest.py index 7e04168..4f00d3d 100644 --- a/src/mistralai/models/fimcompletionrequest.py +++ b/src/mistralai/models/fimcompletionrequest.py @@ -39,7 +39,7 @@ class FIMCompletionRequestTypedDict(TypedDict): r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" suffix: NotRequired[Nullable[str]] r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. 
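Note: the `PURPOSE` constant in `fileschema.py` above is an "open enum": `Union[Literal["fine-tune"], UnrecognizedStr]` guarded by a `PlainValidator`, so values the generator does not know yet still validate instead of breaking old clients. A distilled sketch of the idea with a stand-in validator (these names are illustrative, not the SDK's):

```python
from typing import Literal, Union

from pydantic import BaseModel, PlainValidator
from typing_extensions import Annotated

# Stand-in for the SDK's Union[Literal[...], UnrecognizedStr] open enum.
Purpose = Union[Literal["fine-tune"], str]

def accept_unknown(value: object) -> str:
    # Keep whatever the server sent rather than rejecting it.
    return str(value)

class FileLike(BaseModel):
    purpose: Annotated[Purpose, PlainValidator(accept_unknown)] = "fine-tune"

print(FileLike(purpose="batch").purpose)  # unknown value survives validation
```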
When `suffix` is not provided, the model will simply execute completion starting with `prompt`.""" - + class FIMCompletionRequest(BaseModel): model: Nullable[str] @@ -47,28 +47,46 @@ class FIMCompletionRequest(BaseModel): - `codestral-2405` - `codestral-latest` """ + prompt: str r"""The text/code to complete.""" + temperature: Optional[float] = 0.7 r"""What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.""" + top_p: Optional[float] = 1 r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" + max_tokens: OptionalNullable[int] = UNSET r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" + min_tokens: OptionalNullable[int] = UNSET r"""The minimum number of tokens to generate in the completion.""" + stream: Optional[bool] = False r"""Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.""" + stop: Optional[FIMCompletionRequestStop] = None r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + random_seed: OptionalNullable[int] = UNSET r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" + suffix: OptionalNullable[str] = UNSET r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. 
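Note: the fields above translate directly into a fill-in-the-middle call. A sketch, assuming the `fim.complete` accessor generated in `src/mistralai/fim.py`; the prompt and suffix strings are illustrative:

```python
import os

from mistralai import Mistral

client = Mistral(api_key=os.environ["MISTRAL_API_KEY"])

res = client.fim.complete(
    model="codestral-latest",
    prompt="def fibonacci(n: int) -> int:\n    ",
    suffix="\n\nprint(fibonacci(10))",  # model fills between prompt and suffix
    temperature=0.2,
    max_tokens=128,
)
if res.choices:
    print(res.choices[0].message.content)
```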
When `suffix` is not provided, the model will simply execute completion starting with `prompt`.""" - + @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["temperature", "top_p", "max_tokens", "min_tokens", "stream", "stop", "random_seed", "suffix"] + optional_fields = [ + "temperature", + "top_p", + "max_tokens", + "min_tokens", + "stream", + "stop", + "random_seed", + "suffix", + ] nullable_fields = ["model", "max_tokens", "min_tokens", "random_seed", "suffix"] null_default_fields = [] @@ -79,9 +97,13 @@ def serialize_model(self, handler): for n, f in self.model_fields.items(): k = f.alias or n val = serialized.get(k) + serialized.pop(k, None) optional_nullable = k in optional_fields and k in nullable_fields - is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member if val is not None and val != UNSET_SENTINEL: m[k] = val @@ -91,4 +113,3 @@ def serialize_model(self, handler): m[k] = val return m - diff --git a/src/mistralai/models/fimcompletionresponse.py b/src/mistralai/models/fimcompletionresponse.py index f359adb..d9e11df 100644 --- a/src/mistralai/models/fimcompletionresponse.py +++ b/src/mistralai/models/fimcompletionresponse.py @@ -15,13 +15,17 @@ class FIMCompletionResponseTypedDict(TypedDict): usage: UsageInfoTypedDict created: NotRequired[int] choices: NotRequired[List[ChatCompletionChoiceTypedDict]] - + class FIMCompletionResponse(BaseModel): id: str + object: str + model: str + usage: UsageInfo + created: Optional[int] = None + choices: Optional[List[ChatCompletionChoice]] = None - diff --git a/src/mistralai/models/fimcompletionstreamrequest.py b/src/mistralai/models/fimcompletionstreamrequest.py index 6d01053..708542d 100644 --- a/src/mistralai/models/fimcompletionstreamrequest.py +++ b/src/mistralai/models/fimcompletionstreamrequest.py @@ -38,7 +38,7 @@ class FIMCompletionStreamRequestTypedDict(TypedDict): r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" suffix: NotRequired[Nullable[str]] r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`.""" - + class FIMCompletionStreamRequest(BaseModel): model: Nullable[str] @@ -46,27 +46,45 @@ class FIMCompletionStreamRequest(BaseModel): - `codestral-2405` - `codestral-latest` """ + prompt: str r"""The text/code to complete.""" + temperature: Optional[float] = 0.7 r"""What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.""" + top_p: Optional[float] = 1 r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" + max_tokens: OptionalNullable[int] = UNSET r"""The maximum number of tokens to generate in the completion. 
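Note: `FIMCompletionStreamRequest` defaults `stream=True` and the response arrives as server-sent events. A consumption sketch, assuming `fim.stream` yields `CompletionEvent` objects whose `data` is a `CompletionChunk` as in the models in this patch:

```python
import os

from mistralai import Mistral

client = Mistral(api_key=os.environ["MISTRAL_API_KEY"])

stream = client.fim.stream(
    model="codestral-latest",
    prompt="def add(a: int, b: int) -> int:\n    ",
)
for event in stream:
    # Each chunk carries a DeltaMessage; content may be absent on some chunks.
    delta = event.data.choices[0].delta
    print(delta.content or "", end="")
```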
The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" + min_tokens: OptionalNullable[int] = UNSET r"""The minimum number of tokens to generate in the completion.""" + stream: Optional[bool] = True + stop: Optional[FIMCompletionStreamRequestStop] = None r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + random_seed: OptionalNullable[int] = UNSET r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" + suffix: OptionalNullable[str] = UNSET r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`.""" - + @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["temperature", "top_p", "max_tokens", "min_tokens", "stream", "stop", "random_seed", "suffix"] + optional_fields = [ + "temperature", + "top_p", + "max_tokens", + "min_tokens", + "stream", + "stop", + "random_seed", + "suffix", + ] nullable_fields = ["model", "max_tokens", "min_tokens", "random_seed", "suffix"] null_default_fields = [] @@ -77,9 +95,13 @@ def serialize_model(self, handler): for n, f in self.model_fields.items(): k = f.alias or n val = serialized.get(k) + serialized.pop(k, None) optional_nullable = k in optional_fields and k in nullable_fields - is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member if val is not None and val != UNSET_SENTINEL: m[k] = val @@ -89,4 +111,3 @@ def serialize_model(self, handler): m[k] = val return m - diff --git a/src/mistralai/models/finetuneablemodel.py b/src/mistralai/models/finetuneablemodel.py index 22c8e4c..947991c 100644 --- a/src/mistralai/models/finetuneablemodel.py +++ b/src/mistralai/models/finetuneablemodel.py @@ -4,5 +4,11 @@ from typing import Literal -FineTuneableModel = Literal["open-mistral-7b", "mistral-small-latest", "codestral-latest", "mistral-large-latest", "open-mistral-nemo"] +FineTuneableModel = Literal[ + "open-mistral-7b", + "mistral-small-latest", + "codestral-latest", + "mistral-large-latest", + "open-mistral-nemo", +] r"""The name of the model to fine-tune.""" diff --git a/src/mistralai/models/ftmodelcapabilitiesout.py b/src/mistralai/models/ftmodelcapabilitiesout.py index ab76af3..fe66d30 100644 --- a/src/mistralai/models/ftmodelcapabilitiesout.py +++ b/src/mistralai/models/ftmodelcapabilitiesout.py @@ -11,11 +11,13 @@ class FTModelCapabilitiesOutTypedDict(TypedDict): completion_fim: NotRequired[bool] function_calling: NotRequired[bool] fine_tuning: NotRequired[bool] - + class FTModelCapabilitiesOut(BaseModel): completion_chat: Optional[bool] = True + completion_fim: Optional[bool] = False + function_calling: Optional[bool] = False + fine_tuning: Optional[bool] = False - diff --git a/src/mistralai/models/ftmodelcard.py b/src/mistralai/models/ftmodelcard.py index a76b4a6..b282a09 100644 --- a/src/mistralai/models/ftmodelcard.py +++ b/src/mistralai/models/ftmodelcard.py @@ -12,7 +12,7 @@ class FTModelCardTypedDict(TypedDict): r"""Extra fields for fine-tuned models.""" - + id: str capabilities: ModelCapabilitiesTypedDict job: str @@ -26,29 +26,55 @@ class FTModelCardTypedDict(TypedDict): aliases: NotRequired[List[str]] 
deprecation: NotRequired[Nullable[datetime]] archived: NotRequired[bool] - + class FTModelCard(BaseModel): r"""Extra fields for fine-tuned models.""" - + id: str + capabilities: ModelCapabilities + job: str + root: str + object: Optional[str] = "model" + created: Optional[int] = None + owned_by: Optional[str] = "mistralai" + name: OptionalNullable[str] = UNSET + description: OptionalNullable[str] = UNSET + max_context_length: Optional[int] = 32768 + aliases: Optional[List[str]] = None + deprecation: OptionalNullable[datetime] = UNSET + + # fmt: off TYPE: Annotated[Final[Optional[str]], pydantic.Field(alias="type")] = "fine-tuned" # type: ignore + # fmt: on + archived: Optional[bool] = False - + @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["object", "created", "owned_by", "name", "description", "max_context_length", "aliases", "deprecation", "type", "archived"] + optional_fields = [ + "object", + "created", + "owned_by", + "name", + "description", + "max_context_length", + "aliases", + "deprecation", + "type", + "archived", + ] nullable_fields = ["name", "description", "deprecation"] null_default_fields = [] @@ -59,9 +85,13 @@ def serialize_model(self, handler): for n, f in self.model_fields.items(): k = f.alias or n val = serialized.get(k) + serialized.pop(k, None) optional_nullable = k in optional_fields and k in nullable_fields - is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member if val is not None and val != UNSET_SENTINEL: m[k] = val @@ -71,4 +101,3 @@ def serialize_model(self, handler): m[k] = val return m - diff --git a/src/mistralai/models/ftmodelout.py b/src/mistralai/models/ftmodelout.py index 6f99bcb..664dd5d 100644 --- a/src/mistralai/models/ftmodelout.py +++ b/src/mistralai/models/ftmodelout.py @@ -1,7 +1,10 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" from __future__ import annotations -from .ftmodelcapabilitiesout import FTModelCapabilitiesOut, FTModelCapabilitiesOutTypedDict +from .ftmodelcapabilitiesout import ( + FTModelCapabilitiesOut, + FTModelCapabilitiesOutTypedDict, +) from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL import pydantic from pydantic import model_serializer @@ -11,6 +14,7 @@ FTModelOutObject = Literal["model"] + class FTModelOutTypedDict(TypedDict): id: str created: int @@ -23,25 +27,44 @@ class FTModelOutTypedDict(TypedDict): description: NotRequired[Nullable[str]] max_context_length: NotRequired[int] aliases: NotRequired[List[str]] - + class FTModelOut(BaseModel): id: str + created: int + owned_by: str + root: str + archived: bool + capabilities: FTModelCapabilitiesOut + job: str + + # fmt: off OBJECT: Annotated[Final[Optional[FTModelOutObject]], pydantic.Field(alias="object")] = "model" # type: ignore + # fmt: on + name: OptionalNullable[str] = UNSET + description: OptionalNullable[str] = UNSET + max_context_length: Optional[int] = 32768 + aliases: Optional[List[str]] = None - + @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["object", "name", "description", "max_context_length", "aliases"] + optional_fields = [ + "object", + "name", + "description", + "max_context_length", + "aliases", + ] nullable_fields = ["name", "description"] null_default_fields = [] @@ -52,9 +75,13 @@ def serialize_model(self, handler): for n, f in self.model_fields.items(): k = f.alias or n val = serialized.get(k) + serialized.pop(k, None) optional_nullable = k in optional_fields and k in nullable_fields - is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member if val is not None and val != UNSET_SENTINEL: m[k] = val @@ -64,4 +91,3 @@ def serialize_model(self, handler): m[k] = val return m - diff --git a/src/mistralai/models/function.py b/src/mistralai/models/function.py index 78eb259..a872eea 100644 --- a/src/mistralai/models/function.py +++ b/src/mistralai/models/function.py @@ -10,10 +10,11 @@ class FunctionTypedDict(TypedDict): name: str parameters: Dict[str, Any] description: NotRequired[str] - + class Function(BaseModel): name: str + parameters: Dict[str, Any] + description: Optional[str] = "" - diff --git a/src/mistralai/models/functioncall.py b/src/mistralai/models/functioncall.py index 4b79c32..941cc5e 100644 --- a/src/mistralai/models/functioncall.py +++ b/src/mistralai/models/functioncall.py @@ -14,9 +14,9 @@ class FunctionCallTypedDict(TypedDict): name: str arguments: ArgumentsTypedDict - + class FunctionCall(BaseModel): name: str + arguments: Arguments - diff --git a/src/mistralai/models/functionname.py b/src/mistralai/models/functionname.py index 0de738f..20fc9be 100644 --- a/src/mistralai/models/functionname.py +++ b/src/mistralai/models/functionname.py @@ -7,12 +7,11 @@ class FunctionNameTypedDict(TypedDict): r"""this restriction of `Function` is used to select a specific function to call""" - + name: str - + class FunctionName(BaseModel): r"""this restriction of `Function` is used to select a specific function to call""" - + name: str - diff --git a/src/mistralai/models/githubrepositoryin.py b/src/mistralai/models/githubrepositoryin.py index 234afeb..cb8bad6 100644 --- a/src/mistralai/models/githubrepositoryin.py +++ 
b/src/mistralai/models/githubrepositoryin.py @@ -10,22 +10,30 @@ GithubRepositoryInType = Literal["github"] + class GithubRepositoryInTypedDict(TypedDict): name: str owner: str token: str ref: NotRequired[Nullable[str]] weight: NotRequired[float] - + class GithubRepositoryIn(BaseModel): name: str + owner: str + token: str + + # fmt: off TYPE: Annotated[Final[Optional[GithubRepositoryInType]], pydantic.Field(alias="type")] = "github" # type: ignore + # fmt: on + ref: OptionalNullable[str] = UNSET + weight: Optional[float] = 1 - + @model_serializer(mode="wrap") def serialize_model(self, handler): optional_fields = ["type", "ref", "weight"] @@ -39,9 +47,13 @@ def serialize_model(self, handler): for n, f in self.model_fields.items(): k = f.alias or n val = serialized.get(k) + serialized.pop(k, None) optional_nullable = k in optional_fields and k in nullable_fields - is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member if val is not None and val != UNSET_SENTINEL: m[k] = val @@ -51,4 +63,3 @@ def serialize_model(self, handler): m[k] = val return m - diff --git a/src/mistralai/models/githubrepositoryout.py b/src/mistralai/models/githubrepositoryout.py index 2c0a427..7f023c7 100644 --- a/src/mistralai/models/githubrepositoryout.py +++ b/src/mistralai/models/githubrepositoryout.py @@ -10,22 +10,30 @@ GithubRepositoryOutType = Literal["github"] + class GithubRepositoryOutTypedDict(TypedDict): name: str owner: str commit_id: str ref: NotRequired[Nullable[str]] weight: NotRequired[float] - + class GithubRepositoryOut(BaseModel): name: str + owner: str + commit_id: str + + # fmt: off TYPE: Annotated[Final[Optional[GithubRepositoryOutType]], pydantic.Field(alias="type")] = "github" # type: ignore + # fmt: on + ref: OptionalNullable[str] = UNSET + weight: Optional[float] = 1 - + @model_serializer(mode="wrap") def serialize_model(self, handler): optional_fields = ["type", "ref", "weight"] @@ -39,9 +47,13 @@ def serialize_model(self, handler): for n, f in self.model_fields.items(): k = f.alias or n val = serialized.get(k) + serialized.pop(k, None) optional_nullable = k in optional_fields and k in nullable_fields - is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member if val is not None and val != UNSET_SENTINEL: m[k] = val @@ -51,4 +63,3 @@ def serialize_model(self, handler): m[k] = val return m - diff --git a/src/mistralai/models/httpvalidationerror.py b/src/mistralai/models/httpvalidationerror.py index 4e4a209..991b8bd 100644 --- a/src/mistralai/models/httpvalidationerror.py +++ b/src/mistralai/models/httpvalidationerror.py @@ -6,13 +6,14 @@ from mistralai.types import BaseModel from typing import List, Optional + class HTTPValidationErrorData(BaseModel): detail: Optional[List[ValidationError]] = None - class HTTPValidationError(Exception): r"""Validation Error""" + data: HTTPValidationErrorData def __init__(self, data: HTTPValidationErrorData): @@ -20,4 +21,3 @@ def __init__(self, data: HTTPValidationErrorData): def __str__(self) -> str: return utils.marshal_json(self.data, HTTPValidationErrorData) - diff --git a/src/mistralai/models/imageurl.py b/src/mistralai/models/imageurl.py index 5c74be0..af24a1a 100644 --- 
a/src/mistralai/models/imageurl.py +++ b/src/mistralai/models/imageurl.py @@ -10,12 +10,13 @@ class ImageURLTypedDict(TypedDict): url: str detail: NotRequired[Nullable[str]] - + class ImageURL(BaseModel): url: str + detail: OptionalNullable[str] = UNSET - + @model_serializer(mode="wrap") def serialize_model(self, handler): optional_fields = ["detail"] @@ -29,9 +30,13 @@ def serialize_model(self, handler): for n, f in self.model_fields.items(): k = f.alias or n val = serialized.get(k) + serialized.pop(k, None) optional_nullable = k in optional_fields and k in nullable_fields - is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member if val is not None and val != UNSET_SENTINEL: m[k] = val @@ -41,4 +46,3 @@ def serialize_model(self, handler): m[k] = val return m - diff --git a/src/mistralai/models/imageurlchunk.py b/src/mistralai/models/imageurlchunk.py index 2e0f903..4440902 100644 --- a/src/mistralai/models/imageurlchunk.py +++ b/src/mistralai/models/imageurlchunk.py @@ -18,13 +18,15 @@ class ImageURLChunkTypedDict(TypedDict): r"""{\"type\":\"image_url\",\"image_url\":{\"url\":\"data:image/png;base64,iVBORw0""" - + image_url: ImageURLChunkImageURLTypedDict - + class ImageURLChunk(BaseModel): r"""{\"type\":\"image_url\",\"image_url\":{\"url\":\"data:image/png;base64,iVBORw0""" - + image_url: ImageURLChunkImageURL + + # fmt: off TYPE: Annotated[Final[Optional[ImageURLChunkType]], pydantic.Field(alias="type")] = "image_url" # type: ignore - + # fmt: on diff --git a/src/mistralai/models/jobin.py b/src/mistralai/models/jobin.py index dd8e4ee..db875c1 100644 --- a/src/mistralai/models/jobin.py +++ b/src/mistralai/models/jobin.py @@ -39,27 +39,41 @@ class JobInTypedDict(TypedDict): repositories: NotRequired[List[JobInRepositoriesTypedDict]] auto_start: NotRequired[bool] r"""This field will be required in a future release.""" - + class JobIn(BaseModel): model: FineTuneableModel r"""The name of the model to fine-tune.""" + hyperparameters: TrainingParametersIn r"""The fine-tuning hyperparameter settings used in a fine-tune job.""" + training_files: Optional[List[TrainingFile]] = None + validation_files: OptionalNullable[List[str]] = UNSET r"""A list containing the IDs of uploaded files that contain validation data. If you provide these files, the data is used to generate validation metrics periodically during fine-tuning. These metrics can be viewed in `checkpoints` when getting the status of a running fine-tuning job. The same data should not be present in both train and validation files.""" + suffix: OptionalNullable[str] = UNSET r"""A string that will be added to your fine-tuning model name. 
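Note: a `JobIn` below is normally built through the jobs accessor rather than instantiated directly. A sketch, assuming the `fine_tuning.jobs.create` accessor generated in `src/mistralai/jobs.py`; the file ID and hyperparameter values are placeholders, and the `{"file_id": ..., "weight": ...}` shape assumes the `TrainingFile` model:

```python
import os

from mistralai import Mistral

client = Mistral(api_key=os.environ["MISTRAL_API_KEY"])

job = client.fine_tuning.jobs.create(
    model="open-mistral-7b",  # one of the FineTuneableModel literals
    training_files=[{"file_id": "<uploaded-file-id>", "weight": 1.0}],
    hyperparameters={"training_steps": 100, "learning_rate": 1e-4},
    suffix="my-great-model",  # becomes part of the fine-tuned model name
    auto_start=False,  # will be required in a future release, per the docstring
)
# When this is not a dry run, the response is a JobOut.
print(job.id, job.status)
```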
For example, a suffix of \"my-great-model\" would produce a model name like `ft:open-mistral-7b:my-great-model:xxx...`""" + integrations: OptionalNullable[List[JobInIntegrations]] = UNSET r"""A list of integrations to enable for your fine-tuning job.""" + repositories: Optional[List[JobInRepositories]] = None + auto_start: Optional[bool] = None r"""This field will be required in a future release.""" - + @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["training_files", "validation_files", "suffix", "integrations", "repositories", "auto_start"] + optional_fields = [ + "training_files", + "validation_files", + "suffix", + "integrations", + "repositories", + "auto_start", + ] nullable_fields = ["validation_files", "suffix", "integrations"] null_default_fields = [] @@ -70,9 +84,13 @@ def serialize_model(self, handler): for n, f in self.model_fields.items(): k = f.alias or n val = serialized.get(k) + serialized.pop(k, None) optional_nullable = k in optional_fields and k in nullable_fields - is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member if val is not None and val != UNSET_SENTINEL: m[k] = val @@ -82,4 +100,3 @@ def serialize_model(self, handler): m[k] = val return m - diff --git a/src/mistralai/models/jobmetadataout.py b/src/mistralai/models/jobmetadataout.py index 9d3bfba..690540d 100644 --- a/src/mistralai/models/jobmetadataout.py +++ b/src/mistralai/models/jobmetadataout.py @@ -15,21 +15,43 @@ class JobMetadataOutTypedDict(TypedDict): train_tokens: NotRequired[Nullable[int]] data_tokens: NotRequired[Nullable[int]] estimated_start_time: NotRequired[Nullable[int]] - + class JobMetadataOut(BaseModel): expected_duration_seconds: OptionalNullable[int] = UNSET + cost: OptionalNullable[float] = UNSET + cost_currency: OptionalNullable[str] = UNSET + train_tokens_per_step: OptionalNullable[int] = UNSET + train_tokens: OptionalNullable[int] = UNSET + data_tokens: OptionalNullable[int] = UNSET + estimated_start_time: OptionalNullable[int] = UNSET - + @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["expected_duration_seconds", "cost", "cost_currency", "train_tokens_per_step", "train_tokens", "data_tokens", "estimated_start_time"] - nullable_fields = ["expected_duration_seconds", "cost", "cost_currency", "train_tokens_per_step", "train_tokens", "data_tokens", "estimated_start_time"] + optional_fields = [ + "expected_duration_seconds", + "cost", + "cost_currency", + "train_tokens_per_step", + "train_tokens", + "data_tokens", + "estimated_start_time", + ] + nullable_fields = [ + "expected_duration_seconds", + "cost", + "cost_currency", + "train_tokens_per_step", + "train_tokens", + "data_tokens", + "estimated_start_time", + ] null_default_fields = [] serialized = handler(self) @@ -39,9 +61,13 @@ def serialize_model(self, handler): for n, f in self.model_fields.items(): k = f.alias or n val = serialized.get(k) + serialized.pop(k, None) optional_nullable = k in optional_fields and k in nullable_fields - is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member if val is not None and val != UNSET_SENTINEL: m[k] = val @@ -51,4 +77,3 @@ def serialize_model(self, 
handler): m[k] = val return m - diff --git a/src/mistralai/models/jobout.py b/src/mistralai/models/jobout.py index f0e0d25..a716cb7 100644 --- a/src/mistralai/models/jobout.py +++ b/src/mistralai/models/jobout.py @@ -13,7 +13,18 @@ from typing_extensions import Annotated, NotRequired -Status = Literal["QUEUED", "STARTED", "VALIDATING", "VALIDATED", "RUNNING", "FAILED_VALIDATION", "FAILED", "SUCCESS", "CANCELLED", "CANCELLATION_REQUESTED"] +Status = Literal[ + "QUEUED", + "STARTED", + "VALIDATING", + "VALIDATED", + "RUNNING", + "FAILED_VALIDATION", + "FAILED", + "SUCCESS", + "CANCELLED", + "CANCELLATION_REQUESTED", +] r"""The current status of the fine-tuning job.""" Object = Literal["job"] @@ -60,44 +71,78 @@ class JobOutTypedDict(TypedDict): r"""Total number of tokens trained.""" repositories: NotRequired[List[RepositoriesTypedDict]] metadata: NotRequired[Nullable[JobMetadataOutTypedDict]] - + class JobOut(BaseModel): id: str r"""The ID of the job.""" + auto_start: bool + hyperparameters: TrainingParameters + model: FineTuneableModel r"""The name of the model to fine-tune.""" + status: Status r"""The current status of the fine-tuning job.""" + job_type: str r"""The type of job (`FT` for fine-tuning).""" + created_at: int r"""The UNIX timestamp (in seconds) for when the fine-tuning job was created.""" + modified_at: int r"""The UNIX timestamp (in seconds) for when the fine-tuning job was last modified.""" + training_files: List[str] r"""A list containing the IDs of uploaded files that contain training data.""" + validation_files: OptionalNullable[List[str]] = UNSET r"""A list containing the IDs of uploaded files that contain validation data.""" + + # fmt: off OBJECT: Annotated[Final[Optional[Object]], pydantic.Field(alias="object")] = "job" # type: ignore + # fmt: on r"""The object type of the fine-tuning job.""" + fine_tuned_model: OptionalNullable[str] = UNSET r"""The name of the fine-tuned model that is being created. The value will be `null` if the fine-tuning job is still running.""" + suffix: OptionalNullable[str] = UNSET r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. 
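Note: the `Status` literal below enumerates every state a job can report; only a few are terminal. A small polling helper built on that, assuming the generated `jobs.get` accessor (the retry interval is arbitrary):

```python
import time

from mistralai import Mistral

# Terminal states, taken from the Status literal below.
TERMINAL_STATUSES = {"SUCCESS", "FAILED", "FAILED_VALIDATION", "CANCELLED"}

def wait_for_job(client: Mistral, job_id: str, poll_seconds: float = 10.0):
    """Block until the fine-tuning job reaches a terminal status."""
    while True:
        job = client.fine_tuning.jobs.get(job_id=job_id)
        if job.status in TERMINAL_STATUSES:
            return job
        time.sleep(poll_seconds)
```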
When `suffix` is not provided, the model will simply execute completion starting with `prompt`.""" + integrations: OptionalNullable[List[Integrations]] = UNSET r"""A list of integrations enabled for your fine-tuning job.""" + trained_tokens: OptionalNullable[int] = UNSET r"""Total number of tokens trained.""" + repositories: Optional[List[Repositories]] = None + metadata: OptionalNullable[JobMetadataOut] = UNSET - + @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["validation_files", "object", "fine_tuned_model", "suffix", "integrations", "trained_tokens", "repositories", "metadata"] - nullable_fields = ["validation_files", "fine_tuned_model", "suffix", "integrations", "trained_tokens", "metadata"] + optional_fields = [ + "validation_files", + "object", + "fine_tuned_model", + "suffix", + "integrations", + "trained_tokens", + "repositories", + "metadata", + ] + nullable_fields = [ + "validation_files", + "fine_tuned_model", + "suffix", + "integrations", + "trained_tokens", + "metadata", + ] null_default_fields = [] serialized = handler(self) @@ -107,9 +152,13 @@ def serialize_model(self, handler): for n, f in self.model_fields.items(): k = f.alias or n val = serialized.get(k) + serialized.pop(k, None) optional_nullable = k in optional_fields and k in nullable_fields - is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member if val is not None and val != UNSET_SENTINEL: m[k] = val @@ -119,4 +168,3 @@ def serialize_model(self, handler): m[k] = val return m - diff --git a/src/mistralai/models/jobs_api_routes_fine_tuning_archive_fine_tuned_modelop.py b/src/mistralai/models/jobs_api_routes_fine_tuning_archive_fine_tuned_modelop.py index e32d52b..da52142 100644 --- a/src/mistralai/models/jobs_api_routes_fine_tuning_archive_fine_tuned_modelop.py +++ b/src/mistralai/models/jobs_api_routes_fine_tuning_archive_fine_tuned_modelop.py @@ -10,9 +10,10 @@ class JobsAPIRoutesFineTuningArchiveFineTunedModelRequestTypedDict(TypedDict): model_id: str r"""The ID of the model to archive.""" - + class JobsAPIRoutesFineTuningArchiveFineTunedModelRequest(BaseModel): - model_id: Annotated[str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False))] + model_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] r"""The ID of the model to archive.""" - diff --git a/src/mistralai/models/jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop.py b/src/mistralai/models/jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop.py index 0ba05ee..e84b082 100644 --- a/src/mistralai/models/jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop.py +++ b/src/mistralai/models/jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop.py @@ -10,9 +10,10 @@ class JobsAPIRoutesFineTuningCancelFineTuningJobRequestTypedDict(TypedDict): job_id: str r"""The ID of the job to cancel.""" - + class JobsAPIRoutesFineTuningCancelFineTuningJobRequest(BaseModel): - job_id: Annotated[str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False))] + job_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] r"""The ID of the job to cancel.""" - diff --git a/src/mistralai/models/jobs_api_routes_fine_tuning_create_fine_tuning_jobop.py b/src/mistralai/models/jobs_api_routes_fine_tuning_create_fine_tuning_jobop.py index c4ba3c1..1925a1a 100644 
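Note: the next hunk wraps the create-job response in a two-member union: `LegacyJobMetadataOut` (the metadata-only shape, e.g. for dry-run style requests) or a full `JobOut`. Callers can narrow it with `isinstance`, as in this sketch (assuming both models are re-exported from `mistralai.models`):

```python
from typing import Union

from mistralai.models import JobOut, LegacyJobMetadataOut

def describe(resp: Union[JobOut, LegacyJobMetadataOut]) -> str:
    if isinstance(resp, JobOut):
        return f"job {resp.id}: {resp.status}"
    # Metadata-only response: no job was started, but cost estimates are present.
    return f"estimated cost: {resp.cost} {resp.cost_currency}"
```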
--- a/src/mistralai/models/jobs_api_routes_fine_tuning_create_fine_tuning_jobop.py +++ b/src/mistralai/models/jobs_api_routes_fine_tuning_create_fine_tuning_jobop.py @@ -6,10 +6,11 @@ from typing import Union -JobsAPIRoutesFineTuningCreateFineTuningJobResponseTypedDict = Union[LegacyJobMetadataOutTypedDict, JobOutTypedDict] +JobsAPIRoutesFineTuningCreateFineTuningJobResponseTypedDict = Union[ + LegacyJobMetadataOutTypedDict, JobOutTypedDict +] r"""OK""" JobsAPIRoutesFineTuningCreateFineTuningJobResponse = Union[LegacyJobMetadataOut, JobOut] r"""OK""" - diff --git a/src/mistralai/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobop.py b/src/mistralai/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobop.py index f8924c8..0570612 100644 --- a/src/mistralai/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobop.py +++ b/src/mistralai/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobop.py @@ -10,9 +10,10 @@ class JobsAPIRoutesFineTuningGetFineTuningJobRequestTypedDict(TypedDict): job_id: str r"""The ID of the job to analyse.""" - + class JobsAPIRoutesFineTuningGetFineTuningJobRequest(BaseModel): - job_id: Annotated[str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False))] + job_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] r"""The ID of the job to analyse.""" - diff --git a/src/mistralai/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobsop.py b/src/mistralai/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobsop.py index bb5bf3b..3320b10 100644 --- a/src/mistralai/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobsop.py +++ b/src/mistralai/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobsop.py @@ -9,9 +9,21 @@ from typing_extensions import Annotated, NotRequired -QueryParamStatus = Literal["QUEUED", "STARTED", "VALIDATING", "VALIDATED", "RUNNING", "FAILED_VALIDATION", "FAILED", "SUCCESS", "CANCELLED", "CANCELLATION_REQUESTED"] +QueryParamStatus = Literal[ + "QUEUED", + "STARTED", + "VALIDATING", + "VALIDATED", + "RUNNING", + "FAILED_VALIDATION", + "FAILED", + "SUCCESS", + "CANCELLED", + "CANCELLATION_REQUESTED", +] r"""The current job state to filter on. When set, the other results are not displayed.""" + class JobsAPIRoutesFineTuningGetFineTuningJobsRequestTypedDict(TypedDict): page: NotRequired[int] r"""The page number of the results to be returned.""" @@ -31,32 +43,84 @@ class JobsAPIRoutesFineTuningGetFineTuningJobsRequestTypedDict(TypedDict): r"""The Weight and Biases run name to filter on. When set, the other results are not displayed.""" suffix: NotRequired[Nullable[str]] r"""The model suffix to filter on. 
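Note: each annotated field below becomes a `form`-style query parameter on the list endpoint. A listing sketch, assuming the generated `jobs.list` accessor mirrors these field names:

```python
import os

from mistralai import Mistral

client = Mistral(api_key=os.environ["MISTRAL_API_KEY"])

jobs = client.fine_tuning.jobs.list(
    page=0,
    page_size=50,
    status="RUNNING",  # one of the QueryParamStatus literals
    created_by_me=True,
)
for job in jobs.data or []:
    print(job.id, job.status)
```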
When set, the other results are not displayed.""" - + class JobsAPIRoutesFineTuningGetFineTuningJobsRequest(BaseModel): - page: Annotated[Optional[int], FieldMetadata(query=QueryParamMetadata(style="form", explode=True))] = 0 + page: Annotated[ + Optional[int], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = 0 r"""The page number of the results to be returned.""" - page_size: Annotated[Optional[int], FieldMetadata(query=QueryParamMetadata(style="form", explode=True))] = 100 + + page_size: Annotated[ + Optional[int], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = 100 r"""The number of items to return per page.""" - model: Annotated[OptionalNullable[str], FieldMetadata(query=QueryParamMetadata(style="form", explode=True))] = UNSET + + model: Annotated[ + OptionalNullable[str], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = UNSET r"""The model name used for fine-tuning to filter on. When set, the other results are not displayed.""" - created_after: Annotated[OptionalNullable[datetime], FieldMetadata(query=QueryParamMetadata(style="form", explode=True))] = UNSET + + created_after: Annotated[ + OptionalNullable[datetime], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = UNSET r"""The date/time to filter on. When set, the results for previous creation times are not displayed.""" - created_by_me: Annotated[Optional[bool], FieldMetadata(query=QueryParamMetadata(style="form", explode=True))] = False + + created_by_me: Annotated[ + Optional[bool], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = False r"""When set, only return results for jobs created by the API caller. Other results are not displayed.""" - status: Annotated[OptionalNullable[QueryParamStatus], FieldMetadata(query=QueryParamMetadata(style="form", explode=True))] = UNSET + + status: Annotated[ + OptionalNullable[QueryParamStatus], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = UNSET r"""The current job state to filter on. When set, the other results are not displayed.""" - wandb_project: Annotated[OptionalNullable[str], FieldMetadata(query=QueryParamMetadata(style="form", explode=True))] = UNSET + + wandb_project: Annotated[ + OptionalNullable[str], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = UNSET r"""The Weights and Biases project to filter on. When set, the other results are not displayed.""" - wandb_name: Annotated[OptionalNullable[str], FieldMetadata(query=QueryParamMetadata(style="form", explode=True))] = UNSET + + wandb_name: Annotated[ + OptionalNullable[str], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = UNSET r"""The Weight and Biases run name to filter on. When set, the other results are not displayed.""" - suffix: Annotated[OptionalNullable[str], FieldMetadata(query=QueryParamMetadata(style="form", explode=True))] = UNSET + + suffix: Annotated[ + OptionalNullable[str], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = UNSET r"""The model suffix to filter on. 
When set, the other results are not displayed.""" - + @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["page", "page_size", "model", "created_after", "created_by_me", "status", "wandb_project", "wandb_name", "suffix"] - nullable_fields = ["model", "created_after", "status", "wandb_project", "wandb_name", "suffix"] + optional_fields = [ + "page", + "page_size", + "model", + "created_after", + "created_by_me", + "status", + "wandb_project", + "wandb_name", + "suffix", + ] + nullable_fields = [ + "model", + "created_after", + "status", + "wandb_project", + "wandb_name", + "suffix", + ] null_default_fields = [] serialized = handler(self) @@ -66,9 +130,13 @@ def serialize_model(self, handler): for n, f in self.model_fields.items(): k = f.alias or n val = serialized.get(k) + serialized.pop(k, None) optional_nullable = k in optional_fields and k in nullable_fields - is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member if val is not None and val != UNSET_SENTINEL: m[k] = val @@ -78,4 +146,3 @@ def serialize_model(self, handler): m[k] = val return m - diff --git a/src/mistralai/models/jobs_api_routes_fine_tuning_start_fine_tuning_jobop.py b/src/mistralai/models/jobs_api_routes_fine_tuning_start_fine_tuning_jobop.py index 312063f..bc1b6d4 100644 --- a/src/mistralai/models/jobs_api_routes_fine_tuning_start_fine_tuning_jobop.py +++ b/src/mistralai/models/jobs_api_routes_fine_tuning_start_fine_tuning_jobop.py @@ -9,8 +9,9 @@ class JobsAPIRoutesFineTuningStartFineTuningJobRequestTypedDict(TypedDict): job_id: str - + class JobsAPIRoutesFineTuningStartFineTuningJobRequest(BaseModel): - job_id: Annotated[str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False))] - + job_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] diff --git a/src/mistralai/models/jobs_api_routes_fine_tuning_unarchive_fine_tuned_modelop.py b/src/mistralai/models/jobs_api_routes_fine_tuning_unarchive_fine_tuned_modelop.py index ef44fed..acc6bf4 100644 --- a/src/mistralai/models/jobs_api_routes_fine_tuning_unarchive_fine_tuned_modelop.py +++ b/src/mistralai/models/jobs_api_routes_fine_tuning_unarchive_fine_tuned_modelop.py @@ -10,9 +10,10 @@ class JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequestTypedDict(TypedDict): model_id: str r"""The ID of the model to unarchive.""" - + class JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequest(BaseModel): - model_id: Annotated[str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False))] + model_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] r"""The ID of the model to unarchive.""" - diff --git a/src/mistralai/models/jobs_api_routes_fine_tuning_update_fine_tuned_modelop.py b/src/mistralai/models/jobs_api_routes_fine_tuning_update_fine_tuned_modelop.py index 8a229f0..50298ce 100644 --- a/src/mistralai/models/jobs_api_routes_fine_tuning_update_fine_tuned_modelop.py +++ b/src/mistralai/models/jobs_api_routes_fine_tuning_update_fine_tuned_modelop.py @@ -12,10 +12,15 @@ class JobsAPIRoutesFineTuningUpdateFineTunedModelRequestTypedDict(TypedDict): model_id: str r"""The ID of the model to update.""" update_ft_model_in: UpdateFTModelInTypedDict - + class JobsAPIRoutesFineTuningUpdateFineTunedModelRequest(BaseModel): - model_id: Annotated[str, 
FieldMetadata(path=PathParamMetadata(style="simple", explode=False))] + model_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] r"""The ID of the model to update.""" - update_ft_model_in: Annotated[UpdateFTModelIn, FieldMetadata(request=RequestMetadata(media_type="application/json"))] - + + update_ft_model_in: Annotated[ + UpdateFTModelIn, + FieldMetadata(request=RequestMetadata(media_type="application/json")), + ] diff --git a/src/mistralai/models/jobsout.py b/src/mistralai/models/jobsout.py index 0ed51c8..bd5edf6 100644 --- a/src/mistralai/models/jobsout.py +++ b/src/mistralai/models/jobsout.py @@ -10,13 +10,17 @@ JobsOutObject = Literal["list"] + class JobsOutTypedDict(TypedDict): total: int data: NotRequired[List[JobOutTypedDict]] - + class JobsOut(BaseModel): total: int + data: Optional[List[JobOut]] = None + + # fmt: off OBJECT: Annotated[Final[Optional[JobsOutObject]], pydantic.Field(alias="object")] = "list" # type: ignore - + # fmt: on diff --git a/src/mistralai/models/legacyjobmetadataout.py b/src/mistralai/models/legacyjobmetadataout.py index 3b3106d..677cad8 100644 --- a/src/mistralai/models/legacyjobmetadataout.py +++ b/src/mistralai/models/legacyjobmetadataout.py @@ -10,6 +10,7 @@ LegacyJobMetadataOutObject = Literal["job.metadata"] + class LegacyJobMetadataOutTypedDict(TypedDict): details: str expected_duration_seconds: NotRequired[Nullable[int]] @@ -30,34 +31,69 @@ class LegacyJobMetadataOutTypedDict(TypedDict): r"""The number of complete passes through the entire training dataset.""" training_steps: NotRequired[Nullable[int]] r"""The number of training steps to perform. A training step refers to a single update of the model weights during the fine-tuning process. This update is typically calculated using a batch of samples from the training dataset.""" - + class LegacyJobMetadataOut(BaseModel): details: str + expected_duration_seconds: OptionalNullable[int] = UNSET r"""The approximated time (in seconds) for the fine-tuning process to complete.""" + cost: OptionalNullable[float] = UNSET r"""The cost of the fine-tuning job.""" + cost_currency: OptionalNullable[str] = UNSET r"""The currency used for the fine-tuning job cost.""" + train_tokens_per_step: OptionalNullable[int] = UNSET r"""The number of tokens consumed by one training step.""" + train_tokens: OptionalNullable[int] = UNSET r"""The total number of tokens used during the fine-tuning process.""" + data_tokens: OptionalNullable[int] = UNSET r"""The total number of tokens in the training dataset.""" + estimated_start_time: OptionalNullable[int] = UNSET + deprecated: Optional[bool] = True + epochs: OptionalNullable[float] = UNSET r"""The number of complete passes through the entire training dataset.""" + training_steps: OptionalNullable[int] = UNSET r"""The number of training steps to perform. A training step refers to a single update of the model weights during the fine-tuning process. 
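Note: the `serialize_model` hook in the hunk below, like its many copies throughout this patch, distinguishes three cases: a value that was set (emit it), a nullable value explicitly set to `None` (emit `null`), and a value never provided (omit it). A distilled, dependency-free re-creation of the idea, assuming pydantic v2; the field names are illustrative:

```python
from typing import Optional

from pydantic import BaseModel, model_serializer

class Sketch(BaseModel):
    details: str
    cost: Optional[float] = None  # optional *and* nullable in the real models

    @model_serializer(mode="wrap")
    def serialize_model(self, handler):
        data = handler(self)  # default serialization, as in the generated code
        out = {}
        for name in type(self).model_fields:
            value = data.get(name)
            if value is not None:
                out[name] = value  # real values always serialize
            elif name in self.__pydantic_fields_set__:
                out[name] = None  # explicitly set to None -> emit null
            # never set -> omitted entirely (the UNSET case)
        return out

print(Sketch(details="x").model_dump())             # {'details': 'x'}
print(Sketch(details="x", cost=None).model_dump())  # cost kept as explicit null
```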
This update is typically calculated using a batch of samples from the training dataset.""" + + # fmt: off OBJECT: Annotated[Final[Optional[LegacyJobMetadataOutObject]], pydantic.Field(alias="object")] = "job.metadata" # type: ignore - + # fmt: on + @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["expected_duration_seconds", "cost", "cost_currency", "train_tokens_per_step", "train_tokens", "data_tokens", "estimated_start_time", "deprecated", "epochs", "training_steps", "object"] - nullable_fields = ["expected_duration_seconds", "cost", "cost_currency", "train_tokens_per_step", "train_tokens", "data_tokens", "estimated_start_time", "epochs", "training_steps"] + optional_fields = [ + "expected_duration_seconds", + "cost", + "cost_currency", + "train_tokens_per_step", + "train_tokens", + "data_tokens", + "estimated_start_time", + "deprecated", + "epochs", + "training_steps", + "object", + ] + nullable_fields = [ + "expected_duration_seconds", + "cost", + "cost_currency", + "train_tokens_per_step", + "train_tokens", + "data_tokens", + "estimated_start_time", + "epochs", + "training_steps", + ] null_default_fields = [] serialized = handler(self) @@ -67,9 +103,13 @@ def serialize_model(self, handler): for n, f in self.model_fields.items(): k = f.alias or n val = serialized.get(k) + serialized.pop(k, None) optional_nullable = k in optional_fields and k in nullable_fields - is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member if val is not None and val != UNSET_SENTINEL: m[k] = val @@ -79,4 +119,3 @@ def serialize_model(self, handler): m[k] = val return m - diff --git a/src/mistralai/models/listfilesout.py b/src/mistralai/models/listfilesout.py index b6f4dd1..928a7be 100644 --- a/src/mistralai/models/listfilesout.py +++ b/src/mistralai/models/listfilesout.py @@ -9,9 +9,9 @@ class ListFilesOutTypedDict(TypedDict): data: List[FileSchemaTypedDict] object: str - + class ListFilesOut(BaseModel): data: List[FileSchema] + object: str - diff --git a/src/mistralai/models/metricout.py b/src/mistralai/models/metricout.py index b85cd7d..99fe9fb 100644 --- a/src/mistralai/models/metricout.py +++ b/src/mistralai/models/metricout.py @@ -9,19 +9,21 @@ class MetricOutTypedDict(TypedDict): r"""Metrics at the step number during the fine-tuning job. Use these metrics to assess if the training is going smoothly (loss should decrease, token accuracy should increase).""" - + train_loss: NotRequired[Nullable[float]] valid_loss: NotRequired[Nullable[float]] valid_mean_token_accuracy: NotRequired[Nullable[float]] - + class MetricOut(BaseModel): r"""Metrics at the step number during the fine-tuning job. 
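Note: `MetricOut` values below surface through a job's checkpoints. A reading sketch; `checkpoints` comes from `DetailedJobOut` earlier in this patch, while the `step_number` attribute on each checkpoint is an assumption based on `CheckpointOut`:

```python
from mistralai import Mistral

def print_training_curve(client: Mistral, job_id: str) -> None:
    job = client.fine_tuning.jobs.get(job_id=job_id)
    for ckpt in job.checkpoints or []:
        m = ckpt.metrics
        # Loss should trend down and token accuracy up on a healthy run.
        print(ckpt.step_number, m.train_loss, m.valid_loss,
              m.valid_mean_token_accuracy)
```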
Use these metrics to assess if the training is going smoothly (loss should decrease, token accuracy should increase).""" - + train_loss: OptionalNullable[float] = UNSET + valid_loss: OptionalNullable[float] = UNSET + valid_mean_token_accuracy: OptionalNullable[float] = UNSET - + @model_serializer(mode="wrap") def serialize_model(self, handler): optional_fields = ["train_loss", "valid_loss", "valid_mean_token_accuracy"] @@ -35,9 +37,13 @@ def serialize_model(self, handler): for n, f in self.model_fields.items(): k = f.alias or n val = serialized.get(k) + serialized.pop(k, None) optional_nullable = k in optional_fields and k in nullable_fields - is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member if val is not None and val != UNSET_SENTINEL: m[k] = val @@ -47,4 +53,3 @@ def serialize_model(self, handler): m[k] = val return m - diff --git a/src/mistralai/models/modelcapabilities.py b/src/mistralai/models/modelcapabilities.py index c7af0e6..af981cc 100644 --- a/src/mistralai/models/modelcapabilities.py +++ b/src/mistralai/models/modelcapabilities.py @@ -12,12 +12,15 @@ class ModelCapabilitiesTypedDict(TypedDict): function_calling: NotRequired[bool] fine_tuning: NotRequired[bool] vision: NotRequired[bool] - + class ModelCapabilities(BaseModel): completion_chat: Optional[bool] = True + completion_fim: Optional[bool] = False + function_calling: Optional[bool] = True + fine_tuning: Optional[bool] = False + vision: Optional[bool] = False - diff --git a/src/mistralai/models/modellist.py b/src/mistralai/models/modellist.py index fedd7a6..759b931 100644 --- a/src/mistralai/models/modellist.py +++ b/src/mistralai/models/modellist.py @@ -13,15 +13,20 @@ DataTypedDict = Union[BaseModelCardTypedDict, FTModelCardTypedDict] -Data = Annotated[Union[Annotated[BaseModelCard, Tag("base")], Annotated[FTModelCard, Tag("fine-tuned")]], Discriminator(lambda m: get_discriminator(m, "type", "type"))] +Data = Annotated[ + Union[ + Annotated[BaseModelCard, Tag("base")], Annotated[FTModelCard, Tag("fine-tuned")] + ], + Discriminator(lambda m: get_discriminator(m, "type", "type")), +] class ModelListTypedDict(TypedDict): object: NotRequired[str] data: NotRequired[List[DataTypedDict]] - + class ModelList(BaseModel): object: Optional[str] = "list" + data: Optional[List[Data]] = None - diff --git a/src/mistralai/models/responseformat.py b/src/mistralai/models/responseformat.py index bda9f68..bf53869 100644 --- a/src/mistralai/models/responseformat.py +++ b/src/mistralai/models/responseformat.py @@ -10,9 +10,8 @@ class ResponseFormatTypedDict(TypedDict): type: NotRequired[ResponseFormats] r"""An object specifying the format that the model must output. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message.""" - + class ResponseFormat(BaseModel): type: Optional[ResponseFormats] = None r"""An object specifying the format that the model must output. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. 
When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message.""" - diff --git a/src/mistralai/models/retrieve_model_v1_models_model_id_getop.py b/src/mistralai/models/retrieve_model_v1_models_model_id_getop.py index 15fe5fe..37c52c9 100644 --- a/src/mistralai/models/retrieve_model_v1_models_model_id_getop.py +++ b/src/mistralai/models/retrieve_model_v1_models_model_id_getop.py @@ -13,17 +13,25 @@ class RetrieveModelV1ModelsModelIDGetRequestTypedDict(TypedDict): model_id: str r"""The ID of the model to retrieve.""" - + class RetrieveModelV1ModelsModelIDGetRequest(BaseModel): - model_id: Annotated[str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False))] + model_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] r"""The ID of the model to retrieve.""" - -RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGetTypedDict = Union[BaseModelCardTypedDict, FTModelCardTypedDict] + +RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGetTypedDict = Union[ + BaseModelCardTypedDict, FTModelCardTypedDict +] r"""Successful Response""" -RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGet = Annotated[Union[Annotated[BaseModelCard, Tag("base")], Annotated[FTModelCard, Tag("fine-tuned")]], Discriminator(lambda m: get_discriminator(m, "type", "type"))] +RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGet = Annotated[ + Union[ + Annotated[BaseModelCard, Tag("base")], Annotated[FTModelCard, Tag("fine-tuned")] + ], + Discriminator(lambda m: get_discriminator(m, "type", "type")), +] r"""Successful Response""" - diff --git a/src/mistralai/models/retrievefileout.py b/src/mistralai/models/retrievefileout.py index cab3b65..9cc9bb2 100644 --- a/src/mistralai/models/retrievefileout.py +++ b/src/mistralai/models/retrievefileout.py @@ -3,7 +3,14 @@ from __future__ import annotations from .sampletype import SampleType from .source import Source -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL, UnrecognizedStr +from mistralai.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, + UnrecognizedStr, +) from mistralai.utils import validate_open_enum import pydantic from pydantic import model_serializer @@ -15,6 +22,7 @@ RetrieveFileOutPurpose = Union[Literal["fine-tune"], UnrecognizedStr] r"""The intended purpose of the uploaded file. Only accepts fine-tuning (`fine-tune`) for now.""" + class RetrieveFileOutTypedDict(TypedDict): id: str r"""The unique identifier of the file.""" @@ -29,25 +37,35 @@ class RetrieveFileOutTypedDict(TypedDict): sample_type: SampleType source: Source num_lines: NotRequired[Nullable[int]] - + class RetrieveFileOut(BaseModel): id: str r"""The unique identifier of the file.""" + object: str r"""The object type, which is always \"file\".""" + bytes: int r"""The size of the file, in bytes.""" + created_at: int r"""The UNIX timestamp (in seconds) of the event.""" + filename: str r"""The name of the uploaded file.""" + sample_type: SampleType + source: Source + + # fmt: off PURPOSE: Annotated[Final[Annotated[RetrieveFileOutPurpose, PlainValidator(validate_open_enum(False))]], pydantic.Field(alias="purpose")] = "fine-tune" # type: ignore + # fmt: on r"""The intended purpose of the uploaded file. 
Only accepts fine-tuning (`fine-tune`) for now.""" + num_lines: OptionalNullable[int] = UNSET - + @model_serializer(mode="wrap") def serialize_model(self, handler): optional_fields = ["num_lines"] @@ -61,9 +79,13 @@ def serialize_model(self, handler): for n, f in self.model_fields.items(): k = f.alias or n val = serialized.get(k) + serialized.pop(k, None) optional_nullable = k in optional_fields and k in nullable_fields - is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member if val is not None and val != UNSET_SENTINEL: m[k] = val @@ -73,4 +95,3 @@ def serialize_model(self, handler): m[k] = val return m - diff --git a/src/mistralai/models/security.py b/src/mistralai/models/security.py index 3d69602..5bd4c7e 100644 --- a/src/mistralai/models/security.py +++ b/src/mistralai/models/security.py @@ -9,8 +9,17 @@ class SecurityTypedDict(TypedDict): api_key: NotRequired[str] - + class Security(BaseModel): - api_key: Annotated[Optional[str], FieldMetadata(security=SecurityMetadata(scheme=True, scheme_type="http", sub_type="bearer", field_name="Authorization"))] = None - + api_key: Annotated[ + Optional[str], + FieldMetadata( + security=SecurityMetadata( + scheme=True, + scheme_type="http", + sub_type="bearer", + field_name="Authorization", + ) + ), + ] = None diff --git a/src/mistralai/models/systemmessage.py b/src/mistralai/models/systemmessage.py index 5a1b78f..cb359c7 100644 --- a/src/mistralai/models/systemmessage.py +++ b/src/mistralai/models/systemmessage.py @@ -18,9 +18,11 @@ class SystemMessageTypedDict(TypedDict): content: ContentTypedDict - + class SystemMessage(BaseModel): content: Content + + # fmt: off ROLE: Annotated[Final[Optional[Role]], pydantic.Field(alias="role")] = "system" # type: ignore - + # fmt: on diff --git a/src/mistralai/models/textchunk.py b/src/mistralai/models/textchunk.py index c3469f9..9c1f9d7 100644 --- a/src/mistralai/models/textchunk.py +++ b/src/mistralai/models/textchunk.py @@ -9,11 +9,14 @@ TextChunkType = Literal["text"] + class TextChunkTypedDict(TypedDict): text: str - + class TextChunk(BaseModel): text: str + + # fmt: off TYPE: Annotated[Final[Optional[TextChunkType]], pydantic.Field(alias="type")] = "text" # type: ignore - + # fmt: on diff --git a/src/mistralai/models/tool.py b/src/mistralai/models/tool.py index 4bd1193..51295f3 100644 --- a/src/mistralai/models/tool.py +++ b/src/mistralai/models/tool.py @@ -13,9 +13,11 @@ class ToolTypedDict(TypedDict): function: FunctionTypedDict type: NotRequired[ToolTypes] - + class Tool(BaseModel): function: Function - type: Annotated[Optional[ToolTypes], PlainValidator(validate_open_enum(False))] = None - + + type: Annotated[Optional[ToolTypes], PlainValidator(validate_open_enum(False))] = ( + None + ) diff --git a/src/mistralai/models/toolcall.py b/src/mistralai/models/toolcall.py index ba65651..66d570e 100644 --- a/src/mistralai/models/toolcall.py +++ b/src/mistralai/models/toolcall.py @@ -14,10 +14,13 @@ class ToolCallTypedDict(TypedDict): function: FunctionCallTypedDict id: NotRequired[str] type: NotRequired[ToolTypes] - + class ToolCall(BaseModel): function: FunctionCall + id: Optional[str] = "null" - type: Annotated[Optional[ToolTypes], PlainValidator(validate_open_enum(False))] = None - + + type: Annotated[Optional[ToolTypes], PlainValidator(validate_open_enum(False))] = ( + None + ) diff --git 
a/src/mistralai/models/toolchoice.py b/src/mistralai/models/toolchoice.py index 7e50fa8..fc36512 100644 --- a/src/mistralai/models/toolchoice.py +++ b/src/mistralai/models/toolchoice.py @@ -12,16 +12,18 @@ class ToolChoiceTypedDict(TypedDict): r"""ToolChoice is either a ToolChoiceEnum or a ToolChoice""" - + function: FunctionNameTypedDict r"""this restriction of `Function` is used to select a specific function to call""" type: NotRequired[ToolTypes] - + class ToolChoice(BaseModel): r"""ToolChoice is either a ToolChoiceEnum or a ToolChoice""" - + function: FunctionName r"""this restriction of `Function` is used to select a specific function to call""" - type: Annotated[Optional[ToolTypes], PlainValidator(validate_open_enum(False))] = None - + + type: Annotated[Optional[ToolTypes], PlainValidator(validate_open_enum(False))] = ( + None + ) diff --git a/src/mistralai/models/toolmessage.py b/src/mistralai/models/toolmessage.py index 139d742..752b0fc 100644 --- a/src/mistralai/models/toolmessage.py +++ b/src/mistralai/models/toolmessage.py @@ -10,18 +10,24 @@ ToolMessageRole = Literal["tool"] + class ToolMessageTypedDict(TypedDict): content: str tool_call_id: NotRequired[Nullable[str]] name: NotRequired[Nullable[str]] - + class ToolMessage(BaseModel): content: str + + # fmt: off ROLE: Annotated[Final[Optional[ToolMessageRole]], pydantic.Field(alias="role")] = "tool" # type: ignore + # fmt: on + tool_call_id: OptionalNullable[str] = UNSET + name: OptionalNullable[str] = UNSET - + @model_serializer(mode="wrap") def serialize_model(self, handler): optional_fields = ["role", "tool_call_id", "name"] @@ -35,9 +41,13 @@ def serialize_model(self, handler): for n, f in self.model_fields.items(): k = f.alias or n val = serialized.get(k) + serialized.pop(k, None) optional_nullable = k in optional_fields and k in nullable_fields - is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member if val is not None and val != UNSET_SENTINEL: m[k] = val @@ -47,4 +57,3 @@ def serialize_model(self, handler): m[k] = val return m - diff --git a/src/mistralai/models/trainingfile.py b/src/mistralai/models/trainingfile.py index 097ea17..1917d37 100644 --- a/src/mistralai/models/trainingfile.py +++ b/src/mistralai/models/trainingfile.py @@ -9,9 +9,9 @@ class TrainingFileTypedDict(TypedDict): file_id: str weight: NotRequired[float] - + class TrainingFile(BaseModel): file_id: str + weight: Optional[float] = 1 - diff --git a/src/mistralai/models/trainingparameters.py b/src/mistralai/models/trainingparameters.py index dcbb394..885f3ff 100644 --- a/src/mistralai/models/trainingparameters.py +++ b/src/mistralai/models/trainingparameters.py @@ -14,20 +14,38 @@ class TrainingParametersTypedDict(TypedDict): warmup_fraction: NotRequired[Nullable[float]] epochs: NotRequired[Nullable[float]] fim_ratio: NotRequired[Nullable[float]] - + class TrainingParameters(BaseModel): training_steps: OptionalNullable[int] = UNSET + learning_rate: Optional[float] = 0.0001 + weight_decay: OptionalNullable[float] = UNSET + warmup_fraction: OptionalNullable[float] = UNSET + epochs: OptionalNullable[float] = UNSET + fim_ratio: OptionalNullable[float] = UNSET - + @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["training_steps", "learning_rate", "weight_decay", "warmup_fraction", "epochs", "fim_ratio"] - nullable_fields = ["training_steps", 
"weight_decay", "warmup_fraction", "epochs", "fim_ratio"] + optional_fields = [ + "training_steps", + "learning_rate", + "weight_decay", + "warmup_fraction", + "epochs", + "fim_ratio", + ] + nullable_fields = [ + "training_steps", + "weight_decay", + "warmup_fraction", + "epochs", + "fim_ratio", + ] null_default_fields = [] serialized = handler(self) @@ -37,9 +55,13 @@ def serialize_model(self, handler): for n, f in self.model_fields.items(): k = f.alias or n val = serialized.get(k) + serialized.pop(k, None) optional_nullable = k in optional_fields and k in nullable_fields - is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member if val is not None and val != UNSET_SENTINEL: m[k] = val @@ -49,4 +71,3 @@ def serialize_model(self, handler): m[k] = val return m - diff --git a/src/mistralai/models/trainingparametersin.py b/src/mistralai/models/trainingparametersin.py index f4ec585..8ecb027 100644 --- a/src/mistralai/models/trainingparametersin.py +++ b/src/mistralai/models/trainingparametersin.py @@ -9,7 +9,7 @@ class TrainingParametersInTypedDict(TypedDict): r"""The fine-tuning hyperparameter settings used in a fine-tune job.""" - + training_steps: NotRequired[Nullable[int]] r"""The number of training steps to perform. A training step refers to a single update of the model weights during the fine-tuning process. This update is typically calculated using a batch of samples from the training dataset.""" learning_rate: NotRequired[float] @@ -20,26 +20,44 @@ class TrainingParametersInTypedDict(TypedDict): r"""(Advanced Usage) A parameter that specifies the percentage of the total training steps at which the learning rate warm-up phase ends. During this phase, the learning rate gradually increases from a small value to the initial learning rate, helping to stabilize the training process and improve convergence. Similar to `pct_start` in [mistral-finetune](https://github.com/mistralai/mistral-finetune)""" epochs: NotRequired[Nullable[float]] fim_ratio: NotRequired[Nullable[float]] - + class TrainingParametersIn(BaseModel): r"""The fine-tuning hyperparameter settings used in a fine-tune job.""" - + training_steps: OptionalNullable[int] = UNSET r"""The number of training steps to perform. A training step refers to a single update of the model weights during the fine-tuning process. This update is typically calculated using a batch of samples from the training dataset.""" + learning_rate: Optional[float] = 0.0001 r"""A parameter describing how much to adjust the pre-trained model's weights in response to the estimated error each time the weights are updated during the fine-tuning process.""" + weight_decay: OptionalNullable[float] = UNSET r"""(Advanced Usage) Weight decay adds a term to the loss function that is proportional to the sum of the squared weights. This term reduces the magnitude of the weights and prevents them from growing too large.""" + warmup_fraction: OptionalNullable[float] = UNSET r"""(Advanced Usage) A parameter that specifies the percentage of the total training steps at which the learning rate warm-up phase ends. During this phase, the learning rate gradually increases from a small value to the initial learning rate, helping to stabilize the training process and improve convergence. 
Similar to `pct_start` in [mistral-finetune](https://github.com/mistralai/mistral-finetune)""" + epochs: OptionalNullable[float] = UNSET + fim_ratio: OptionalNullable[float] = UNSET - + @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["training_steps", "learning_rate", "weight_decay", "warmup_fraction", "epochs", "fim_ratio"] - nullable_fields = ["training_steps", "weight_decay", "warmup_fraction", "epochs", "fim_ratio"] + optional_fields = [ + "training_steps", + "learning_rate", + "weight_decay", + "warmup_fraction", + "epochs", + "fim_ratio", + ] + nullable_fields = [ + "training_steps", + "weight_decay", + "warmup_fraction", + "epochs", + "fim_ratio", + ] null_default_fields = [] serialized = handler(self) @@ -49,9 +67,13 @@ def serialize_model(self, handler): for n, f in self.model_fields.items(): k = f.alias or n val = serialized.get(k) + serialized.pop(k, None) optional_nullable = k in optional_fields and k in nullable_fields - is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member if val is not None and val != UNSET_SENTINEL: m[k] = val @@ -61,4 +83,3 @@ def serialize_model(self, handler): m[k] = val return m - diff --git a/src/mistralai/models/unarchiveftmodelout.py b/src/mistralai/models/unarchiveftmodelout.py index 7391df2..6eac820 100644 --- a/src/mistralai/models/unarchiveftmodelout.py +++ b/src/mistralai/models/unarchiveftmodelout.py @@ -9,13 +9,17 @@ UnarchiveFTModelOutObject = Literal["model"] + class UnarchiveFTModelOutTypedDict(TypedDict): id: str archived: NotRequired[bool] - + class UnarchiveFTModelOut(BaseModel): id: str + + # fmt: off OBJECT: Annotated[Final[Optional[UnarchiveFTModelOutObject]], pydantic.Field(alias="object")] = "model" # type: ignore + # fmt: on + archived: Optional[bool] = False - diff --git a/src/mistralai/models/updateftmodelin.py b/src/mistralai/models/updateftmodelin.py index 8c3d847..c22c511 100644 --- a/src/mistralai/models/updateftmodelin.py +++ b/src/mistralai/models/updateftmodelin.py @@ -10,12 +10,13 @@ class UpdateFTModelInTypedDict(TypedDict): name: NotRequired[Nullable[str]] description: NotRequired[Nullable[str]] - + class UpdateFTModelIn(BaseModel): name: OptionalNullable[str] = UNSET + description: OptionalNullable[str] = UNSET - + @model_serializer(mode="wrap") def serialize_model(self, handler): optional_fields = ["name", "description"] @@ -29,9 +30,13 @@ def serialize_model(self, handler): for n, f in self.model_fields.items(): k = f.alias or n val = serialized.get(k) + serialized.pop(k, None) optional_nullable = k in optional_fields and k in nullable_fields - is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member if val is not None and val != UNSET_SENTINEL: m[k] = val @@ -41,4 +46,3 @@ def serialize_model(self, handler): m[k] = val return m - diff --git a/src/mistralai/models/uploadfileout.py b/src/mistralai/models/uploadfileout.py index dce8d0f..7754ae3 100644 --- a/src/mistralai/models/uploadfileout.py +++ b/src/mistralai/models/uploadfileout.py @@ -3,7 +3,14 @@ from __future__ import annotations from .sampletype import SampleType from .source import Source -from mistralai.types import BaseModel, Nullable, OptionalNullable, 
UNSET, UNSET_SENTINEL, UnrecognizedStr +from mistralai.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, + UnrecognizedStr, +) from mistralai.utils import validate_open_enum import pydantic from pydantic import model_serializer @@ -15,6 +22,7 @@ Purpose = Union[Literal["fine-tune"], UnrecognizedStr] r"""The intended purpose of the uploaded file. Only accepts fine-tuning (`fine-tune`) for now.""" + class UploadFileOutTypedDict(TypedDict): id: str r"""The unique identifier of the file.""" @@ -29,25 +37,35 @@ class UploadFileOutTypedDict(TypedDict): sample_type: SampleType source: Source num_lines: NotRequired[Nullable[int]] - + class UploadFileOut(BaseModel): id: str r"""The unique identifier of the file.""" + object: str r"""The object type, which is always \"file\".""" + bytes: int r"""The size of the file, in bytes.""" + created_at: int r"""The UNIX timestamp (in seconds) of the event.""" + filename: str r"""The name of the uploaded file.""" + sample_type: SampleType + source: Source + + # fmt: off PURPOSE: Annotated[Final[Annotated[Purpose, PlainValidator(validate_open_enum(False))]], pydantic.Field(alias="purpose")] = "fine-tune" # type: ignore + # fmt: on r"""The intended purpose of the uploaded file. Only accepts fine-tuning (`fine-tune`) for now.""" + num_lines: OptionalNullable[int] = UNSET - + @model_serializer(mode="wrap") def serialize_model(self, handler): optional_fields = ["num_lines"] @@ -61,9 +79,13 @@ def serialize_model(self, handler): for n, f in self.model_fields.items(): k = f.alias or n val = serialized.get(k) + serialized.pop(k, None) optional_nullable = k in optional_fields and k in nullable_fields - is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member if val is not None and val != UNSET_SENTINEL: m[k] = val @@ -73,4 +95,3 @@ def serialize_model(self, handler): m[k] = val return m - diff --git a/src/mistralai/models/usageinfo.py b/src/mistralai/models/usageinfo.py index 153ab6b..e8113e3 100644 --- a/src/mistralai/models/usageinfo.py +++ b/src/mistralai/models/usageinfo.py @@ -9,10 +9,11 @@ class UsageInfoTypedDict(TypedDict): prompt_tokens: int completion_tokens: int total_tokens: int - + class UsageInfo(BaseModel): prompt_tokens: int + completion_tokens: int + total_tokens: int - diff --git a/src/mistralai/models/usermessage.py b/src/mistralai/models/usermessage.py index 93bd2a0..1764c99 100644 --- a/src/mistralai/models/usermessage.py +++ b/src/mistralai/models/usermessage.py @@ -18,9 +18,11 @@ class UserMessageTypedDict(TypedDict): content: UserMessageContentTypedDict - + class UserMessage(BaseModel): content: UserMessageContent + + # fmt: off ROLE: Annotated[Final[Optional[UserMessageRole]], pydantic.Field(alias="role")] = "user" # type: ignore - + # fmt: on diff --git a/src/mistralai/models/validationerror.py b/src/mistralai/models/validationerror.py index 42b9af4..ed394a6 100644 --- a/src/mistralai/models/validationerror.py +++ b/src/mistralai/models/validationerror.py @@ -15,10 +15,11 @@ class ValidationErrorTypedDict(TypedDict): loc: List[LocTypedDict] msg: str type: str - + class ValidationError(BaseModel): loc: List[Loc] + msg: str + type: str - diff --git a/src/mistralai/models/wandbintegration.py b/src/mistralai/models/wandbintegration.py index 2a86caa..7659e27 100644 --- a/src/mistralai/models/wandbintegration.py +++ 
b/src/mistralai/models/wandbintegration.py @@ -10,6 +10,7 @@ WandbIntegrationType = Literal["wandb"] + class WandbIntegrationTypedDict(TypedDict): project: str r"""The name of the project that the new run will be created under.""" @@ -18,18 +19,24 @@ class WandbIntegrationTypedDict(TypedDict): name: NotRequired[Nullable[str]] r"""A display name to set for the run. If not set, will use the job ID as the name.""" run_name: NotRequired[Nullable[str]] - + class WandbIntegration(BaseModel): project: str r"""The name of the project that the new run will be created under.""" + api_key: str r"""The WandB API key to use for authentication.""" + + # fmt: off TYPE: Annotated[Final[Optional[WandbIntegrationType]], pydantic.Field(alias="type")] = "wandb" # type: ignore + # fmt: on + name: OptionalNullable[str] = UNSET r"""A display name to set for the run. If not set, will use the job ID as the name.""" + run_name: OptionalNullable[str] = UNSET - + @model_serializer(mode="wrap") def serialize_model(self, handler): optional_fields = ["type", "name", "run_name"] @@ -43,9 +50,13 @@ def serialize_model(self, handler): for n, f in self.model_fields.items(): k = f.alias or n val = serialized.get(k) + serialized.pop(k, None) optional_nullable = k in optional_fields and k in nullable_fields - is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member if val is not None and val != UNSET_SENTINEL: m[k] = val @@ -55,4 +66,3 @@ def serialize_model(self, handler): m[k] = val return m - diff --git a/src/mistralai/models/wandbintegrationout.py b/src/mistralai/models/wandbintegrationout.py index f6e185a..5635af7 100644 --- a/src/mistralai/models/wandbintegrationout.py +++ b/src/mistralai/models/wandbintegrationout.py @@ -10,22 +10,28 @@ Type = Literal["wandb"] + class WandbIntegrationOutTypedDict(TypedDict): project: str r"""The name of the project that the new run will be created under.""" name: NotRequired[Nullable[str]] r"""A display name to set for the run. If not set, will use the job ID as the name.""" run_name: NotRequired[Nullable[str]] - + class WandbIntegrationOut(BaseModel): project: str r"""The name of the project that the new run will be created under.""" + + # fmt: off TYPE: Annotated[Final[Optional[Type]], pydantic.Field(alias="type")] = "wandb" # type: ignore + # fmt: on + name: OptionalNullable[str] = UNSET r"""A display name to set for the run. 
If not set, will use the job ID as the name.""" + run_name: OptionalNullable[str] = UNSET - + @model_serializer(mode="wrap") def serialize_model(self, handler): optional_fields = ["type", "name", "run_name"] @@ -39,9 +45,13 @@ def serialize_model(self, handler): for n, f in self.model_fields.items(): k = f.alias or n val = serialized.get(k) + serialized.pop(k, None) optional_nullable = k in optional_fields and k in nullable_fields - is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member if val is not None and val != UNSET_SENTINEL: m[k] = val @@ -51,4 +61,3 @@ def serialize_model(self, handler): m[k] = val return m - diff --git a/src/mistralai/models_.py b/src/mistralai/models_.py index 6c858f2..32fdcbc 100644 --- a/src/mistralai/models_.py +++ b/src/mistralai/models_.py @@ -7,12 +7,13 @@ from mistralai.utils import get_security_from_env from typing import Any, Optional + class Models(BaseSDK): r"""Model Management API""" - - + def list( - self, *, + self, + *, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -29,7 +30,7 @@ def list( url_variables = None if timeout_ms is None: timeout_ms = self.sdk_configuration.timeout_ms - + if server_url is not None: base_url = server_url req = self.build_request( @@ -46,44 +47,50 @@ def list( security=self.sdk_configuration.security, timeout_ms=timeout_ms, ) - + if retries == UNSET: if self.sdk_configuration.retry_config is not UNSET: retries = self.sdk_configuration.retry_config retry_config = None if isinstance(retries, utils.RetryConfig): - retry_config = (retries, [ - "429", - "500", - "502", - "503", - "504" - ]) - + retry_config = (retries, ["429", "500", "502", "503", "504"]) + http_res = self.do_request( - hook_ctx=HookContext(operation_id="list_models_v1_models_get", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + hook_ctx=HookContext( + operation_id="list_models_v1_models_get", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), request=req, - error_status_codes=["422","4XX","5XX"], - retry_config=retry_config + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, ) - + data: Any = None if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json(http_res.text, Optional[models.ModelList]) if utils.match_response(http_res, "422", "application/json"): data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) raise models.HTTPValidationError(data=data) - if utils.match_response(http_res, ["4XX","5XX"], "*"): - raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) - + if utils.match_response(http_res, ["4XX", "5XX"], "*"): + raise models.SDKError( + "API error occurred", http_res.status_code, http_res.text, http_res + ) + content_type = http_res.headers.get("Content-Type") - raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res.text, + http_res, + ) - - async def list_async( - self, *, + self, + *, retries: 
OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -100,10 +107,10 @@ async def list_async( url_variables = None if timeout_ms is None: timeout_ms = self.sdk_configuration.timeout_ms - + if server_url is not None: base_url = server_url - req = self.build_request( + req = self.build_request_async( method="GET", path="/v1/models", base_url=base_url, @@ -117,49 +124,57 @@ async def list_async( security=self.sdk_configuration.security, timeout_ms=timeout_ms, ) - + if retries == UNSET: if self.sdk_configuration.retry_config is not UNSET: retries = self.sdk_configuration.retry_config retry_config = None if isinstance(retries, utils.RetryConfig): - retry_config = (retries, [ - "429", - "500", - "502", - "503", - "504" - ]) - + retry_config = (retries, ["429", "500", "502", "503", "504"]) + http_res = await self.do_request_async( - hook_ctx=HookContext(operation_id="list_models_v1_models_get", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + hook_ctx=HookContext( + operation_id="list_models_v1_models_get", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), request=req, - error_status_codes=["422","4XX","5XX"], - retry_config=retry_config + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, ) - + data: Any = None if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json(http_res.text, Optional[models.ModelList]) if utils.match_response(http_res, "422", "application/json"): data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) raise models.HTTPValidationError(data=data) - if utils.match_response(http_res, ["4XX","5XX"], "*"): - raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) - + if utils.match_response(http_res, ["4XX", "5XX"], "*"): + raise models.SDKError( + "API error occurred", http_res.status_code, http_res.text, http_res + ) + content_type = http_res.headers.get("Content-Type") - raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res.text, + http_res, + ) - - def retrieve( - self, *, + self, + *, model_id: str, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, - ) -> Optional[models.RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGet]: + ) -> Optional[ + models.RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGet + ]: r"""Retrieve Model Retrieve a model information. 
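The reflowed `retrieve` wrapper above returns the `RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGet` union that this patch reformats earlier in this file set: a pydantic discriminated union that dispatches on the `type` field via `Tag`/`Discriminator`. A minimal sketch of that pattern, assuming pydantic v2.5+ and simplified stand-in models rather than the SDK's real `BaseModelCard`/`FTModelCard`:

from typing import Annotated, Literal, Union

from pydantic import BaseModel, Discriminator, Tag, TypeAdapter

class BaseCard(BaseModel):
    # Stand-in for BaseModelCard; only the discriminator field matters here.
    type: Literal["base"] = "base"
    id: str

class FineTunedCard(BaseModel):
    # Stand-in for FTModelCard.
    type: Literal["fine-tuned"] = "fine-tuned"
    id: str
    job: str

Card = Annotated[
    Union[
        Annotated[BaseCard, Tag("base")],
        Annotated[FineTunedCard, Tag("fine-tuned")],
    ],
    # The callable receives the raw input (a dict or a model instance) and
    # must return the tag string, mirroring get_discriminator(m, "type", "type").
    Discriminator(lambda m: m.get("type") if isinstance(m, dict) else m.type),
]

card = TypeAdapter(Card).validate_python(
    {"type": "fine-tuned", "id": "ft:open-mistral-7b:abc", "job": "job-123"}
)
assert isinstance(card, FineTunedCard)

Validation picks the union member whose `Tag` matches the value the callable returns, which is why the generated code can expose one return type for both base and fine-tuned model cards.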
@@ -173,14 +188,14 @@ def retrieve( url_variables = None if timeout_ms is None: timeout_ms = self.sdk_configuration.timeout_ms - + if server_url is not None: base_url = server_url - + request = models.RetrieveModelV1ModelsModelIDGetRequest( model_id=model_id, ) - + req = self.build_request( method="GET", path="/v1/models/{model_id}", @@ -195,49 +210,62 @@ def retrieve( security=self.sdk_configuration.security, timeout_ms=timeout_ms, ) - + if retries == UNSET: if self.sdk_configuration.retry_config is not UNSET: retries = self.sdk_configuration.retry_config retry_config = None if isinstance(retries, utils.RetryConfig): - retry_config = (retries, [ - "429", - "500", - "502", - "503", - "504" - ]) - + retry_config = (retries, ["429", "500", "502", "503", "504"]) + http_res = self.do_request( - hook_ctx=HookContext(operation_id="retrieve_model_v1_models__model_id__get", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + hook_ctx=HookContext( + operation_id="retrieve_model_v1_models__model_id__get", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), request=req, - error_status_codes=["422","4XX","5XX"], - retry_config=retry_config + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, ) - + data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, Optional[models.RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGet]) + return utils.unmarshal_json( + http_res.text, + Optional[ + models.RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGet + ], + ) if utils.match_response(http_res, "422", "application/json"): data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) raise models.HTTPValidationError(data=data) - if utils.match_response(http_res, ["4XX","5XX"], "*"): - raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) - + if utils.match_response(http_res, ["4XX", "5XX"], "*"): + raise models.SDKError( + "API error occurred", http_res.status_code, http_res.text, http_res + ) + content_type = http_res.headers.get("Content-Type") - raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res.text, + http_res, + ) - - async def retrieve_async( - self, *, + self, + *, model_id: str, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, - ) -> Optional[models.RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGet]: + ) -> Optional[ + models.RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGet + ]: r"""Retrieve Model Retrieve a model information. 
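The hunk below carries the substantive fix in this file: `retrieve_async` (like the other `*_async` methods in this patch) previously built its request with the synchronous `build_request`, and now calls `build_request_async` so the request is constructed against the async client. A hypothetical call-site sketch for the async path; the environment variable name and model ID are placeholders, not values taken from this patch:

import asyncio
import os

from mistralai import Mistral

async def main() -> None:
    # Assumes the API key is supplied via an environment variable.
    client = Mistral(api_key=os.environ["MISTRAL_API_KEY"])
    # retrieve_async resolves to a BaseModelCard or FTModelCard through the
    # discriminated union sketched above.
    card = await client.models.retrieve_async(model_id="mistral-small-latest")
    print(card)

asyncio.run(main())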
@@ -251,15 +279,15 @@ async def retrieve_async( url_variables = None if timeout_ms is None: timeout_ms = self.sdk_configuration.timeout_ms - + if server_url is not None: base_url = server_url - + request = models.RetrieveModelV1ModelsModelIDGetRequest( model_id=model_id, ) - - req = self.build_request( + + req = self.build_request_async( method="GET", path="/v1/models/{model_id}", base_url=base_url, @@ -273,44 +301,55 @@ async def retrieve_async( security=self.sdk_configuration.security, timeout_ms=timeout_ms, ) - + if retries == UNSET: if self.sdk_configuration.retry_config is not UNSET: retries = self.sdk_configuration.retry_config retry_config = None if isinstance(retries, utils.RetryConfig): - retry_config = (retries, [ - "429", - "500", - "502", - "503", - "504" - ]) - + retry_config = (retries, ["429", "500", "502", "503", "504"]) + http_res = await self.do_request_async( - hook_ctx=HookContext(operation_id="retrieve_model_v1_models__model_id__get", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + hook_ctx=HookContext( + operation_id="retrieve_model_v1_models__model_id__get", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), request=req, - error_status_codes=["422","4XX","5XX"], - retry_config=retry_config + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, ) - + data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, Optional[models.RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGet]) + return utils.unmarshal_json( + http_res.text, + Optional[ + models.RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGet + ], + ) if utils.match_response(http_res, "422", "application/json"): data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) raise models.HTTPValidationError(data=data) - if utils.match_response(http_res, ["4XX","5XX"], "*"): - raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) - + if utils.match_response(http_res, ["4XX", "5XX"], "*"): + raise models.SDKError( + "API error occurred", http_res.status_code, http_res.text, http_res + ) + content_type = http_res.headers.get("Content-Type") - raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res.text, + http_res, + ) - - def delete( - self, *, + self, + *, model_id: str, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -329,14 +368,14 @@ def delete( url_variables = None if timeout_ms is None: timeout_ms = self.sdk_configuration.timeout_ms - + if server_url is not None: base_url = server_url - + request = models.DeleteModelV1ModelsModelIDDeleteRequest( model_id=model_id, ) - + req = self.build_request( method="DELETE", path="/v1/models/{model_id}", @@ -351,44 +390,50 @@ def delete( security=self.sdk_configuration.security, timeout_ms=timeout_ms, ) - + if retries == UNSET: if self.sdk_configuration.retry_config is not UNSET: retries = self.sdk_configuration.retry_config retry_config = None if isinstance(retries, utils.RetryConfig): - retry_config = (retries, [ - "429", - "500", - "502", - "503", - "504" - ]) - + retry_config = (retries, 
["429", "500", "502", "503", "504"]) + http_res = self.do_request( - hook_ctx=HookContext(operation_id="delete_model_v1_models__model_id__delete", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + hook_ctx=HookContext( + operation_id="delete_model_v1_models__model_id__delete", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), request=req, - error_status_codes=["422","4XX","5XX"], - retry_config=retry_config + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, ) - + data: Any = None if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json(http_res.text, Optional[models.DeleteModelOut]) if utils.match_response(http_res, "422", "application/json"): data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) raise models.HTTPValidationError(data=data) - if utils.match_response(http_res, ["4XX","5XX"], "*"): - raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) - + if utils.match_response(http_res, ["4XX", "5XX"], "*"): + raise models.SDKError( + "API error occurred", http_res.status_code, http_res.text, http_res + ) + content_type = http_res.headers.get("Content-Type") - raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res.text, + http_res, + ) - - async def delete_async( - self, *, + self, + *, model_id: str, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -407,15 +452,15 @@ async def delete_async( url_variables = None if timeout_ms is None: timeout_ms = self.sdk_configuration.timeout_ms - + if server_url is not None: base_url = server_url - + request = models.DeleteModelV1ModelsModelIDDeleteRequest( model_id=model_id, ) - - req = self.build_request( + + req = self.build_request_async( method="DELETE", path="/v1/models/{model_id}", base_url=base_url, @@ -429,44 +474,50 @@ async def delete_async( security=self.sdk_configuration.security, timeout_ms=timeout_ms, ) - + if retries == UNSET: if self.sdk_configuration.retry_config is not UNSET: retries = self.sdk_configuration.retry_config retry_config = None if isinstance(retries, utils.RetryConfig): - retry_config = (retries, [ - "429", - "500", - "502", - "503", - "504" - ]) - + retry_config = (retries, ["429", "500", "502", "503", "504"]) + http_res = await self.do_request_async( - hook_ctx=HookContext(operation_id="delete_model_v1_models__model_id__delete", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + hook_ctx=HookContext( + operation_id="delete_model_v1_models__model_id__delete", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), request=req, - error_status_codes=["422","4XX","5XX"], - retry_config=retry_config + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, ) - + data: Any = None if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json(http_res.text, Optional[models.DeleteModelOut]) if utils.match_response(http_res, "422", "application/json"): data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) raise 
models.HTTPValidationError(data=data) - if utils.match_response(http_res, ["4XX","5XX"], "*"): - raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) - + if utils.match_response(http_res, ["4XX", "5XX"], "*"): + raise models.SDKError( + "API error occurred", http_res.status_code, http_res.text, http_res + ) + content_type = http_res.headers.get("Content-Type") - raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res.text, + http_res, + ) - - def update( - self, *, + self, + *, model_id: str, name: OptionalNullable[str] = UNSET, description: OptionalNullable[str] = UNSET, @@ -479,8 +530,8 @@ def update( Update a model name or description. :param model_id: The ID of the model to update. - :param name: - :param description: + :param name: + :param description: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -489,10 +540,10 @@ def update( url_variables = None if timeout_ms is None: timeout_ms = self.sdk_configuration.timeout_ms - + if server_url is not None: base_url = server_url - + request = models.JobsAPIRoutesFineTuningUpdateFineTunedModelRequest( model_id=model_id, update_ft_model_in=models.UpdateFTModelIn( @@ -500,7 +551,7 @@ def update( description=description, ), ) - + req = self.build_request( method="PATCH", path="/v1/fine_tuning/models/{model_id}", @@ -513,43 +564,51 @@ def update( user_agent_header="user-agent", accept_header_value="application/json", security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body(request.update_ft_model_in, False, False, "json", models.UpdateFTModelIn), + get_serialized_body=lambda: utils.serialize_request_body( + request.update_ft_model_in, False, False, "json", models.UpdateFTModelIn + ), timeout_ms=timeout_ms, ) - + if retries == UNSET: if self.sdk_configuration.retry_config is not UNSET: retries = self.sdk_configuration.retry_config retry_config = None if isinstance(retries, utils.RetryConfig): - retry_config = (retries, [ - "429", - "500", - "502", - "503", - "504" - ]) - + retry_config = (retries, ["429", "500", "502", "503", "504"]) + http_res = self.do_request( - hook_ctx=HookContext(operation_id="jobs_api_routes_fine_tuning_update_fine_tuned_model", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + hook_ctx=HookContext( + operation_id="jobs_api_routes_fine_tuning_update_fine_tuned_model", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), request=req, - error_status_codes=["4XX","5XX"], - retry_config=retry_config + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, ) - + if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json(http_res.text, Optional[models.FTModelOut]) - if utils.match_response(http_res, ["4XX","5XX"], "*"): - raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) - + if utils.match_response(http_res, ["4XX", "5XX"], "*"): + raise models.SDKError( + "API error occurred", http_res.status_code, 
http_res.text, http_res + ) + content_type = http_res.headers.get("Content-Type") - raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res.text, + http_res, + ) - - async def update_async( - self, *, + self, + *, model_id: str, name: OptionalNullable[str] = UNSET, description: OptionalNullable[str] = UNSET, @@ -562,8 +621,8 @@ async def update_async( Update a model name or description. :param model_id: The ID of the model to update. - :param name: - :param description: + :param name: + :param description: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -572,10 +631,10 @@ async def update_async( url_variables = None if timeout_ms is None: timeout_ms = self.sdk_configuration.timeout_ms - + if server_url is not None: base_url = server_url - + request = models.JobsAPIRoutesFineTuningUpdateFineTunedModelRequest( model_id=model_id, update_ft_model_in=models.UpdateFTModelIn( @@ -583,8 +642,8 @@ async def update_async( description=description, ), ) - - req = self.build_request( + + req = self.build_request_async( method="PATCH", path="/v1/fine_tuning/models/{model_id}", base_url=base_url, @@ -596,43 +655,51 @@ async def update_async( user_agent_header="user-agent", accept_header_value="application/json", security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body(request.update_ft_model_in, False, False, "json", models.UpdateFTModelIn), + get_serialized_body=lambda: utils.serialize_request_body( + request.update_ft_model_in, False, False, "json", models.UpdateFTModelIn + ), timeout_ms=timeout_ms, ) - + if retries == UNSET: if self.sdk_configuration.retry_config is not UNSET: retries = self.sdk_configuration.retry_config retry_config = None if isinstance(retries, utils.RetryConfig): - retry_config = (retries, [ - "429", - "500", - "502", - "503", - "504" - ]) - + retry_config = (retries, ["429", "500", "502", "503", "504"]) + http_res = await self.do_request_async( - hook_ctx=HookContext(operation_id="jobs_api_routes_fine_tuning_update_fine_tuned_model", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + hook_ctx=HookContext( + operation_id="jobs_api_routes_fine_tuning_update_fine_tuned_model", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), request=req, - error_status_codes=["4XX","5XX"], - retry_config=retry_config + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, ) - + if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json(http_res.text, Optional[models.FTModelOut]) - if utils.match_response(http_res, ["4XX","5XX"], "*"): - raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) - + if utils.match_response(http_res, ["4XX", "5XX"], "*"): + raise models.SDKError( + "API error occurred", http_res.status_code, http_res.text, http_res + ) + content_type = http_res.headers.get("Content-Type") - raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", 
http_res.status_code, http_res.text, http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res.text, + http_res, + ) - - def archive( - self, *, + self, + *, model_id: str, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -651,14 +718,14 @@ def archive( url_variables = None if timeout_ms is None: timeout_ms = self.sdk_configuration.timeout_ms - + if server_url is not None: base_url = server_url - + request = models.JobsAPIRoutesFineTuningArchiveFineTunedModelRequest( model_id=model_id, ) - + req = self.build_request( method="POST", path="/v1/fine_tuning/models/{model_id}/archive", @@ -673,40 +740,48 @@ def archive( security=self.sdk_configuration.security, timeout_ms=timeout_ms, ) - + if retries == UNSET: if self.sdk_configuration.retry_config is not UNSET: retries = self.sdk_configuration.retry_config retry_config = None if isinstance(retries, utils.RetryConfig): - retry_config = (retries, [ - "429", - "500", - "502", - "503", - "504" - ]) - + retry_config = (retries, ["429", "500", "502", "503", "504"]) + http_res = self.do_request( - hook_ctx=HookContext(operation_id="jobs_api_routes_fine_tuning_archive_fine_tuned_model", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + hook_ctx=HookContext( + operation_id="jobs_api_routes_fine_tuning_archive_fine_tuned_model", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), request=req, - error_status_codes=["4XX","5XX"], - retry_config=retry_config + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, ) - + if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, Optional[models.ArchiveFTModelOut]) - if utils.match_response(http_res, ["4XX","5XX"], "*"): - raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) - + return utils.unmarshal_json( + http_res.text, Optional[models.ArchiveFTModelOut] + ) + if utils.match_response(http_res, ["4XX", "5XX"], "*"): + raise models.SDKError( + "API error occurred", http_res.status_code, http_res.text, http_res + ) + content_type = http_res.headers.get("Content-Type") - raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res.text, + http_res, + ) - - async def archive_async( - self, *, + self, + *, model_id: str, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -725,15 +800,15 @@ async def archive_async( url_variables = None if timeout_ms is None: timeout_ms = self.sdk_configuration.timeout_ms - + if server_url is not None: base_url = server_url - + request = models.JobsAPIRoutesFineTuningArchiveFineTunedModelRequest( model_id=model_id, ) - - req = self.build_request( + + req = self.build_request_async( method="POST", path="/v1/fine_tuning/models/{model_id}/archive", base_url=base_url, @@ -747,40 +822,48 @@ async def archive_async( security=self.sdk_configuration.security, timeout_ms=timeout_ms, ) - + if retries == UNSET: if self.sdk_configuration.retry_config is not UNSET: retries = self.sdk_configuration.retry_config retry_config = None if isinstance(retries, 
utils.RetryConfig): - retry_config = (retries, [ - "429", - "500", - "502", - "503", - "504" - ]) - + retry_config = (retries, ["429", "500", "502", "503", "504"]) + http_res = await self.do_request_async( - hook_ctx=HookContext(operation_id="jobs_api_routes_fine_tuning_archive_fine_tuned_model", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + hook_ctx=HookContext( + operation_id="jobs_api_routes_fine_tuning_archive_fine_tuned_model", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), request=req, - error_status_codes=["4XX","5XX"], - retry_config=retry_config + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, ) - + if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, Optional[models.ArchiveFTModelOut]) - if utils.match_response(http_res, ["4XX","5XX"], "*"): - raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) - + return utils.unmarshal_json( + http_res.text, Optional[models.ArchiveFTModelOut] + ) + if utils.match_response(http_res, ["4XX", "5XX"], "*"): + raise models.SDKError( + "API error occurred", http_res.status_code, http_res.text, http_res + ) + content_type = http_res.headers.get("Content-Type") - raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res.text, + http_res, + ) - - def unarchive( - self, *, + self, + *, model_id: str, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -799,14 +882,14 @@ def unarchive( url_variables = None if timeout_ms is None: timeout_ms = self.sdk_configuration.timeout_ms - + if server_url is not None: base_url = server_url - + request = models.JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequest( model_id=model_id, ) - + req = self.build_request( method="DELETE", path="/v1/fine_tuning/models/{model_id}/archive", @@ -821,40 +904,48 @@ def unarchive( security=self.sdk_configuration.security, timeout_ms=timeout_ms, ) - + if retries == UNSET: if self.sdk_configuration.retry_config is not UNSET: retries = self.sdk_configuration.retry_config retry_config = None if isinstance(retries, utils.RetryConfig): - retry_config = (retries, [ - "429", - "500", - "502", - "503", - "504" - ]) - + retry_config = (retries, ["429", "500", "502", "503", "504"]) + http_res = self.do_request( - hook_ctx=HookContext(operation_id="jobs_api_routes_fine_tuning_unarchive_fine_tuned_model", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + hook_ctx=HookContext( + operation_id="jobs_api_routes_fine_tuning_unarchive_fine_tuned_model", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), request=req, - error_status_codes=["4XX","5XX"], - retry_config=retry_config + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, ) - + if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, Optional[models.UnarchiveFTModelOut]) - if utils.match_response(http_res, ["4XX","5XX"], "*"): - raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) - + return 
utils.unmarshal_json( + http_res.text, Optional[models.UnarchiveFTModelOut] + ) + if utils.match_response(http_res, ["4XX", "5XX"], "*"): + raise models.SDKError( + "API error occurred", http_res.status_code, http_res.text, http_res + ) + content_type = http_res.headers.get("Content-Type") - raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res.text, + http_res, + ) - - async def unarchive_async( - self, *, + self, + *, model_id: str, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -873,15 +964,15 @@ async def unarchive_async( url_variables = None if timeout_ms is None: timeout_ms = self.sdk_configuration.timeout_ms - + if server_url is not None: base_url = server_url - + request = models.JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequest( model_id=model_id, ) - - req = self.build_request( + + req = self.build_request_async( method="DELETE", path="/v1/fine_tuning/models/{model_id}/archive", base_url=base_url, @@ -895,34 +986,41 @@ async def unarchive_async( security=self.sdk_configuration.security, timeout_ms=timeout_ms, ) - + if retries == UNSET: if self.sdk_configuration.retry_config is not UNSET: retries = self.sdk_configuration.retry_config retry_config = None if isinstance(retries, utils.RetryConfig): - retry_config = (retries, [ - "429", - "500", - "502", - "503", - "504" - ]) - + retry_config = (retries, ["429", "500", "502", "503", "504"]) + http_res = await self.do_request_async( - hook_ctx=HookContext(operation_id="jobs_api_routes_fine_tuning_unarchive_fine_tuned_model", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + hook_ctx=HookContext( + operation_id="jobs_api_routes_fine_tuning_unarchive_fine_tuned_model", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), request=req, - error_status_codes=["4XX","5XX"], - retry_config=retry_config + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, ) - + if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, Optional[models.UnarchiveFTModelOut]) - if utils.match_response(http_res, ["4XX","5XX"], "*"): - raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) - - content_type = http_res.headers.get("Content-Type") - raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + return utils.unmarshal_json( + http_res.text, Optional[models.UnarchiveFTModelOut] + ) + if utils.match_response(http_res, ["4XX", "5XX"], "*"): + raise models.SDKError( + "API error occurred", http_res.status_code, http_res.text, http_res + ) - + content_type = http_res.headers.get("Content-Type") + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res.text, + http_res, + ) diff --git a/src/mistralai/sdk.py b/src/mistralai/sdk.py index 1b79f70..05029ab 100644 --- a/src/mistralai/sdk.py +++ b/src/mistralai/sdk.py @@ -18,8 +18,10 @@ from mistralai.types import OptionalNullable, UNSET from typing import Any, Callable, Dict, Optional, Union + class Mistral(BaseSDK): 
r"""Mistral AI API: Our Chat Completion and Embeddings APIs specification. Create your account on [La Plateforme](https://console.mistral.ai) to get access and read the [docs](https://docs.mistral.ai) to learn how to use it.""" + models: Models r"""Model Management API""" files: Files @@ -33,6 +35,7 @@ class Mistral(BaseSDK): r"""Agents API.""" embeddings: Embeddings r"""Embeddings API.""" + def __init__( self, api_key: Optional[Union[Optional[str], Callable[[], Optional[str]]]] = None, @@ -43,7 +46,7 @@ def __init__( async_client: Optional[AsyncHttpClient] = None, retry_config: OptionalNullable[RetryConfig] = UNSET, timeout_ms: Optional[int] = None, - debug_logger: Optional[Logger] = None + debug_logger: Optional[Logger] = None, ) -> None: r"""Instantiates the SDK configuring it with the provided parameters. @@ -72,33 +75,37 @@ def __init__( assert issubclass( type(async_client), AsyncHttpClient ), "The provided async_client must implement the AsyncHttpClient protocol." - + security: Any = None if callable(api_key): - security = lambda: models.Security(api_key = api_key()) # pylint: disable=unnecessary-lambda-assignment + security = lambda: models.Security(api_key=api_key()) # pylint: disable=unnecessary-lambda-assignment else: - security = models.Security(api_key = api_key) + security = models.Security(api_key=api_key) if server_url is not None: if url_params is not None: server_url = utils.template_url(server_url, url_params) - - - BaseSDK.__init__(self, SDKConfiguration( - client=client, - async_client=async_client, - security=security, - server_url=server_url, - server=server, - retry_config=retry_config, - timeout_ms=timeout_ms, - debug_logger=debug_logger - )) + + BaseSDK.__init__( + self, + SDKConfiguration( + client=client, + async_client=async_client, + security=security, + server_url=server_url, + server=server, + retry_config=retry_config, + timeout_ms=timeout_ms, + debug_logger=debug_logger, + ), + ) hooks = SDKHooks() current_server_url, *_ = self.sdk_configuration.get_server_details() - server_url, self.sdk_configuration.client = hooks.sdk_init(current_server_url, self.sdk_configuration.client) + server_url, self.sdk_configuration.client = hooks.sdk_init( + current_server_url, self.sdk_configuration.client + ) if current_server_url != server_url: self.sdk_configuration.server_url = server_url @@ -107,7 +114,6 @@ def __init__( self._init_sdks() - def _init_sdks(self): self.models = Models(self.sdk_configuration) self.files = Files(self.sdk_configuration) @@ -116,4 +122,3 @@ def _init_sdks(self): self.fim = Fim(self.sdk_configuration) self.agents = Agents(self.sdk_configuration) self.embeddings = Embeddings(self.sdk_configuration) - diff --git a/src/mistralai/sdkconfiguration.py b/src/mistralai/sdkconfiguration.py index 34bda64..d2af46d 100644 --- a/src/mistralai/sdkconfiguration.py +++ b/src/mistralai/sdkconfiguration.py @@ -1,6 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - from ._hooks import SDKHooks from .httpclient import AsyncHttpClient, HttpClient from .utils import Logger, RetryConfig, remove_suffix @@ -14,7 +13,7 @@ SERVER_PROD = "prod" r"""Production server""" SERVERS = { - SERVER_PROD: "https://api.mistral.ai", + SERVER_PROD: "https://api.mistral.ai", } """Contains the list of servers available to the SDK""" @@ -24,14 +23,14 @@ class SDKConfiguration: client: HttpClient async_client: AsyncHttpClient debug_logger: Logger - security: Optional[Union[models.Security,Callable[[], models.Security]]] = None + security: Optional[Union[models.Security, Callable[[], models.Security]]] = None server_url: Optional[str] = "" server: Optional[str] = "" language: str = "python" openapi_doc_version: str = "0.0.2" - sdk_version: str = "1.0.4" - gen_version: str = "2.404.11" - user_agent: str = "speakeasy-sdk/python 1.0.4 2.404.11 0.0.2 mistralai" + sdk_version: str = "1.1.0" + gen_version: str = "2.415.6" + user_agent: str = "speakeasy-sdk/python 1.1.0 2.415.6 0.0.2 mistralai" retry_config: OptionalNullable[RetryConfig] = Field(default_factory=lambda: UNSET) timeout_ms: Optional[int] = None @@ -45,10 +44,9 @@ def get_server_details(self) -> Tuple[str, Dict[str, str]]: self.server = SERVER_PROD if self.server not in SERVERS: - raise ValueError(f"Invalid server \"{self.server}\"") + raise ValueError(f'Invalid server "{self.server}"') return SERVERS[self.server], {} - def get_hooks(self) -> SDKHooks: return self._hooks diff --git a/src/mistralai/utils/__init__.py b/src/mistralai/utils/__init__.py index feee4dc..74109c1 100644 --- a/src/mistralai/utils/__init__.py +++ b/src/mistralai/utils/__init__.py @@ -34,7 +34,12 @@ validate_open_enum, ) from .url import generate_url, template_url, remove_suffix -from .values import get_global_from_env, match_content_type, match_status_codes, match_response +from .values import ( + get_global_from_env, + match_content_type, + match_status_codes, + match_response, +) from .logger import Logger, get_body_content, get_default_logger __all__ = [ diff --git a/src/mistralai/utils/logger.py b/src/mistralai/utils/logger.py index c4ea1a0..cc08930 100644 --- a/src/mistralai/utils/logger.py +++ b/src/mistralai/utils/logger.py @@ -5,20 +5,23 @@ import os from typing import Any, Protocol + class Logger(Protocol): def debug(self, msg: str, *args: Any, **kwargs: Any) -> None: pass + class NoOpLogger: def debug(self, msg: str, *args: Any, **kwargs: Any) -> None: pass + def get_body_content(req: httpx.Request) -> str: return "" if not hasattr(req, "_content") else str(req.content) + def get_default_logger() -> Logger: if os.getenv("MISTRAL_DEBUG"): logging.basicConfig(level=logging.DEBUG) return logging.getLogger("mistralai") return NoOpLogger() - diff --git a/src/mistralai/utils/retries.py b/src/mistralai/utils/retries.py index a06f927..4d60867 100644 --- a/src/mistralai/utils/retries.py +++ b/src/mistralai/utils/retries.py @@ -1,5 +1,6 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +import asyncio import random import time from typing import List @@ -212,5 +213,5 @@ async def retry_with_backoff_async( raise sleep = (initial_interval / 1000) * exponent**retries + random.uniform(0, 1) sleep = min(sleep, max_interval / 1000) - time.sleep(sleep) + await asyncio.sleep(sleep) retries += 1 diff --git a/src/mistralai/utils/security.py b/src/mistralai/utils/security.py index 4c511d9..3b8526b 100644 --- a/src/mistralai/utils/security.py +++ b/src/mistralai/utils/security.py @@ -44,8 +44,10 @@ def get_security(security: Any) -> Tuple[Dict[str, str], Dict[str, List[str]]]: _parse_security_option(headers, query_params, value) return headers, query_params if metadata.scheme: - # Special case for basic auth which could be a flattened model - if metadata.sub_type == "basic" and not isinstance(value, BaseModel): + # Special case for basic auth or custom auth which could be a flattened model + if metadata.sub_type in ["basic", "custom"] and not isinstance( + value, BaseModel + ): _parse_security_scheme(headers, query_params, metadata, name, security) else: _parse_security_scheme(headers, query_params, metadata, name, value) @@ -64,7 +66,7 @@ def get_security_from_env(security: Any, security_class: Any) -> Optional[BaseMo if os.getenv("MISTRAL_API_KEY"): security_dict["api_key"] = os.getenv("MISTRAL_API_KEY") - + return security_class(**security_dict) if security_dict else None @@ -97,9 +99,12 @@ def _parse_security_scheme( sub_type = scheme_metadata.sub_type if isinstance(scheme, BaseModel): - if scheme_type == "http" and sub_type == "basic": - _parse_basic_auth_scheme(headers, scheme) - return + if scheme_type == "http": + if sub_type == "basic": + _parse_basic_auth_scheme(headers, scheme) + return + if sub_type == "custom": + return scheme_fields: Dict[str, FieldInfo] = scheme.__class__.model_fields for name in scheme_fields: @@ -148,6 +153,8 @@ def _parse_security_scheme_value( elif scheme_type == "http": if sub_type == "bearer": headers[header_name] = _apply_bearer(value) + elif sub_type == "custom": + return else: raise ValueError("sub type {sub_type} not supported") else:
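
The retries.py hunk above swaps time.sleep for await asyncio.sleep inside retry_with_backoff_async. The distinction matters: a blocking sleep inside a coroutine stalls the whole event loop, so while one request backs off, every other in-flight async request is frozen too; awaiting asyncio.sleep yields control instead. The standalone sketch below reproduces the backoff shape from the generated code to show the effect. It is illustrative only, not part of the SDK: run_with_backoff and flaky_call are hypothetical names, and the interval/exponent parameters merely mirror the expression visible in the hunk.

# Minimal sketch of jittered exponential backoff in async code -- assumes
# nothing about the SDK beyond the formula shown in the diff above.
import asyncio
import random


async def run_with_backoff(
    op,
    max_retries: int = 3,
    initial_interval_ms: int = 500,
    exponent: float = 1.5,
    max_interval_ms: int = 60_000,
):
    """Retry an async operation, sleeping between attempts without blocking."""
    retries = 0
    while True:
        try:
            return await op()
        except Exception:
            if retries >= max_retries:
                raise
            # Same shape as the generated code:
            # base * exponent**retries + jitter, capped at max_interval (seconds).
            sleep = (initial_interval_ms / 1000) * exponent**retries + random.uniform(0, 1)
            sleep = min(sleep, max_interval_ms / 1000)
            # Awaiting here yields to the event loop; time.sleep(sleep) would
            # have blocked every other coroutine for the full duration.
            await asyncio.sleep(sleep)
            retries += 1


async def flaky_call():
    # Stand-in for an HTTP call that fails transiently (e.g. a 429 or 5XX).
    if random.random() < 0.7:
        raise RuntimeError("transient failure")
    return "ok"


async def main():
    # Two concurrent callers: with await asyncio.sleep, one caller backing
    # off no longer freezes the other while it waits.
    results = await asyncio.gather(
        run_with_backoff(flaky_call),
        run_with_backoff(flaky_call),
        return_exceptions=True,
    )
    print(results)


if __name__ == "__main__":
    asyncio.run(main())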