From c66686f59448ea6fc9c5134a04ca31db51236b19 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9mence=20Lesn=C3=A9?= Date: Mon, 29 Jul 2024 12:21:54 +0000 Subject: [PATCH 1/5] doc: Make clearer the local install instructions --- Makefile | 33 ++++----- README.md | 196 +++++++++++++++++++++++++++--------------------------- 2 files changed, 117 insertions(+), 112 deletions(-) diff --git a/Makefile b/Makefile index ebaf7315..1fd42905 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ # Versioning version_full ?= $(shell $(MAKE) --silent version-full) version_small ?= $(shell $(MAKE) --silent version) -# DevTunnel configuration +# Dev tunnels configuration tunnel_name := call-center-ai-$(shell hostname | sed 's/[^a-zA-Z0-9]//g' | tr '[:upper:]' '[:lower:]') tunnel_url ?= $(shell res=$$(devtunnel show $(tunnel_name) | grep -o 'http[s]*://[^"]*' | xargs) && echo $${res%/}) # App location @@ -121,6 +121,22 @@ dev: VERSION=$(version_full) PUBLIC_DOMAIN=$(tunnel_url) func start deploy: + $(MAKE) deploy-bicep + + @echo "💤 Wait 10 secs for output to be available..." + sleep 10 + + @echo "🛠️ Deploying Function App..." + func azure functionapp publish $(function_app_name) \ + --build local \ + --build-native-deps \ + --python + + @echo "🚀 Call Center AI is running on $(app_url)" + + @$(MAKE) deploy-post + +deploy-bicep: @echo "👀 Current subscription:" @az account show --query "{subscriptionId:id, subscriptionName:name, tenantId:tenantId}" --output table @@ -137,20 +153,7 @@ deploy: --template-file bicep/main.bicep \ --name $(name_sanitized) - @echo "💤 Wait 10 secs for output to be available..." - sleep 10 - - @echo "🛠️ Deploying Function App..." - func azure functionapp publish $(function_app_name) \ - --build local \ - --build-native-deps \ - --python - - @echo "🚀 Call Center AI is running on $(app_url)" - - @$(MAKE) post-deploy name=$(name_sanitized) - -post-deploy: +deploy-post: @$(MAKE) copy-resources \ name=$(blob_storage_public_name) diff --git a/README.md b/README.md index e1e58c87..6b1c083b 100644 --- a/README.md +++ b/README.md @@ -243,7 +243,7 @@ sequenceDiagram ## Deployment -Some prerequisites are needed to deploy the solution. +Some local prerequisites are needed to deploy the solution. [Prefer using GitHub Codespaces for a quick start.](https://codespaces.new/microsoft/call-center-ai?quickstart=1) The environment will setup automatically with all the required tools. @@ -258,9 +258,7 @@ For other systems, make sure you have the following installed: - [Azure Functions Core Tools](https://github.com/Azure/azure-functions-core-tools?tab=readme-ov-file#installing) - [Twilio CLI](https://www.twilio.com/docs/twilio-cli/getting-started/install) (optional) -### Remote (on Azure) - -Steps to deploy: +Then, Azure resources are needed: 1. [Create a new resource group](https://learn.microsoft.com/en-us/azure/azure-resource-manager/management/manage-resource-groups-portal) @@ -277,7 +275,11 @@ Steps to deploy: - Allow inbound and outbound communication - Enable voice (required) and SMS (optional) capabilities -4. Create a local `config.yaml` file +Now that the prerequisites are configured (local + Azure), the deployment can be done. + +### Remote (on Azure) + +1. Create a local `config.yaml` file ```yaml # config.yaml @@ -300,12 +302,12 @@ Steps to deploy: tts: {} ``` -5. Connect to your Azure environment (e.g. `az login`) -6. Run deployment automation with `make deploy name=my-rg-name` +2. Connect to your Azure environment (e.g. `az login`) +3. 
Run deployment automation with `make deploy name=my-rg-name` - Wait for the deployment to finish -7. [Create a AI Search resource](https://learn.microsoft.com/en-us/azure/search/search-create-service-portal) +4. [Create a AI Search resource](https://learn.microsoft.com/en-us/azure/search/search-create-service-portal) - An index named `trainings` - A semantic search configuration on the index named `default` @@ -314,110 +316,110 @@ Get the logs with `make logs name=my-rg-name`. ### Local (on your machine) -#### Prerequisites for local development - -Place a file called `config.yaml` in the root of the project with the following content: +1. Create a local `config.yaml` file -```yaml -# config.yaml -resources: - public_url: https://xxx.blob.core.windows.net/public + > [!TIP] + > To use a Service Principal to authenticate to Azure, you can also add the following in a `.env` file: + > + > ```dotenv + > AZURE_CLIENT_ID=xxx + > AZURE_CLIENT_SECRET=xxx + > AZURE_TENANT_ID=xxx + > ``` -conversation: - initiate: - agent_phone_number: "+33612345678" - bot_company: Contoso - bot_name: Robert - -communication_services: - access_key: xxx - call_queue_name: call-33612345678 - endpoint: https://xxx.france.communication.azure.com - phone_number: "+33612345678" - post_queue_name: post-33612345678 - resource_id: xxx - sms_queue_name: sms-33612345678 - -cognitive_service: - # Must be of type "AI services multi-service account" - endpoint: https://xxx.cognitiveservices.azure.com + > [!TIP] + > If you already deployed the application to Azure and if it is working, you can: + > + > - Copy the configuration from the Azure Function App to your local machine by using the content of the `CONFIG_JSON` application setting + > - Then convert it to YAML format -llm: - fast: - mode: azure_openai - azure_openai: - api_key: xxx - context: 16385 - deployment: gpt-35-turbo-0125 - endpoint: https://xxx.openai.azure.com - model: gpt-35-turbo - streaming: true - slow: - mode: azure_openai - azure_openai: - api_key: xxx - context: 128000 - deployment: gpt-4o-2024-05-13 - endpoint: https://xxx.openai.azure.com - model: gpt-4o - streaming: true - -ai_search: - access_key: xxx - endpoint: https://xxx.search.windows.net - index: trainings - -ai_translation: - access_key: xxx - endpoint: https://xxx.cognitiveservices.azure.com -``` - -To use a Service Principal to authenticate to Azure, you can also add the following in a `.env` file: - -```dotenv -AZURE_CLIENT_ID=xxx -AZURE_CLIENT_SECRET=xxx -AZURE_TENANT_ID=xxx -``` - -To override a specific configuration value, you can also use environment variables. 
For example, to override the `llm.fast.endpoint` value, you can use the `LLM__FAST__ENDPOINT` variable: - -```dotenv -LLM__FAST__ENDPOINT=https://xxx.openai.azure.com -``` + ```yaml + # config.yaml + resources: + public_url: https://xxx.blob.core.windows.net/public -Then run: + conversation: + initiate: + agent_phone_number: "+33612345678" + bot_company: Contoso + bot_name: Robert -```bash -# Install dependencies -make install -``` + communication_services: + access_key: xxx + call_queue_name: call-33612345678 + endpoint: https://xxx.france.communication.azure.com + phone_number: "+33612345678" + post_queue_name: post-33612345678 + resource_id: xxx + sms_queue_name: sms-33612345678 + + cognitive_service: + # Must be of type "AI services multi-service account" + endpoint: https://xxx.cognitiveservices.azure.com + + llm: + fast: + mode: azure_openai + azure_openai: + api_key: xxx + context: 16385 + deployment: gpt-35-turbo-0125 + endpoint: https://xxx.openai.azure.com + model: gpt-35-turbo + streaming: true + slow: + mode: azure_openai + azure_openai: + api_key: xxx + context: 128000 + deployment: gpt-4o-2024-05-13 + endpoint: https://xxx.openai.azure.com + model: gpt-4o + streaming: true + + ai_search: + access_key: xxx + endpoint: https://xxx.search.windows.net + index: trainings + + ai_translation: + access_key: xxx + endpoint: https://xxx.cognitiveservices.azure.com + ``` -Also, a public file server is needed to host the audio files. Upload the files with `make copy-resources name=my-rg-name` (`my-rg-name` is the storage account name), or manually. +2. Run the deployment automation with `make deploy-bicep deploy-post name=my-rg-name` -For your knowledge, this `resources` folder contains: + - This will deploy the Azure resources without the API server, allowing you to test the bot locally + - Wait for the deployment to finish -- Audio files (`xxx.wav`) to be played during the call -- [Lexicon file (`lexicon.xml`)](https://learn.microsoft.com/en-us/azure/ai-services/speech-service/speech-synthesis-markup-pronunciation#custom-lexicon) to be used by the bot to understand the company products (note: any change [makes up to 15 minutes](https://learn.microsoft.com/en-us/azure/ai-services/speech-service/speech-synthesis-markup-pronunciation#custom-lexicon-file) to be taken into account) +3. Copy local file `local.example.settings.json` to `local.settings.json` and fill the required fields -#### Run + - `APPLICATIONINSIGHTS_CONNECTION_STRING`, as the connection string of the Application Insights resource + - `AzureWebJobsStorage`, as the connection string of the Azure Storage account -Finally, run: +4. Connect to Azure Dev tunnels with `devtunnel login`, then run it with `make tunnel` -```bash -# Start the local API server -make dev -``` + > [!IMPORTANT] + > Tunnel requires to be run in a separate terminal, because it needs to be running all the time -#### Debug +5. Iterate quickly with the code by running `make dev` -Breakpoints can be added in the code to debug the application with your favorite IDE. + > [!NOTE] + > To override a specific configuration value, you can use environment variables. For example, to override the `llm.fast.endpoint` value, you can use the `LLM__FAST__ENDPOINT` variable: + > + > ```dotenv + > LLM__FAST__ENDPOINT=https://xxx.openai.azure.com + > ``` -Also, `local.py` script is available to test the application without the need of a phone call (= without Communication Services). 
Run the script with: + > [!NOTE] + > Also, `local.py` script is available to test the application without the need of a phone call (= without Communication Services). Run the script with: + > + > ```bash + > python3 -m tests.local + > ``` -```bash -python3 -m tests.local -``` + - Code is automatically reloaded on file changes, no need to restart the server + - The API server is available at `http://localhost:8080` ## Advanced usage From aa7f30e0f2dad886663ceb37f8113d347e8d17e2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9mence=20Lesn=C3=A9?= Date: Mon, 29 Jul 2024 12:30:26 +0000 Subject: [PATCH 2/5] doc: Use headings instead of lists for installation steps --- README.md | 305 ++++++++++++++++++++++++++---------------------- configs/key.txt | 3 + 2 files changed, 169 insertions(+), 139 deletions(-) create mode 100644 configs/key.txt diff --git a/README.md b/README.md index 6b1c083b..ee6c338b 100644 --- a/README.md +++ b/README.md @@ -243,7 +243,7 @@ sequenceDiagram ## Deployment -Some local prerequisites are needed to deploy the solution. +### Prerequisites [Prefer using GitHub Codespaces for a quick start.](https://codespaces.new/microsoft/call-center-ai?quickstart=1) The environment will setup automatically with all the required tools. @@ -260,166 +260,193 @@ For other systems, make sure you have the following installed: Then, Azure resources are needed: -1. [Create a new resource group](https://learn.microsoft.com/en-us/azure/azure-resource-manager/management/manage-resource-groups-portal) +#### 1. [Create a new resource group](https://learn.microsoft.com/en-us/azure/azure-resource-manager/management/manage-resource-groups-portal) - - Prefer to use lowercase and no special characters other than dashes (e.g. `ccai-customer-a`) +- Prefer to use lowercase and no special characters other than dashes (e.g. `ccai-customer-a`) -2. [Create a Communication Services resource](https://learn.microsoft.com/en-us/azure/communication-services/quickstarts/create-communication-resource?tabs=linux&pivots=platform-azp) +#### 2. [Create a Communication Services resource](https://learn.microsoft.com/en-us/azure/communication-services/quickstarts/create-communication-resource?tabs=linux&pivots=platform-azp) - - Same name as the resource group - - Enable system managed identity +- Same name as the resource group +- Enable system managed identity -3. [Buy a phone number](https://learn.microsoft.com/en-us/azure/communication-services/quickstarts/telephony/get-phone-number?tabs=linux&pivots=platform-azp-new) +#### 3. [Buy a phone number](https://learn.microsoft.com/en-us/azure/communication-services/quickstarts/telephony/get-phone-number?tabs=linux&pivots=platform-azp-new) - - From the Communication Services resource - - Allow inbound and outbound communication - - Enable voice (required) and SMS (optional) capabilities +- From the Communication Services resource +- Allow inbound and outbound communication +- Enable voice (required) and SMS (optional) capabilities Now that the prerequisites are configured (local + Azure), the deployment can be done. ### Remote (on Azure) -1. Create a local `config.yaml` file +#### 1. 
Create the light config file - ```yaml - # config.yaml - conversation: - initiate: - # Phone number the bot will transfer the call to if customer asks for a human agent - agent_phone_number: "+33612345678" - bot_company: Contoso - bot_name: Amélie - lang: {} +File is named `config.yaml`: - communication_services: - # Phone number purshased from Communication Services - phone_number: "+33612345678" +```yaml +# config.yaml +conversation: + initiate: + # Phone number the bot will transfer the call to if customer asks for a human agent + agent_phone_number: "+33612345678" + bot_company: Contoso + bot_name: Amélie + lang: {} + +communication_services: + # Phone number purshased from Communication Services + phone_number: "+33612345678" + +sms: {} + +prompts: + llm: {} + tts: {} +``` - sms: {} +#### 2. Connect to your Azure environment - prompts: - llm: {} - tts: {} - ``` +```zsh +az login +``` -2. Connect to your Azure environment (e.g. `az login`) -3. Run deployment automation with `make deploy name=my-rg-name` +#### 3. Run deployment automation - - Wait for the deployment to finish +```zsh +make deploy name=my-rg-name +``` -4. [Create a AI Search resource](https://learn.microsoft.com/en-us/azure/search/search-create-service-portal) +- Wait for the deployment to finish - - An index named `trainings` - - A semantic search configuration on the index named `default` +#### 4. [Create a AI Search resource](https://learn.microsoft.com/en-us/azure/search/search-create-service-portal) -Get the logs with `make logs name=my-rg-name`. +- An index named `trainings` +- A semantic search configuration on the index named `default` + +#### 5. Get the logs + +```zsh +make logs name=my-rg-name +``` ### Local (on your machine) -1. Create a local `config.yaml` file - - > [!TIP] - > To use a Service Principal to authenticate to Azure, you can also add the following in a `.env` file: - > - > ```dotenv - > AZURE_CLIENT_ID=xxx - > AZURE_CLIENT_SECRET=xxx - > AZURE_TENANT_ID=xxx - > ``` - - > [!TIP] - > If you already deployed the application to Azure and if it is working, you can: - > - > - Copy the configuration from the Azure Function App to your local machine by using the content of the `CONFIG_JSON` application setting - > - Then convert it to YAML format - - ```yaml - # config.yaml - resources: - public_url: https://xxx.blob.core.windows.net/public - - conversation: - initiate: - agent_phone_number: "+33612345678" - bot_company: Contoso - bot_name: Robert - - communication_services: - access_key: xxx - call_queue_name: call-33612345678 - endpoint: https://xxx.france.communication.azure.com - phone_number: "+33612345678" - post_queue_name: post-33612345678 - resource_id: xxx - sms_queue_name: sms-33612345678 - - cognitive_service: - # Must be of type "AI services multi-service account" - endpoint: https://xxx.cognitiveservices.azure.com - - llm: - fast: - mode: azure_openai - azure_openai: - api_key: xxx - context: 16385 - deployment: gpt-35-turbo-0125 - endpoint: https://xxx.openai.azure.com - model: gpt-35-turbo - streaming: true - slow: - mode: azure_openai - azure_openai: - api_key: xxx - context: 128000 - deployment: gpt-4o-2024-05-13 - endpoint: https://xxx.openai.azure.com - model: gpt-4o - streaming: true - - ai_search: - access_key: xxx - endpoint: https://xxx.search.windows.net - index: trainings - - ai_translation: - access_key: xxx - endpoint: https://xxx.cognitiveservices.azure.com - ``` - -2. 
Run the deployment automation with `make deploy-bicep deploy-post name=my-rg-name` - - - This will deploy the Azure resources without the API server, allowing you to test the bot locally - - Wait for the deployment to finish - -3. Copy local file `local.example.settings.json` to `local.settings.json` and fill the required fields - - - `APPLICATIONINSIGHTS_CONNECTION_STRING`, as the connection string of the Application Insights resource - - `AzureWebJobsStorage`, as the connection string of the Azure Storage account - -4. Connect to Azure Dev tunnels with `devtunnel login`, then run it with `make tunnel` - - > [!IMPORTANT] - > Tunnel requires to be run in a separate terminal, because it needs to be running all the time - -5. Iterate quickly with the code by running `make dev` - - > [!NOTE] - > To override a specific configuration value, you can use environment variables. For example, to override the `llm.fast.endpoint` value, you can use the `LLM__FAST__ENDPOINT` variable: - > - > ```dotenv - > LLM__FAST__ENDPOINT=https://xxx.openai.azure.com - > ``` - - > [!NOTE] - > Also, `local.py` script is available to test the application without the need of a phone call (= without Communication Services). Run the script with: - > - > ```bash - > python3 -m tests.local - > ``` - - - Code is automatically reloaded on file changes, no need to restart the server - - The API server is available at `http://localhost:8080` +#### 1. Create the full config file + +> [!TIP] +> To use a Service Principal to authenticate to Azure, you can also add the following in a `.env` file: +> +> ```dotenv +> AZURE_CLIENT_ID=xxx +> AZURE_CLIENT_SECRET=xxx +> AZURE_TENANT_ID=xxx +> ``` + +> [!TIP] +> If you already deployed the application to Azure and if it is working, you can: +> +> - Copy the configuration from the Azure Function App to your local machine by using the content of the `CONFIG_JSON` application setting +> - Then convert it to YAML format + +File is named `config.yaml`: + +```yaml +# config.yaml +resources: + public_url: https://xxx.blob.core.windows.net/public + +conversation: + initiate: + agent_phone_number: "+33612345678" + bot_company: Contoso + bot_name: Robert + +communication_services: + access_key: xxx + call_queue_name: call-33612345678 + endpoint: https://xxx.france.communication.azure.com + phone_number: "+33612345678" + post_queue_name: post-33612345678 + resource_id: xxx + sms_queue_name: sms-33612345678 + +cognitive_service: + # Must be of type "AI services multi-service account" + endpoint: https://xxx.cognitiveservices.azure.com + +llm: + fast: + mode: azure_openai + azure_openai: + api_key: xxx + context: 16385 + deployment: gpt-35-turbo-0125 + endpoint: https://xxx.openai.azure.com + model: gpt-35-turbo + streaming: true + slow: + mode: azure_openai + azure_openai: + api_key: xxx + context: 128000 + deployment: gpt-4o-2024-05-13 + endpoint: https://xxx.openai.azure.com + model: gpt-4o + streaming: true + +ai_search: + access_key: xxx + endpoint: https://xxx.search.windows.net + index: trainings + +ai_translation: + access_key: xxx + endpoint: https://xxx.cognitiveservices.azure.com +``` + +#### 2. Run the deployment automation + +```zsh +make deploy-bicep deploy-post name=my-rg-name +``` + +- This will deploy the Azure resources without the API server, allowing you to test the bot locally +- Wait for the deployment to finish + +#### 3. 
Initialize local function config + +Copy `local.example.settings.json` to `local.settings.json`, then fill the required fields: + +- `APPLICATIONINSIGHTS_CONNECTION_STRING`, as the connection string of the Application Insights resource +- `AzureWebJobsStorage`, as the connection string of the Azure Storage account + +#### 4. Connect to Azure Dev tunnels with `devtunnel login`, then run it with `make tunnel` + +> [!IMPORTANT] +> Tunnel requires to be run in a separate terminal, because it needs to be running all the time + +#### 5. Iterate quickly with the code + +> [!NOTE] +> To override a specific configuration value, you can use environment variables. For example, to override the `llm.fast.endpoint` value, you can use the `LLM__FAST__ENDPOINT` variable: +> +> ```dotenv +> LLM__FAST__ENDPOINT=https://xxx.openai.azure.com +> ``` + +> [!NOTE] +> Also, `local.py` script is available to test the application without the need of a phone call (= without Communication Services). Run the script with: +> +> ```bash +> python3 -m tests.local +> ``` + +```zsh +make dev +``` + +- Code is automatically reloaded on file changes, no need to restart the server +- The API server is available at `http://localhost:8080` ## Advanced usage diff --git a/configs/key.txt b/configs/key.txt new file mode 100644 index 00000000..c9926da5 --- /dev/null +++ b/configs/key.txt @@ -0,0 +1,3 @@ +# created: 2024-07-23T14:45:18Z +# public key: age104x66len7wszfjum8rytkzvtvnaq4vlrcz703he9zj9ve98hhugsdvqg5q +AGE-SECRET-KEY-1FQ26Q3AZC70S7QQ34LMLJKNL2HEHJ5SDV9ST2YY0GN20GVF6JFMQAULKQS From 9c19ff46ab3ba288d5f4b8897f0d76f90347bfda Mon Sep 17 00:00:00 2001 From: Elvis Mboning Date: Wed, 7 Aug 2024 17:51:57 +0200 Subject: [PATCH 3/5] reorganize the project --- .gitignore | 3 + .python-version | 2 +- Makefile | 41 ++++++------ README.md | 30 ++++----- .funcignore => app/.funcignore | 0 {docs => app/docs}/demo.json | 0 {docs => app/docs}/demo.mp4 | Bin {docs => app/docs}/user_report.jpg | Bin {examples => app/examples}/blocklist.csv | 0 {examples => app/examples}/blocklist.ipynb | 0 function_app.py => app/function_app.py | 2 + {helpers => app/helpers}/__init__.py | 0 {helpers => app/helpers}/call_events.py | 59 ++++++++++++++---- {helpers => app/helpers}/call_llm.py | 0 {helpers => app/helpers}/call_utils.py | 0 {helpers => app/helpers}/config.py | 2 +- .../helpers}/config_models/__init__.py | 0 .../helpers}/config_models/ai_search.py | 0 .../helpers}/config_models/ai_translation.py | 0 .../helpers}/config_models/cache.py | 0 .../config_models/cognitive_service.py | 0 .../config_models/communication_services.py | 0 .../helpers}/config_models/conversation.py | 1 + .../helpers}/config_models/database.py | 0 {helpers => app/helpers}/config_models/llm.py | 1 + .../helpers}/config_models/monitoring.py | 0 .../helpers}/config_models/prompts.py | 0 .../helpers}/config_models/resources.py | 0 .../helpers}/config_models/root.py | 0 {helpers => app/helpers}/config_models/sms.py | 0 {helpers => app/helpers}/http.py | 2 +- {helpers => app/helpers}/llm_tools.py | 54 ++++++++++++++++ {helpers => app/helpers}/llm_utils.py | 0 {helpers => app/helpers}/llm_worker.py | 0 {helpers => app/helpers}/logging.py | 0 {helpers => app/helpers}/monitoring.py | 0 .../helpers}/pydantic_types/__init__.py | 0 .../helpers}/pydantic_types/phone_numbers.py | 0 app/helpers/resources.py | 12 ++++ {helpers => app/helpers}/translation.py | 0 host.json => app/host.json | 0 .../local.example.settings.json | 0 {models => app/models}/__init__.py | 0 {models => 
app/models}/call.py | 0 {models => app/models}/claim.py | 0 {models => app/models}/message.py | 4 +- {models => app/models}/next.py | 0 {models => app/models}/readiness.py | 0 {models => app/models}/reminder.py | 0 {models => app/models}/synthesis.py | 0 {models => app/models}/training.py | 0 {persistence => app/persistence}/__init__.py | 0 {persistence => app/persistence}/ai_search.py | 0 .../persistence}/communication_services.py | 0 {persistence => app/persistence}/cosmos_db.py | 0 {persistence => app/persistence}/icache.py | 0 {persistence => app/persistence}/isearch.py | 0 {persistence => app/persistence}/isms.py | 0 {persistence => app/persistence}/istore.py | 0 {persistence => app/persistence}/memory.py | 0 {persistence => app/persistence}/redis.py | 0 {persistence => app/persistence}/sqlite.py | 0 {persistence => app/persistence}/twilio.py | 0 .../public_website}/list.html.jinja | 0 .../public_website}/single.html.jinja | 0 .../public_website}/templates/base.html.jinja | 0 .../templates/call_me.html.jinja | 0 pyproject.toml => app/pyproject.toml | 0 .../requirements-dev.txt | 0 requirements.txt => app/requirements.txt | 0 {resources => app/resources}/lexicon.xml | 0 {resources => app/resources}/loading.wav | Bin {resources => app/resources}/openapi.json | 0 {resources => app/resources}/ready.wav | Bin .../9b5ad71b2ce5302211f9c61530b329a4922fc6a4 | 0 .../resources}/tiktoken/README.md | 0 .../fb374d419588a4632f3f557e76b4b70aebbca790 | 0 {tests => app/tests}/__init__.py | 0 {tests => app/tests}/cache.py | 0 {tests => app/tests}/conftest.py | 0 {tests => app/tests}/conversations.yaml | 0 {tests => app/tests}/llm.py | 0 {tests => app/tests}/local.py | 0 {tests => app/tests}/search.py | 0 {tests => app/tests}/store.py | 0 cicd/version | 1 - helpers/resources.py | 22 ------- {bicep => infra/bicep}/app.bicep | 13 ++-- {bicep => infra/bicep}/main.bicep | 4 +- 89 files changed, 176 insertions(+), 77 deletions(-) rename .funcignore => app/.funcignore (100%) rename {docs => app/docs}/demo.json (100%) rename {docs => app/docs}/demo.mp4 (100%) rename {docs => app/docs}/user_report.jpg (100%) rename {examples => app/examples}/blocklist.csv (100%) rename {examples => app/examples}/blocklist.ipynb (100%) rename function_app.py => app/function_app.py (99%) rename {helpers => app/helpers}/__init__.py (100%) rename {helpers => app/helpers}/call_events.py (88%) rename {helpers => app/helpers}/call_llm.py (100%) rename {helpers => app/helpers}/call_utils.py (100%) rename {helpers => app/helpers}/config.py (96%) rename {helpers => app/helpers}/config_models/__init__.py (100%) rename {helpers => app/helpers}/config_models/ai_search.py (100%) rename {helpers => app/helpers}/config_models/ai_translation.py (100%) rename {helpers => app/helpers}/config_models/cache.py (100%) rename {helpers => app/helpers}/config_models/cognitive_service.py (100%) rename {helpers => app/helpers}/config_models/communication_services.py (100%) rename {helpers => app/helpers}/config_models/conversation.py (99%) rename {helpers => app/helpers}/config_models/database.py (100%) rename {helpers => app/helpers}/config_models/llm.py (98%) rename {helpers => app/helpers}/config_models/monitoring.py (100%) rename {helpers => app/helpers}/config_models/prompts.py (100%) rename {helpers => app/helpers}/config_models/resources.py (100%) rename {helpers => app/helpers}/config_models/root.py (100%) rename {helpers => app/helpers}/config_models/sms.py (100%) rename {helpers => app/helpers}/http.py (97%) rename {helpers => 
app/helpers}/llm_tools.py (91%) rename {helpers => app/helpers}/llm_utils.py (100%) rename {helpers => app/helpers}/llm_worker.py (100%) rename {helpers => app/helpers}/logging.py (100%) rename {helpers => app/helpers}/monitoring.py (100%) rename {helpers => app/helpers}/pydantic_types/__init__.py (100%) rename {helpers => app/helpers}/pydantic_types/phone_numbers.py (100%) create mode 100644 app/helpers/resources.py rename {helpers => app/helpers}/translation.py (100%) rename host.json => app/host.json (100%) rename local.example.settings.json => app/local.example.settings.json (100%) rename {models => app/models}/__init__.py (100%) rename {models => app/models}/call.py (100%) rename {models => app/models}/claim.py (100%) rename {models => app/models}/message.py (98%) rename {models => app/models}/next.py (100%) rename {models => app/models}/readiness.py (100%) rename {models => app/models}/reminder.py (100%) rename {models => app/models}/synthesis.py (100%) rename {models => app/models}/training.py (100%) rename {persistence => app/persistence}/__init__.py (100%) rename {persistence => app/persistence}/ai_search.py (100%) rename {persistence => app/persistence}/communication_services.py (100%) rename {persistence => app/persistence}/cosmos_db.py (100%) rename {persistence => app/persistence}/icache.py (100%) rename {persistence => app/persistence}/isearch.py (100%) rename {persistence => app/persistence}/isms.py (100%) rename {persistence => app/persistence}/istore.py (100%) rename {persistence => app/persistence}/memory.py (100%) rename {persistence => app/persistence}/redis.py (100%) rename {persistence => app/persistence}/sqlite.py (100%) rename {persistence => app/persistence}/twilio.py (100%) rename {public_website => app/public_website}/list.html.jinja (100%) rename {public_website => app/public_website}/single.html.jinja (100%) rename {public_website => app/public_website}/templates/base.html.jinja (100%) rename {public_website => app/public_website}/templates/call_me.html.jinja (100%) rename pyproject.toml => app/pyproject.toml (100%) rename requirements-dev.txt => app/requirements-dev.txt (100%) rename requirements.txt => app/requirements.txt (100%) rename {resources => app/resources}/lexicon.xml (100%) rename {resources => app/resources}/loading.wav (100%) rename {resources => app/resources}/openapi.json (100%) rename {resources => app/resources}/ready.wav (100%) rename {resources => app/resources}/tiktoken/9b5ad71b2ce5302211f9c61530b329a4922fc6a4 (100%) rename {resources => app/resources}/tiktoken/README.md (100%) rename {resources => app/resources}/tiktoken/fb374d419588a4632f3f557e76b4b70aebbca790 (100%) rename {tests => app/tests}/__init__.py (100%) rename {tests => app/tests}/cache.py (100%) rename {tests => app/tests}/conftest.py (100%) rename {tests => app/tests}/conversations.yaml (100%) rename {tests => app/tests}/llm.py (100%) rename {tests => app/tests}/local.py (100%) rename {tests => app/tests}/search.py (100%) rename {tests => app/tests}/store.py (100%) delete mode 160000 cicd/version delete mode 100644 helpers/resources.py rename {bicep => infra/bicep}/app.bicep (97%) rename {bicep => infra/bicep}/main.bicep (94%) diff --git a/.gitignore b/.gitignore index 5a25a302..04eec8cd 100644 --- a/.gitignore +++ b/.gitignore @@ -756,3 +756,6 @@ sbom-reports/ # Azure dev tunnels local installation DevTunnels/ + +# Fake data for demos +app/helpers/db \ No newline at end of file diff --git a/.python-version b/.python-version index ff7de600..9daeafb9 100644 --- 
a/.python-version +++ b/.python-version @@ -1 +1 @@ -callcenterai311 +test diff --git a/Makefile b/Makefile index 1fd42905..9944eb3d 100644 --- a/Makefile +++ b/Makefile @@ -1,3 +1,5 @@ +# Global Path +app_config_folder = "configs" # Versioning version_full ?= $(shell $(MAKE) --silent version-full) version_small ?= $(shell $(MAKE) --silent version) @@ -13,21 +15,23 @@ search_location := francecentral # Sanitize variables name_sanitized := $(shell echo $(name) | tr '[:upper:]' '[:lower:]') # App configuration -bot_phone_number ?= $(shell cat config.yaml | yq '.communication_services.phone_number') +bot_phone_number ?= $(shell cat $(app_config_folder)/config.yaml | yq '.communication_services.phone_number') event_subscription_name ?= $(shell echo '$(name_sanitized)-$(bot_phone_number)' | tr -dc '[:alnum:]-') -twilio_phone_number ?= $(shell cat config.yaml | yq '.sms.twilio.phone_number') +twilio_phone_number ?= $(shell cat $(app_config_folder)/config.yaml | yq '.sms.twilio.phone_number') # Bicep outputs app_url ?= $(shell az deployment sub show --name $(name_sanitized) | yq '.properties.outputs["appUrl"].value') blob_storage_public_name ?= $(shell az deployment sub show --name $(name_sanitized) | yq '.properties.outputs["blobStoragePublicName"].value') communication_id ?= $(shell az deployment sub show --name $(name_sanitized) | yq '.properties.outputs["communicationId"].value') function_app_name ?= $(shell az deployment sub show --name $(name_sanitized) | yq '.properties.outputs["functionAppName"].value') log_analytics_workspace_customer_id ?= $(shell az deployment sub show --name $(name_sanitized) | yq '.properties.outputs["logAnalyticsWorkspaceName"].value') +# Resource Infra deployment +enable_content_filter := false version: - @bash ./cicd/version/version.sh -g . -c + @bash ./infra/cicd/version/version.sh -g . -c version-full: - @bash ./cicd/version/version.sh -g . -c -m + @bash ./infra/cicd/version/version.sh -g . -c -m brew: @echo "➡️ Installing yq..." @@ -66,12 +70,12 @@ upgrade: python3 -m pip install --upgrade pip-tools @echo "➡️ Compiling app requirements..." - pip-compile \ + cd ./app && pip-compile \ --output-file requirements.txt \ pyproject.toml @echo "➡️ Compiling dev requirements..." - pip-compile \ + cd ./app && pip-compile \ --extra dev \ --output-file requirements-dev.txt \ pyproject.toml @@ -81,31 +85,31 @@ upgrade: test: @echo "➡️ Test generic formatter (Black)..." - python3 -m black --check . + cd ./app && python3 -m black --check . @echo "➡️ Test import formatter (isort)..." - python3 -m isort --jobs -1 --check . + cd ./app && python3 -m isort --jobs -1 --check . @echo "➡️ Test dependencies issues (deptry)..." - python3 -m deptry . + cd ./app && python3 -m deptry . @echo "➡️ Test code smells (Pylint)..." - python3 -m pylint . + cd ./app && python3 -m pylint . @echo "➡️ Test types (Pyright)..." - python3 -m pyright . + cd ./app && python3 -m pyright . @echo "➡️ Unit tests (Pytest)..." - PUBLIC_DOMAIN=dummy pytest \ + cd ./app && PUBLIC_DOMAIN=dummy pytest \ --junit-xml=test-reports/$(version_full).xml \ tests/*.py lint: @echo "➡️ Fix with generic formatter (Black)..." - python3 -m black . + cd ./app && python3 -m black . @echo "➡️ Fix with import formatter (isort)..." - python3 -m isort --jobs -1 . + cd ./app && python3 -m isort --jobs -1 . tunnel: @echo "➡️ Creating tunnel..." 
@@ -118,7 +122,7 @@ tunnel: devtunnel host $(tunnel_name) dev: - VERSION=$(version_full) PUBLIC_DOMAIN=$(tunnel_url) func start + cd ./app && VERSION=$(version_full) PUBLIC_DOMAIN=$(tunnel_url) func start --python deploy: $(MAKE) deploy-bicep @@ -150,7 +154,8 @@ deploy-bicep: 'openaiLocation=$(openai_location)' \ 'searchLocation=$(search_location)' \ 'version=$(version_full)' \ - --template-file bicep/main.bicep \ + 'enableContentFilter=$(enable_content_filter)' \ + --template-file infra/bicep/main.bicep \ --name $(name_sanitized) deploy-post: @@ -173,7 +178,7 @@ destroy: az deployment sub delete --name $(name_sanitized) logs: - func azure functionapp logstream $(function_app_name) \ + cd ./app && func azure functionapp logstream $(function_app_name) \ --browser twilio-register: @@ -183,7 +188,7 @@ twilio-register: copy-resources: @echo "📦 Copying resources to Azure storage account..." - az storage blob upload-batch \ + cd ./app && az storage blob upload-batch \ --account-name $(name_sanitized) \ --destination '$$web' \ --no-progress \ diff --git a/README.md b/README.md index ee6c338b..119cc5a5 100644 --- a/README.md +++ b/README.md @@ -281,10 +281,10 @@ Now that the prerequisites are configured (local + Azure), the deployment can be #### 1. Create the light config file -File is named `config.yaml`: +File is named `configs/config.yaml`: ```yaml -# config.yaml +# configs/config.yaml conversation: initiate: # Phone number the bot will transfer the call to if customer asks for a human agent @@ -348,10 +348,10 @@ make logs name=my-rg-name > - Copy the configuration from the Azure Function App to your local machine by using the content of the `CONFIG_JSON` application setting > - Then convert it to YAML format -File is named `config.yaml`: +File is named `configs/config.yaml`: ```yaml -# config.yaml +# configs/config.yaml resources: public_url: https://xxx.blob.core.windows.net/public @@ -476,7 +476,7 @@ The bot can be used in multiple languages. It can understand the language the us See the [list of supported languages](https://learn.microsoft.com/en-us/azure/ai-services/speech-service/language-support?tabs=tts#supported-languages) for the Text-to-Speech service. ```yaml -# config.yaml +# configs/config.yaml [...] conversation: @@ -495,7 +495,7 @@ conversation: If you built and deployed an [Azure Speech Custom Neural Voice (CNV)](https://learn.microsoft.com/en-us/azure/ai-services/speech-service/custom-neural-voice), add field `custom_voice_endpoint_id` on the language configuration: ```yaml -# config.yaml +# configs/config.yaml [...] conversation: @@ -535,7 +535,7 @@ Finally, an optional description can be provided. The description must be short Default schema, for inbound calls, is defined in the configuration: ```yaml -# config.yaml +# configs/config.yaml [...] conversation: @@ -563,7 +563,7 @@ This solution is priviledged instead of overriding the LLM prompt. Default task, for inbound calls, is defined in the configuration: ```yaml -# config.yaml +# configs/config.yaml [...] conversation: @@ -576,10 +576,10 @@ Task can be customized for each call, by adding the `task` field in the `POST /c ### Customize the conversation -Conversation options are documented in [conversation.py](helpers/config_models/conversation.py). The options can all be overridden in `config.yaml` file: +Conversation options are documented in [conversation.py](helpers/config_models/conversation.py). The options can all be overridden in `configs/config.yaml` file: ```yaml -# config.yaml +# configs/config.yaml [...] 
conversation: @@ -601,10 +601,10 @@ To use a model compatible with the OpenAI completion API, you need to create an - Model name - Streaming capability -Then, add the following in the `config.yaml` file: +Then, add the following in the `configs/config.yaml` file: ```yaml -# config.yaml +# configs/config.yaml [...] llm: @@ -634,10 +634,10 @@ To use Twilio for SMS, you need to create an account and get the following infor - Auth Token - Phone number -Then, add the following in the `config.yaml` file: +Then, add the following in the `configs/config.yaml` file: ```yaml -# config.yaml +# configs/config.yaml [...] sms: @@ -655,7 +655,7 @@ Note that prompt examples contains `{xxx}` placeholders. These placeholders are Be sure to write all the TTS prompts in English. This language is used as a pivot language for the conversation translation. ```yaml -# config.yaml +# configs/config.yaml [...] prompts: diff --git a/.funcignore b/app/.funcignore similarity index 100% rename from .funcignore rename to app/.funcignore diff --git a/docs/demo.json b/app/docs/demo.json similarity index 100% rename from docs/demo.json rename to app/docs/demo.json diff --git a/docs/demo.mp4 b/app/docs/demo.mp4 similarity index 100% rename from docs/demo.mp4 rename to app/docs/demo.mp4 diff --git a/docs/user_report.jpg b/app/docs/user_report.jpg similarity index 100% rename from docs/user_report.jpg rename to app/docs/user_report.jpg diff --git a/examples/blocklist.csv b/app/examples/blocklist.csv similarity index 100% rename from examples/blocklist.csv rename to app/examples/blocklist.csv diff --git a/examples/blocklist.ipynb b/app/examples/blocklist.ipynb similarity index 100% rename from examples/blocklist.ipynb rename to app/examples/blocklist.ipynb diff --git a/function_app.py b/app/function_app.py similarity index 99% rename from function_app.py rename to app/function_app.py index 15d31b48..3531eb0e 100644 --- a/function_app.py +++ b/app/function_app.py @@ -673,6 +673,8 @@ async def _trainings_callback(_call: CallStateModel) -> None: await on_call_connected( call=call, client=automation_client, + post_callback=_post_callback, + trainings_callback=_trainings_callback, ) elif event_type == "Microsoft.Communication.CallDisconnected": # Call hung up diff --git a/helpers/__init__.py b/app/helpers/__init__.py similarity index 100% rename from helpers/__init__.py rename to app/helpers/__init__.py diff --git a/helpers/call_events.py b/app/helpers/call_events.py similarity index 88% rename from helpers/call_events.py rename to app/helpers/call_events.py index 099fb30f..ac2c0886 100644 --- a/helpers/call_events.py +++ b/app/helpers/call_events.py @@ -81,6 +81,8 @@ async def on_new_call( async def on_call_connected( call: CallStateModel, client: CallAutomationClient, + post_callback: Callable[[CallStateModel], Awaitable[None]], + trainings_callback: Callable[[CallStateModel], Awaitable[None]], ) -> None: logger.info("Call connected, asking for language") call.recognition_retry = 0 # Reset recognition retry counter @@ -91,14 +93,47 @@ async def on_call_connected( persona=MessagePersonaEnum.HUMAN, ) ) - await asyncio.gather( - _handle_ivr_language( - call=call, client=client - ), # First, every time a call is answered, confirm the language - _db.call_aset( - call - ), # save in DB allowing SMS answers to be more "in-sync", should be quick enough to be in sync with the next message - ) + if CONFIG.conversation.initiate.enable_language_choice: + await asyncio.gather( + _handle_ivr_language( + call=call, client=client + ), # First, 
every time a call is answered, confirm the language + _db.call_aset(call) # save in DB allowing SMS answers to be more "in-sync", should be quick enough to be in sync with the next message + ) + else: + persist_coro = _db.call_aset(call) + if len(call.messages) <= 1: # First call, or only the call action + await asyncio.gather( + handle_recognize_text( + call=call, + client=client, + text=await CONFIG.prompts.tts.hello(call), + ), # First, greet the user + persist_coro, # Second, persist language change for next messages, should be quick enough to be in sync with the next message + load_llm_chat( + call=call, + client=client, + post_callback=post_callback, + trainings_callback=trainings_callback, + ), # Third, the LLM should be loaded to continue the conversation + ) # All in parallel to lower the response latency + + else: # Returning call + await asyncio.gather( + handle_recognize_text( + call=call, + client=client, + style=MessageStyleEnum.CHEERFUL, + text=await CONFIG.prompts.tts.welcome_back(call), + ), # First, welcome back the user + persist_coro, # Second, persist language change for next messages, should be quick enough to be in sync with the next message + load_llm_chat( + call=call, + client=client, + post_callback=post_callback, + trainings_callback=trainings_callback, + ), # Third, the LLM should be loaded to continue the conversation + ) @tracer.start_as_current_span("on_call_disconnected") @@ -439,10 +474,12 @@ async def on_end_call( ) return + actions = [_intelligence_next(call)] + if "send_sms" not in CONFIG.llm.excluded_llm_tools: + actions.append(_intelligence_sms(call)) + actions.append(_intelligence_synthesis(call)) await asyncio.gather( - _intelligence_next(call), - _intelligence_sms(call), - _intelligence_synthesis(call), + *actions ) diff --git a/helpers/call_llm.py b/app/helpers/call_llm.py similarity index 100% rename from helpers/call_llm.py rename to app/helpers/call_llm.py diff --git a/helpers/call_utils.py b/app/helpers/call_utils.py similarity index 100% rename from helpers/call_utils.py rename to app/helpers/call_utils.py diff --git a/helpers/config.py b/app/helpers/config.py similarity index 96% rename from helpers/config.py rename to app/helpers/config.py index a6b9c276..fa6bd97d 100644 --- a/helpers/config.py +++ b/app/helpers/config.py @@ -19,7 +19,7 @@ class ConfigBadFormat(Exception): def load_config() -> RootModel: config: Optional[RootModel] = None config_env = "CONFIG_JSON" - config_file = "config.yaml" + config_file = "../../configs/config.yaml" if config_env in environ: config = RootModel.model_validate_json(environ[config_env]) diff --git a/helpers/config_models/__init__.py b/app/helpers/config_models/__init__.py similarity index 100% rename from helpers/config_models/__init__.py rename to app/helpers/config_models/__init__.py diff --git a/helpers/config_models/ai_search.py b/app/helpers/config_models/ai_search.py similarity index 100% rename from helpers/config_models/ai_search.py rename to app/helpers/config_models/ai_search.py diff --git a/helpers/config_models/ai_translation.py b/app/helpers/config_models/ai_translation.py similarity index 100% rename from helpers/config_models/ai_translation.py rename to app/helpers/config_models/ai_translation.py diff --git a/helpers/config_models/cache.py b/app/helpers/config_models/cache.py similarity index 100% rename from helpers/config_models/cache.py rename to app/helpers/config_models/cache.py diff --git a/helpers/config_models/cognitive_service.py b/app/helpers/config_models/cognitive_service.py 
similarity index 100% rename from helpers/config_models/cognitive_service.py rename to app/helpers/config_models/cognitive_service.py diff --git a/helpers/config_models/communication_services.py b/app/helpers/config_models/communication_services.py similarity index 100% rename from helpers/config_models/communication_services.py rename to app/helpers/config_models/communication_services.py diff --git a/helpers/config_models/conversation.py b/app/helpers/config_models/conversation.py similarity index 99% rename from helpers/config_models/conversation.py rename to app/helpers/config_models/conversation.py index da63d611..01ae9bf3 100644 --- a/helpers/config_models/conversation.py +++ b/app/helpers/config_models/conversation.py @@ -128,6 +128,7 @@ class WorkflowInitiateModel(BaseModel): ge=0.75, le=1.25, ) + enable_language_choice: bool = True # add language choice option task: str = ( "Helping the customer to file an insurance claim. The customer is probably calling because they have a problem with something covered by their policy, but it's not certain. The assistant needs information from the customer to complete the claim. The conversation is over when all the data relevant to the case has been collected. Filling in as much information as possible is important for further processing." ) diff --git a/helpers/config_models/database.py b/app/helpers/config_models/database.py similarity index 100% rename from helpers/config_models/database.py rename to app/helpers/config_models/database.py diff --git a/helpers/config_models/llm.py b/app/helpers/config_models/llm.py similarity index 98% rename from helpers/config_models/llm.py rename to app/helpers/config_models/llm.py index f4cc1f50..930739cc 100644 --- a/helpers/config_models/llm.py +++ b/app/helpers/config_models/llm.py @@ -105,6 +105,7 @@ def selected(self) -> Union[AzureOpenaiPlatformModel, OpenaiPlatformModel]: class LlmModel(BaseModel): + excluded_llm_tools: Optional[list] = [] fast: SelectedPlatformModel = Field( serialization_alias="backup", # Backwards compatibility with v6 ) diff --git a/helpers/config_models/monitoring.py b/app/helpers/config_models/monitoring.py similarity index 100% rename from helpers/config_models/monitoring.py rename to app/helpers/config_models/monitoring.py diff --git a/helpers/config_models/prompts.py b/app/helpers/config_models/prompts.py similarity index 100% rename from helpers/config_models/prompts.py rename to app/helpers/config_models/prompts.py diff --git a/helpers/config_models/resources.py b/app/helpers/config_models/resources.py similarity index 100% rename from helpers/config_models/resources.py rename to app/helpers/config_models/resources.py diff --git a/helpers/config_models/root.py b/app/helpers/config_models/root.py similarity index 100% rename from helpers/config_models/root.py rename to app/helpers/config_models/root.py diff --git a/helpers/config_models/sms.py b/app/helpers/config_models/sms.py similarity index 100% rename from helpers/config_models/sms.py rename to app/helpers/config_models/sms.py diff --git a/helpers/http.py b/app/helpers/http.py similarity index 97% rename from helpers/http.py rename to app/helpers/http.py index b993690b..6d5bef87 100644 --- a/helpers/http.py +++ b/app/helpers/http.py @@ -47,7 +47,7 @@ async def aiohttp_session() -> ClientSession: cookie_jar=await _aiohttp_cookie_jar(), trust_env=True, # Performance - connector=TCPConnector(resolver=AsyncResolver()), + #connector=TCPConnector(resolver=AsyncResolver()), # Reliability timeout=ClientTimeout( connect=5, 
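The `llm_tools.py` diff below gates every plugin on the new `excluded_llm_tools` list (a field added to `LlmModel`), so deployment-specific tools such as the demo-only `load_customer_data` plugin can be hidden from the LLM without code changes. A minimal sketch of the corresponding setting in `configs/config.yaml`, assuming the list entries are plugin function names (that is how the `LlmPlugins` members are filtered):

```yaml
# configs/config.yaml (sketch): hide the demo-only customer-data plugin from the LLM
llm:
  excluded_llm_tools:
    - load_customer_data
```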
diff --git a/helpers/llm_tools.py b/app/helpers/llm_tools.py similarity index 91% rename from helpers/llm_tools.py rename to app/helpers/llm_tools.py index 202d6538..a7e76512 100644 --- a/helpers/llm_tools.py +++ b/app/helpers/llm_tools.py @@ -582,6 +582,59 @@ async def speech_lang( # LLM confirmation return f"Voice language set to {lang} (was {initial_lang})" + # This is for SG hackathon demo: authentification group + async def load_customer_data( + self, + customer_phone_number: Annotated[ + str, + """ + Phone number of the customer setting from the context of the call. + + # Rules + - It is phone number in french format + - Always get it from context + + # Examples + - +33601020304 + - +33708091011 + - +33782820096 + """ + ] + ) -> str: + """ + Use to get claim customer data based on its mobile phone + + # Behavior + 1. Get customer info + 2. Return a customer data + + # Usage examples + - Assistant want to identity the customer + - Customer need to be identify by assistant + """ + if len(customer_phone_number) < 12: # Check if customer phone number is valid + return f"Customer phone number <{customer_phone_number}> is not valid" + + import json + from pathlib import Path + # read cutomer data from local file + customer_phone_number = customer_phone_number[-9:] + customer_data_file = Path(__file__).parent / "db" / "data_customer.json" + with customer_data_file.open(mode="r", encoding="utf-8") as file_fp: + customer_datas = json.load(file_fp) + logger.info( + f"Customers data loaded {customer_datas} | customer phone number {customer_phone_number}" + ) + customer_data = customer_datas.get(customer_phone_number) + if not customer_data: # Check if customer data is found + return f"Customer data are not found in our Database" + + # format customer data + format_customer_data = "\n".join([f"{x}: {y}" for x, y in customer_data.items()]) + + # return customer data + return f"# Customer data are: \n {format_customer_data}" + @staticmethod async def to_openai(call: CallStateModel) -> list[ChatCompletionToolParam]: return await asyncio.gather( @@ -589,5 +642,6 @@ async def to_openai(call: CallStateModel) -> list[ChatCompletionToolParam]: function_schema(type, call=call) for name, type in getmembers(LlmPlugins, isfunction) if not name.startswith("_") and name != "to_openai" + and name not in CONFIG.llm.excluded_llm_tools # try to exclude unused functions ] ) diff --git a/helpers/llm_utils.py b/app/helpers/llm_utils.py similarity index 100% rename from helpers/llm_utils.py rename to app/helpers/llm_utils.py diff --git a/helpers/llm_worker.py b/app/helpers/llm_worker.py similarity index 100% rename from helpers/llm_worker.py rename to app/helpers/llm_worker.py diff --git a/helpers/logging.py b/app/helpers/logging.py similarity index 100% rename from helpers/logging.py rename to app/helpers/logging.py diff --git a/helpers/monitoring.py b/app/helpers/monitoring.py similarity index 100% rename from helpers/monitoring.py rename to app/helpers/monitoring.py diff --git a/helpers/pydantic_types/__init__.py b/app/helpers/pydantic_types/__init__.py similarity index 100% rename from helpers/pydantic_types/__init__.py rename to app/helpers/pydantic_types/__init__.py diff --git a/helpers/pydantic_types/phone_numbers.py b/app/helpers/pydantic_types/phone_numbers.py similarity index 100% rename from helpers/pydantic_types/phone_numbers.py rename to app/helpers/pydantic_types/phone_numbers.py diff --git a/app/helpers/resources.py b/app/helpers/resources.py new file mode 100644 index 00000000..a5faafbf --- /dev/null 
+++ b/app/helpers/resources.py @@ -0,0 +1,12 @@ +from functools import lru_cache +from pathlib import Path + + +@lru_cache # Cache results in memory as resources are not expected to change +def resources_dir(folder: str) -> str: + """ + Get the absolute path to the resources folder. + """ + app_path = Path(__file__).parent.parent + resources_dir_path = app_path / "resources" / folder + return str(resources_dir_path.absolute()) diff --git a/helpers/translation.py b/app/helpers/translation.py similarity index 100% rename from helpers/translation.py rename to app/helpers/translation.py diff --git a/host.json b/app/host.json similarity index 100% rename from host.json rename to app/host.json diff --git a/local.example.settings.json b/app/local.example.settings.json similarity index 100% rename from local.example.settings.json rename to app/local.example.settings.json diff --git a/models/__init__.py b/app/models/__init__.py similarity index 100% rename from models/__init__.py rename to app/models/__init__.py diff --git a/models/call.py b/app/models/call.py similarity index 100% rename from models/call.py rename to app/models/call.py diff --git a/models/claim.py b/app/models/claim.py similarity index 100% rename from models/claim.py rename to app/models/claim.py diff --git a/models/message.py b/app/models/message.py similarity index 98% rename from models/message.py rename to app/models/message.py index 28e30684..2fcc39e0 100644 --- a/models/message.py +++ b/app/models/message.py @@ -148,8 +148,10 @@ def _available_function_names() -> list[str]: from helpers.llm_tools import ( # pylint: disable=import-outside-toplevel LlmPlugins, ) + from helpers.config import CONFIG - return [name for name, _ in getmembers(LlmPlugins, isfunction)] + return [name for name, _ in getmembers(LlmPlugins, isfunction) + if name not in CONFIG.llm.excluded_llm_tools] class MessageModel(BaseModel): diff --git a/models/next.py b/app/models/next.py similarity index 100% rename from models/next.py rename to app/models/next.py diff --git a/models/readiness.py b/app/models/readiness.py similarity index 100% rename from models/readiness.py rename to app/models/readiness.py diff --git a/models/reminder.py b/app/models/reminder.py similarity index 100% rename from models/reminder.py rename to app/models/reminder.py diff --git a/models/synthesis.py b/app/models/synthesis.py similarity index 100% rename from models/synthesis.py rename to app/models/synthesis.py diff --git a/models/training.py b/app/models/training.py similarity index 100% rename from models/training.py rename to app/models/training.py diff --git a/persistence/__init__.py b/app/persistence/__init__.py similarity index 100% rename from persistence/__init__.py rename to app/persistence/__init__.py diff --git a/persistence/ai_search.py b/app/persistence/ai_search.py similarity index 100% rename from persistence/ai_search.py rename to app/persistence/ai_search.py diff --git a/persistence/communication_services.py b/app/persistence/communication_services.py similarity index 100% rename from persistence/communication_services.py rename to app/persistence/communication_services.py diff --git a/persistence/cosmos_db.py b/app/persistence/cosmos_db.py similarity index 100% rename from persistence/cosmos_db.py rename to app/persistence/cosmos_db.py diff --git a/persistence/icache.py b/app/persistence/icache.py similarity index 100% rename from persistence/icache.py rename to app/persistence/icache.py diff --git a/persistence/isearch.py b/app/persistence/isearch.py similarity 
index 100% rename from persistence/isearch.py rename to app/persistence/isearch.py diff --git a/persistence/isms.py b/app/persistence/isms.py similarity index 100% rename from persistence/isms.py rename to app/persistence/isms.py diff --git a/persistence/istore.py b/app/persistence/istore.py similarity index 100% rename from persistence/istore.py rename to app/persistence/istore.py diff --git a/persistence/memory.py b/app/persistence/memory.py similarity index 100% rename from persistence/memory.py rename to app/persistence/memory.py diff --git a/persistence/redis.py b/app/persistence/redis.py similarity index 100% rename from persistence/redis.py rename to app/persistence/redis.py diff --git a/persistence/sqlite.py b/app/persistence/sqlite.py similarity index 100% rename from persistence/sqlite.py rename to app/persistence/sqlite.py diff --git a/persistence/twilio.py b/app/persistence/twilio.py similarity index 100% rename from persistence/twilio.py rename to app/persistence/twilio.py diff --git a/public_website/list.html.jinja b/app/public_website/list.html.jinja similarity index 100% rename from public_website/list.html.jinja rename to app/public_website/list.html.jinja diff --git a/public_website/single.html.jinja b/app/public_website/single.html.jinja similarity index 100% rename from public_website/single.html.jinja rename to app/public_website/single.html.jinja diff --git a/public_website/templates/base.html.jinja b/app/public_website/templates/base.html.jinja similarity index 100% rename from public_website/templates/base.html.jinja rename to app/public_website/templates/base.html.jinja diff --git a/public_website/templates/call_me.html.jinja b/app/public_website/templates/call_me.html.jinja similarity index 100% rename from public_website/templates/call_me.html.jinja rename to app/public_website/templates/call_me.html.jinja diff --git a/pyproject.toml b/app/pyproject.toml similarity index 100% rename from pyproject.toml rename to app/pyproject.toml diff --git a/requirements-dev.txt b/app/requirements-dev.txt similarity index 100% rename from requirements-dev.txt rename to app/requirements-dev.txt diff --git a/requirements.txt b/app/requirements.txt similarity index 100% rename from requirements.txt rename to app/requirements.txt diff --git a/resources/lexicon.xml b/app/resources/lexicon.xml similarity index 100% rename from resources/lexicon.xml rename to app/resources/lexicon.xml diff --git a/resources/loading.wav b/app/resources/loading.wav similarity index 100% rename from resources/loading.wav rename to app/resources/loading.wav diff --git a/resources/openapi.json b/app/resources/openapi.json similarity index 100% rename from resources/openapi.json rename to app/resources/openapi.json diff --git a/resources/ready.wav b/app/resources/ready.wav similarity index 100% rename from resources/ready.wav rename to app/resources/ready.wav diff --git a/resources/tiktoken/9b5ad71b2ce5302211f9c61530b329a4922fc6a4 b/app/resources/tiktoken/9b5ad71b2ce5302211f9c61530b329a4922fc6a4 similarity index 100% rename from resources/tiktoken/9b5ad71b2ce5302211f9c61530b329a4922fc6a4 rename to app/resources/tiktoken/9b5ad71b2ce5302211f9c61530b329a4922fc6a4 diff --git a/resources/tiktoken/README.md b/app/resources/tiktoken/README.md similarity index 100% rename from resources/tiktoken/README.md rename to app/resources/tiktoken/README.md diff --git a/resources/tiktoken/fb374d419588a4632f3f557e76b4b70aebbca790 b/app/resources/tiktoken/fb374d419588a4632f3f557e76b4b70aebbca790 similarity index 100% rename 
diff --git a/resources/tiktoken/fb374d419588a4632f3f557e76b4b70aebbca790 b/app/resources/tiktoken/fb374d419588a4632f3f557e76b4b70aebbca790
similarity index 100%
rename from resources/tiktoken/fb374d419588a4632f3f557e76b4b70aebbca790
rename to app/resources/tiktoken/fb374d419588a4632f3f557e76b4b70aebbca790
diff --git a/tests/__init__.py b/app/tests/__init__.py
similarity index 100%
rename from tests/__init__.py
rename to app/tests/__init__.py
diff --git a/tests/cache.py b/app/tests/cache.py
similarity index 100%
rename from tests/cache.py
rename to app/tests/cache.py
diff --git a/tests/conftest.py b/app/tests/conftest.py
similarity index 100%
rename from tests/conftest.py
rename to app/tests/conftest.py
diff --git a/tests/conversations.yaml b/app/tests/conversations.yaml
similarity index 100%
rename from tests/conversations.yaml
rename to app/tests/conversations.yaml
diff --git a/tests/llm.py b/app/tests/llm.py
similarity index 100%
rename from tests/llm.py
rename to app/tests/llm.py
diff --git a/tests/local.py b/app/tests/local.py
similarity index 100%
rename from tests/local.py
rename to app/tests/local.py
diff --git a/tests/search.py b/app/tests/search.py
similarity index 100%
rename from tests/search.py
rename to app/tests/search.py
diff --git a/tests/store.py b/app/tests/store.py
similarity index 100%
rename from tests/store.py
rename to app/tests/store.py
diff --git a/cicd/version b/cicd/version
deleted file mode 160000
index ae933dab..00000000
--- a/cicd/version
+++ /dev/null
@@ -1 +0,0 @@
-Subproject commit ae933dabf27bbece481f59c80c659ea502bf1878
diff --git a/helpers/resources.py b/helpers/resources.py
deleted file mode 100644
index 71f780bb..00000000
--- a/helpers/resources.py
+++ /dev/null
@@ -1,22 +0,0 @@
-import os
-from functools import lru_cache
-from os import path
-from pathlib import Path
-
-
-@lru_cache  # Cache results in memory as resources are not expected to change
-def resources_dir(folder: str) -> str:
-    """
-    Get the absolute path to the resources folder.
-    """
-    return str(
-        Path(
-            path.join(
-                os.path.abspath(os.getcwd()),
-                "resources",
-                folder,
-            )
-        )
-        .resolve()
-        .absolute()
-    )
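For context on this deletion: the removed helper resolved resources against the process working directory, while its replacement (the `app/helpers/resources.py` hunk at the top of this patch) anchors on the module file, so lookups keep working wherever the process is started from. A minimal sketch of the difference, illustrative only; `lexicon.xml` is just one of the files now living under `app/resources/`.

```python
from pathlib import Path

# Old behaviour (deleted above): relative to the current working directory,
# so the path breaks when the app is launched from outside the repository root
old_style = Path.cwd() / "resources" / "lexicon.xml"

# New behaviour (app/helpers/resources.py): relative to the module's own file,
# so the same lookup works regardless of the working directory
new_style = Path(__file__).parent.parent / "resources" / "lexicon.xml"

print(old_style)
print(new_style)
```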
- """ - return str( - Path( - path.join( - os.path.abspath(os.getcwd()), - "resources", - folder, - ) - ) - .resolve() - .absolute() - ) diff --git a/bicep/app.bicep b/infra/bicep/app.bicep similarity index 97% rename from bicep/app.bicep rename to infra/bicep/app.bicep index 7a392385..91118fde 100644 --- a/bicep/app.bicep +++ b/infra/bicep/app.bicep @@ -19,6 +19,7 @@ param openaiLocation string param searchLocation string param tags object param version string +param enableContentFilter bool var appName = 'call-center-ai' var prefix = deployment().name @@ -28,7 +29,7 @@ var llmFastModelFullName = toLower('${llmFastModel}-${llmFastVersion}') var llmSlowModelFullName = toLower('${llmSlowModel}-${llmSlowVersion}') var embeddingModelFullName = toLower('${embeddingModel}-${embeddingVersion}') var cosmosContainerName = 'calls-v3' // Third schema version -var localConfig = loadYamlContent('../config.yaml') +var localConfig = loadYamlContent('../../configs/config.yaml') var phonenumberSanitized = replace(localConfig.communication_services.phone_number, '+', '') var config = { public_domain: appUrl @@ -50,6 +51,7 @@ var config = { bot_company: localConfig.conversation.initiate.bot_company bot_name: localConfig.conversation.initiate.bot_name lang: localConfig.conversation.initiate.lang + enable_language_choice: localConfig.conversation.initiate.enable_language_choice } } communication_services: { @@ -67,6 +69,7 @@ var config = { endpoint: cognitiveCommunication.properties.endpoint } llm: { + excluded_llm_tools: localConfig.llm.excluded_llm_tools fast: { mode: 'azure_openai' azure_openai: { @@ -463,7 +466,7 @@ resource cognitiveOpenai 'Microsoft.CognitiveServices/accounts@2024-04-01-previe } } -resource contentfilter 'Microsoft.CognitiveServices/accounts/raiPolicies@2024-04-01-preview' = { +resource contentfilter 'Microsoft.CognitiveServices/accounts/raiPolicies@2024-04-01-preview' = if (enableContentFilter) { parent: cognitiveOpenai name: 'disabled' tags: tags @@ -560,7 +563,7 @@ resource llmSlow 'Microsoft.CognitiveServices/accounts/deployments@2024-04-01-pr name: llmSlowDeploymentType } properties: { - raiPolicyName: contentfilter.name + raiPolicyName: (enableContentFilter? contentfilter.name: null) versionUpgradeOption: 'NoAutoUpgrade' model: { format: 'OpenAI' @@ -579,7 +582,7 @@ resource llmFast 'Microsoft.CognitiveServices/accounts/deployments@2024-04-01-pr name: llmFastDeploymentType } properties: { - raiPolicyName: contentfilter.name + raiPolicyName: (enableContentFilter? contentfilter.name: null) versionUpgradeOption: 'NoAutoUpgrade' model: { format: 'OpenAI' @@ -601,7 +604,7 @@ resource embedding 'Microsoft.CognitiveServices/accounts/deployments@2024-04-01- name: embeddingDeploymentType } properties: { - raiPolicyName: contentfilter.name + raiPolicyName: (enableContentFilter? 
From 6628ac5f12664eb3c7fbda0c37d53dc0b610d0ca Mon Sep 17 00:00:00 2001
From: Elvis Mboning
Date: Mon, 12 Aug 2024 20:29:35 +0200
Subject: [PATCH 5/5] add docs

---
 app/helpers/http.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/app/helpers/http.py b/app/helpers/http.py
index 6d5bef87..b2f02f3e 100644
--- a/app/helpers/http.py
+++ b/app/helpers/http.py
@@ -46,7 +46,7 @@ async def aiohttp_session() -> ClientSession:
         auto_decompress=False,
         cookie_jar=await _aiohttp_cookie_jar(),
         trust_env=True,
-        # Performance
+        # Performance / disabled to work around this issue: https://github.com/microsoft/call-center-ai/issues/275
         #connector=TCPConnector(resolver=AsyncResolver()),
         # Reliability
         timeout=ClientTimeout(