diff --git a/CHANGELOG.md b/CHANGELOG.md
index 4a76f218..ed8c1beb 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -6,10 +6,18 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
## [Unreleased]
+## [0.7.5] - 2024-01-17
+### Added
+- Support for larger prompts by storing LLMPromptSummaryTemplate in DynamoDB rather than SSM Parameter Store. By default, the CF template will migrate existing SSM prompts to DynamoDB.
+
+### Fixed
+- #125 Updated the pca-aws-sf-bulk-queue-space.py function to correctly count jobs based on IN_PROGRESS as well as QUEUED
+- #224 Updated the pca-aws-sf-bulk-queue-space.py function to correctly count both Transcribe and Transcribe Call Analytics (vs just Transcribe).
+
## [0.7.4] - 2023-12-15
### Added
-- Drag/drop upload from call list page
-- Refresh call summary from call details page
+- Drag/drop upload from call list page.
+- Refresh call summary from call details page.
### Fixed
- Accessibility improvements
@@ -152,7 +160,9 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
### Added
- Initial release
-[Unreleased]: https://github.com/aws-samples/amazon-transcribe-post-call-analytics/compare/v0.7.3...develop
+[Unreleased]: https://github.com/aws-samples/amazon-transcribe-post-call-analytics/compare/v0.7.5...develop
+[0.7.5]: https://github.com/aws-samples/amazon-transcribe-post-call-analytics/releases/tag/v0.7.5
+[0.7.4]: https://github.com/aws-samples/amazon-transcribe-post-call-analytics/releases/tag/v0.7.4
[0.7.3]: https://github.com/aws-samples/amazon-transcribe-post-call-analytics/releases/tag/v0.7.3
[0.7.2]: https://github.com/aws-samples/amazon-transcribe-post-call-analytics/releases/tag/v0.7.2
[0.7.1]: https://github.com/aws-samples/amazon-transcribe-post-call-analytics/releases/tag/v0.7.1
diff --git a/VERSION b/VERSION
index ef090a6c..8bd6ba8c 100644
--- a/VERSION
+++ b/VERSION
@@ -1 +1 @@
-0.7.4
\ No newline at end of file
+0.7.5
diff --git a/docs/generative_ai.md b/docs/generative_ai.md
index 8cc70da3..285abfe9 100644
--- a/docs/generative_ai.md
+++ b/docs/generative_ai.md
@@ -13,58 +13,22 @@ PCA also supports 'Generative AI Queries' - which simply means you can ask quest
## Generative AI Insights
-When enabled, PCA can run one or more FM inferences against Amazon Bedrock or Anthropic APIs. The prompt used to generate the insights is configured in a [AWS Systems Manager Parameter Store](https://docs.aws.amazon.com/systems-manager/latest/userguide/systems-manager-parameter-store.html). The name of the parameter is `LLMPromptSummaryTemplate`.
+When enabled, PCA can run one or more FM inferences against Amazon Bedrock or Anthropic APIs. The prompt used to generate the insights is stored in DynamoDB. The name of the table contains the string `LLMPromptConfigure`, and the table partition key is `LLMPromptTemplateId`. There are two items in the table, one with the partition key value of `LLMPromptSummaryTemplate` and the other with the partition key value of `LLMPromptQueryTemplate`.
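+
+For reference, here is a minimal boto3 sketch (not part of PCA itself) showing how these two items can be read; the table name is a placeholder for the table created by your stack, whose name contains `LLMPromptConfigure`:
+
+```
+import boto3
+
+dynamodb = boto3.client("dynamodb")
+
+def get_prompt_item(table_name, template_id):
+    # Fetch one of the two prompt items by its partition key value
+    response = dynamodb.get_item(
+        TableName=table_name,
+        Key={"LLMPromptTemplateId": {"S": template_id}},
+    )
+    return response.get("Item", {})
+
+# Replace with the table name from your deployed stack
+summary_item = get_prompt_item("PROMPT_TABLE_NAME", "LLMPromptSummaryTemplate")
+query_item = get_prompt_item("PROMPT_TABLE_NAME", "LLMPromptQueryTemplate")
+```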
-### Multiple inferences per call
+### Generative AI interactive queries
-The default value for `LLMPromptSummaryTemplate` is a JSON object with key/value pairs, each pair representing the label (key) and prompt (value). During the `Summarize` step, PCA will iterate the keys and run each prompt. PCA will replace `<br>` tags with newlines, and `{transcript}` is replaced with the call transcript. The key will be used as a header for the value in the "generated insights" section in the PCA UI.
+The item in DynamoDB with the key `LLMPromptQueryTemplate` allows you to customize the interactive query prompt used on the call details page. You can use this to provide model-specific prompts. The default value is in [Anthropic's prompt format](https://docs.anthropic.com/claude/docs/constructing-a-prompt).
-Below is the default value of `LLMpromptSummaryTemplate`.
-
-```
-{
- "Summary":"
Human: Answer the questions below, defined in based on the transcript defined in . If you cannot answer the question, reply with 'n/a'. Use gender neutral pronouns. When you reply, only respond with the answer.
What is a summary of the transcript?
{transcript}
Assistant:",
- "Topic":"
Human: Answer the questions below, defined in based on the transcript defined in . If you cannot answer the question, reply with 'n/a'. Use gender neutral pronouns. When you reply, only respond with the answer.
What is the topic of the call? For example, iphone issue, billing issue, cancellation. Only reply with the topic, nothing more.
{transcript}
Assistant:",
- "Product":"
Human: Answer the questions below, defined in based on the transcript defined in . If you cannot answer the question, reply with 'n/a'. Use gender neutral pronouns. When you reply, only respond with the answer.
What product did the customer call about? For example, internet, broadband, mobile phone, mobile plans. Only reply with the product, nothing more.
{transcript}
Assistant:",
- "Resolved":"
Human: Answer the questions below, defined in based on the transcript defined in . If you cannot answer the question, reply with 'n/a'. Use gender neutral pronouns. When you reply, only respond with the answer.
Did the agent resolve the customer's questions? Only reply with yes or no, nothing more.
{transcript}
Assistant:",
- "Callback":"
Human: Answer the questions below, defined in based on the transcript defined in . If you cannot answer the question, reply with 'n/a'. Use gender neutral pronouns. When you reply, only respond with the answer.
Was this a callback? (yes or no) Only reply with yes or no, nothing more.
{transcript}
Assistant:",
- "Politeness":"
Human: Answer the question below, defined in based on the transcript defined in . If you cannot answer the question, reply with 'n/a'. Use gender neutral pronouns. When you reply, only respond with the answer.
Was the agent polite and professional? (yes or no) Only reply with yes or no, nothing more.
{transcript}
Assistant:",
- "Actions":"
Human: Answer the question below, defined in based on the transcript defined in . If you cannot answer the question, reply with 'n/a'. Use gender neutral pronouns. When you reply, only respond with the answer.
What actions did the Agent take?
{transcript}
Assistant:"
-}
-```
-
-The expected output after the summarize step is a single json object, as a string, that contains all the key/value pairs. For example:
-
-```
-{
- "Summary": "...",
- "Topic": "...",
- "Product": "...",
- "Resolved": "...",
- "Callback": "...",
- "Politeness": "...",
- "Actions": "...",
-}
-```
-
-
-### Single FM Inference
-
-Some LLMs may be able to generate the JSON with one inference, rather than several. Below is an example that we've seen work, but with mixed results.
+The default value is:
```
-<br><br>Human: Answer all the questions below, based on the contents of <transcript></transcript>, as a json object with key value pairs. Use the text before the colon as the key, and the answer as the value. If you cannot answer the question, reply with 'n/a'. Only return json. Use gender neutral pronouns. Skip the preamble; go straight into the json.
+<br><br>Human: You are an AI chatbot. Carefully read the following transcript within <transcript></transcript>
+and then provide a short answer to the question. If the answer cannot be determined from the transcript or
+the context, then reply saying Sorry, I don't know. Use gender neutral pronouns. Skip the preamble; when you reply, only
+respond with the answer.
-
-<br>Summary: Summarize the transcript in no more than 5 sentences. Were the caller's needs met during the call?
-<br>Topic: Topic of the call. Choose from one of these or make one up (iphone issue, billing issue, cancellation)
-<br>Product: What product did the customer call about? (internet, broadband, mobile phone, mobile plans)
-<br>Resolved: Did the agent resolve the customer's questions? (yes or no)
-<br>Callback: Was this a callback? (yes or no)
-<br>Politeness: Was the agent polite and professional? (yes or no)
-<br>Actions: What actions did the Agent take?
-
+
{question}
{transcript}
@@ -75,35 +39,30 @@ Some LLMs may be able to generate the JSON with one inference, rather than sever
The `<br>` tags are replaced with newlines, and `{transcript}` is replaced with the call transcript.
-**Note:** This prompt generates 7 insights in a single inference - summary, topic, product, resolved, callback, agent politeness, and actions.
-The expected output of the inference should be a single JSON object with key-value pairs, similar to above.
+### Generative AI insights
-### Call list default columns
+The item in DynamoDB with the key `LLMPromptSummaryTemplate` contains one or more attributes. Each attribute holds a single prompt that is run once for every call analyzed. The attribute name is an integer, followed by a `#`, followed by the name of the insight; the number controls the order in which the insights appear. For example, `1#Summary` will show up first.
-The call list main screen contains additional pre-defined columns. If the output of the inference contains JSON with the column names (or the names are keys in the multiple inferences per call), the values will propogate to the main call list. The names columns are: `Summary`, `Topic`, `Product`, `Resolved`, `Callback`, `Politeness`, `Actions`. They are also in the default prompt.
+Default attributes:
-## Generative AI Queries
+| Key | Description | Prompt |
+| ----- | -------- | ---------- |
+| `1#Summary` | What is a summary of the transcript? | `<br><br>Human: Answer the questions below, defined in <question></question> based on the transcript defined in <transcript></transcript>. If you cannot answer the question, reply with 'n/a'. Use gender neutral pronouns. When you reply, only respond with the answer.<br><br><question>What is a summary of the transcript?</question><br><br><transcript><br>{transcript}<br></transcript><br><br>Assistant:` |
+| `2#Topic` | What is the topic of the call? | `<br><br>Human: Answer the questions below, defined in <question></question> based on the transcript defined in <transcript></transcript>. If you cannot answer the question, reply with 'n/a'. Use gender neutral pronouns. When you reply, only respond with the answer.<br><br><question>What is the topic of the call? For example, iphone issue, billing issue, cancellation. Only reply with the topic, nothing more.</question><br><br><transcript><br>{transcript}<br></transcript><br><br>Assistant:` |
+| `3#Product` | What product did the customer call about? | `<br><br>Human: Answer the questions below, defined in <question></question> based on the transcript defined in <transcript></transcript>. If you cannot answer the question, reply with 'n/a'. Use gender neutral pronouns. When you reply, only respond with the answer.<br><br><question>What product did the customer call about? For example, internet, broadband, mobile phone, mobile plans. Only reply with the product, nothing more.</question><br><br><transcript><br>{transcript}<br></transcript><br><br>Assistant:` |
+| `4#Resolved` | Did the agent resolve the customer's questions? Only reply with yes or no. | `<br><br>Human: Answer the questions below, defined in <question></question> based on the transcript defined in <transcript></transcript>. If you cannot answer the question, reply with 'n/a'. Use gender neutral pronouns. When you reply, only respond with the answer.<br><br><question>Did the agent resolve the customer's questions? Only reply with yes or no, nothing more.</question><br><br><transcript><br>{transcript}<br></transcript><br><br>Assistant:` |
+| `5#Callback` | Was this a callback? | `<br><br>Human: Answer the questions below, defined in <question></question> based on the transcript defined in <transcript></transcript>. If you cannot answer the question, reply with 'n/a'. Use gender neutral pronouns. When you reply, only respond with the answer.<br><br><question>Was this a callback? (yes or no) Only reply with yes or no, nothing more.</question><br><br><transcript><br>{transcript}<br></transcript><br><br>Assistant:` |
+| `6#Politeness` | Was the agent polite and professional? | `<br><br>Human: Answer the question below, defined in <question></question> based on the transcript defined in <transcript></transcript>. If you cannot answer the question, reply with 'n/a'. Use gender neutral pronouns. When you reply, only respond with the answer.<br><br><question>Was the agent polite and professional? (yes or no) Only reply with yes or no, nothing more.</question><br><br><transcript><br>{transcript}<br></transcript><br><br>Assistant:` |
+| `7#Actions` | What actions did the Agent take? | `<br><br>Human: Answer the question below, defined in <question></question> based on the transcript defined in <transcript></transcript>. If you cannot answer the question, reply with 'n/a'. Use gender neutral pronouns. When you reply, only respond with the answer.<br><br><question>What actions did the Agent take?</question><br><br><transcript><br>{transcript}<br></transcript><br><br>Assistant:` |
-For interactive queries from within PCA, it uses a different parameter, named `LLMPromptQueryTemplate`. This will only run a single inference per question.
+The `<br>` tags are replaced with newlines, and `{transcript}` is replaced with the call transcript. Some Bedrock models such as Claude require newlines in specific spots.
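+
+As an illustration, the substitution amounts to something like the following simplified Python sketch of what the summarization Lambda does with each prompt attribute:
+
+```
+def render_prompt(prompt_template, transcript):
+    # <br> tags become real newlines, and the call transcript is injected
+    prompt = prompt_template.replace("<br>", "\n")
+    prompt = prompt.replace("{transcript}", transcript)
+    return prompt
+```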
-The default value is:
+#### Customizing
-```
-
-<br><br>Human: You are an AI chatbot. Carefully read the following transcript within <transcript></transcript>
-and then provide a short answer to the question. If the answer cannot be determined from the transcript or
-the context, then reply saying Sorry, I don't know. Use gender neutral pronouns. Skip the preamble; when you reply, only
-respond with the answer.
-
-<br>{question}
-
-
-<br>{transcript}
-
-
-<br>Assistant:
-```
+You can add your own attributes and prompts by editing this item in DynamoDB. Make sure the attribute name includes an order number and an insight name, for example `9#NPS Score`. You can use any of the above prompts as a starting point for crafting a prompt. Do not forget to include `{transcript}` as a placeholder; otherwise the transcript will not be included in the LLM inference!
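+
+If you prefer to script the change rather than use the DynamoDB console, a sketch along these lines works; the table name is a placeholder for the table created by your stack, and the NPS prompt is only an example:
+
+```
+import boto3
+
+dynamodb = boto3.resource("dynamodb")
+table = dynamodb.Table("PROMPT_TABLE_NAME")  # table whose name contains LLMPromptConfigure
+
+nps_prompt = (
+    "<br><br>Human: Read the transcript in <transcript></transcript> and estimate the NPS score "
+    "(0-10) the customer would give. Only reply with the number.<br><br>"
+    "<transcript><br>{transcript}<br></transcript><br><br>Assistant:"
+)
+
+# Add (or overwrite) an attribute on the summary prompt item
+table.update_item(
+    Key={"LLMPromptTemplateId": "LLMPromptSummaryTemplate"},
+    UpdateExpression="SET #name = :prompt",
+    ExpressionAttributeNames={"#name": "9#NPS Score"},
+    ExpressionAttributeValues={":prompt": nps_prompt},
+)
+```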
-The `<br>` tags are replaced with newlines, and `{transcript}` is replaced with the call transcript.
+### Call list default columns
+
+The call list main screen contains additional pre-defined columns. If the output of the inference contains the column names, the values will propagate to the main call list. The named columns are: `Summary`, `Topic`, `Product`, `Resolved`, `Callback`, `Politeness`, `Actions`. They are also used in the default prompts.
diff --git a/pca-main-nokendra.template b/pca-main-nokendra.template
index c2d32d5f..6448beb2 100644
--- a/pca-main-nokendra.template
+++ b/pca-main-nokendra.template
@@ -1,6 +1,6 @@
AWSTemplateFormatVersion: "2010-09-09"
-Description: Amazon Transcribe Post Call Analytics - PCA (v0.7.4) (uksb-1sn29lk73)
+Description: Amazon Transcribe Post Call Analytics - PCA (v0.7.5) (uksb-1sn29lk73)
Parameters:
@@ -758,11 +758,17 @@ Resources:
ServiceToken: !GetAtt TestBedrockModelFunction.Arn
LLMModelId: !Ref SummarizationBedrockModelId
+ LLMPromptConfigure:
+ Type: AWS::CloudFormation::Stack
+ Properties:
+ TemplateURL: pca-server/cfn/lib/llm.template
+
########################################################
# SSM Stack
########################################################
SSM:
Type: AWS::CloudFormation::Stack
+ DependsOn: LLMPromptConfigure
Properties:
TemplateURL: pca-ssm/cfn/ssm.template
Parameters:
@@ -876,6 +882,7 @@ Resources:
- ShouldDeployBedrockBoto3Layer
- !GetAtt BedrockBoto3Layer.Outputs.Boto3Layer
- ''
+ LLMTableName: !GetAtt LLMPromptConfigure.Outputs.LLMTableName
PCAUI:
Type: AWS::CloudFormation::Stack
@@ -911,6 +918,7 @@ Resources:
- ShouldDeployBedrockBoto3Layer
- !GetAtt BedrockBoto3Layer.Outputs.Boto3Layer
- ''
+ LLMTableName: !GetAtt LLMPromptConfigure.Outputs.LLMTableName
PcaDashboards:
Type: AWS::CloudFormation::Stack
@@ -1086,10 +1094,10 @@ Outputs:
Description: Lambda function arn that will generate a string of the entire transcript for custom Lambda functions to use.
Value: !GetAtt PCAServer.Outputs.FetchTranscriptArn
- LLMPromptSummaryTemplateParameter:
- Description: The LLM summary prompt template in SSM Parameter Store - open to customise call summary prompts.
- Value: !Sub "https://${AWS::Region}.console.aws.amazon.com/systems-manager/parameters/${SSM.Outputs.LLMPromptSummaryTemplateParameter}"
-
- LLMPromptQueryTemplateParameter:
- Description: The LLM query prompt template in SSM Parameter Store - open to customise query prompts.
- Value: !Sub "https://${AWS::Region}.console.aws.amazon.com/systems-manager/parameters/${SSM.Outputs.LLMPromptQueryTemplateParameter}"
+ LLMPromptSummaryTemplate:
+ Description: The LLM summary prompt template in DynamoDB Table - open to customise summary prompts.
+ Value: !Sub "https://${AWS::Region}.console.aws.amazon.com/dynamodbv2/home?region=${AWS::Region}#edit-item?itemMode=2&pk=LLMPromptSummaryTemplate&route=ROUTE_ITEM_EXPLORER&sk=&table=${LLMPromptConfigure.Outputs.LLMTableName}"
+
+ LLMPromptQueryTemplate:
+ Description: The LLM query prompt template in DynamoDB Table - open to customise query prompts.
+ Value: !Sub "https://${AWS::Region}.console.aws.amazon.com/dynamodbv2/home?region=${AWS::Region}#edit-item?itemMode=2&pk=LLMPromptQueryTemplate&route=ROUTE_ITEM_EXPLORER&sk=&table=${LLMPromptConfigure.Outputs.LLMTableName}"
diff --git a/pca-main.template b/pca-main.template
index 65a45ee3..5a137dfb 100644
--- a/pca-main.template
+++ b/pca-main.template
@@ -1,6 +1,6 @@
AWSTemplateFormatVersion: "2010-09-09"
-Description: Amazon Transcribe Post Call Analytics - PCA (v0.7.4) (uksb-1sn29lk73)
+Description: Amazon Transcribe Post Call Analytics - PCA (v0.7.5) (uksb-1sn29lk73)
Parameters:
@@ -889,12 +889,18 @@ Resources:
Properties:
ServiceToken: !GetAtt TestBedrockModelFunction.Arn
LLMModelId: !Ref SummarizationBedrockModelId
-
+
+ LLMPromptConfigure:
+ Type: AWS::CloudFormation::Stack
+ Properties:
+ TemplateURL: pca-server/cfn/lib/llm.template
+
########################################################
# SSM Stack
########################################################
SSM:
Type: AWS::CloudFormation::Stack
+ DependsOn: LLMPromptConfigure
Properties:
TemplateURL: pca-ssm/cfn/ssm.template
Parameters:
@@ -1008,6 +1014,7 @@ Resources:
- ShouldDeployBedrockBoto3Layer
- !GetAtt BedrockBoto3Layer.Outputs.Boto3Layer
- ''
+ LLMTableName: !GetAtt LLMPromptConfigure.Outputs.LLMTableName
PCAUI:
Type: AWS::CloudFormation::Stack
@@ -1043,6 +1050,7 @@ Resources:
- ShouldDeployBedrockBoto3Layer
- !GetAtt BedrockBoto3Layer.Outputs.Boto3Layer
- ''
+ LLMTableName: !GetAtt LLMPromptConfigure.Outputs.LLMTableName
MediaSearchFinder:
Type: AWS::CloudFormation::Stack
@@ -1232,10 +1240,10 @@ Outputs:
Description: Lambda function arn that will generate a string of the entire transcript for custom Lambda functions to use.
Value: !GetAtt PCAServer.Outputs.FetchTranscriptArn
- LLMPromptSummaryTemplateParameter:
- Description: The LLM summary prompt template in SSM Parameter Store - open to customise call summary prompts.
- Value: !Sub "https://${AWS::Region}.console.aws.amazon.com/systems-manager/parameters/${SSM.Outputs.LLMPromptSummaryTemplateParameter}"
-
- LLMPromptQueryTemplateParameter:
- Description: The LLM query prompt template in SSM Parameter Store - open to customise query prompts.
- Value: !Sub "https://${AWS::Region}.console.aws.amazon.com/systems-manager/parameters/${SSM.Outputs.LLMPromptQueryTemplateParameter}"
+ LLMPromptSummaryTemplate:
+ Description: The LLM summary prompt template in DynamoDB Table - open to customise summary prompts.
+ Value: !Sub "https://${AWS::Region}.console.aws.amazon.com/dynamodbv2/home?region=${AWS::Region}#edit-item?itemMode=2&pk=LLMPromptSummaryTemplate&route=ROUTE_ITEM_EXPLORER&sk=&table=${LLMPromptConfigure.Outputs.LLMTableName}"
+
+ LLMPromptQueryTemplate:
+ Description: The LLM query prompt template in DynamoDB Table - open to customise query prompts.
+ Value: !Sub "https://${AWS::Region}.console.aws.amazon.com/dynamodbv2/home?region=${AWS::Region}#edit-item?itemMode=2&pk=LLMPromptQueryTemplate&route=ROUTE_ITEM_EXPLORER&sk=&table=${LLMPromptConfigure.Outputs.LLMTableName}"
diff --git a/pca-server/cfn/lib/llm.template b/pca-server/cfn/lib/llm.template
new file mode 100644
index 00000000..9ba89bbd
--- /dev/null
+++ b/pca-server/cfn/lib/llm.template
@@ -0,0 +1,107 @@
+AWSTemplateFormatVersion: "2010-09-09"
+
+Description: Amazon Transcribe Post Call Analytics - PCA Server - LLM Prompt Configuration
+
+Transform: AWS::Serverless-2016-10-31
+
+Parameters:
+ LLMPromptSummaryTemplate:
+ Type: String
+ Description: >-
+ Prompt to use to generate insights for a call. This can be a single string where an LLM returns a string,
+ or a single string where the LLM returns a JSON object with key/value pairs, or a string that contains
+ a JSON Object with key/value pairs, where the LLM will run one inference on each key/value pair with the value
+ containing the prompt. Use {transcript} as a placeholder for where the call transcript will be injected.
+ Default: >-
+ {
+ "Summary":"
Human: Answer the questions below, defined in based on the transcript defined in . If you cannot answer the question, reply with 'n/a'. Use gender neutral pronouns. When you reply, only respond with the answer.
What is a summary of the transcript?
{transcript}
Assistant:",
+ "Topic":"
Human: Answer the questions below, defined in based on the transcript defined in . If you cannot answer the question, reply with 'n/a'. Use gender neutral pronouns. When you reply, only respond with the answer.
What is the topic of the call? For example, iphone issue, billing issue, cancellation. Only reply with the topic, nothing more.
{transcript}
Assistant:",
+ "Product":"
Human: Answer the questions below, defined in based on the transcript defined in . If you cannot answer the question, reply with 'n/a'. Use gender neutral pronouns. When you reply, only respond with the answer.
What product did the customer call about? For example, internet, broadband, mobile phone, mobile plans. Only reply with the product, nothing more.
{transcript}
Assistant:",
+ "Resolved":"
Human: Answer the questions below, defined in based on the transcript defined in . If you cannot answer the question, reply with 'n/a'. Use gender neutral pronouns. When you reply, only respond with the answer.
Did the agent resolve the customer's questions? Only reply with yes or no, nothing more.
{transcript}
Assistant:",
+ "Callback":"
Human: Answer the questions below, defined in based on the transcript defined in . If you cannot answer the question, reply with 'n/a'. Use gender neutral pronouns. When you reply, only respond with the answer.
Was this a callback? (yes or no) Only reply with yes or no, nothing more.
{transcript}
Assistant:",
+ "Politeness":"
Human: Answer the question below, defined in based on the transcript defined in . If you cannot answer the question, reply with 'n/a'. Use gender neutral pronouns. When you reply, only respond with the answer.
Was the agent polite and professional? (yes or no) Only reply with yes or no, nothing more.
{transcript}
Assistant:",
+ "Actions":"
Human: Answer the question below, defined in based on the transcript defined in . If you cannot answer the question, reply with 'n/a'. Use gender neutral pronouns. When you reply, only respond with the answer.
What actions did the Agent take?
{transcript}
Assistant:"
+ }
+
+ LLMPromptQueryTemplate:
+ Type: String
+ Description: This is the LLM prompt template to use when querying an individual call transcript.
+ Default: >-
+ <br><br>Human: You are an AI chatbot. Carefully read the following transcript within <transcript></transcript> tags. Provide a
+ short answer to the question at the end. If the answer cannot be determined from the transcript, then reply saying Sorry,
+ I don't know. Use gender neutral pronouns. Do not use XML tags in the answer. <transcript><br>{transcript}<br></transcript><br>{question}<br><br>Assistant:
+
+Resources:
+ LLMPromptTable:
+ Type: "AWS::DynamoDB::Table"
+ Properties:
+ KeySchema:
+ - AttributeName: LLMPromptTemplateId
+ KeyType: HASH
+ AttributeDefinitions:
+ - AttributeName: LLMPromptTemplateId
+ AttributeType: S
+ BillingMode: PAY_PER_REQUEST
+ SSESpecification:
+ SSEEnabled: True
+ PointInTimeRecoverySpecification:
+ PointInTimeRecoveryEnabled: true
+
+ ConfigureDynamoDBRole:
+ Type: "AWS::IAM::Role"
+ Properties:
+ AssumeRolePolicyDocument:
+ Statement:
+ - Effect: Allow
+ Principal:
+ Service: lambda.amazonaws.com
+ Action:
+ - "sts:AssumeRole"
+ ManagedPolicyArns:
+ - "arn:aws:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole"
+ Policies:
+ - PolicyName: allow-dynamodb-prompt-config
+ PolicyDocument:
+ Statement:
+ Effect: Allow
+ Resource: !GetAtt LLMPromptTable.Arn
+ Action:
+ - 'dynamodb:PutItem'
+ - 'dynamodb:UpdateItem'
+ - PolicyName: SSMGetParameterPolicy
+ PolicyDocument:
+ Statement:
+ - Effect: Allow
+ Action:
+ - ssm:GetParameter
+ - ssm:GetParameters
+ Resource: !Sub arn:aws:ssm:${AWS::Region}:${AWS::AccountId}:parameter/*
+
+ ConfigureDynamoDBFunction:
+ Type: "AWS::Lambda::Function"
+ Properties:
+ Code: ../../src/llm
+ Handler: llm_prompt_upload.lambda_handler
+ Role: !GetAtt ConfigureDynamoDBRole.Arn
+ Runtime: python3.11
+ MemorySize: 128
+ Timeout: 60
+ Environment:
+ Variables:
+ StackName: !Ref AWS::StackName
+ TableName: !Ref LLMPromptTable
+ LLMPromptSummaryTemplate: !Ref LLMPromptSummaryTemplate
+ LLMPromptQueryTemplate: !Ref LLMPromptQueryTemplate
+
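+ # Custom resource that runs ConfigureDynamoDBFunction on stack create to seed the table with the default prompts, migrating any existing LLMPromptSummaryTemplate / LLMPromptQueryTemplate SSM parameters if present.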
+ ConfigureDynamoDB:
+ Type: "AWS::CloudFormation::CustomResource"
+ Properties:
+ ServiceToken: !GetAtt ConfigureDynamoDBFunction.Arn
+ TableName: !Ref LLMPromptTable
+ StackName: !Ref AWS::StackName
+ LLMPromptSummaryTemplate: !Ref LLMPromptSummaryTemplate
+ LLMPromptQueryTemplate: !Ref LLMPromptQueryTemplate
+
+Outputs:
+ LLMTableName:
+ Value: !Ref LLMPromptTable
\ No newline at end of file
diff --git a/pca-server/cfn/lib/pca.template b/pca-server/cfn/lib/pca.template
index 44e25930..182b17cc 100644
--- a/pca-server/cfn/lib/pca.template
+++ b/pca-server/cfn/lib/pca.template
@@ -51,6 +51,9 @@ Parameters:
Type: String
Default: ''
+ LLMTableName:
+ Type: String
+
Globals:
Function:
Runtime: python3.11
@@ -269,6 +272,7 @@ Resources:
AWS_DATA_PATH: "/opt/models"
FETCH_TRANSCRIPT_LAMBDA_ARN: !GetAtt SFFetchTranscript.Arn
BEDROCK_MODEL_ID: !Ref SummarizationBedrockModelId
+ LLM_TABLE_NAME: !Ref LLMTableName
SUMMARY_TYPE: !Ref CallSummarization
SUMMARY_SAGEMAKER_ENDPOINT: !Ref SummarizationSagemakerEndpointName
ANTHROPIC_API_KEY: !Ref SummarizationLLMThirdPartyApiKey
@@ -288,6 +292,11 @@ Resources:
Action:
- lambda:InvokeFunction
Resource: !GetAtt SFFetchTranscript.Arn
+ - Sid: DynamoDBAccess
+ Effect: Allow
+ Resource: !Sub arn:${AWS::Partition}:dynamodb:${AWS::Region}:${AWS::AccountId}:table/${LLMTableName}
+ Action:
+ - 'dynamodb:GetItem'
- Sid: InvokeBedrock
Effect: Allow
Action:
diff --git a/pca-server/cfn/pca-server.template b/pca-server/cfn/pca-server.template
index 70d409b0..865adb5d 100644
--- a/pca-server/cfn/pca-server.template
+++ b/pca-server/cfn/pca-server.template
@@ -61,6 +61,11 @@ Parameters:
Type: String
Description: PyUtils layer arn from main stack.
+ LLMTableName:
+ Type: String
+ Description: The DynamoDB table name where the summary and query prompt templates are stored.
+
+
Conditions:
ShouldCreateBoto3Layer: !Equals [!Ref Boto3LayerArn, '']
ShouldDeployBedrockSummarizer: !Equals [!Ref CallSummarization, "BEDROCK"]
@@ -116,6 +121,7 @@ Resources:
- ''
SummarizationLLMThirdPartyApiKey: !Ref SummarizationLLMThirdPartyApiKey
SummarizationLambdaFunctionArn: !Ref SummarizationLambdaFunctionArn
+ LLMTableName: !Ref LLMTableName
Trigger:
Type: AWS::CloudFormation::Stack
diff --git a/pca-server/src/llm/cfnresponse.py b/pca-server/src/llm/cfnresponse.py
new file mode 100644
index 00000000..78cd9b3b
--- /dev/null
+++ b/pca-server/src/llm/cfnresponse.py
@@ -0,0 +1,47 @@
+# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+# SPDX-License-Identifier: MIT-0
+
+from __future__ import print_function
+import urllib3
+import json
+
+SUCCESS = "SUCCESS"
+FAILED = "FAILED"
+
+http = urllib3.PoolManager()
+
+
+def send(event, context, responseStatus, responseData, physicalResourceId=None, noEcho=False, reason=None):
+ responseUrl = event['ResponseURL']
+
+ print(responseUrl)
+
+ responseBody = {
+ 'Status' : responseStatus,
+ 'Reason' : reason or "See the details in CloudWatch Log Stream: {}".format(context.log_stream_name),
+ 'PhysicalResourceId' : physicalResourceId or context.log_stream_name,
+ 'StackId' : event['StackId'],
+ 'RequestId' : event['RequestId'],
+ 'LogicalResourceId' : event['LogicalResourceId'],
+ 'NoEcho' : noEcho,
+ 'Data' : responseData
+ }
+
+ json_responseBody = json.dumps(responseBody)
+
+ print("Response body:")
+ print(json_responseBody)
+
+ headers = {
+ 'content-type' : '',
+ 'content-length' : str(len(json_responseBody))
+ }
+
+ try:
+ response = http.request('PUT', responseUrl, headers=headers, body=json_responseBody)
+ print("Status code:", response.status)
+
+
+ except Exception as e:
+
+ print("send(..) failed executing http.request(..):", e)
diff --git a/pca-server/src/llm/llm_prompt_upload.py b/pca-server/src/llm/llm_prompt_upload.py
new file mode 100644
index 00000000..b9f3dd47
--- /dev/null
+++ b/pca-server/src/llm/llm_prompt_upload.py
@@ -0,0 +1,88 @@
+import boto3
+import cfnresponse
+import json
+
+def lambda_handler(event, context):
+ print(event)
+ # Init ...
+ the_event = event['RequestType']
+ print("The event is: ", str(the_event))
+
+ table_name = event['ResourceProperties']['TableName']
+ llm_prompt_summary_template = event['ResourceProperties']['LLMPromptSummaryTemplate']
+ llm_prompt_query_template = event['ResourceProperties']['LLMPromptQueryTemplate']
+
+ response_data = {}
+ dynamodb = boto3.resource('dynamodb')
+ table = dynamodb.Table(table_name)
+ ssm_client = boto3.client('ssm')
+
+ try:
+ if the_event == 'Create':
+ # First look if LLMPromptSummaryTemplate parameter exists in the Parameter Store, so we can
+ # migrate the template to DynamoDB. This is to preserve backwards compatibility when users
+ # update their stack.
+
+ try:
+ summary_prompt_template_str = ssm_client.get_parameter(Name="LLMPromptSummaryTemplate")["Parameter"]["Value"]
+ except Exception as e:
+ print("No parameter found:", str(e))
+ summary_prompt_template_str = llm_prompt_summary_template
+
+ try:
+ summary_prompt_template = json.loads(summary_prompt_template_str)
+ except Exception as e:
+ print("Not a valid JSON:", str(e))
+ summary_prompt_template = {"Summary": summary_prompt_template_str}
+
+ update_expression = "SET"
+ expression_attribute_names = {}
+ expression_attribute_values = {}
+
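+ # Build a single UpdateExpression that stores each prompt as its own attribute named "<order>#<insight>" (e.g. "1#Summary"); the numeric prefix controls the order the insights are processed and displayed in.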
+ i = 1
+ for key, value in summary_prompt_template.items():
+ update_expression += f" #{i} = :{i},"
+ expression_attribute_names[f"#{i}"] = f"{i}#{key}"
+ expression_attribute_values[f":{i}"] = value
+ i += 1
+
+ update_expression = update_expression[:-1] # remove last comma
+
+ # Next look if LLMPromptQueryTemplate parameter exists in the Parameter Store, so we can
+ # migrate the template to DynamoDB. This is to preserve backwards compatibility when users
+ # update their stack.
+
+ try:
+ query_prompt_template = ssm_client.get_parameter(Name="LLMPromptQueryTemplate")["Parameter"]["Value"]
+ except Exception as e:
+ print("No parameter found:", str(e))
+ query_prompt_template = llm_prompt_query_template
+
+ response = table.update_item(
+ Key={'LLMPromptTemplateId': 'LLMPromptSummaryTemplate'},
+ UpdateExpression=update_expression,
+ ExpressionAttributeValues=expression_attribute_values,
+ ExpressionAttributeNames=expression_attribute_names
+ )
+
+ item = {
+ 'LLMPromptTemplateId': 'LLMPromptQueryTemplate',
+ 'LLMPromptTemplateValue': query_prompt_template
+ }
+
+ response = table.put_item(Item=item)
+
+ # Everything OK... send the signal back
+ print("Operation successful!")
+ cfnresponse.send(event,
+ context,
+ cfnresponse.SUCCESS,
+ response_data)
+ except Exception as e:
+ print("Operation failed...")
+ print(str(e))
+ response_data['Data'] = str(e)
+ cfnresponse.send(event,
+ context,
+ cfnresponse.FAILED,
+ response_data)
\ No newline at end of file
diff --git a/pca-server/src/pca/pca-aws-sf-bulk-queue-space.py b/pca-server/src/pca/pca-aws-sf-bulk-queue-space.py
index 2a047246..3debf455 100644
--- a/pca-server/src/pca/pca-aws-sf-bulk-queue-space.py
+++ b/pca-server/src/pca/pca-aws-sf-bulk-queue-space.py
@@ -20,9 +20,16 @@ def countTranscribeJobsInState(status, client, filesLimit):
response = client.list_transcription_jobs(Status=status)
found = len(response["TranscriptionJobSummaries"])
while ("NextToken" in response) and (found <= filesLimit):
- response = client.list_transcription_jobs(Status="IN_PROGRESS", NextToken=response["NextToken"])
+ response = client.list_transcription_jobs(Status=status, NextToken=response["NextToken"])
found += len(response["TranscriptionJobSummaries"])
+ # now count the call analytics jobs
+ response = client.list_call_analytics_jobs(Status=status)
+ found += len(response["CallAnalyticsJobSummaries"])
+ while ("NextToken" in response) and (found <= filesLimit):
+ response = client.list_call_analytics_jobs(Status=status, NextToken=response["NextToken"])
+ found += len(response["CallAnalyticsJobSummaries"])
+
return found
diff --git a/pca-server/src/pca/pca-aws-sf-summarize.py b/pca-server/src/pca/pca-aws-sf-summarize.py
index 93862869..810a2375 100644
--- a/pca-server/src/pca/pca-aws-sf-summarize.py
+++ b/pca-server/src/pca/pca-aws-sf-summarize.py
@@ -26,12 +26,14 @@
FETCH_TRANSCRIPT_LAMBDA_ARN = os.getenv('FETCH_TRANSCRIPT_LAMBDA_ARN','')
BEDROCK_MODEL_ID = os.environ.get("BEDROCK_MODEL_ID","amazon.titan-text-express-v1")
BEDROCK_ENDPOINT_URL = os.environ.get("ENDPOINT_URL", f'https://bedrock-runtime.{AWS_REGION}.amazonaws.com')
+LLM_TABLE_NAME = os.getenv('LLM_TABLE_NAME')
MAX_TOKENS = int(os.getenv('MAX_TOKENS','256'))
lambda_client = boto3.client('lambda')
-ssmClient = boto3.client("ssm")
bedrock_client = None
+s3Client = boto3.client('s3')
+dynamodb_client = boto3.client('dynamodb')
config = Config(
retries = {
@@ -134,27 +136,31 @@ def generate_sagemaker_summary(transcript):
summary = "No summary"
return summary
-def get_templates_from_ssm():
+def get_templates_from_dynamodb():
templates = []
try:
- SUMMARY_PROMPT_TEMPLATE = ssmClient.get_parameter(Name=cf.CONF_LLM_PROMPT_SUMMARY_TEMPLATE)["Parameter"]["Value"]
+ SUMMARY_PROMPT_TEMPLATE = dynamodb_client.get_item(Key={'LLMPromptTemplateId': {'S': 'LLMPromptSummaryTemplate'}},
+ TableName=LLM_TABLE_NAME)
- prompt_templates = json.loads(SUMMARY_PROMPT_TEMPLATE)
- for k, v in prompt_templates.items():
- prompt = v.replace("<br>", "\n")
- templates.append({ k:prompt })
- except:
- prompt = SUMMARY_PROMPT_TEMPLATE.replace("<br>", "\n")
- templates.append({
- "Summary": prompt
- })
- print("Prompt: ",prompt)
+ print ("Prompt Template:", SUMMARY_PROMPT_TEMPLATE['Item'])
+
+ prompt_templates = SUMMARY_PROMPT_TEMPLATE["Item"]
+
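+ # Attribute names look like "<order>#<insight>" (e.g. "1#Summary"); iterate them in sorted order and strip the prefix up to '#' to recover the insight name.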
+ for k in sorted(prompt_templates):
+ if (k != "LLMPromptTemplateId"):
+ prompt = prompt_templates[k]['S'].replace("<br>", "\n")
+ index = k.find('#')
+ k_stripped = k[index+1:]
+ templates.append({ k_stripped:prompt })
+ except Exception as e:
+ print ("Exception:", e)
+ raise (e)
return templates
def generate_anthropic_summary(transcript):
# first check to see if this is one prompt, or many prompts as a json
- templates = get_templates_from_ssm()
+ templates = get_templates_from_dynamodb()
result = {}
for item in templates:
key = list(item.keys())[0]
@@ -175,15 +181,25 @@ def generate_anthropic_summary(transcript):
summary = json.loads(response.text)["completion"].strip()
result[key] = summary
if len(result.keys()) == 1:
- # there's only one summary in here, so let's return just that.
- # this may contain json or a string.
- return result[list(result.keys())[0]]
+ # This is a single node JSON with value that can be either:
+ # A single inference that returns a string value
+ # OR
+ # A single inference that returns a JSON, enclosed in a string.
+ # Refer to https://github.com/aws-samples/amazon-transcribe-post-call-analytics/blob/develop/docs/generative_ai.md#generative-ai-insights
+ # for more details.
+ try:
+ parsed_json = json.loads(result[list(result.keys())[0]])
+ print("Nested JSON...")
+ return json.dumps(parsed_json)
+ except:
+ print("Not nested JSON...")
+ return json.dumps(result)
return json.dumps(result)
def generate_bedrock_summary(transcript):
# first check to see if this is one prompt, or many prompts as a json
- templates = get_templates_from_ssm()
+ templates = get_templates_from_dynamodb()
result = {}
for item in templates:
key = list(item.keys())[0]
@@ -195,9 +211,19 @@ def generate_bedrock_summary(transcript):
generated_text = call_bedrock(parameters, prompt)
result[key] = generated_text
if len(result.keys()) == 1:
- # there's only one summary in here, so let's return just that.
- # this may contain json or a string.
- return result[list(result.keys())[0]]
+ # This is a single node JSON with value that can be either:
+ # A single inference that returns a string value
+ # OR
+ # A single inference that returns a JSON, enclosed in a string.
+ # Refer to https://github.com/aws-samples/amazon-transcribe-post-call-analytics/blob/develop/docs/generative_ai.md#generative-ai-insights
+ # for more details.
+ try:
+ parsed_json = json.loads(result[list(result.keys())[0]])
+ print("Nested JSON...")
+ return json.dumps(parsed_json)
+ except:
+ print("Not nested JSON...")
+ return json.dumps(result)
return json.dumps(result)
diff --git a/pca-server/src/pca/pcaconfiguration.py b/pca-server/src/pca/pcaconfiguration.py
index 9f717cd7..ddb3d614 100644
--- a/pca-server/src/pca/pcaconfiguration.py
+++ b/pca-server/src/pca/pcaconfiguration.py
@@ -49,8 +49,6 @@
CONF_TRANSCRIBE_API = "TranscribeApiMode"
CONF_REDACTION_TRANSCRIPT = "CallRedactionTranscript"
CONF_REDACTION_AUDIO = "CallRedactionAudio"
-CONF_LLM_PROMPT_SUMMARY_TEMPLATE = "LLMPromptSummaryTemplate"
-CONF_LLM_PROMPT_QUERY_TEMPLATE = "LLMPromptQueryTemplate"
# Parameter store fieldnames used by bulk import
BULK_S3_BUCKET = "BulkUploadBucket"
diff --git a/pca-ssm/cfn/ssm.template b/pca-ssm/cfn/ssm.template
index ecf4772f..ba26e089 100644
--- a/pca-ssm/cfn/ssm.template
+++ b/pca-ssm/cfn/ssm.template
@@ -84,32 +84,6 @@ Parameters:
Folder that holds Transcripts from other applications (e.g. Live Call Analytics) that are to be
processed as if PCA had processed that audio
- LLMPromptSummaryTemplate:
- Type: String
- Description: >-
- Prompt to use to generate insights for a call. This can be a single string where an LLM returns a string,
- or a single string where the LLM returns a JSON object with key/value pairs, or a string that contains
- a JSON Object with key/value pairs, where the LLM will run one inference on each key/value pair with the value
- containing the prompt. Use {transcript} as a placeholder for where the call transcript will be injected.
- Default: >-
- {
- "Summary":"
Human: Answer the questions below, defined in based on the transcript defined in . If you cannot answer the question, reply with 'n/a'. Use gender neutral pronouns. When you reply, only respond with the answer.
What is a summary of the transcript?
{transcript}
Assistant:",
- "Topic":"
Human: Answer the questions below, defined in based on the transcript defined in . If you cannot answer the question, reply with 'n/a'. Use gender neutral pronouns. When you reply, only respond with the answer.
What is the topic of the call? For example, iphone issue, billing issue, cancellation. Only reply with the topic, nothing more.
{transcript}
Assistant:",
- "Product":"
Human: Answer the questions below, defined in based on the transcript defined in . If you cannot answer the question, reply with 'n/a'. Use gender neutral pronouns. When you reply, only respond with the answer.
What product did the customer call about? For example, internet, broadband, mobile phone, mobile plans. Only reply with the product, nothing more.
{transcript}
Assistant:",
- "Resolved":"
Human: Answer the questions below, defined in based on the transcript defined in . If you cannot answer the question, reply with 'n/a'. Use gender neutral pronouns. When you reply, only respond with the answer.
Did the agent resolve the customer's questions? Only reply with yes or no, nothing more.
{transcript}
Assistant:",
- "Callback":"
Human: Answer the questions below, defined in based on the transcript defined in . If you cannot answer the question, reply with 'n/a'. Use gender neutral pronouns. When you reply, only respond with the answer.
Was this a callback? (yes or no) Only reply with yes or no, nothing more.
{transcript}
Assistant:",
- "Politeness":"
Human: Answer the question below, defined in based on the transcript defined in . If you cannot answer the question, reply with 'n/a'. Use gender neutral pronouns. When you reply, only respond with the answer.
Was the agent polite and professional? (yes or no) Only reply with yes or no, nothing more.
{transcript}
Assistant:",
- "Actions":"
Human: Answer the question below, defined in based on the transcript defined in . If you cannot answer the question, reply with 'n/a'. Use gender neutral pronouns. When you reply, only respond with the answer.
What actions did the Agent take?
{transcript}
Assistant:"
- }
-
- LLMPromptQueryTemplate:
- Type: String
- Description: This is the LLM prompt template to use when querying an individual call transcript.
- Default: >-
- <br><br>Human: You are an AI chatbot. Carefully read the following transcript within <transcript></transcript> tags. Provide a
- short answer to the question at the end. If the answer cannot be determined from the transcript, then reply saying Sorry,
- I don't know. Use gender neutral pronouns. Do not use XML tags in the answer. <transcript><br>{transcript}<br></transcript><br>{question}<br><br>Assistant:
-
MaxSpeakers:
Type: String
Default: "2"
@@ -410,26 +384,6 @@ Resources:
processed as if PCA had processed that audio
Value: !Ref InputBucketOrigTranscripts
- LLMPromptSummaryTemplateParameter:
- Type: "AWS::SSM::Parameter"
- Properties:
- Name: LLMPromptSummaryTemplate
- Type: String
- Description: >
- Prompt to use to generate insights for a call. This can be a single string where an LLM returns a string,
- or a single string where the LLM returns a JSON object with key/value pairs, or a string that contains
- a JSON Object with key/value pairs, where the LLM will run one inference on each key/value pair with the value
- containing the prompt. Use {transcript} as a placeholder for where the call transcript will be injected.
- Value: !Ref LLMPromptSummaryTemplate
-
- LLMPromptQueryTemplateParameter:
- Type: "AWS::SSM::Parameter"
- Properties:
- Name: LLMPromptQueryTemplate
- Type: String
- Description: This is the LLM prompt template to use when querying an individual call transcript.
- Value: !Ref LLMPromptQueryTemplate
-
MaxSpeakersParameter:
Type: "AWS::SSM::Parameter"
Properties:
@@ -690,10 +644,3 @@ Resources:
Type: String
Value: !Ref DatabaseName
Description: PCA Glue catalog database name
-
-Outputs:
- LLMPromptSummaryTemplateParameter:
- Value: !Ref LLMPromptSummaryTemplateParameter
-
- LLMPromptQueryTemplateParameter:
- Value: !Ref LLMPromptQueryTemplateParameter
\ No newline at end of file
diff --git a/pca-ui/cfn/lib/api.template b/pca-ui/cfn/lib/api.template
index 216ad4b4..712e63cf 100644
--- a/pca-ui/cfn/lib/api.template
+++ b/pca-ui/cfn/lib/api.template
@@ -66,6 +66,10 @@ Parameters:
Type: String
Description: External PyUtils Layer Arn to use.
+ LLMTableName:
+ Type: String
+ Description: The DynamoDB table name where the summary and query prompt templates are stored.
+
Conditions:
ShouldAddBoto3Layer: !Not [!Equals [!Ref Boto3LayerArn, '']]
HasAnthropicSummary: !Equals [!Ref GenAIQueryType, 'ANTHROPIC']
@@ -268,6 +272,7 @@ Resources:
ANTHROPIC_MODEL_IDENTIFIER: "claude-2"
ANTHROPIC_ENDPOINT_URL: "https://api.anthropic.com/v1/complete"
ANTHROPIC_API_KEY: !Ref LLMThirdPartyApiKey
+ LLM_TABLE_NAME: !Ref LLMTableName
Events:
APIEvent:
Type: Api
@@ -287,6 +292,11 @@ Resources:
Action:
- lambda:InvokeFunction
Resource: !Ref FetchTranscriptArn
+ - Sid: DynamoDBAccess
+ Effect: Allow
+ Resource: !Sub arn:${AWS::Partition}:dynamodb:${AWS::Region}:${AWS::AccountId}:table/${LLMTableName}
+ Action:
+ - 'dynamodb:GetItem'
- Sid: InvokeBedrock
Effect: Allow
Action:
diff --git a/pca-ui/cfn/lib/cognito.template b/pca-ui/cfn/lib/cognito.template
index c2cfcd81..592d06f2 100644
--- a/pca-ui/cfn/lib/cognito.template
+++ b/pca-ui/cfn/lib/cognito.template
@@ -20,7 +20,7 @@ Parameters:
Type: String
Conditions:
- IsProd: !Equals [!Ref Environment, true]
+ IsProd: !Equals [!Ref Environment, PROD]
Resources:
diff --git a/pca-ui/cfn/pca-ui.template b/pca-ui/cfn/pca-ui.template
index 66697684..1c469613 100644
--- a/pca-ui/cfn/pca-ui.template
+++ b/pca-ui/cfn/pca-ui.template
@@ -89,6 +89,10 @@ Parameters:
Type: String
Description: External PyUtils Layer Arn to use.
+ LLMTableName:
+ Type: String
+ Description: The DynamoDB table name where the summary and query prompt templates are stored.
+
Conditions:
isMainStackNameEmpty: !Equals [!Ref MainStackName, '']
ShouldEnableGenAIQuery: !Not [!Equals [!Ref GenAIQueryType, 'DISABLED']]
@@ -153,6 +157,7 @@ Resources:
SummarizerArn: !Ref SummarizerArn
Boto3LayerArn: !Ref Boto3LayerArn
PyUtilsLayerArn: !Ref PyUtilsLayerArn
+ LLMTableName: !Ref LLMTableName
Deploy:
Type: AWS::CloudFormation::Stack
diff --git a/pca-ui/src/genai/index.py b/pca-ui/src/genai/index.py
index 1b9719f1..026753dc 100644
--- a/pca-ui/src/genai/index.py
+++ b/pca-ui/src/genai/index.py
@@ -23,11 +23,11 @@
FETCH_TRANSCRIPT_LAMBDA_ARN = os.getenv('FETCH_TRANSCRIPT_LAMBDA_ARN','')
BEDROCK_MODEL_ID = os.getenv("BEDROCK_MODEL_ID","amazon.text-express-v1")
BEDROCK_ENDPOINT_URL = os.getenv("ENDPOINT_URL", f'https://bedrock-runtime.{AWS_REGION}.amazonaws.com')
-CONF_LLM_PROMPT_QUERY_TEMPLATE = os.getenv("CONF_LLM_PROMPT_QUERY_TEMPLATE","LLMPromptQueryTemplate")
+LLM_TABLE_NAME = os.getenv('LLM_TABLE_NAME', '')
MAX_TOKENS = int(os.getenv('MAX_TOKENS','256'))
lambda_client = boto3.client('lambda')
-ssmClient = boto3.client("ssm")
+dynamodb_client = boto3.client('dynamodb')
bedrock_client = None
def get_third_party_llm_secret():
@@ -104,18 +104,23 @@ def call_bedrock(parameters, prompt):
generated_text = get_bedrock_generate_text(modelId, response)
return generated_text
-def get_template_from_ssm():
+def get_template_from_dynamodb():
try:
- prompt_template = ssmClient.get_parameter(Name=CONF_LLM_PROMPT_QUERY_TEMPLATE)["Parameter"]["Value"]
+ QUERY_PROMPT_TEMPLATE = dynamodb_client.get_item(Key={'LLMPromptTemplateId': {'S': 'LLMPromptQueryTemplate'}},
+ TableName=LLM_TABLE_NAME)
+ print ("Prompt Template:", QUERY_PROMPT_TEMPLATE['Item']['LLMPromptTemplateValue']['S'])
+
+ prompt_template = QUERY_PROMPT_TEMPLATE["Item"]['LLMPromptTemplateValue']['S']
prompt_template = prompt_template.replace("<br>", "\n")
- except:
- prompt_template = "Human: Answer the following question in 1 sentence based on the transcript. If the question is not relevant to the transcript, reply with I'm sorry, this is not relevant. \n{question}\n\n\n{transcript}\n\nAssistant: Based on the transcript: "
+ except Exception as e:
+ print("Exception", e)
+ prompt_template = "Human: Answer the following question in 1 sentence based on the transcript. If the question is not relevant to the transcript, reply with I'm sorry, this is not relevant. \n{question}\n\n\n{transcript}\n\nAssistant: Based on the transcript: "
return prompt_template
def generate_anthropic_query(transcript, question):
# first check to see if this is one prompt, or many prompts as a json
- prompt = get_template_from_ssm()
+ prompt = get_template_from_dynamodb()
prompt = prompt.replace("{transcript}", transcript)
prompt = prompt.replace("{question}", question)
@@ -142,7 +147,7 @@ def generate_anthropic_query(transcript, question):
def generate_bedrock_query(transcript, question):
# first check to see if this is one prompt, or many prompts as a json
- prompt = get_template_from_ssm()
+ prompt = get_template_from_dynamodb()
prompt = prompt.replace("{transcript}", transcript)
prompt = prompt.replace("{question}", question)