From d30575ad0c55c8761b1d03da602732efdb641048 Mon Sep 17 00:00:00 2001 From: Steph Milovic Date: Fri, 17 Jan 2025 14:30:02 -0700 Subject: [PATCH 1/5] wip --- .../group3/type_registrations.test.ts | 1 + .../context/fixtures/integration.nginx.ts | 1 + .../context/fixtures/integration.okta.ts | 1 + .../package_to_package_policy.test.ts | 1 + .../shared/fleet/common/types/models/epm.ts | 2 + .../integrations/sections/epm/constants.tsx | 6 +++ .../fleet/server/routes/epm/index.test.ts | 1 + ...kage_policies_to_agent_permissions.test.ts | 6 +++ .../services/epm/kibana/assets/install.ts | 4 +- .../elastic_assistant/common/constants.ts | 3 ++ .../lib/prompt/saved_object_mappings.ts | 43 +++++++++++++++++++ .../elastic_assistant/server/plugin.ts | 3 ++ .../elastic_assistant/server/saved_objects.ts | 13 ++++++ 13 files changed, 84 insertions(+), 1 deletion(-) create mode 100644 x-pack/solutions/security/plugins/elastic_assistant/server/lib/prompt/saved_object_mappings.ts create mode 100644 x-pack/solutions/security/plugins/elastic_assistant/server/saved_objects.ts diff --git a/src/core/server/integration_tests/saved_objects/migrations/group3/type_registrations.test.ts b/src/core/server/integration_tests/saved_objects/migrations/group3/type_registrations.test.ts index cee7f307ee67d..97bce6b66c919 100644 --- a/src/core/server/integration_tests/saved_objects/migrations/group3/type_registrations.test.ts +++ b/src/core/server/integration_tests/saved_objects/migrations/group3/type_registrations.test.ts @@ -123,6 +123,7 @@ const previouslyRegisteredTypes = [ 'search', 'search-session', 'search-telemetry', + 'security-ai-prompt', 'security-rule', 'security-solution-signals-migration', 'risk-engine-configuration', diff --git a/x-pack/platform/plugins/shared/fleet/.storybook/context/fixtures/integration.nginx.ts b/x-pack/platform/plugins/shared/fleet/.storybook/context/fixtures/integration.nginx.ts index 8f47d564c44a2..ea89331156566 100644 --- 
a/x-pack/platform/plugins/shared/fleet/.storybook/context/fixtures/integration.nginx.ts +++ b/x-pack/platform/plugins/shared/fleet/.storybook/context/fixtures/integration.nginx.ts @@ -251,6 +251,7 @@ export const item: GetInfoResponse['item'] = { index_pattern: [], lens: [], map: [], + security_ai_prompt: [], security_rule: [], csp_rule_template: [], tag: [], diff --git a/x-pack/platform/plugins/shared/fleet/.storybook/context/fixtures/integration.okta.ts b/x-pack/platform/plugins/shared/fleet/.storybook/context/fixtures/integration.okta.ts index 8778938443661..ddae02803faf0 100644 --- a/x-pack/platform/plugins/shared/fleet/.storybook/context/fixtures/integration.okta.ts +++ b/x-pack/platform/plugins/shared/fleet/.storybook/context/fixtures/integration.okta.ts @@ -106,6 +106,7 @@ export const item: GetInfoResponse['item'] = { ml_module: [], osquery_pack_asset: [], osquery_saved_query: [], + security_ai_prompt: [], security_rule: [], csp_rule_template: [], tag: [], diff --git a/x-pack/platform/plugins/shared/fleet/common/services/package_to_package_policy.test.ts b/x-pack/platform/plugins/shared/fleet/common/services/package_to_package_policy.test.ts index 8f96c3ffc197d..c800bc4b02151 100644 --- a/x-pack/platform/plugins/shared/fleet/common/services/package_to_package_policy.test.ts +++ b/x-pack/platform/plugins/shared/fleet/common/services/package_to_package_policy.test.ts @@ -33,6 +33,7 @@ describe('Fleet - packageToPackagePolicy', () => { map: [], lens: [], ml_module: [], + security_ai_prompt: [], security_rule: [], tag: [], osquery_pack_asset: [], diff --git a/x-pack/platform/plugins/shared/fleet/common/types/models/epm.ts b/x-pack/platform/plugins/shared/fleet/common/types/models/epm.ts index af8a0acf9b2d4..048814979540d 100644 --- a/x-pack/platform/plugins/shared/fleet/common/types/models/epm.ts +++ b/x-pack/platform/plugins/shared/fleet/common/types/models/epm.ts @@ -59,6 +59,7 @@ export enum KibanaAssetType { indexPattern = 'index_pattern', map = 'map', 
mlModule = 'ml_module', + securityAIPrompt = 'security_ai_prompt', securityRule = 'security_rule', cloudSecurityPostureRuleTemplate = 'csp_rule_template', osqueryPackAsset = 'osquery_pack_asset', @@ -77,6 +78,7 @@ export enum KibanaSavedObjectType { indexPattern = 'index-pattern', map = 'map', mlModule = 'ml-module', + securityAIPrompt = 'security-ai-prompt', securityRule = 'security-rule', cloudSecurityPostureRuleTemplate = 'csp-rule-template', osqueryPackAsset = 'osquery-pack-asset', diff --git a/x-pack/platform/plugins/shared/fleet/public/applications/integrations/sections/epm/constants.tsx b/x-pack/platform/plugins/shared/fleet/public/applications/integrations/sections/epm/constants.tsx index c65e5c8d56440..266aae8636710 100644 --- a/x-pack/platform/plugins/shared/fleet/public/applications/integrations/sections/epm/constants.tsx +++ b/x-pack/platform/plugins/shared/fleet/public/applications/integrations/sections/epm/constants.tsx @@ -47,6 +47,12 @@ export const AssetTitleMap: Record< map: i18n.translate('xpack.fleet.epm.assetTitles.maps', { defaultMessage: 'Maps', }), + 'security-ai-prompt': i18n.translate('xpack.fleet.epm.assetTitles.securityAIPrompt', { + defaultMessage: 'Security AI prompt', + }), + security_ai_prompt: i18n.translate('xpack.fleet.epm.assetTitles.securityAIPrompt', { + defaultMessage: 'Security AI prompt', + }), 'security-rule': i18n.translate('xpack.fleet.epm.assetTitles.securityRules', { defaultMessage: 'Security rules', }), diff --git a/x-pack/platform/plugins/shared/fleet/server/routes/epm/index.test.ts b/x-pack/platform/plugins/shared/fleet/server/routes/epm/index.test.ts index d736cb7c318ba..d89b9ce85c05a 100644 --- a/x-pack/platform/plugins/shared/fleet/server/routes/epm/index.test.ts +++ b/x-pack/platform/plugins/shared/fleet/server/routes/epm/index.test.ts @@ -193,6 +193,7 @@ describe('schema validation', () => { map: [], index_pattern: [], ml_module: [], + security_ai_prompt: [], security_rule: [], tag: [], csp_rule_template: [], 
diff --git a/x-pack/platform/plugins/shared/fleet/server/services/agent_policies/package_policies_to_agent_permissions.test.ts b/x-pack/platform/plugins/shared/fleet/server/services/agent_policies/package_policies_to_agent_permissions.test.ts index 55d1f0b36ca8e..214117cb15fdb 100644 --- a/x-pack/platform/plugins/shared/fleet/server/services/agent_policies/package_policies_to_agent_permissions.test.ts +++ b/x-pack/platform/plugins/shared/fleet/server/services/agent_policies/package_policies_to_agent_permissions.test.ts @@ -38,6 +38,7 @@ packageInfoCache.set('test_package-0.0.0', { index_pattern: [], map: [], lens: [], + security_ai_prompt: [], security_rule: [], ml_module: [], tag: [], @@ -122,6 +123,7 @@ packageInfoCache.set('osquery_manager-0.3.0', { index_pattern: [], map: [], lens: [], + security_ai_prompt: [], security_rule: [], ml_module: [], tag: [], @@ -172,6 +174,7 @@ packageInfoCache.set('profiler_symbolizer-8.8.0-preview', { index_pattern: [], map: [], lens: [], + security_ai_prompt: [], security_rule: [], ml_module: [], tag: [], @@ -222,6 +225,7 @@ packageInfoCache.set('profiler_collector-8.9.0-preview', { index_pattern: [], map: [], lens: [], + security_ai_prompt: [], security_rule: [], ml_module: [], tag: [], @@ -264,6 +268,7 @@ packageInfoCache.set('apm-8.9.0-preview', { index_pattern: [], map: [], lens: [], + security_ai_prompt: [], security_rule: [], ml_module: [], tag: [], @@ -306,6 +311,7 @@ packageInfoCache.set('elastic_connectors-1.0.0', { index_pattern: [], map: [], lens: [], + security_ai_prompt: [], security_rule: [], ml_module: [], tag: [], diff --git a/x-pack/platform/plugins/shared/fleet/server/services/epm/kibana/assets/install.ts b/x-pack/platform/plugins/shared/fleet/server/services/epm/kibana/assets/install.ts index d5594f54300b2..5adf1708eb25a 100644 --- a/x-pack/platform/plugins/shared/fleet/server/services/epm/kibana/assets/install.ts +++ b/x-pack/platform/plugins/shared/fleet/server/services/epm/kibana/assets/install.ts @@ -33,9 
+33,10 @@ import { deleteKibanaSavedObjectsAssets } from '../../packages/remove'; import { FleetError, KibanaSOReferenceError } from '../../../../errors'; import { withPackageSpan } from '../../packages/utils'; +import { appContextService } from '../../..'; + import { tagKibanaAssets } from './tag_assets'; import { getSpaceAwareSaveobjectsClients } from './saved_objects'; -import { appContextService } from '../../..'; const MAX_ASSETS_TO_INSTALL_IN_PARALLEL = 1000; @@ -70,6 +71,7 @@ export const KibanaSavedObjectTypeMapping: Record { + savedObjects.registerType(promptType); +}; From 0217ad1f99d20a29cad8146d572b70e54b6608a8 Mon Sep 17 00:00:00 2001 From: Steph Milovic Date: Tue, 21 Jan 2025 08:12:30 -0700 Subject: [PATCH 2/5] mapping update --- .../server/lib/prompt/saved_object_mappings.ts | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/x-pack/solutions/security/plugins/elastic_assistant/server/lib/prompt/saved_object_mappings.ts b/x-pack/solutions/security/plugins/elastic_assistant/server/lib/prompt/saved_object_mappings.ts index 98e4d0e12c660..66c02add1bdc2 100644 --- a/x-pack/solutions/security/plugins/elastic_assistant/server/lib/prompt/saved_object_mappings.ts +++ b/x-pack/solutions/security/plugins/elastic_assistant/server/lib/prompt/saved_object_mappings.ts @@ -15,7 +15,13 @@ export const promptSavedObjectMappings: SavedObjectsType['mappings'] = { type: 'text', }, name: { - type: 'text', + type: 'keyword', + }, + llm: { + type: 'keyword', + }, + model: { + type: 'keyword', }, version: { type: 'long', From f131f3582dae03cea9b9abb227dda06fa7b063eb Mon Sep 17 00:00:00 2001 From: Steph Milovic Date: Tue, 21 Jan 2025 08:36:56 -0700 Subject: [PATCH 3/5] rm version --- .../server/lib/prompt/saved_object_mappings.ts | 3 --- 1 file changed, 3 deletions(-) diff --git a/x-pack/solutions/security/plugins/elastic_assistant/server/lib/prompt/saved_object_mappings.ts 
b/x-pack/solutions/security/plugins/elastic_assistant/server/lib/prompt/saved_object_mappings.ts index 66c02add1bdc2..5424eee49d4a4 100644 --- a/x-pack/solutions/security/plugins/elastic_assistant/server/lib/prompt/saved_object_mappings.ts +++ b/x-pack/solutions/security/plugins/elastic_assistant/server/lib/prompt/saved_object_mappings.ts @@ -23,9 +23,6 @@ export const promptSavedObjectMappings: SavedObjectsType['mappings'] = { model: { type: 'keyword', }, - version: { - type: 'long', - }, prompt: { properties: { // ISO 639 two-letter language code From 2cf3950634aeb59f2c2b3feeab3391d1fffa4bad Mon Sep 17 00:00:00 2001 From: Steph Milovic Date: Tue, 21 Jan 2025 09:35:47 -0700 Subject: [PATCH 4/5] better naming --- .../server/lib/prompt/saved_object_mappings.ts | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/x-pack/solutions/security/plugins/elastic_assistant/server/lib/prompt/saved_object_mappings.ts b/x-pack/solutions/security/plugins/elastic_assistant/server/lib/prompt/saved_object_mappings.ts index 5424eee49d4a4..1aaeee712e836 100644 --- a/x-pack/solutions/security/plugins/elastic_assistant/server/lib/prompt/saved_object_mappings.ts +++ b/x-pack/solutions/security/plugins/elastic_assistant/server/lib/prompt/saved_object_mappings.ts @@ -14,7 +14,7 @@ export const promptSavedObjectMappings: SavedObjectsType['mappings'] = { description: { type: 'text', }, - name: { + promptId: { type: 'keyword', }, llm: { @@ -25,10 +25,11 @@ export const promptSavedObjectMappings: SavedObjectsType['mappings'] = { }, prompt: { properties: { - // ISO 639 two-letter language code - en: { + // English is default + default: { type: 'text', }, + // optionally, add ISO 639 two-letter language code to support more translations }, }, }, From 51b6eb53e2d904052ba3803a116079c46ad84120 Mon Sep 17 00:00:00 2001 From: Steph Milovic Date: Wed, 22 Jan 2025 15:10:49 -0700 Subject: [PATCH 5/5] getPrompts --- .../server/lib/langchain/executors/types.ts | 2 + 
.../graphs/default_assistant_graph/index.ts | 19 +- .../server/lib/prompt/get_prompt.test.ts | 386 ++++++++++++++++++ .../server/lib/prompt/get_prompt.ts | 197 +++++++++ .../server/lib/prompt/prompts.ts | 78 ++++ .../server/routes/chat/chat_complete_route.ts | 2 + .../server/routes/helpers.ts | 4 + .../routes/post_actions_connector_execute.ts | 2 + .../server/routes/request_context_factory.ts | 2 +- .../plugins/elastic_assistant/server/types.ts | 4 +- 10 files changed, 689 insertions(+), 7 deletions(-) create mode 100644 x-pack/solutions/security/plugins/elastic_assistant/server/lib/prompt/get_prompt.test.ts create mode 100644 x-pack/solutions/security/plugins/elastic_assistant/server/lib/prompt/get_prompt.ts create mode 100644 x-pack/solutions/security/plugins/elastic_assistant/server/lib/prompt/prompts.ts diff --git a/x-pack/solutions/security/plugins/elastic_assistant/server/lib/langchain/executors/types.ts b/x-pack/solutions/security/plugins/elastic_assistant/server/lib/langchain/executors/types.ts index abef39d8b2e25..deb737d9ad36a 100644 --- a/x-pack/solutions/security/plugins/elastic_assistant/server/lib/langchain/executors/types.ts +++ b/x-pack/solutions/security/plugins/elastic_assistant/server/lib/langchain/executors/types.ts @@ -18,6 +18,7 @@ import type { InferenceServerStart } from '@kbn/inference-plugin/server'; import { AnalyticsServiceSetup } from '@kbn/core-analytics-server'; import { TelemetryParams } from '@kbn/langchain/server/tracers/telemetry/telemetry_tracer'; import type { LlmTasksPluginStart } from '@kbn/llm-tasks-plugin/server'; +import { SavedObjectsClientContract } from '@kbn/core-saved-objects-api-server'; import { ResponseBody } from '../types'; import type { AssistantTool } from '../../../types'; import { AIAssistantKnowledgeBaseDataClient } from '../../../ai_assistant_data_clients/knowledge_base'; @@ -57,6 +58,7 @@ export interface AgentExecutorParams { onLlmResponse?: OnLlmResponse; request: KibanaRequest; response?: 
KibanaResponseFactory; + savedObjectsClient: SavedObjectsClientContract; size?: number; systemPrompt?: string; telemetry: AnalyticsServiceSetup; diff --git a/x-pack/solutions/security/plugins/elastic_assistant/server/lib/langchain/graphs/default_assistant_graph/index.ts b/x-pack/solutions/security/plugins/elastic_assistant/server/lib/langchain/graphs/default_assistant_graph/index.ts index 2e94e4bcd4ea0..49f5b0c3846d2 100644 --- a/x-pack/solutions/security/plugins/elastic_assistant/server/lib/langchain/graphs/default_assistant_graph/index.ts +++ b/x-pack/solutions/security/plugins/elastic_assistant/server/lib/langchain/graphs/default_assistant_graph/index.ts @@ -14,6 +14,7 @@ import { } from 'langchain/agents'; import { APMTracer } from '@kbn/langchain/server/tracers/apm'; import { TelemetryTracer } from '@kbn/langchain/server/tracers/telemetry'; +import { getPrompt, promptDictionary } from '../../../prompt/get_prompt'; import { getLlmClass } from '../../../../routes/utils'; import { EsAnonymizationFieldsSchema } from '../../../../ai_assistant_data_clients/anonymization_fields/types'; import { AssistantToolParams } from '../../../../types'; @@ -44,6 +45,7 @@ export const callAssistantGraph: AgentExecutor = async ({ onNewReplacements, replacements, request, + savedObjectsClient, size, systemPrompt, telemetry, @@ -130,22 +132,29 @@ export const callAssistantGraph: AgentExecutor = async ({ } } + const defaultSystemPrompt = await getPrompt({ + savedObjectsClient, + llm: llmType, + // use oss as model when using openai and oss + model: llmType === 'openai' && isOssModel ? 'oss' : request.body.model, + promptId: promptDictionary.systemPrompt, + actionsClient, + connectorId, + }); + const agentRunnable = isOpenAI || llmType === 'inference' ? 
await createOpenAIToolsAgent({ llm: createLlmInstance(), tools, - prompt: formatPrompt(systemPrompts.openai, systemPrompt), + prompt: formatPrompt(defaultSystemPrompt, systemPrompt), streamRunnable: isStream, }) : llmType && ['bedrock', 'gemini'].includes(llmType) ? await createToolCallingAgent({ llm: createLlmInstance(), tools, - prompt: - llmType === 'bedrock' - ? formatPrompt(systemPrompts.bedrock, systemPrompt) - : formatPrompt(systemPrompts.gemini, systemPrompt), + prompt: formatPrompt(defaultSystemPrompt, systemPrompt), streamRunnable: isStream, }) : // used with OSS models diff --git a/x-pack/solutions/security/plugins/elastic_assistant/server/lib/prompt/get_prompt.test.ts b/x-pack/solutions/security/plugins/elastic_assistant/server/lib/prompt/get_prompt.test.ts new file mode 100644 index 0000000000000..7b11932ce695a --- /dev/null +++ b/x-pack/solutions/security/plugins/elastic_assistant/server/lib/prompt/get_prompt.test.ts @@ -0,0 +1,386 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +import { getPrompt, promptDictionary } from './get_prompt'; +import { SavedObjectsClientContract } from '@kbn/core-saved-objects-api-server'; +import { ActionsClient } from '@kbn/actions-plugin/server'; +import { BEDROCK_SYSTEM_PROMPT, DEFAULT_SYSTEM_PROMPT, STRUCTURED_SYSTEM_PROMPT } from './prompts'; + +jest.mock('@kbn/core-saved-objects-api-server'); +jest.mock('@kbn/actions-plugin/server'); +const defaultConnector = { + id: 'mock', + name: 'Mock', + isPreconfigured: false, + isDeprecated: false, + isSystemAction: false, + actionTypeId: '.inference', +}; +describe('getPrompt', () => { + let savedObjectsClient: jest.Mocked; + let actionsClient: jest.Mocked; + + beforeEach(() => { + jest.clearAllMocks(); + savedObjectsClient = { + find: jest.fn().mockResolvedValue({ + page: 1, + per_page: 20, + total: 3, + saved_objects: [ + { + type: 'security-ai-prompt', + id: '977b39b8-5bb9-4530-9a39-7aa7084fb5c0', + attributes: { + promptId: promptDictionary.systemPrompt, + llm: 'openai', + model: 'gpt-4o', + description: 'Default prompt for AI Assistant system prompt.', + prompt: { + default: 'Hello world this is a system prompt', + }, + }, + references: [], + managed: false, + updated_at: '2025-01-22T18:44:35.271Z', + updated_by: 'u_mGBROF_q5bmFCATbLXAcCwKa0k8JvONAwSruelyKA5E_0', + created_at: '2025-01-22T18:44:35.271Z', + created_by: 'u_mGBROF_q5bmFCATbLXAcCwKa0k8JvONAwSruelyKA5E_0', + version: 'Wzk0MiwxXQ==', + coreMigrationVersion: '8.8.0', + score: 0.13353139, + }, + { + type: 'security-ai-prompt', + id: 'd6dacb9b-1029-4c4c-85e1-e4f97b31c7f4', + attributes: { + promptId: promptDictionary.systemPrompt, + llm: 'openai', + description: 'Default prompt for AI Assistant system prompt.', + prompt: { + default: 'Hello world this is a system prompt no model', + }, + }, + references: [], + managed: false, + updated_at: '2025-01-22T19:11:48.806Z', + updated_by: 'u_mGBROF_q5bmFCATbLXAcCwKa0k8JvONAwSruelyKA5E_0', + created_at: '2025-01-22T19:11:48.806Z', + created_by: 
'u_mGBROF_q5bmFCATbLXAcCwKa0k8JvONAwSruelyKA5E_0', + version: 'Wzk4MCwxXQ==', + coreMigrationVersion: '8.8.0', + score: 0.13353139, + }, + { + type: 'security-ai-prompt', + id: 'd6dacb9b-1029-4c4c-85e1-e4f97b31c7f4', + attributes: { + promptId: promptDictionary.systemPrompt, + llm: 'bedrock', + description: 'Default prompt for AI Assistant system prompt.', + prompt: { + default: 'Hello world this is a system prompt for bedrock', + }, + }, + references: [], + managed: false, + updated_at: '2025-01-22T19:11:48.806Z', + updated_by: 'u_mGBROF_q5bmFCATbLXAcCwKa0k8JvONAwSruelyKA5E_0', + created_at: '2025-01-22T19:11:48.806Z', + created_by: 'u_mGBROF_q5bmFCATbLXAcCwKa0k8JvONAwSruelyKA5E_0', + version: 'Wzk4MCwxXQ==', + coreMigrationVersion: '8.8.0', + score: 0.13353139, + }, + { + type: 'security-ai-prompt', + id: 'd6dacb9b-1029-4c4c-85e1-e4f97b31c7f4', + attributes: { + promptId: promptDictionary.systemPrompt, + llm: 'bedrock', + model: 'us.anthropic.claude-3-5-sonnet-20240620-v1:0', + description: 'Default prompt for AI Assistant system prompt.', + prompt: { + default: 'Hello world this is a system prompt for bedrock claude-3-5-sonnet', + }, + }, + references: [], + managed: false, + updated_at: '2025-01-22T19:11:48.806Z', + updated_by: 'u_mGBROF_q5bmFCATbLXAcCwKa0k8JvONAwSruelyKA5E_0', + created_at: '2025-01-22T19:11:48.806Z', + created_by: 'u_mGBROF_q5bmFCATbLXAcCwKa0k8JvONAwSruelyKA5E_0', + version: 'Wzk4MCwxXQ==', + coreMigrationVersion: '8.8.0', + score: 0.13353139, + }, + { + type: 'security-ai-prompt', + id: 'da530fad-87ce-49c3-a088-08073e5034d6', + attributes: { + promptId: promptDictionary.systemPrompt, + description: 'Default prompt for AI Assistant system prompt.', + prompt: { + default: 'Hello world this is a system prompt no model, no llm', + }, + }, + references: [], + managed: false, + updated_at: '2025-01-22T19:12:12.911Z', + updated_by: 'u_mGBROF_q5bmFCATbLXAcCwKa0k8JvONAwSruelyKA5E_0', + created_at: '2025-01-22T19:12:12.911Z', + created_by: 
'u_mGBROF_q5bmFCATbLXAcCwKa0k8JvONAwSruelyKA5E_0', + version: 'Wzk4MiwxXQ==', + coreMigrationVersion: '8.8.0', + score: 0.13353139, + }, + ], + }), + } as unknown as jest.Mocked; + + actionsClient = { + get: jest.fn().mockResolvedValue({ + config: { + provider: 'openai', + providerConfig: { model_id: 'gpt-4o' }, + }, + }), + } as unknown as jest.Mocked; + }); + + it('should return the prompt matching llm and model', async () => { + const result = await getPrompt({ + savedObjectsClient, + promptId: promptDictionary.systemPrompt, + llm: 'openai', + model: 'gpt-4o', + actionsClient, + connectorId: 'connector-123', + }); + expect(actionsClient.get).not.toHaveBeenCalled(); + + expect(result).toBe('Hello world this is a system prompt'); + }); + + it('should return the prompt matching llm when model does not have a match', async () => { + const result = await getPrompt({ + savedObjectsClient, + promptId: promptDictionary.systemPrompt, + llm: 'openai', + model: 'gpt-4o-mini', + actionsClient, + connectorId: 'connector-123', + }); + expect(actionsClient.get).not.toHaveBeenCalled(); + + expect(result).toBe('Hello world this is a system prompt no model'); + }); + + it('should return the prompt matching llm when model is not provided', async () => { + const result = await getPrompt({ + savedObjectsClient, + promptId: promptDictionary.systemPrompt, + llm: 'openai', + actionsClient, + connectorId: 'connector-123', + }); + expect(actionsClient.get).toHaveBeenCalled(); + + expect(result).toBe('Hello world this is a system prompt no model'); + }); + + it('should return the default prompt when there is no match on llm', async () => { + const result = await getPrompt({ + savedObjectsClient, + promptId: promptDictionary.systemPrompt, + llm: 'badone', + actionsClient, + connectorId: 'connector-123', + }); + + expect(result).toBe('Hello world this is a system prompt no model, no llm'); + }); + + it('should default llm to bedrock if llm is "inference"', async () => { + 
actionsClient.get.mockResolvedValue(defaultConnector); + + const result = await getPrompt({ + savedObjectsClient, + promptId: promptDictionary.systemPrompt, + llm: 'inference', + model: 'gpt-4o', + actionsClient, + connectorId: 'connector-123', + }); + + expect(result).toBe('Hello world this is a system prompt for bedrock'); + }); + + it('should return the expected prompt from when llm is "elastic" and model matches in elasticModelDictionary', async () => { + actionsClient.get.mockResolvedValue({ + ...defaultConnector, + config: { + provider: 'elastic', + providerConfig: { model_id: 'rainbow-sprinkles' }, + }, + }); + + const result = await getPrompt({ + savedObjectsClient, + promptId: promptDictionary.systemPrompt, + llm: 'inference', + actionsClient, + connectorId: 'connector-123', + }); + + expect(result).toBe('Hello world this is a system prompt for bedrock claude-3-5-sonnet'); + }); + + it('should return the bedrock prompt when llm is "elastic" but model does not match elasticModelDictionary', async () => { + actionsClient.get.mockResolvedValue({ + ...defaultConnector, + config: { + provider: 'elastic', + providerConfig: { model_id: 'unknown-model' }, + }, + }); + + const result = await getPrompt({ + savedObjectsClient, + promptId: promptDictionary.systemPrompt, + llm: 'inference', + actionsClient, + connectorId: 'connector-123', + }); + + expect(result).toBe('Hello world this is a system prompt for bedrock'); + }); + + it('should return the model prompt when no prompts are found and model is provided', async () => { + savedObjectsClient.find.mockResolvedValue({ + page: 1, + per_page: 20, + total: 0, + saved_objects: [], + }); + + const result = await getPrompt({ + savedObjectsClient, + promptId: promptDictionary.systemPrompt, + actionsClient, + llm: 'bedrock', + connectorId: 'connector-123', + }); + + expect(result).toBe(BEDROCK_SYSTEM_PROMPT); + }); + + it('should return the default prompt when no prompts are found', async () => { + 
savedObjectsClient.find.mockResolvedValue({ + page: 1, + per_page: 20, + total: 0, + saved_objects: [], + }); + + const result = await getPrompt({ + savedObjectsClient, + promptId: promptDictionary.systemPrompt, + actionsClient, + connectorId: 'connector-123', + }); + + expect(result).toBe(DEFAULT_SYSTEM_PROMPT); + }); + + it('should return an empty string when no prompts are found', async () => { + savedObjectsClient.find.mockResolvedValue({ + page: 1, + per_page: 20, + total: 0, + saved_objects: [], + }); + + const result = await getPrompt({ + savedObjectsClient, + promptId: 'nonexistent-prompt', + actionsClient, + connectorId: 'connector-123', + }); + + expect(result).toBe(''); + }); + + it('should handle invalid connector configuration gracefully when llm is "inference"', async () => { + actionsClient.get.mockResolvedValue({ + ...defaultConnector, + config: {}, + }); + const result = await getPrompt({ + savedObjectsClient, + promptId: promptDictionary.systemPrompt, + llm: 'inference', + actionsClient, + connectorId: 'connector-123', + }); + + expect(result).toBe('Hello world this is a system prompt for bedrock'); + }); + + it('should retrieve the connector when no model or llm is provided', async () => { + actionsClient.get.mockResolvedValue({ + ...defaultConnector, + actionTypeId: '.bedrock', + config: { + defaultModel: 'us.anthropic.claude-3-5-sonnet-20240620-v1:0', + }, + }); + const result = await getPrompt({ + savedObjectsClient, + promptId: promptDictionary.systemPrompt, + actionsClient, + connectorId: 'connector-123', + }); + expect(actionsClient.get).toHaveBeenCalled(); + + expect(result).toBe('Hello world this is a system prompt for bedrock claude-3-5-sonnet'); + }); + + it('should retrieve the connector when no model is provided', async () => { + actionsClient.get.mockResolvedValue({ + ...defaultConnector, + actionTypeId: '.bedrock', + config: { + defaultModel: 'us.anthropic.claude-3-5-sonnet-20240620-v1:0', + }, + }); + const result = await 
getPrompt({ + savedObjectsClient, + promptId: promptDictionary.systemPrompt, + llm: 'bedrock', + actionsClient, + connectorId: 'connector-123', + }); + expect(actionsClient.get).toHaveBeenCalled(); + + expect(result).toBe('Hello world this is a system prompt for bedrock claude-3-5-sonnet'); + }); + + it('should return the OSS prompt matching llm and model', async () => { + const result = await getPrompt({ + savedObjectsClient, + promptId: promptDictionary.systemPrompt, + llm: 'openai', + model: 'oss', + actionsClient, + connectorId: 'connector-123', + }); + expect(actionsClient.get).not.toHaveBeenCalled(); + + expect(result).toBe(STRUCTURED_SYSTEM_PROMPT); + }); +}); diff --git a/x-pack/solutions/security/plugins/elastic_assistant/server/lib/prompt/get_prompt.ts b/x-pack/solutions/security/plugins/elastic_assistant/server/lib/prompt/get_prompt.ts new file mode 100644 index 0000000000000..39de7b4b4b5ba --- /dev/null +++ b/x-pack/solutions/security/plugins/elastic_assistant/server/lib/prompt/get_prompt.ts @@ -0,0 +1,197 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +import { SavedObject, SavedObjectsClientContract } from '@kbn/core-saved-objects-api-server'; +import { PublicMethodsOf } from '@kbn/utility-types'; +import { ActionsClient } from '@kbn/actions-plugin/server'; +import { + BEDROCK_SYSTEM_PROMPT, + DEFAULT_SYSTEM_PROMPT, + GEMINI_SYSTEM_PROMPT, + GEMINI_USER_PROMPT, + STRUCTURED_SYSTEM_PROMPT, +} from './prompts'; +import { getLlmType } from '../../routes/utils'; +import { promptSavedObjectType } from '../../../common/constants'; + +export const promptDictionary = { + systemPrompt: 'systemPrompt-default', + userPrompt: 'userPrompt-default', +}; + +interface GetPromptArgs { + savedObjectsClient: SavedObjectsClientContract; + promptId: string; + llm?: string; + model?: string; + actionsClient: PublicMethodsOf; + connectorId: string; +} +interface ElasticModelDictionary { + [key: string]: { + llm: string; + model: string; + }; +} + +interface Prompt { + promptId: string; + prompt: { + default: string; + }; + llm?: string; + model?: string; + description?: string; +} + +const elasticModelDictionary: ElasticModelDictionary = { + 'rainbow-sprinkles': { + llm: 'bedrock', + model: 'us.anthropic.claude-3-5-sonnet-20240620-v1:0', + }, +}; +export const getPrompt = async ({ + savedObjectsClient, + promptId, + model: providedModel, + llm: providedLlm, + actionsClient, + connectorId, +}: GetPromptArgs): Promise => { + let model = providedModel; + let llm = providedLlm; + if (!llm || !model || llm === 'inference') { + const connector = await actionsClient.get({ id: connectorId }); + // At least one of 'llm' or 'model' is missing, get it from connector details + if (llm === 'inference' && !!connector.config) { + llm = connector.config.provider || llm; + model = connector.config.providerConfig?.model_id || model; + if (llm === 'elastic' && !!model) { + // default back to inference if no llm exists for the model + llm = elasticModelDictionary[model]?.llm || 'inference'; + model = elasticModelDictionary[model]?.model; + } + } 
else if (llm !== 'inference' && !!connector.config) { + llm = llm || getLlmType(connector.actionTypeId); + model = model || connector.config.defaultModel; + } + } + + // if after all of this, llm is still inference, treat as Bedrock + if (llm === 'inference') { + llm = 'bedrock'; + } + + const prompts = await savedObjectsClient.find({ + type: promptSavedObjectType, + searchFields: ['promptId'], + search: promptId, + fields: ['llm', 'model', 'prompt'], + }); + + return findPromptEntry(prompts.saved_objects, promptId, llm, model) || ''; +}; + +const findPromptEntry = ( + prompts: Array>, + promptId: string, + llm?: string, + model?: string +) => { + const backupPrompts = localPrompts.filter((p) => p.attributes.promptId === promptId); + + // Try to find the entry with matching llm and model + let entry = + prompts.find((prompt) => prompt.attributes.llm === llm && prompt.attributes.model === model) || + backupPrompts.find( + (prompt) => prompt.attributes.llm === llm && prompt.attributes.model === model + ); + if (!entry) { + // If no match, try to find an entry with matching llm + entry = + prompts.find((prompt) => prompt.attributes.llm === llm && !prompt.attributes.model) || + backupPrompts.find((prompt) => prompt.attributes.llm === llm && !prompt.attributes.model); + } + + if (!entry) { + // If still no match, find the entry without llm or model + entry = + prompts.find((prompt) => !prompt.attributes.llm && !prompt.attributes.model) || + backupPrompts.find((prompt) => prompt.attributes.llm === llm && !prompt.attributes.model); + } + + return entry?.attributes?.prompt?.default; +}; + +const defaultSavedObject = { + id: '', + references: [], + type: 'security-ai-prompt', +}; + +const localPrompts: Array> = [ + { + ...defaultSavedObject, + attributes: { + promptId: promptDictionary.systemPrompt, + llm: 'openai', + prompt: { + default: DEFAULT_SYSTEM_PROMPT, + }, + }, + }, + { + ...defaultSavedObject, + attributes: { + promptId: promptDictionary.systemPrompt, + prompt: { 
+ default: DEFAULT_SYSTEM_PROMPT, + }, + }, + }, + { + ...defaultSavedObject, + attributes: { + promptId: promptDictionary.systemPrompt, + llm: 'bedrock', + prompt: { + default: BEDROCK_SYSTEM_PROMPT, + }, + }, + }, + { + ...defaultSavedObject, + attributes: { + promptId: promptDictionary.systemPrompt, + llm: 'gemini', + prompt: { + default: GEMINI_SYSTEM_PROMPT, + }, + }, + }, + { + ...defaultSavedObject, + attributes: { + promptId: promptDictionary.systemPrompt, + llm: 'openai', + model: 'oss', + prompt: { + default: STRUCTURED_SYSTEM_PROMPT, + }, + }, + }, + { + ...defaultSavedObject, + attributes: { + promptId: promptDictionary.userPrompt, + llm: 'gemini', + prompt: { + default: GEMINI_USER_PROMPT, + }, + }, + }, +]; diff --git a/x-pack/solutions/security/plugins/elastic_assistant/server/lib/prompt/prompts.ts b/x-pack/solutions/security/plugins/elastic_assistant/server/lib/prompt/prompts.ts new file mode 100644 index 0000000000000..d3bffd2e009c3 --- /dev/null +++ b/x-pack/solutions/security/plugins/elastic_assistant/server/lib/prompt/prompts.ts @@ -0,0 +1,78 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +const YOU_ARE_A_HELPFUL_EXPERT_ASSISTANT = + 'You are a security analyst and expert in resolving security incidents. Your role is to assist by answering questions about Elastic Security.'; +const IF_YOU_DONT_KNOW_THE_ANSWER = 'Do not answer questions unrelated to Elastic Security.'; +export const KNOWLEDGE_HISTORY = + 'If available, use the Knowledge History provided to try and answer the question. 
If not provided, you can try and query for additional knowledge via the KnowledgeBaseRetrievalTool.'; + +export const DEFAULT_SYSTEM_PROMPT = `${YOU_ARE_A_HELPFUL_EXPERT_ASSISTANT} ${IF_YOU_DONT_KNOW_THE_ANSWER} ${KNOWLEDGE_HISTORY}`; +// system prompt from @afirstenberg +const BASE_GEMINI_PROMPT = + 'You are an assistant that is an expert at using tools and Elastic Security, doing your best to use these tools to answer questions or follow instructions. It is very important to use tools to answer the question or follow the instructions rather than coming up with your own answer. Tool calls are good. Sometimes you may need to make several tool calls to accomplish the task or get an answer to the question that was asked. Use as many tool calls as necessary.'; +const KB_CATCH = + 'If the knowledge base tool gives empty results, do your best to answer the question from the perspective of an expert security analyst.'; +export const GEMINI_SYSTEM_PROMPT = `${BASE_GEMINI_PROMPT} ${KB_CATCH}`; +export const BEDROCK_SYSTEM_PROMPT = `Use tools as often as possible, as they have access to the latest data and syntax. Always return value from NaturalLanguageESQLTool as is. Never return <thinking> tags in the response, but make sure to include <result> tags content in the response. Do not reflect on the quality of the returned search results in your response.`; +export const GEMINI_USER_PROMPT = `Now, always using the tools at your disposal, step by step, come up with a response to this request:\n\n`; + +export const STRUCTURED_SYSTEM_PROMPT = `Respond to the human as helpfully and accurately as possible. ${KNOWLEDGE_HISTORY} You have access to the following tools: + +{tools} + +The tool action_input should ALWAYS follow the tool JSON schema args. + +Valid "action" values: "Final Answer" or {tool_names} + +Use a json blob to specify a tool by providing an action key (tool name) and an action_input key (tool input strictly adhering to the tool JSON schema args).
+ +Provide only ONE action per $JSON_BLOB, as shown: + +\`\`\` + +{{ + + "action": $TOOL_NAME, + + "action_input": $TOOL_INPUT + +}} + +\`\`\` + +Follow this format: + +Question: input question to answer + +Thought: consider previous and subsequent steps + +Action: + +\`\`\` + +$JSON_BLOB + +\`\`\` + +Observation: action result + +... (repeat Thought/Action/Observation N times) + +Thought: I know what to respond + +Action: + +\`\`\` + +{{ + + "action": "Final Answer", + + "action_input": "Final response to human"}} + +Begin! Reminder to ALWAYS respond with a valid json blob of a single action with no additional output. When using tools, ALWAYS input the expected JSON schema args. Your answer will be parsed as JSON, so never use double quotes within the output and instead use backticks. Single quotes may be used, such as apostrophes. Response format is Action:\`\`\`$JSON_BLOB\`\`\`then Observation`; diff --git a/x-pack/solutions/security/plugins/elastic_assistant/server/routes/chat/chat_complete_route.ts b/x-pack/solutions/security/plugins/elastic_assistant/server/routes/chat/chat_complete_route.ts index 56ccca59da220..bda4da8413120 100644 --- a/x-pack/solutions/security/plugins/elastic_assistant/server/routes/chat/chat_complete_route.ts +++ b/x-pack/solutions/security/plugins/elastic_assistant/server/routes/chat/chat_complete_route.ts @@ -106,6 +106,7 @@ export const chatCompleteRoute = ( const connector = connectors.length > 0 ? connectors[0] : undefined; actionTypeId = connector?.actionTypeId ?? '.gen-ai'; const isOssModel = isOpenSourceModel(connector); + const savedObjectsClient = ctx.elasticAssistant.savedObjectsClient; // replacements const anonymizationFieldsRes = @@ -221,6 +222,7 @@ export const chatCompleteRoute = ( response, telemetry, responseLanguage: request.body.responseLanguage, + savedObjectsClient, ...(productDocsAvailable ? 
{ llmTasks: ctx.elasticAssistant.llmTasks } : {}), }); } catch (err) { diff --git a/x-pack/solutions/security/plugins/elastic_assistant/server/routes/helpers.ts b/x-pack/solutions/security/plugins/elastic_assistant/server/routes/helpers.ts index 25d4ce1a2ec45..c66f0482661fa 100644 --- a/x-pack/solutions/security/plugins/elastic_assistant/server/routes/helpers.ts +++ b/x-pack/solutions/security/plugins/elastic_assistant/server/routes/helpers.ts @@ -12,6 +12,7 @@ import { KibanaRequest, KibanaResponseFactory, Logger, + SavedObjectsClientContract, } from '@kbn/core/server'; import { StreamResponseWithHeaders } from '@kbn/ml-response-stream/server'; @@ -235,6 +236,7 @@ export interface LangChainExecuteParams { getElser: GetElser; response: KibanaResponseFactory; responseLanguage?: string; + savedObjectsClient: SavedObjectsClientContract; systemPrompt?: string; } export const langChainExecute = async ({ @@ -258,6 +260,7 @@ export const langChainExecute = async ({ response, responseLanguage, isStream = true, + savedObjectsClient, systemPrompt, }: LangChainExecuteParams) => { // Fetch any tools registered by the request's originating plugin @@ -316,6 +319,7 @@ export const langChainExecute = async ({ request, replacements, responseLanguage, + savedObjectsClient, size: request.body.size, systemPrompt, telemetry, diff --git a/x-pack/solutions/security/plugins/elastic_assistant/server/routes/post_actions_connector_execute.ts b/x-pack/solutions/security/plugins/elastic_assistant/server/routes/post_actions_connector_execute.ts index 59dd35bc0f61f..8cd6dd24c24c7 100644 --- a/x-pack/solutions/security/plugins/elastic_assistant/server/routes/post_actions_connector_execute.ts +++ b/x-pack/solutions/security/plugins/elastic_assistant/server/routes/post_actions_connector_execute.ts @@ -99,6 +99,7 @@ export const postActionsConnectorExecuteRoute = ( // get the actions plugin start contract from the request context: const actions = ctx.elasticAssistant.actions; const inference = 
ctx.elasticAssistant.inference; + const savedObjectsClient = ctx.elasticAssistant.savedObjectsClient; const productDocsAvailable = (await ctx.elasticAssistant.llmTasks.retrieveDocumentationAvailable()) ?? false; const actionsClient = await actions.getActionsClientWithRequest(request); @@ -153,6 +154,7 @@ export const postActionsConnectorExecuteRoute = ( request, response, telemetry, + savedObjectsClient, systemPrompt, ...(productDocsAvailable ? { llmTasks: ctx.elasticAssistant.llmTasks } : {}), }); diff --git a/x-pack/solutions/security/plugins/elastic_assistant/server/routes/request_context_factory.ts b/x-pack/solutions/security/plugins/elastic_assistant/server/routes/request_context_factory.ts index 08ef1bb1828d6..7580fc8c0989d 100644 --- a/x-pack/solutions/security/plugins/elastic_assistant/server/routes/request_context_factory.ts +++ b/x-pack/solutions/security/plugins/elastic_assistant/server/routes/request_context_factory.ts @@ -80,7 +80,7 @@ export class RequestContextFactory implements IRequestContextFactory { }, llmTasks: startPlugins.llmTasks, inference: startPlugins.inference, - + savedObjectsClient: coreStart.savedObjects.getScopedClient(request), telemetry: core.analytics, // Note: modelIdOverride is used here to enable setting up the KB using a different ELSER model, which diff --git a/x-pack/solutions/security/plugins/elastic_assistant/server/types.ts b/x-pack/solutions/security/plugins/elastic_assistant/server/types.ts index 1102d4b7b8441..53d12e014ff94 100755 --- a/x-pack/solutions/security/plugins/elastic_assistant/server/types.ts +++ b/x-pack/solutions/security/plugins/elastic_assistant/server/types.ts @@ -9,7 +9,7 @@ import type { PluginSetupContract as ActionsPluginSetup, PluginStartContract as ActionsPluginStart, } from '@kbn/actions-plugin/server'; -import type { +import { AuthenticatedUser, CoreRequestHandlerContext, CoreSetup, @@ -19,6 +19,7 @@ import type { KibanaRequest, Logger, AuditLogger, + SavedObjectsClientContract, } from 
'@kbn/core/server'; import type { LlmTasksPluginStart } from '@kbn/llm-tasks-plugin/server'; import { type MlPluginSetup } from '@kbn/ml-plugin/server'; @@ -140,6 +141,7 @@ export interface ElasticAssistantApiRequestHandlerContext { getAIAssistantAnonymizationFieldsDataClient: () => Promise; llmTasks: LlmTasksPluginStart; inference: InferenceServerStart; + savedObjectsClient: SavedObjectsClientContract; telemetry: AnalyticsServiceSetup; } /**