1
1
import json
import re
from enum import Enum
from typing import Dict, List, Optional, Tuple, Union

from ai.LLM.LLMServiceMixin import LLMServiceMixin
from ai.LLM.Strategies.openai_prompts import (
    OPENAI_CLASSIFY_KIND_TEMPLATE,
    OPENAI_SHORT_RECOMMENDATION_TEMPLATE,
    OPENAI_GENERIC_LONG_RECOMMENDATION_TEMPLATE,
    OPENAI_SEARCH_TERMS_TEMPLATE,
    OPENAI_META_PROMPT_GENERATOR_TEMPLATE,
    OPENAI_LONG_RECOMMENDATION_TEMPLATE,
    OPENAI_COMBINE_DESCRIPTIONS_TEMPLATE,
    OPENAI_AGGREGATED_SOLUTION_TEMPLATE,
    OPENAI_SUBDIVISION_PROMPT_TEMPLATE,
)
from data.Finding import Finding
from utils.text_tools import clean
from config import config
@@ -70,7 +73,7 @@ def get_url(self) -> str:
70
73
"""Get the URL for the Anthropic API (placeholder method)."""
71
74
return "-"
72
75
73
def _generate(self, prompt: str, json=False) -> Dict[str, str]:
    """
    Generate a response from the Anthropic API for the given prompt.

    Args:
        prompt: The user prompt to send to the model.
        json: When True, prime the assistant turn with an opening brace so
            the model completes a raw JSON object; the brace is re-attached
            to the returned text. NOTE(review): this parameter shadows the
            stdlib ``json`` module — harmless only because the module is
            not used inside this method; callers pass it by keyword, so it
            cannot be renamed without a deprecation path.

    Returns:
        Dict[str, str]: ``{"response": <generated text>}`` on success, or
        the mapping produced by ``handle_api_error`` on failure.
    """
    conversation = [{"role": "user", "content": prompt}]
    if json:
        # Anthropic "prefill": seeding the assistant message nudges the
        # model to continue the JSON object instead of adding prose.
        conversation.append(
            {"role": "assistant", "content": "Here is the JSON requested:\n {"}
        )
    try:
        reply = self.client.messages.create(
            max_tokens=1024,
            messages=conversation,
            model=self.model,
        )
        text = reply.content[0].text
        if json:
            # Re-attach the opening brace consumed by the prefill.
            text = "{" + text
        return {"response": text}
    except Exception as e:
        return self.handle_api_error(e)
93
101
94
102
def _get_classification_prompt(self, options: str, field_name: str, finding_str: str) -> str:
    """Build the prompt that asks the model to classify ``field_name``
    for the given finding, choosing among ``options``."""
    return OPENAI_CLASSIFY_KIND_TEMPLATE.format(
        options=options,
        field_name=field_name,
        data=finding_str,
    )
97
105
98
106
def _get_recommendation_prompt(self, finding: Finding, short: bool) -> str:
    """Select the recommendation prompt for a finding.

    Short requests get the short template; long requests use meta-prompting
    when a short solution already exists, otherwise the generic template.
    """
    if short:
        return OPENAI_SHORT_RECOMMENDATION_TEMPLATE.format(data=str(finding))

    has_short_solution = bool(finding.solution and finding.solution.short_description)
    if not has_short_solution:
        return OPENAI_GENERIC_LONG_RECOMMENDATION_TEMPLATE

    # Record that meta-prompting drove this long recommendation.
    finding.solution.add_to_metadata("used_meta_prompt", True)
    return self._generate_prompt_with_meta_prompts(finding)
107
115
108
116
def _process_recommendation_response (self , response : Dict [str , str ], finding : Finding , short : bool ) -> Union [
109
117
str , List [str ]]:
@@ -117,11 +125,11 @@ def _process_recommendation_response(self, response: Dict[str, str], finding: Fi
117
125
def _generate_prompt_with_meta_prompts (self , finding : Finding ) -> str :
118
126
"""Generate a prompt with meta-prompts for long recommendations."""
119
127
short_recommendation = finding .solution .short_description
120
- meta_prompt_generator = META_PROMPT_GENERATOR_TEMPLATE .format (finding = str (finding ))
128
+ meta_prompt_generator = OPENAI_META_PROMPT_GENERATOR_TEMPLATE .format (finding = str (finding ))
121
129
meta_prompt_response = self .generate (meta_prompt_generator )
122
130
meta_prompts = clean (meta_prompt_response .get ("response" , "" ), llm_service = self )
123
131
124
- long_prompt = LONG_RECOMMENDATION_TEMPLATE .format (meta_prompts = meta_prompts )
132
+ long_prompt = OPENAI_LONG_RECOMMENDATION_TEMPLATE .format (meta_prompts = meta_prompts )
125
133
126
134
finding .solution .add_to_metadata (
127
135
"prompt_long_breakdown" ,
@@ -135,7 +143,7 @@ def _generate_prompt_with_meta_prompts(self, finding: Finding) -> str:
135
143
136
144
def _get_search_terms_prompt(self, finding: Finding) -> str:
    """Build the prompt asking the model for search terms for ``finding``."""
    serialized = str(finding)
    return OPENAI_SEARCH_TERMS_TEMPLATE.format(data=serialized)
139
147
140
148
def _process_search_terms_response (self , response : Dict [str , str ], finding : Finding ) -> str :
141
149
"""Process the search terms response from Anthropic."""
@@ -144,6 +152,59 @@ def _process_search_terms_response(self, response: Dict[str, str], finding: Find
144
152
return ""
145
153
return clean (response ["response" ], llm_service = self )
146
154
155
def _get_subdivision_prompt(self, findings: List[Finding]) -> str:
    """Build the prompt asking the model to subdivide findings into
    related groups (indices in the prompt are 1-based)."""
    serialized = self._get_findings_str_for_aggregation(findings)
    return OPENAI_SUBDIVISION_PROMPT_TEMPLATE.format(data=serialized)
158
+
159
+ def _process_subdivision_response (self , response : Dict [str , str ], findings : List [Finding ]) -> List [Tuple [List [Finding ], Dict ]]:
160
+ if "response" not in response :
161
+ logger .warning ("Failed to subdivide findings" )
162
+ return [(findings , {})] # Return all findings as a single group if subdivision fails
163
+
164
+ try :
165
+ response = response ["response" ]
166
+ # remove prefix ```json and suffix ```
167
+ response = re .sub (r'^```json' , '' , response )
168
+ response = re .sub (r'```$' , '' , response )
169
+ subdivisions = json .loads (response )["subdivisions" ]
170
+ except json .JSONDecodeError :
171
+ logger .error ("Failed to parse JSON response" )
172
+ return [(findings , {})]
173
+ except KeyError :
174
+ logger .error ("Unexpected JSON structure in response" )
175
+ return [(findings , {})]
176
+
177
+ result = []
178
+ for subdivision in subdivisions :
179
+ try :
180
+ group_indices = [int (i .strip ()) - 1 for i in subdivision ["group" ].split (',' )]
181
+ group = [findings [i ] for i in group_indices if i < len (findings )]
182
+ meta_info = {"reason" : subdivision .get ("reason" , "" )}
183
+ if len (group ) == 1 :
184
+ continue # Skip single-element groups for *aggregated* solutions
185
+ result .append ((group , meta_info ))
186
+ except ValueError :
187
+ logger .error (f"Failed to parse group indices: { subdivision ['group' ]} " )
188
+ continue
189
+ except KeyError :
190
+ logger .error ("Unexpected subdivision structure" )
191
+ continue
192
+
193
+ return result
194
+
195
def _get_aggregated_solution_prompt(self, findings: List[Finding], meta_info: Dict) -> str:
    """Build the prompt for one aggregated solution spanning ``findings``,
    passing along the model's grouping reason from ``meta_info``."""
    serialized = self._get_findings_str_for_aggregation(findings, details=True)
    reason = meta_info.get("reason", "")
    return OPENAI_AGGREGATED_SOLUTION_TEMPLATE.format(data=serialized, meta_info=reason)
201
+
202
def _process_aggregated_solution_response(self, response: Dict[str, str]) -> str:
    """Return the cleaned aggregated-solution text, or an empty string
    when the model call produced no 'response' key."""
    if "response" not in response:
        logger.warning("Failed to generate an aggregated solution")
        return ""
    solution_text = response["response"]
    return clean(solution_text, llm_service=self)
207
+
147
208
def convert_dict_to_str (self , data : Dict ) -> str :
148
209
"""
149
210
Convert a dictionary to a string representation.
@@ -171,7 +232,7 @@ def combine_descriptions(self, descriptions: List[str]) -> str:
171
232
if len (descriptions ) <= 1 :
172
233
return descriptions [0 ] if descriptions else ""
173
234
174
- prompt = COMBINE_DESCRIPTIONS_TEMPLATE .format (data = descriptions )
235
+ prompt = OPENAI_COMBINE_DESCRIPTIONS_TEMPLATE .format (data = descriptions )
175
236
176
237
response = self .generate (prompt )
177
238
if "response" not in response :
0 commit comments