11
11
SHORT_RECOMMENDATION_TEMPLATE ,
12
12
GENERIC_LONG_RECOMMENDATION_TEMPLATE ,
13
13
SEARCH_TERMS_TEMPLATE ,
14
- CONVERT_DICT_TO_STR_TEMPLATE , META_PROMPT_GENERATOR_TEMPLATE , LONG_RECOMMENDATION_TEMPLATE
14
+ CONVERT_DICT_TO_STR_TEMPLATE ,
15
+ META_PROMPT_GENERATOR_TEMPLATE ,
16
+ LONG_RECOMMENDATION_TEMPLATE ,
15
17
)
16
18
from utils .text_tools import clean
17
19
20
+ from config import config
21
+
18
22
import logging
23
+
19
24
logger = logging .getLogger (__name__ )
20
25
21
26
22
27
class OpenAIService(BaseLLMService):
    """LLM service backed by the OpenAI chat-completions API."""

    def __init__(self, api_key: str = config.openai_api_key, model: str = "gpt-4o"):
        """Configure the global OpenAI client with *api_key* and remember *model*.

        Raises:
            ValueError: if no key was passed and none is configured.
        """
        # Fail fast: without a key, every subsequent API call would fail anyway.
        if api_key is None:
            raise ValueError(
                "API key not provided and OPENAI_API_KEY environment variable not set."
            )
        openai.api_key = api_key
        self.model = model
28
35
@@ -34,25 +41,28 @@ def get_url(self) -> str:
34
41
35
42
def generate(self, prompt: str) -> Dict[str, str]:
    """Send *prompt* as a single user message and wrap the reply text in a dict."""
    completion = openai.chat.completions.create(
        model=self.model, messages=[{"role": "user", "content": prompt}]
    )
    reply = completion.choices[0].message.content
    return {"response": reply}
42
48
43
def classify_kind(
    self, finding: Finding, options: Optional[List[FindingKind]] = None
) -> FindingKind:
    """Ask the LLM to pick the FindingKind that best matches *finding*.

    Args:
        finding: the finding to classify (stringified into the prompt).
        options: candidate kinds to offer; defaults to every FindingKind.

    Returns:
        The chosen kind, or FindingKind.DEFAULT when the model gives no
        usable answer.
    """
    if options is None:
        options = list(FindingKind)

    options_str = ", ".join(kind.name for kind in options)
    prompt = CLASSIFY_KIND_TEMPLATE.format(options=options_str, data=str(finding))
    response = self.generate(prompt)
    if "response" not in response:
        logger.warning(f"Failed to classify the finding: {finding.title}")
        return FindingKind.DEFAULT
    try:
        return FindingKind[response["response"].strip()]
    except KeyError:
        # The model may answer with text outside the offered enum names;
        # treat that like a failed classification instead of crashing.
        logger.warning(f"Unrecognized classification for the finding: {finding.title}")
        return FindingKind.DEFAULT
54
62
55
- def get_recommendation (self , finding : Finding , short : bool = True ) -> Union [str , List [str ]]:
63
+ def get_recommendation (
64
+ self , finding : Finding , short : bool = True
65
+ ) -> Union [str , List [str ]]:
56
66
if short :
57
67
prompt = SHORT_RECOMMENDATION_TEMPLATE .format (data = str (finding ))
58
68
else : # long recommendation
@@ -62,45 +72,55 @@ def get_recommendation(self, finding: Finding, short: bool = True) -> Union[str,
62
72
else :
63
73
prompt = GENERIC_LONG_RECOMMENDATION_TEMPLATE
64
74
65
- finding .solution .add_to_metadata (f"prompt_{ 'short' if short else 'long' } " , prompt )
75
+ finding .solution .add_to_metadata (
76
+ f"prompt_{ 'short' if short else 'long' } " , prompt
77
+ )
66
78
response = self .generate (prompt )
67
79
68
- if ' response' not in response :
80
+ if " response" not in response :
69
81
logger .warning (
70
- f"Failed to generate a { 'short' if short else 'long' } recommendation for the finding: { finding .title } " )
71
- return '' if short else ['' ]
82
+ f"Failed to generate a { 'short' if short else 'long' } recommendation for the finding: { finding .title } "
83
+ )
84
+ return "" if short else ["" ]
72
85
73
- return clean (response [' response' ], llm_service = self )
86
+ return clean (response [" response" ], llm_service = self )
74
87
75
88
def _generate_prompt_with_meta_prompts(self, finding: Finding) -> str:
    """Build the long-recommendation prompt via a meta-prompt round trip.

    First asks the LLM to produce meta-prompts for *finding*, then embeds
    the cleaned result into LONG_RECOMMENDATION_TEMPLATE.
    """
    short_recommendation = finding.solution.short_description

    generator_prompt = META_PROMPT_GENERATOR_TEMPLATE.format(finding=str(finding))
    raw = self.generate(generator_prompt)
    meta_prompts = clean(raw.get("response", ""), llm_service=self)

    long_prompt = LONG_RECOMMENDATION_TEMPLATE.format(meta_prompts=meta_prompts)

    # Record how the prompt was assembled so the result can be audited later.
    finding.solution.add_to_metadata(
        "prompt_long_breakdown",
        {
            "short_recommendation": short_recommendation,
            "meta_prompts": meta_prompts,
        },
    )

    return long_prompt
91
107
92
108
def get_search_terms(self, finding: Finding) -> str:
    """Return cleaned, LLM-suggested search terms for *finding* ("" on failure)."""
    response = self.generate(SEARCH_TERMS_TEMPLATE.format(data=str(finding)))
    if "response" not in response:
        logger.warning(
            f"Failed to generate search terms for the finding: {finding.title}"
        )
        return ""
    return clean(response["response"], llm_service=self)
99
117
100
118
def convert_dict_to_str(self, data) -> str:
    """Render *data* (a JSON-serializable dict) as prose via the LLM.

    Falls back to ``str(data)`` when the model returns nothing usable.
    """
    prompt = CONVERT_DICT_TO_STR_TEMPLATE.format(data=json.dumps(data))
    response = self.generate(prompt)
    if "response" not in response:
        # Plain string literal: the original used an f-string with no
        # placeholders (spurious `f` prefix); message text unchanged.
        logger.info(
            "Failed to convert dictionary to string, returning it as str conversion."
        )
        return str(data)
    return clean(response["response"], llm_service=self)
0 commit comments