Skip to content

Commit

Permalink
Refactor static strings into constants module
Browse files Browse the repository at this point in the history
Update tests to reference constant strings for more specific testing
  • Loading branch information
NeonDaniel committed Dec 3, 2024
1 parent 415d607 commit 8a270c3
Show file tree
Hide file tree
Showing 3 changed files with 16 additions and 10 deletions.
7 changes: 4 additions & 3 deletions neon_llm_core/chatbot.py
Original file line number Diff line number Diff line change
Expand Up @@ -34,6 +34,7 @@
from neon_data_models.models.api.llm import LLMPersona

from neon_llm_core.utils.config import LLMMQConfig
from neon_llm_core.utils.constants import DEFAULT_RESPONSE, DEFAULT_VOTE


class LLMBot(ChatBot):
Expand Down Expand Up @@ -68,7 +69,7 @@ def ask_chatbot(self, user: str, shout: str, timestamp: str,
self.prompt_id_to_shout[prompt_id] = shout
LOG.debug(f"Getting response to {shout}")
response = self._get_llm_api_response(shout=shout)
return response.response if response else "I have nothing to say here..."
return response.response if response else DEFAULT_RESPONSE

def ask_discusser(self, options: dict, context: dict = None) -> str:
"""
Expand All @@ -85,7 +86,7 @@ def ask_discusser(self, options: dict, context: dict = None) -> str:
LOG.info(f'prompt_sentence={prompt_sentence}, options={options}')
opinion = self._get_llm_api_opinion(prompt=prompt_sentence,
options=options)
return opinion.opinion if opinion else "I have nothing to say here..."
return opinion.opinion if opinion else DEFAULT_RESPONSE

def ask_appraiser(self, options: dict, context: dict = None) -> str:
"""
Expand All @@ -112,7 +113,7 @@ def ask_appraiser(self, options: dict, context: dict = None) -> str:
LOG.info(f'Received answer_data={answer_data}')
if answer_data and answer_data.sorted_answer_indexes:
return bots[answer_data.sorted_answer_indexes[0]]
return "abstain"
return DEFAULT_VOTE

def _get_llm_api_response(self, shout: str) -> Optional[LLMProposeResponse]:
"""
Expand Down
2 changes: 2 additions & 0 deletions neon_llm_core/utils/constants.py
Original file line number Diff line number Diff line change
Expand Up @@ -25,3 +25,5 @@
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

# MQ virtual host used by the LLM services (presumably the RabbitMQ vhost — confirm against broker config)
LLM_VHOST = '/llm'
# Fallback reply returned by ask_chatbot/ask_discusser when the LLM API yields no response
DEFAULT_RESPONSE = "I have nothing to say here..."
# Vote returned by ask_appraiser when no answer can be selected from the candidates
DEFAULT_VOTE = "abstain"
17 changes: 10 additions & 7 deletions tests/test_chatbot.py
Original file line number Diff line number Diff line change
Expand Up @@ -90,10 +90,12 @@ def test_ask_chatbot(self, get_api_response):

# Invalid response
get_api_response.return_value = None
self.assertIsInstance(self.mock_chatbot.ask_chatbot(valid_user,
valid_shout,
valid_timestamp,
valid_context), str)
from neon_llm_core.utils.constants import DEFAULT_RESPONSE
self.assertEqual(self.mock_chatbot.ask_chatbot(valid_user,
valid_shout,
valid_timestamp,
valid_context),
DEFAULT_RESPONSE)
get_api_response.assert_called_with(shout=valid_shout)

@patch.object(mock_chatbot, '_get_llm_api_opinion')
Expand All @@ -115,9 +117,10 @@ def test_ask_discusser(self, get_api_opinion):

# Invalid response
get_api_opinion.return_value = None
self.assertIsInstance(self.mock_chatbot.ask_discusser(valid_options,
valid_context),
str)
from neon_llm_core.utils.constants import DEFAULT_RESPONSE
self.assertEqual(self.mock_chatbot.ask_discusser(valid_options,
valid_context),
DEFAULT_RESPONSE)
get_api_opinion.assert_called_with(prompt=valid_prompt,
options=valid_options)

Expand Down

0 comments on commit 8a270c3

Please sign in to comment.