From 8a270c32a54544c5a9b480ef234e94ddc6b7c7f8 Mon Sep 17 00:00:00 2001
From: Daniel McKnight
Date: Tue, 3 Dec 2024 10:16:03 -0800
Subject: [PATCH] Refactor static strings into `constants` module

Update tests to reference constant strings for more specific testing
---
 neon_llm_core/chatbot.py         |  7 ++++---
 neon_llm_core/utils/constants.py |  2 ++
 tests/test_chatbot.py            | 17 ++++++++++-------
 3 files changed, 16 insertions(+), 10 deletions(-)

diff --git a/neon_llm_core/chatbot.py b/neon_llm_core/chatbot.py
index d1cbaa9..dac4f9f 100644
--- a/neon_llm_core/chatbot.py
+++ b/neon_llm_core/chatbot.py
@@ -34,6 +34,7 @@
 from neon_data_models.models.api.llm import LLMPersona
 
 from neon_llm_core.utils.config import LLMMQConfig
+from neon_llm_core.utils.constants import DEFAULT_RESPONSE, DEFAULT_VOTE
 
 
 class LLMBot(ChatBot):
@@ -68,7 +69,7 @@ def ask_chatbot(self, user: str, shout: str, timestamp: str,
         self.prompt_id_to_shout[prompt_id] = shout
         LOG.debug(f"Getting response to {shout}")
         response = self._get_llm_api_response(shout=shout)
-        return response.response if response else "I have nothing to say here..."
+        return response.response if response else DEFAULT_RESPONSE
 
     def ask_discusser(self, options: dict, context: dict = None) -> str:
         """
@@ -85,7 +86,7 @@ def ask_discusser(self, options: dict, context: dict = None) -> str:
         LOG.info(f'prompt_sentence={prompt_sentence}, options={options}')
         opinion = self._get_llm_api_opinion(prompt=prompt_sentence,
                                             options=options)
-        return opinion.opinion if opinion else "I have nothing to say here..."
+        return opinion.opinion if opinion else DEFAULT_RESPONSE
 
     def ask_appraiser(self, options: dict, context: dict = None) -> str:
         """
@@ -112,7 +113,7 @@ def ask_appraiser(self, options: dict, context: dict = None) -> str:
         LOG.info(f'Received answer_data={answer_data}')
         if answer_data and answer_data.sorted_answer_indexes:
             return bots[answer_data.sorted_answer_indexes[0]]
-        return "abstain"
+        return DEFAULT_VOTE
 
     def _get_llm_api_response(self, shout: str) -> Optional[LLMProposeResponse]:
         """
diff --git a/neon_llm_core/utils/constants.py b/neon_llm_core/utils/constants.py
index 7acff7a..80d6263 100644
--- a/neon_llm_core/utils/constants.py
+++ b/neon_llm_core/utils/constants.py
@@ -25,3 +25,5 @@
 # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 LLM_VHOST = '/llm'
+DEFAULT_RESPONSE = "I have nothing to say here..."
+DEFAULT_VOTE = "abstain"
diff --git a/tests/test_chatbot.py b/tests/test_chatbot.py
index 08ddaa7..7e7ccbb 100644
--- a/tests/test_chatbot.py
+++ b/tests/test_chatbot.py
@@ -90,10 +90,12 @@ def test_ask_chatbot(self, get_api_response):
 
         # Invalid response
         get_api_response.return_value = None
-        self.assertIsInstance(self.mock_chatbot.ask_chatbot(valid_user,
-                                                            valid_shout,
-                                                            valid_timestamp,
-                                                            valid_context), str)
+        from neon_llm_core.utils.constants import DEFAULT_RESPONSE
+        self.assertEqual(self.mock_chatbot.ask_chatbot(valid_user,
+                                                       valid_shout,
+                                                       valid_timestamp,
+                                                       valid_context),
+                         DEFAULT_RESPONSE)
         get_api_response.assert_called_with(shout=valid_shout)
 
     @patch.object(mock_chatbot, '_get_llm_api_opinion')
@@ -115,9 +117,10 @@ def test_ask_discusser(self, get_api_opinion):
 
         # Invalid response
         get_api_opinion.return_value = None
-        self.assertIsInstance(self.mock_chatbot.ask_discusser(valid_options,
-                                                              valid_context),
-                              str)
+        from neon_llm_core.utils.constants import DEFAULT_RESPONSE
+        self.assertEqual(self.mock_chatbot.ask_discusser(valid_options,
+                                                         valid_context),
+                         DEFAULT_RESPONSE)
         get_api_opinion.assert_called_with(prompt=valid_prompt,
                                            options=valid_options)
 
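
A minimal usage sketch of the new constants, mirroring the fallback pattern in LLMBot above. The `fallback_reply` helper and `FakeResponse` class here are hypothetical, for illustration only; the constant names and module path are taken from the patch:

    from dataclasses import dataclass

    from neon_llm_core.utils.constants import DEFAULT_RESPONSE, DEFAULT_VOTE

    @dataclass
    class FakeResponse:
        response: str

    def fallback_reply(api_response):
        # Prefer the API result when one exists; otherwise fall back to the
        # shared constant rather than repeating a string literal inline.
        return api_response.response if api_response else DEFAULT_RESPONSE

    assert fallback_reply(FakeResponse("hello")) == "hello"
    assert fallback_reply(None) == DEFAULT_RESPONSE
    assert DEFAULT_VOTE == "abstain"

Centralizing the literals this way lets the tests assert exact equality against the same constants the implementation uses, instead of only checking the return type.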