From 5bdc38269fd8cc657e55cb84892b8c3475adfcc5 Mon Sep 17 00:00:00 2001
From: JarbasAi
Date: Sat, 30 Dec 2023 00:36:48 +0000
Subject: [PATCH 1/3] requirements/latest_stable

---
 __init__.py      | 2 +-
 requirements.txt | 6 +++---
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/__init__.py b/__init__.py
index 38064f7..c59816a 100644
--- a/__init__.py
+++ b/__init__.py
@@ -21,9 +21,9 @@
 from ovos_plugin_manager.templates.solvers import QuestionSolver
 from ovos_utils import classproperty
 from ovos_utils.gui import can_use_gui
-from ovos_utils.intents import IntentBuilder
 from ovos_utils.process_utils import RuntimeRequirements
 from ovos_workshop.decorators import intent_handler
+from ovos_workshop.intents import IntentBuilder
 from ovos_workshop.skills.common_query_skill import CommonQuerySkill, CQSMatchLevel
 
 
diff --git a/requirements.txt b/requirements.txt
index 824a6cd..fff01f5 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,5 +1,5 @@
 ovos-translate-server-plugin
-ovos-config>=0.0.11a10
-ovos-utils~=0.0, >=0.0.28a4
-ovos_workshop~=0.0, >=0.0.11a4
+ovos-config>=0.0.12
+ovos-utils~=0.0, >=0.0.38
+ovos_workshop~=0.0, >=0.0.15
 ovos-plugin-manager
\ No newline at end of file

From 9d6540beb180e6484e19b6c28f7d6224cff6cfe1 Mon Sep 17 00:00:00 2001
From: JarbasAi
Date: Fri, 12 Jan 2024 06:33:02 +0000
Subject: [PATCH 2/3] refactor/performance

- reuse the solver object, avoiding re-initialization of the translate plugin
- switch to the spoken answers API, which is more natural and faster
- skip translation when the request language is English
- add session support
---
 __init__.py | 144 +++++++++++++++++++----------------------------------
 1 file changed, 52 insertions(+), 92 deletions(-)

diff --git a/__init__.py b/__init__.py
index c59816a..58d0d8f 100644
--- a/__init__.py
+++ b/__init__.py
@@ -17,13 +17,13 @@
 import requests
 from ovos_backend_client.api import WolframAlphaApi as _WA
 from ovos_bus_client import Message
+from ovos_bus_client.session import SessionManager
 from ovos_config import Configuration
 from ovos_plugin_manager.templates.solvers import QuestionSolver
 from ovos_utils import classproperty
 from ovos_utils.gui import can_use_gui
 from ovos_utils.process_utils import RuntimeRequirements
 from ovos_workshop.decorators import intent_handler
-from ovos_workshop.intents import IntentBuilder
 from ovos_workshop.skills.common_query_skill import CommonQuerySkill, CQSMatchLevel
 
 
@@ -211,45 +211,11 @@ def get_expanded_answer(self, query, context=None):
 class WolframAlphaSkill(CommonQuerySkill):
     def __init__(self, *args, **kwargs):
         super().__init__(*args, **kwargs)
-        # continuous dialog, "tell me more"
-        self.idx = 0
-        self.last_query = None
-        self.results = []
-
-        # answer processing options
-        self.skip_images = True  # some wolfram results are pictures with no speech
-        # if a gui is available the title is read and image displayed
-
-        # These results are usually unwanted as spoken responses
-        # they are either spammy or cant be handled by TTS properly
-        self.skips = [
-            # quantities, eg speed of light
-            'Comparison',  # spammy
-            'Corresponding quantities',  # spammy
-            'Basic unit dimensions',  # TTS will fail hard 99% of time
-            # when asking about word definitions
-            'American pronunciation',  # can not pronounce IPA phonemes
-            'Translations',  # TTS wont handle other langs or charsets
-            'Hyphenation',  # spammy
-            'Anagrams',  # spammy
-            'Lexically close words',  # spammy
-            'Overall typical frequency',  # spammy
-            'Crossword puzzle clues',  # spammy
-            'Scrabble score',  # spammy
-            'Other notable uses'  # spammy
-        ]
-
-    @property
-    def wolfie(self):
-        # property to allow api key changes in config
-        try:
-            return WolframAlphaSolver({
-                "units": self.config_core['system_unit'],
-                "appid": self.settings.get("api_key")
-            })
-        except Exception as err:
-            self.log.error("WolframAlphaSkill failed to initialize: %s", err)
-            return None
+        self.session_results = {}  # session_id -> {"phrase", "image", "spoken_answer"}
+        self.wolfie = WolframAlphaSolver({
+            "units": self.config_core['system_unit'],
+            "appid": self.settings.get("api_key")
+        })
 
     @classproperty
     def runtime_requirements(self):
@@ -267,28 +233,33 @@ def runtime_requirements(self):
     @intent_handler("search_wolfie.intent")
     def handle_search(self, message: Message):
         query = message.data["query"]
+        sess = SessionManager.get(message)
+        self.session_results[sess.session_id] = {"phrase": query,
+                                                 "image": None,
+                                                 "spoken_answer": ""}
         response = self.ask_the_wolf(query)
         if response:
-            self.speak_result()
+            self.session_results[sess.session_id]["spoken_answer"] = response
+            self.speak(response)
         else:
             self.speak_dialog("no_answer")
 
-    @intent_handler(IntentBuilder("WolfieMore").require("More").
-                    require("WolfieKnows"))
-    def handle_tell_more(self, message: Message):
-        """ Follow up query handler, "tell me more"."""
-        self.speak_result()
-
     # common query integration
     def CQS_match_query_phrase(self, phrase: str):
         self.log.debug("WolframAlpha query: " + phrase)
         if self.wolfie is None:
             self.log.error("WolframAlphaSkill not initialized, no response")
             return
+
+        sess = SessionManager.get()
+        self.session_results[sess.session_id] = {"phrase": phrase,
+                                                 "image": None,
+                                                 "spoken_answer": None}
+
         response = self.ask_the_wolf(phrase)
         if response:
-            self.idx += 1  # spoken by common query framework
-            self.log.debug("WolframAlpha response: %s", response)
+            self.session_results[sess.session_id]["spoken_answer"] = response
+            self.log.debug(f"WolframAlpha response: {response}")
             return (phrase, CQSMatchLevel.GENERAL, response,
                     {'query': phrase, 'answer': response})
 
@@ -297,60 +268,49 @@ def CQS_action(self, phrase: str, data: dict):
         self.display_wolfie()
 
     # wolfram integration
-    def ask_the_wolf(self, query: str):
-        # context for follow up questions
-        self.set_context("WolfieKnows", query)
-        results = self.wolfie.long_answer(query,
-                                          context={"lang": self.lang})
-        self.idx = 0
-        self.last_query = query
-        self.results = [s for s in results if s.get("title") not in self.skips]
-        if len(self.results):
-            return self.results[0]["summary"]
-        self.log.debug("WolframAlpha had no answers for %s", query)
+    def ask_the_wolf(self, query: str, lang: str = None):
+        lang = lang or self.lang
+        if lang.startswith("en"):
+            self.log.debug(f"skipping auto translation for wolfram alpha, "
+                           f"{lang} is supported")
+            WolframAlphaSolver.enable_tx = False
+        else:
+            self.log.info(f"enabling auto translation for wolfram alpha, "
+                          f"{lang} is not supported internally")
+            WolframAlphaSolver.enable_tx = True
+        return self.wolfie.spoken_answer(query, context={"lang": lang})
 
     def display_wolfie(self):
         if not can_use_gui(self.bus):
             return
-        image = None
-        # issues can happen if skill reloads
-        # eg. "tell me more" -> invalid self.idx
-        if self.idx < len(self.results):
-            image = self.results[self.idx].get("img")
-        if self.last_query:
-            image = image or self.wolfie.visual_answer(self.last_query,
-                                                       context={"lang": self.lang})
+
+        # generate the image for this session's last query, but only after the
+        # skill has actually been selected, to keep the match step fast
+        sess = SessionManager.get()
+        res = self.session_results.get(sess.session_id)
+        if not res or not res["spoken_answer"]:
+            return
+
+        image = res.get("image") or self.wolfie.visual_answer(res["phrase"],
+                                                              context={"lang": self.lang})
         if image:
             self.gui["wolfram_image"] = image
             # scrollable full result page
-            self.gui.show_page(join(self.root_dir, "ui", "wolf.qml"), override_idle=45)
+            self.gui.show_page(join(self.root_dir, "ui", "wolf"), override_idle=45)
 
-    def speak_result(self):
-        if self.idx + 1 > len(self.results):
-            self.speak_dialog("thats all")
-            self.remove_context("WolfieKnows")
-            self.idx = 0
-        else:
-            if not self.results[self.idx].get("summary"):
-                if not self.skip_images and can_use_gui(self.bus):
-                    self.speak(self.results[self.idx]["title"])
-                    self.display_wolfie()
-                else:
-                    # skip image only result
-                    self.idx += 1
-                    self.speak_result()
-                    return
-            else:
-                self.display_wolfie()
-                # make it more speech friendly
-                ans = self.results[self.idx]["summary"]
-                ans = ans.replace(" | ", "; ")
-                self.speak(ans)
-                self.idx += 1
+    def stop_session(self, sess):
+        if sess.session_id in self.session_results:
+            self.session_results.pop(sess.session_id)
 
 
 if __name__ == "__main__":
-    d = WolframAlphaSolver()
+    from ovos_utils.fakebus import FakeBus
+
+    d = WolframAlphaSkill(bus=FakeBus(), skill_id="fake.wolf")
+
+    print(d.ask_the_wolf("what is the speed of light"))
+
+    exit()
 
     query = "who is Isaac Newton"
 
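
How the pieces of the second patch fit together: handle_search and
CQS_match_query_phrase now cache the spoken answer under the requesting
session's id, CQS_action/display_wolfie later read that cache and fetch the
visual answer only for the session whose reply was actually selected, and
stop_session evicts the entry when the session ends. Deferring
wolfie.visual_answer() until after selection keeps the common-query ranking
step fast. The snippet below is a minimal, dependency-free sketch of that
per-session cache pattern; SessionCache and SessionResult are illustrative
names invented for this sketch and are not part of the patch.

from dataclasses import dataclass
from typing import Dict, Optional


@dataclass
class SessionResult:
    # mirrors the dict stored in WolframAlphaSkill.session_results
    phrase: str
    spoken_answer: str = ""
    image: Optional[str] = None


class SessionCache:
    """One cached result per session, shared between the match and display steps."""

    def __init__(self) -> None:
        self._results: Dict[str, SessionResult] = {}

    def store(self, session_id: str, phrase: str, answer: str) -> None:
        # written when a query is answered (handle_search / CQS_match_query_phrase)
        self._results[session_id] = SessionResult(phrase=phrase, spoken_answer=answer)

    def get(self, session_id: str) -> Optional[SessionResult]:
        # read when the skill wins the query and the GUI result is rendered
        return self._results.get(session_id)

    def clear(self, session_id: str) -> None:
        # dropped when the session ends, like stop_session() in the patch
        self._results.pop(session_id, None)


if __name__ == "__main__":
    cache = SessionCache()
    cache.store("session-1", "what is the speed of light",
                "the speed of light is about 299792 kilometers per second")
    print(cache.get("session-1"))
    cache.clear("session-1")
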
From b8ee0763fc025035be2cb0c94ce5223469c37a20 Mon Sep 17 00:00:00 2001
From: JarbasAi
Date: Fri, 12 Jan 2024 06:37:21 +0000
Subject: [PATCH 3/3] refactor/performance

- reuse the solver object, avoiding re-initialization of the translate plugin
- switch to the spoken answers API, which is more natural and faster
- skip translation when the request language is English
- add session support
---
 test/unittests/test_continuous_dialog.py | 84 ------------------------
 1 file changed, 84 deletions(-)
 delete mode 100644 test/unittests/test_continuous_dialog.py

diff --git a/test/unittests/test_continuous_dialog.py b/test/unittests/test_continuous_dialog.py
deleted file mode 100644
index cb82705..0000000
--- a/test/unittests/test_continuous_dialog.py
+++ /dev/null
@@ -1,84 +0,0 @@
-import json
-import unittest
-from unittest.mock import Mock
-
-from ovos_utils.messagebus import FakeBus, Message
-from skill_ovos_wolfie import WolframAlphaSkill
-from time import sleep
-
-class TestDialog(unittest.TestCase):
-    def setUp(self):
-        self.bus = FakeBus()
-        self.bus.emitted_msgs = []
-
-        def get_msg(msg):
-            self.bus.emitted_msgs.append(json.loads(msg))
-
-        self.bus.on("message", get_msg)
-
-        self.skill = WolframAlphaSkill()
-        self.skill._startup(self.bus, "wolfie.test")
-        self.skill.wolfie.long_answer = Mock()
-        self.skill.wolfie.long_answer.return_value = [
-            {"title": f"title 1", "summary": f"this is the answer number 1"},
-            {"title": f"title 2", "summary": f"this is the answer number 2"}
-        ]
-        self.skill.has_context = False
-
-        def set_context(message):
-            self.skill.has_context = True
-
-        def unset_context(message):
-            self.skill.has_context = False
-
-        self.bus.on('add_context', set_context)
-        self.bus.on('remove_context', unset_context)
-
-    def test_continuous_dialog(self):
-        self.bus.emitted_msgs = []
-
-        # "ask the wolf X"
-        self.assertFalse(self.skill.has_context)
-        self.skill.handle_search(Message("search_wolfie.intent",
-                                         {"query": "what is the speed of light"}))
-        sleep(0.5)
-        self.assertEqual(self.bus.emitted_msgs[0],
-                         {'context': {'skill_id': 'wolfie.test'},
-                          'data': {'context': 'wolfie_testWolfieKnows',
-                                   'origin': '',
-                                   'word': 'what is the speed of light'},
-                          'type': 'add_context'})
-        self.assertEqual(self.bus.emitted_msgs[-1],
-                         {'context': {'skill_id': 'wolfie.test'},
-                          'data': {'expect_response': False,
-                                   'lang': 'en-us',
-                                   'meta': {'skill': 'wolfie.test'},
-                                   'utterance': 'this is the answer number 1'},
-                          'type': 'speak'})
-
-        # "tell me more"
-        self.assertTrue(self.skill.has_context)
-        self.skill.handle_tell_more(Message("WolfieMore"))
-        sleep(0.5)
-        self.assertEqual(self.bus.emitted_msgs[-1],
-                         {'context': {'skill_id': 'wolfie.test'},
-                          'data': {'expect_response': False,
-                                   'lang': 'en-us',
-                                   'meta': {'skill': 'wolfie.test'},
-                                   'utterance': 'this is the answer number 2'},
-                          'type': 'speak'})
-        self.assertTrue(self.skill.has_context)
-
-        # "tell me more" - no more data dialog
-        self.skill.handle_tell_more(Message("WolfieMore"))
-        sleep(0.5)
-        self.assertEqual(self.bus.emitted_msgs[-2]["type"], "speak")
-        self.assertEqual(self.bus.emitted_msgs[-2]["data"]["meta"],
-                         {'data': {}, 'dialog': 'thats all', 'skill': 'wolfie.test'})
-
-        # removal of context to disable "tell me more"
-        self.assertEqual(self.bus.emitted_msgs[-1],
-                         {'context': {'skill_id': 'wolfie.test'},
-                          'data': {'context': 'wolfie_testWolfieKnows'},
-                          'type': 'remove_context'})
-        self.assertFalse(self.skill.has_context)
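
This series deletes the continuous-dialog test without adding a replacement.
A follow-up test for the new session-based flow could look roughly like the
sketch below. It is an untested sketch: it assumes the skill can be built
directly against a FakeBus with a skill_id, exactly as the __main__ block in
the second patch does, and it stubs wolfie.spoken_answer so no WolframAlpha
API key or network access is needed; the test class and method names are
placeholders.

import unittest
from types import SimpleNamespace
from unittest.mock import Mock

from ovos_bus_client import Message
from ovos_utils.fakebus import FakeBus

from skill_ovos_wolfie import WolframAlphaSkill


class TestSessionResults(unittest.TestCase):
    def setUp(self):
        self.bus = FakeBus()
        # built the same way as the __main__ demo in the second patch
        self.skill = WolframAlphaSkill(bus=self.bus, skill_id="wolfie.test")
        # stub the solver so the test needs no API key or network access
        self.skill.wolfie.spoken_answer = Mock(
            return_value="the speed of light is about 299792 kilometers per second")

    def test_handle_search_caches_answer_per_session(self):
        self.skill.handle_search(Message("search_wolfie.intent",
                                         {"query": "what is the speed of light"}))
        # one entry per session id, holding the spoken answer for later display
        answers = [r["spoken_answer"] for r in self.skill.session_results.values()]
        self.assertIn("the speed of light is about 299792 kilometers per second",
                      answers)

    def test_stop_session_evicts_cached_results(self):
        self.skill.handle_search(Message("search_wolfie.intent",
                                         {"query": "what is the speed of light"}))
        for sess_id in list(self.skill.session_results):
            self.skill.stop_session(SimpleNamespace(session_id=sess_id))
        self.assertEqual(self.skill.session_results, {})


if __name__ == "__main__":
    unittest.main()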