From c6accf2279e91224ec0b0c5768dd1c4b8f04d543 Mon Sep 17 00:00:00 2001 From: Dzmitry Hramyka Date: Sat, 28 Dec 2024 20:18:47 +0100 Subject: [PATCH 1/6] test utils --- .gitignore | 1 + coverage.lcov | 863 ++++++++++++++++++++++++++++++-------------- tests/test_utils.py | 61 ++++ 3 files changed, 654 insertions(+), 271 deletions(-) create mode 100644 tests/test_utils.py diff --git a/.gitignore b/.gitignore index e2fd31c..aad7894 100644 --- a/.gitignore +++ b/.gitignore @@ -6,6 +6,7 @@ qdrant_storage/ .chromadb/ persistent_q_table.json +coverage.lcov # ====== Default gitignore config ====== diff --git a/coverage.lcov b/coverage.lcov index 03cf711..99e7a0c 100644 --- a/coverage.lcov +++ b/coverage.lcov @@ -10,84 +10,86 @@ DA:8,0 DA:9,0 DA:10,0 DA:11,0 -DA:14,0 -DA:25,0 -DA:27,0 -DA:30,0 -DA:40,0 -DA:43,0 -DA:49,0 +DA:12,0 +DA:15,0 +DA:33,0 +DA:35,0 +DA:38,0 +DA:44,0 +DA:47,0 DA:51,0 -DA:52,0 DA:53,0 -DA:54,0 -DA:55,0 -DA:56,0 -DA:58,0 DA:59,0 DA:61,0 +DA:62,0 DA:63,0 +DA:64,0 DA:65,0 -DA:67,0 -DA:73,0 -DA:75,0 +DA:66,0 +DA:68,0 +DA:70,0 +DA:74,0 +DA:76,0 DA:78,0 -DA:79,0 -DA:80,0 -DA:82,0 -DA:83,0 DA:84,0 -DA:85,0 DA:86,0 -DA:88,0 DA:89,0 +DA:90,0 DA:91,0 -DA:92,0 DA:93,0 DA:94,0 DA:95,0 DA:96,0 +DA:97,0 DA:99,0 DA:100,0 +DA:102,0 DA:103,0 DA:104,0 -DA:109,0 -DA:110,0 -DA:116,0 -DA:117,0 +DA:105,0 +DA:106,0 +DA:107,0 +DA:108,0 +DA:111,0 +DA:112,0 +DA:115,0 +DA:118,0 DA:119,0 -DA:120,0 -DA:123,0 -DA:124,0 DA:125,0 -DA:126,0 +DA:127,0 +DA:128,0 DA:129,0 -DA:130,0 +DA:132,0 DA:133,0 DA:134,0 DA:137,0 DA:138,0 -DA:139,0 +DA:141,0 DA:142,0 -DA:144,0 DA:145,0 DA:146,0 DA:147,0 -DA:148,0 -DA:149,0 -LF:74 +DA:150,0 +DA:151,0 +DA:153,0 +DA:154,0 +DA:155,0 +DA:156,0 +DA:157,0 +DA:158,0 +LF:76 LH:0 -FN:25,43,Agent.__init__ +FN:33,53,Agent.__init__ FNDA:0,Agent.__init__ -FN:49,59,Agent._update_state +FN:59,68,Agent._update_state FNDA:0,Agent._update_state -FN:61,63,Agent._update_planning_policy +FN:70,74,Agent._update_planning_policy FNDA:0,Agent._update_planning_policy -FN:65,67,Agent._collect_feedback +FN:76,78,Agent._collect_feedback FNDA:0,Agent._collect_feedback -FN:73,110,Agent._perform_planned_action +FN:84,119,Agent._perform_planned_action FNDA:0,Agent._perform_planned_action -FN:116,149,Agent.start_runtime_loop +FN:125,158,Agent.start_runtime_loop FNDA:0,Agent.start_runtime_loop FNF:6 FNH:0 @@ -95,73 +97,92 @@ end_of_record SF:src/core/__init__.py end_of_record SF:src/core/config.py -DA:1,0 -DA:3,0 -DA:4,0 -DA:6,0 -DA:9,0 -DA:12,0 -DA:19,0 -DA:22,0 -DA:25,0 -DA:28,0 -DA:29,0 -DA:30,0 -DA:33,0 -DA:34,0 -DA:35,0 -DA:36,0 -DA:41,0 -DA:44,0 -DA:49,0 -DA:52,0 -DA:57,0 -DA:60,0 -DA:63,0 -DA:66,0 -DA:71,0 -DA:74,0 -DA:77,0 -DA:80,0 -DA:84,0 -DA:85,0 -DA:87,0 -DA:88,0 -DA:89,0 -DA:90,0 -DA:91,0 -DA:92,0 -DA:96,0 -DA:97,0 -DA:99,0 -DA:100,0 -DA:101,0 -DA:103,0 -DA:105,0 -DA:106,0 -DA:107,0 -DA:108,0 -DA:109,0 -DA:112,0 -LF:48 -LH:0 -FN:85,94,Settings.validate_environment -FNDA:0,Settings.validate_environment -FN:97,101,Settings.parse_reviewer_chat_ids -FNDA:0,Settings.parse_reviewer_chat_ids -FN:103,109,Settings.validate_memory_settings +DA:1,1 +DA:3,1 +DA:4,1 +DA:6,1 +DA:9,1 +DA:12,1 +DA:23,1 +DA:26,1 +DA:31,1 +DA:34,1 +DA:35,1 +DA:36,1 +DA:41,1 +DA:44,1 +DA:47,1 +DA:50,1 +DA:53,1 +DA:56,1 +DA:60,1 +DA:63,1 +DA:64,1 +DA:67,1 +DA:68,1 +DA:69,1 +DA:76,1 +DA:88,1 +DA:91,1 +DA:100,1 +DA:103,1 +DA:108,1 +DA:111,1 +DA:114,1 +DA:117,1 +DA:122,1 +DA:125,1 +DA:128,1 +DA:129,1 +DA:140,1 +DA:146,1 +DA:147,1 +DA:149,1 +DA:150,0 +DA:151,1 +DA:152,1 +DA:153,0 +DA:154,0 +DA:158,1 +DA:160,0 +DA:161,0 +DA:162,0 +DA:163,0 +DA:164,0 +DA:167,1 +LF:53 +LH:45 +FN:147,156,Settings.validate_environment +FNDA:1,Settings.validate_environment +FN:158,164,Settings.validate_memory_settings FNDA:0,Settings.validate_memory_settings -FNF:3 -FNH:0 +FNF:2 +FNH:1 end_of_record SF:src/core/defs.py -DA:3,0 -DA:6,0 -DA:9,0 -DA:10,0 -DA:11,0 -LF:5 -LH:0 +DA:3,1 +DA:6,1 +DA:9,1 +DA:10,1 +DA:11,1 +DA:14,1 +DA:17,1 +DA:18,1 +DA:19,1 +DA:22,1 +DA:25,1 +DA:26,1 +DA:27,1 +DA:28,1 +DA:29,1 +DA:32,1 +DA:35,1 +DA:36,1 +DA:39,1 +DA:42,1 +DA:43,1 +LF:21 +LH:21 end_of_record SF:src/core/exceptions.py DA:4,0 @@ -176,7 +197,9 @@ DA:28,0 DA:31,0 DA:34,0 DA:37,0 -LF:12 +DA:40,0 +DA:43,0 +LF:14 LH:0 end_of_record SF:src/feedback/__init__.py @@ -226,77 +249,160 @@ DA:3,0 DA:4,0 DA:5,0 DA:7,0 -DA:10,0 -DA:13,0 -DA:25,0 +DA:8,0 +DA:11,0 +DA:14,0 DA:26,0 -DA:28,0 -DA:42,0 +DA:27,0 +DA:29,0 DA:43,0 -DA:46,0 -DA:48,0 +DA:44,0 +DA:47,0 DA:49,0 DA:50,0 -DA:53,0 +DA:51,0 DA:54,0 -DA:56,0 +DA:55,0 DA:57,0 DA:58,0 -LF:21 +DA:59,0 +LF:22 LH:0 -FN:13,26,EmbeddingGenerator.__init__ +FN:14,27,EmbeddingGenerator.__init__ FNDA:0,EmbeddingGenerator.__init__ -FN:28,58,EmbeddingGenerator.get_embedding +FN:29,59,EmbeddingGenerator.get_embedding FNDA:0,EmbeddingGenerator.get_embedding FNF:2 FNH:0 end_of_record -SF:src/llm/oai_client.py +SF:src/llm/llm.py DA:1,0 DA:3,0 DA:4,0 +DA:6,0 DA:7,0 -DA:12,0 +DA:8,0 +DA:9,0 +DA:10,0 DA:13,0 -DA:14,0 -DA:17,0 DA:18,0 -LF:9 +DA:23,0 +DA:24,0 +DA:26,0 +DA:38,0 +DA:39,0 +DA:43,0 +DA:45,0 +DA:46,0 +DA:47,0 +DA:48,0 +DA:50,0 +DA:54,0 +DA:55,0 +LF:23 LH:0 -FN:7,14,call_openai_api -FNDA:0,call_openai_api -FN:17,18,get_oai_client +FN:18,24,LLM.__init__ +FNDA:0,LLM.__init__ +FN:26,50,LLM.generate_response +FNDA:0,LLM.generate_response +FN:54,55,get_oai_client FNDA:0,get_oai_client -FNF:2 +FNF:3 FNH:0 end_of_record -SF:src/main.py +SF:src/llm/providers/__init__.py +end_of_record +SF:src/llm/providers/anthropic.py DA:1,0 DA:3,0 -DA:5,0 -DA:8,0 -DA:11,0 -DA:15,0 -DA:18,0 -DA:20,0 +DA:4,0 +DA:6,0 +DA:7,0 +DA:10,0 DA:22,0 -DA:23,0 DA:24,0 -DA:26,0 +DA:25,0 +DA:28,0 DA:29,0 +DA:30,0 +DA:31,0 +DA:32,0 DA:33,0 -DA:34,0 DA:35,0 DA:36,0 +DA:38,0 +DA:40,0 +DA:41,0 +DA:47,0 +DA:48,0 +DA:49,0 +DA:50,0 +DA:51,0 +DA:52,0 +LF:26 +LH:0 +FN:10,52,call_anthropic +FNDA:0,call_anthropic +FNF:1 +FNH:0 +end_of_record +SF:src/llm/providers/oai.py +DA:1,0 +DA:3,0 +DA:4,0 +DA:6,0 +DA:7,0 +DA:10,0 +DA:22,0 +DA:24,0 +DA:25,0 +DA:27,0 +DA:31,0 +DA:32,0 DA:37,0 DA:38,0 +DA:40,0 DA:41,0 DA:42,0 -LF:21 +DA:43,0 +DA:44,0 +DA:45,0 +LF:20 LH:0 -FN:11,26,async_main +FN:10,45,call_openai +FNDA:0,call_openai +FNF:1 +FNH:0 +end_of_record +SF:src/main.py +DA:1,0 +DA:3,0 +DA:5,0 +DA:6,0 +DA:9,0 +DA:12,0 +DA:16,0 +DA:17,0 +DA:20,0 +DA:22,0 +DA:24,0 +DA:25,0 +DA:26,0 +DA:28,0 +DA:31,0 +DA:35,0 +DA:36,0 +DA:37,0 +DA:38,0 +DA:39,0 +DA:40,0 +DA:43,0 +DA:44,0 +LF:23 +LH:0 +FN:12,28,async_main FNDA:0,async_main -FN:29,38,main +FN:31,40,main FNDA:0,main FNF:2 FNH:0 @@ -305,77 +411,165 @@ SF:src/memory/__init__.py end_of_record SF:src/memory/backends/__init__.py end_of_record -SF:src/memory/backends/in_memory_backend.py +SF:src/memory/backends/chroma.py DA:1,0 -LF:1 +DA:2,0 +DA:3,0 +DA:4,0 +DA:6,0 +DA:7,0 +DA:8,0 +DA:10,0 +DA:13,0 +DA:16,0 +DA:17,0 +DA:26,0 +DA:28,0 +DA:29,0 +DA:31,0 +DA:34,0 +DA:37,0 +DA:43,0 +DA:48,0 +DA:49,0 +DA:52,0 +DA:53,0 +DA:54,0 +DA:55,0 +DA:57,0 +DA:75,0 +DA:76,0 +DA:78,0 +DA:79,0 +DA:80,0 +DA:84,0 +DA:86,0 +DA:87,0 +DA:93,0 +DA:94,0 +DA:95,0 +DA:96,0 +DA:98,0 +DA:109,0 +DA:111,0 +DA:112,0 +DA:115,0 +DA:116,0 +DA:117,0 +DA:118,0 +DA:119,0 +DA:120,0 +DA:121,0 +DA:123,0 +DA:124,0 +DA:125,0 +DA:126,0 +LF:52 LH:0 +FN:17,26,MemoryBackend.store +FNDA:0,MemoryBackend.store +FN:29,31,MemoryBackend.search +FNDA:0,MemoryBackend.search +FN:37,55,ChromaBackend.__init__ +FNDA:0,ChromaBackend.__init__ +FN:57,96,ChromaBackend.store +FNDA:0,ChromaBackend.store +FN:98,126,ChromaBackend.search +FNDA:0,ChromaBackend.search +FNF:5 +FNH:0 end_of_record -SF:src/memory/backends/qdrant_backend.py +SF:src/memory/backends/qdrant.py DA:1,0 -LF:1 +DA:2,0 +DA:3,0 +DA:5,0 +DA:6,0 +DA:7,0 +DA:8,0 +DA:10,0 +DA:11,0 +DA:14,0 +DA:17,0 +DA:25,0 +DA:26,0 +DA:27,0 +DA:30,0 +DA:31,0 +DA:32,0 +DA:33,0 +DA:34,0 +DA:35,0 +DA:42,0 +DA:60,0 +DA:61,0 +DA:63,0 +DA:69,0 +DA:70,0 +DA:72,0 +DA:73,0 +DA:83,0 +DA:84,0 +DA:85,0 +DA:86,0 +DA:88,0 +DA:99,0 +DA:100,0 +DA:105,0 +DA:106,0 +DA:107,0 +DA:108,0 +LF:39 LH:0 +FN:17,40,QdrantBackend.__init__ +FNDA:0,QdrantBackend.__init__ +FN:42,86,QdrantBackend.store +FNDA:0,QdrantBackend.store +FN:88,108,QdrantBackend.search +FNDA:0,QdrantBackend.search +FNF:3 +FNH:0 end_of_record SF:src/memory/memory_module.py DA:1,0 -DA:2,0 DA:3,0 -DA:5,0 +DA:4,0 DA:6,0 DA:7,0 DA:8,0 DA:9,0 +DA:10,0 DA:11,0 -DA:12,0 -DA:13,0 -DA:16,0 -DA:17,0 -DA:51,0 -DA:65,0 -DA:66,0 -DA:67,0 -DA:68,0 +DA:14,0 +DA:15,0 +DA:38,0 +DA:41,0 +DA:42,0 +DA:43,0 +DA:49,0 +DA:50,0 +DA:55,0 +DA:56,0 +DA:58,0 +DA:70,0 DA:71,0 DA:72,0 DA:73,0 -DA:74,0 -DA:75,0 -DA:76,0 -DA:83,0 -DA:89,0 -DA:90,0 -DA:91,0 +DA:81,0 DA:92,0 +DA:93,0 DA:94,0 DA:100,0 -DA:101,0 -DA:103,0 -DA:104,0 -DA:114,0 -DA:115,0 -DA:116,0 -DA:118,0 -DA:123,0 -DA:124,0 -DA:125,0 -DA:130,0 -DA:131,0 -DA:132,0 -DA:133,0 -DA:134,0 -DA:135,0 -DA:136,0 -DA:137,0 -DA:140,0 -DA:142,0 -LF:51 +DA:105,0 +LF:30 LH:0 -FN:17,81,MemoryModule.__init__ +FN:15,56,MemoryModule.__init__ FNDA:0,MemoryModule.__init__ -FN:83,116,MemoryModule.store +FN:58,79,MemoryModule.store FNDA:0,MemoryModule.store -FN:118,137,MemoryModule.search +FN:81,97,MemoryModule.search FNDA:0,MemoryModule.search -FN:140,142,get_memory_module +FN:100,105,get_memory_module FNDA:0,get_memory_module FNF:4 FNH:0 @@ -388,65 +582,69 @@ DA:2,0 DA:3,0 DA:5,0 DA:7,0 -DA:10,0 -DA:13,0 -DA:37,0 -DA:38,0 -DA:44,0 -DA:47,0 -DA:48,0 +DA:8,0 +DA:11,0 +DA:14,0 DA:49,0 +DA:50,0 DA:52,0 DA:55,0 +DA:56,0 DA:57,0 -DA:64,0 +DA:60,0 +DA:63,0 DA:65,0 -DA:66,0 -DA:67,0 -DA:68,0 -DA:69,0 -DA:70,0 -DA:71,0 DA:72,0 +DA:73,0 DA:74,0 +DA:75,0 +DA:76,0 +DA:77,0 DA:78,0 DA:79,0 DA:80,0 -DA:81,0 DA:82,0 -DA:83,0 -DA:85,0 -DA:96,0 -DA:97,0 -DA:98,0 -DA:101,0 -DA:103,0 +DA:86,0 +DA:87,0 +DA:88,0 +DA:89,0 +DA:90,0 +DA:91,0 +DA:93,0 +DA:104,0 +DA:105,0 DA:106,0 DA:107,0 -DA:108,0 -DA:109,0 -DA:111,0 -DA:122,0 -DA:123,0 -DA:124,0 -DA:125,0 -DA:127,0 -DA:128,0 -DA:131,0 -DA:132,0 +DA:110,0 +DA:112,0 +DA:115,0 +DA:116,0 +DA:117,0 +DA:118,0 +DA:120,0 +DA:133,0 +DA:134,0 DA:135,0 +DA:136,0 +DA:137,0 DA:138,0 -LF:53 +DA:140,0 +DA:141,0 +DA:144,0 +DA:145,0 +DA:148,0 +DA:151,0 +LF:57 LH:0 -FN:13,55,PlanningModule.__init__ +FN:14,63,PlanningModule.__init__ FNDA:0,PlanningModule.__init__ -FN:57,72,PlanningModule._load_q_table +FN:65,80,PlanningModule._load_q_table FNDA:0,PlanningModule._load_q_table -FN:74,83,PlanningModule._save_q_table +FN:82,91,PlanningModule._save_q_table FNDA:0,PlanningModule._save_q_table -FN:85,109,PlanningModule.get_action +FN:93,118,PlanningModule.get_action FNDA:0,PlanningModule.get_action -FN:111,138,PlanningModule.update_q_table +FN:120,151,PlanningModule.update_q_table FNDA:0,PlanningModule.update_q_table FNF:5 FNH:0 @@ -456,25 +654,93 @@ end_of_record SF:src/tools/get_signal.py DA:1,0 DA:2,0 +DA:4,0 DA:5,0 +DA:7,0 +DA:8,0 +DA:11,0 DA:14,0 -DA:16,0 -DA:17,0 -DA:18,0 -DA:19,0 DA:20,0 -DA:21,0 DA:22,0 +DA:23,0 DA:24,0 DA:25,0 DA:26,0 -DA:27,0 -DA:28,0 -LF:16 +DA:32,0 +DA:33,0 +DA:34,0 +DA:35,0 +DA:37,0 +DA:38,0 +DA:39,0 +DA:40,0 +DA:41,0 +DA:44,0 +DA:54,0 +DA:55,0 +DA:56,0 +DA:57,0 +DA:58,0 +DA:59,0 +DA:60,0 +DA:61,0 +DA:62,0 +DA:64,0 +DA:65,0 +DA:66,0 +DA:67,0 +DA:68,0 +LF:38 LH:0 -FN:5,28,fetch_signal +FN:20,41,get_coinstats_news +FNDA:0,get_coinstats_news +FN:44,68,fetch_signal FNDA:0,fetch_signal -FNF:1 +FNF:2 +FNH:0 +end_of_record +SF:src/tools/search_with_perplexity.py +DA:1,0 +DA:2,0 +DA:4,0 +DA:5,0 +DA:8,0 +DA:17,0 +DA:18,0 +DA:19,0 +DA:20,0 +DA:21,0 +DA:23,0 +DA:53,0 +DA:58,0 +DA:61,0 +DA:64,0 +DA:65,0 +DA:67,0 +DA:68,0 +DA:73,0 +DA:76,0 +DA:77,0 +DA:78,0 +DA:79,0 +DA:80,0 +DA:81,0 +DA:82,0 +DA:83,0 +DA:84,0 +DA:85,0 +DA:88,0 +DA:99,0 +DA:102,0 +DA:105,0 +DA:106,0 +LF:34 +LH:0 +FN:8,85,search_with_perplexity +FNDA:0,search_with_perplexity +FN:88,106,estimate_perplexity_cost_per_request +FNDA:0,estimate_perplexity_cost_per_request +FNF:2 FNH:0 end_of_record SF:src/tools/tg.py @@ -487,37 +753,35 @@ DA:8,0 DA:9,0 DA:12,0 DA:14,0 -DA:16,0 -DA:18,0 -DA:21,0 -DA:24,0 +DA:17,0 +DA:20,0 +DA:32,0 +DA:33,0 +DA:35,0 DA:36,0 DA:37,0 +DA:38,0 DA:39,0 DA:40,0 -DA:41,0 -DA:42,0 DA:43,0 -DA:44,0 -DA:47,0 +DA:57,0 +DA:58,0 +DA:59,0 DA:61,0 DA:62,0 -DA:63,0 -DA:65,0 -DA:66,0 -DA:72,0 -DA:73,0 -DA:74,0 +DA:68,0 +DA:69,0 +DA:70,0 +DA:71,0 DA:75,0 +DA:77,0 +DA:78,0 DA:79,0 -DA:81,0 -DA:82,0 -DA:83,0 -LF:35 +LF:33 LH:0 -FN:24,44,split_long_message +FN:20,40,split_long_message FNDA:0,split_long_message -FN:47,83,post_summary_to_telegram +FN:43,79,post_summary_to_telegram FNDA:0,post_summary_to_telegram FNF:2 FNH:0 @@ -594,26 +858,80 @@ FNDA:0,post_twitter_thread FNF:4 FNH:0 end_of_record +SF:src/utils.py +DA:1,1 +DA:3,1 +DA:6,1 +DA:8,1 +DA:9,1 +DA:10,1 +DA:13,1 +DA:14,1 +DA:15,1 +DA:16,1 +DA:17,1 +DA:20,1 +DA:21,1 +DA:22,1 +DA:23,1 +DA:26,1 +DA:27,1 +DA:28,1 +DA:29,1 +DA:30,1 +DA:31,1 +DA:33,1 +LF:22 +LH:22 +FN:6,33,log_settings +FNDA:1,log_settings +FNF:1 +FNH:1 +end_of_record SF:src/workflows/__init__.py end_of_record SF:src/workflows/analyze_signal.py DA:1,0 DA:3,0 DA:5,0 +DA:6,0 +DA:7,0 DA:8,0 +DA:11,0 +DA:13,0 DA:14,0 DA:15,0 -DA:16,0 DA:17,0 +DA:18,0 DA:19,0 -DA:20,0 -DA:21,0 DA:22,0 +DA:23,0 DA:24,0 DA:25,0 -LF:14 +DA:27,0 +DA:29,0 +DA:30,0 +DA:33,0 +DA:34,0 +DA:39,0 +DA:40,0 +DA:43,0 +DA:46,0 +DA:47,0 +DA:48,0 +DA:51,0 +DA:58,0 +DA:60,0 +DA:61,0 +DA:62,0 +DA:64,0 +DA:65,0 +DA:67,0 +DA:68,0 +DA:69,0 +LF:38 LH:0 -FN:8,25,analyze_signal +FN:11,69,analyze_signal FNDA:0,analyze_signal FNF:1 FNH:0 @@ -625,22 +943,25 @@ DA:5,0 DA:6,0 DA:7,0 DA:10,0 +DA:15,0 +DA:16,0 DA:18,0 -DA:20,0 -DA:21,0 -DA:24,0 -DA:25,0 +DA:19,0 +DA:22,0 +DA:23,0 DA:28,0 -DA:31,0 +DA:29,0 DA:32,0 -DA:33,0 -DA:34,0 DA:35,0 DA:36,0 DA:37,0 -LF:19 +DA:38,0 +DA:39,0 +DA:40,0 +DA:41,0 +LF:22 LH:0 -FN:10,37,analyze_news_workflow +FN:10,41,analyze_news_workflow FNDA:0,analyze_news_workflow FNF:1 FNH:0 diff --git a/tests/test_utils.py b/tests/test_utils.py new file mode 100644 index 0000000..1cb1688 --- /dev/null +++ b/tests/test_utils.py @@ -0,0 +1,61 @@ +from unittest.mock import MagicMock +import pytest +from loguru import logger + +from src.utils import log_settings + + +@pytest.fixture +def mock_settings(monkeypatch): + """Mock settings for testing.""" + # arrange: + monkeypatch.setattr("src.core.config.settings.ENVIRONMENT", "test") + monkeypatch.setattr("src.core.config.settings.PERSISTENT_Q_TABLE_PATH", "/test/path") + monkeypatch.setattr("src.core.config.settings.MEMORY_BACKEND_TYPE", "test_memory") + monkeypatch.setattr("src.core.config.settings.LLM_PROVIDER", "test_llm") + monkeypatch.setattr("src.core.config.settings.AGENT_PERSONALITY", "test_personality") + monkeypatch.setattr("src.core.config.settings.AGENT_GOAL", "test_goal") + monkeypatch.setattr("src.core.config.settings.AGENT_REST_TIME", 60) + monkeypatch.setattr("src.core.config.settings.TELEGRAM_BOT_TOKEN", "test_token") + monkeypatch.setattr("src.core.config.settings.TWITTER_API_KEY", "test_key") + monkeypatch.setattr("src.core.config.settings.PERPLEXITY_API_KEY", "test_key") + + +@pytest.fixture +def mock_logger(monkeypatch): + """Mock logger for testing.""" + # arrange: + mock_info = MagicMock() + monkeypatch.setattr(logger, "info", mock_info) + return mock_info + + +def test_log_settings(mock_settings, mock_logger): + """Test logging settings function.""" + # act: + log_settings() + + # assert: + assert mock_logger.call_count >= 13 # At least 13 log calls expected + + # Verify some specific log messages + expected_calls = [ + "=" * 40, + "Current Settings", + "=" * 40, + "General Settings:", + " Environment: test", + " Planning Module table path: /test/path", + " Agent's memory powered by: test_memory", + " Agent's intelligence powered by: test_llm", + "Agent Settings:", + " Agent's personality: test_personality", + " Agent's goal: test_goal", + " Agent Rest Time: 60s", + "Telegram Integration: Configured", + "Twitter Integration: Configured", + "Perplexity Integration: Configured", + ] + + for expected_call in expected_calls: + mock_logger.assert_any_call(expected_call) From 3b8ddbea2f299ce62c9e7498a7881c23f704e5da Mon Sep 17 00:00:00 2001 From: Dzmitry Hramyka Date: Sun, 29 Dec 2024 00:08:30 +0100 Subject: [PATCH 2/6] tests temp --- coverage.lcov | 882 ++++++++++----------- src/tools/tg.py | 11 +- tests/planning/test_planning_module.py | 180 ++++- tests/test_agent.py | 195 +++++ tests/test_agent_runtime.py | 160 ++++ tests/test_main.py | 102 +++ tests/test_utils.py | 7 +- tests/tools/test_get_signal.py | 138 ++++ tests/tools/test_search_with_perplexity.py | 0 tests/tools/test_tg.py | 176 +++- tests/tools/test_twitter.py | 233 +++++- tests/workflows/test_analyze_signal.py | 169 +++- tests/workflows/test_research_news.py | 162 +++- 13 files changed, 1917 insertions(+), 498 deletions(-) create mode 100644 tests/test_agent_runtime.py create mode 100644 tests/tools/test_get_signal.py create mode 100644 tests/tools/test_search_with_perplexity.py diff --git a/coverage.lcov b/coverage.lcov index 99e7a0c..6c5e5a6 100644 --- a/coverage.lcov +++ b/coverage.lcov @@ -1,98 +1,98 @@ SF:src/__init__.py end_of_record SF:src/agent.py -DA:1,0 -DA:2,0 -DA:4,0 -DA:6,0 -DA:7,0 -DA:8,0 -DA:9,0 -DA:10,0 -DA:11,0 -DA:12,0 -DA:15,0 -DA:33,0 -DA:35,0 -DA:38,0 -DA:44,0 -DA:47,0 -DA:51,0 -DA:53,0 -DA:59,0 -DA:61,0 -DA:62,0 -DA:63,0 -DA:64,0 -DA:65,0 -DA:66,0 -DA:68,0 -DA:70,0 -DA:74,0 -DA:76,0 -DA:78,0 -DA:84,0 -DA:86,0 -DA:89,0 -DA:90,0 -DA:91,0 -DA:93,0 -DA:94,0 -DA:95,0 -DA:96,0 -DA:97,0 -DA:99,0 -DA:100,0 -DA:102,0 -DA:103,0 -DA:104,0 -DA:105,0 -DA:106,0 -DA:107,0 -DA:108,0 -DA:111,0 -DA:112,0 -DA:115,0 -DA:118,0 -DA:119,0 -DA:125,0 -DA:127,0 -DA:128,0 -DA:129,0 -DA:132,0 -DA:133,0 -DA:134,0 -DA:137,0 -DA:138,0 -DA:141,0 -DA:142,0 -DA:145,0 -DA:146,0 -DA:147,0 -DA:150,0 -DA:151,0 -DA:153,0 -DA:154,0 -DA:155,0 -DA:156,0 -DA:157,0 -DA:158,0 +DA:1,1 +DA:2,1 +DA:4,1 +DA:6,1 +DA:7,1 +DA:8,1 +DA:9,1 +DA:10,1 +DA:11,1 +DA:12,1 +DA:15,1 +DA:33,1 +DA:35,1 +DA:38,1 +DA:44,1 +DA:47,1 +DA:51,1 +DA:53,1 +DA:59,1 +DA:61,1 +DA:62,1 +DA:63,1 +DA:64,1 +DA:65,1 +DA:66,1 +DA:68,1 +DA:70,1 +DA:74,1 +DA:76,1 +DA:78,1 +DA:84,1 +DA:86,1 +DA:89,1 +DA:90,1 +DA:91,1 +DA:93,1 +DA:94,1 +DA:95,1 +DA:96,1 +DA:97,1 +DA:99,1 +DA:100,1 +DA:102,1 +DA:103,1 +DA:104,1 +DA:105,1 +DA:106,1 +DA:107,1 +DA:108,1 +DA:111,1 +DA:112,1 +DA:115,1 +DA:118,1 +DA:119,1 +DA:125,1 +DA:127,1 +DA:128,1 +DA:129,1 +DA:132,1 +DA:133,1 +DA:134,1 +DA:137,1 +DA:138,1 +DA:141,1 +DA:142,1 +DA:145,1 +DA:146,1 +DA:147,1 +DA:150,1 +DA:151,1 +DA:153,1 +DA:154,1 +DA:155,1 +DA:156,1 +DA:157,1 +DA:158,1 LF:76 -LH:0 +LH:76 FN:33,53,Agent.__init__ -FNDA:0,Agent.__init__ +FNDA:1,Agent.__init__ FN:59,68,Agent._update_state -FNDA:0,Agent._update_state +FNDA:1,Agent._update_state FN:70,74,Agent._update_planning_policy -FNDA:0,Agent._update_planning_policy +FNDA:1,Agent._update_planning_policy FN:76,78,Agent._collect_feedback -FNDA:0,Agent._collect_feedback +FNDA:1,Agent._collect_feedback FN:84,119,Agent._perform_planned_action -FNDA:0,Agent._perform_planned_action +FNDA:1,Agent._perform_planned_action FN:125,158,Agent.start_runtime_loop -FNDA:0,Agent.start_runtime_loop +FNDA:1,Agent.start_runtime_loop FNF:6 -FNH:0 +FNH:6 end_of_record SF:src/core/__init__.py end_of_record @@ -185,32 +185,32 @@ LF:21 LH:21 end_of_record SF:src/core/exceptions.py -DA:4,0 -DA:7,0 -DA:10,0 -DA:13,0 -DA:16,0 -DA:19,0 -DA:22,0 -DA:25,0 -DA:28,0 -DA:31,0 -DA:34,0 -DA:37,0 -DA:40,0 -DA:43,0 +DA:4,1 +DA:7,1 +DA:10,1 +DA:13,1 +DA:16,1 +DA:19,1 +DA:22,1 +DA:25,1 +DA:28,1 +DA:31,1 +DA:34,1 +DA:37,1 +DA:40,1 +DA:43,1 LF:14 -LH:0 +LH:14 end_of_record SF:src/feedback/__init__.py end_of_record SF:src/feedback/feedback_module.py -DA:1,0 -DA:3,0 -DA:6,0 -DA:11,0 +DA:1,1 +DA:3,1 +DA:6,1 +DA:11,1 DA:13,0 -DA:15,0 +DA:15,1 DA:26,0 DA:29,0 DA:30,0 @@ -223,13 +223,13 @@ DA:41,0 DA:47,0 DA:49,0 DA:50,0 -DA:52,0 +DA:52,1 DA:62,0 -DA:64,0 +DA:64,1 DA:68,0 DA:69,0 LF:23 -LH:0 +LH:7 FN:11,13,FeedbackModule.__init__ FNDA:0,FeedbackModule.__init__ FN:15,50,FeedbackModule.collect_feedback @@ -244,17 +244,17 @@ end_of_record SF:src/llm/__init__.py end_of_record SF:src/llm/embeddings.py -DA:1,0 -DA:3,0 -DA:4,0 -DA:5,0 -DA:7,0 -DA:8,0 -DA:11,0 -DA:14,0 -DA:26,0 -DA:27,0 -DA:29,0 +DA:1,1 +DA:3,1 +DA:4,1 +DA:5,1 +DA:7,1 +DA:8,1 +DA:11,1 +DA:14,1 +DA:26,1 +DA:27,1 +DA:29,1 DA:43,0 DA:44,0 DA:47,0 @@ -267,28 +267,28 @@ DA:57,0 DA:58,0 DA:59,0 LF:22 -LH:0 +LH:11 FN:14,27,EmbeddingGenerator.__init__ -FNDA:0,EmbeddingGenerator.__init__ +FNDA:1,EmbeddingGenerator.__init__ FN:29,59,EmbeddingGenerator.get_embedding FNDA:0,EmbeddingGenerator.get_embedding FNF:2 -FNH:0 +FNH:1 end_of_record SF:src/llm/llm.py -DA:1,0 -DA:3,0 -DA:4,0 -DA:6,0 -DA:7,0 -DA:8,0 -DA:9,0 -DA:10,0 -DA:13,0 -DA:18,0 +DA:1,1 +DA:3,1 +DA:4,1 +DA:6,1 +DA:7,1 +DA:8,1 +DA:9,1 +DA:10,1 +DA:13,1 +DA:18,1 DA:23,0 DA:24,0 -DA:26,0 +DA:26,1 DA:38,0 DA:39,0 DA:43,0 @@ -297,28 +297,28 @@ DA:46,0 DA:47,0 DA:48,0 DA:50,0 -DA:54,0 -DA:55,0 +DA:54,1 +DA:55,1 LF:23 -LH:0 +LH:13 FN:18,24,LLM.__init__ FNDA:0,LLM.__init__ FN:26,50,LLM.generate_response FNDA:0,LLM.generate_response FN:54,55,get_oai_client -FNDA:0,get_oai_client +FNDA:1,get_oai_client FNF:3 -FNH:0 +FNH:1 end_of_record SF:src/llm/providers/__init__.py end_of_record SF:src/llm/providers/anthropic.py -DA:1,0 -DA:3,0 -DA:4,0 -DA:6,0 -DA:7,0 -DA:10,0 +DA:1,1 +DA:3,1 +DA:4,1 +DA:6,1 +DA:7,1 +DA:10,1 DA:22,0 DA:24,0 DA:25,0 @@ -340,19 +340,19 @@ DA:50,0 DA:51,0 DA:52,0 LF:26 -LH:0 +LH:6 FN:10,52,call_anthropic FNDA:0,call_anthropic FNF:1 FNH:0 end_of_record SF:src/llm/providers/oai.py -DA:1,0 -DA:3,0 -DA:4,0 -DA:6,0 -DA:7,0 -DA:10,0 +DA:1,1 +DA:3,1 +DA:4,1 +DA:6,1 +DA:7,1 +DA:10,1 DA:22,0 DA:24,0 DA:25,0 @@ -368,75 +368,75 @@ DA:43,0 DA:44,0 DA:45,0 LF:20 -LH:0 +LH:6 FN:10,45,call_openai FNDA:0,call_openai FNF:1 FNH:0 end_of_record SF:src/main.py -DA:1,0 -DA:3,0 -DA:5,0 -DA:6,0 -DA:9,0 -DA:12,0 -DA:16,0 -DA:17,0 -DA:20,0 -DA:22,0 -DA:24,0 -DA:25,0 -DA:26,0 -DA:28,0 -DA:31,0 -DA:35,0 -DA:36,0 -DA:37,0 -DA:38,0 -DA:39,0 -DA:40,0 -DA:43,0 +DA:1,1 +DA:3,1 +DA:5,1 +DA:6,1 +DA:9,1 +DA:12,1 +DA:16,1 +DA:17,1 +DA:20,1 +DA:22,1 +DA:24,1 +DA:25,1 +DA:26,1 +DA:28,1 +DA:31,1 +DA:35,1 +DA:36,1 +DA:37,1 +DA:38,1 +DA:39,1 +DA:40,1 +DA:43,1 DA:44,0 LF:23 -LH:0 +LH:22 FN:12,28,async_main -FNDA:0,async_main +FNDA:1,async_main FN:31,40,main -FNDA:0,main +FNDA:1,main FNF:2 -FNH:0 +FNH:2 end_of_record SF:src/memory/__init__.py end_of_record SF:src/memory/backends/__init__.py end_of_record SF:src/memory/backends/chroma.py -DA:1,0 -DA:2,0 -DA:3,0 -DA:4,0 -DA:6,0 -DA:7,0 -DA:8,0 -DA:10,0 -DA:13,0 -DA:16,0 -DA:17,0 +DA:1,1 +DA:2,1 +DA:3,1 +DA:4,1 +DA:6,1 +DA:7,1 +DA:8,1 +DA:10,1 +DA:13,1 +DA:16,1 +DA:17,1 DA:26,0 -DA:28,0 -DA:29,0 +DA:28,1 +DA:29,1 DA:31,0 -DA:34,0 -DA:37,0 -DA:43,0 -DA:48,0 -DA:49,0 -DA:52,0 +DA:34,1 +DA:37,1 +DA:43,1 +DA:48,1 +DA:49,1 +DA:52,1 DA:53,0 DA:54,0 DA:55,0 -DA:57,0 +DA:57,1 DA:75,0 DA:76,0 DA:78,0 @@ -449,7 +449,7 @@ DA:93,0 DA:94,0 DA:95,0 DA:96,0 -DA:98,0 +DA:98,1 DA:109,0 DA:111,0 DA:112,0 @@ -465,32 +465,32 @@ DA:124,0 DA:125,0 DA:126,0 LF:52 -LH:0 +LH:21 FN:17,26,MemoryBackend.store FNDA:0,MemoryBackend.store FN:29,31,MemoryBackend.search FNDA:0,MemoryBackend.search FN:37,55,ChromaBackend.__init__ -FNDA:0,ChromaBackend.__init__ +FNDA:1,ChromaBackend.__init__ FN:57,96,ChromaBackend.store FNDA:0,ChromaBackend.store FN:98,126,ChromaBackend.search FNDA:0,ChromaBackend.search FNF:5 -FNH:0 +FNH:1 end_of_record SF:src/memory/backends/qdrant.py -DA:1,0 -DA:2,0 -DA:3,0 -DA:5,0 -DA:6,0 -DA:7,0 -DA:8,0 -DA:10,0 -DA:11,0 -DA:14,0 -DA:17,0 +DA:1,1 +DA:2,1 +DA:3,1 +DA:5,1 +DA:6,1 +DA:7,1 +DA:8,1 +DA:10,1 +DA:11,1 +DA:14,1 +DA:17,1 DA:25,0 DA:26,0 DA:27,0 @@ -500,7 +500,7 @@ DA:32,0 DA:33,0 DA:34,0 DA:35,0 -DA:42,0 +DA:42,1 DA:60,0 DA:61,0 DA:63,0 @@ -512,7 +512,7 @@ DA:83,0 DA:84,0 DA:85,0 DA:86,0 -DA:88,0 +DA:88,1 DA:99,0 DA:100,0 DA:105,0 @@ -520,7 +520,7 @@ DA:106,0 DA:107,0 DA:108,0 LF:39 -LH:0 +LH:13 FN:17,40,QdrantBackend.__init__ FNDA:0,QdrantBackend.__init__ FN:42,86,QdrantBackend.store @@ -531,173 +531,173 @@ FNF:3 FNH:0 end_of_record SF:src/memory/memory_module.py -DA:1,0 -DA:3,0 -DA:4,0 -DA:6,0 -DA:7,0 -DA:8,0 -DA:9,0 -DA:10,0 -DA:11,0 -DA:14,0 -DA:15,0 -DA:38,0 -DA:41,0 -DA:42,0 +DA:1,1 +DA:3,1 +DA:4,1 +DA:6,1 +DA:7,1 +DA:8,1 +DA:9,1 +DA:10,1 +DA:11,1 +DA:14,1 +DA:15,1 +DA:38,1 +DA:41,1 +DA:42,1 DA:43,0 -DA:49,0 -DA:50,0 +DA:49,1 +DA:50,1 DA:55,0 -DA:56,0 -DA:58,0 +DA:56,1 +DA:58,1 DA:70,0 DA:71,0 DA:72,0 DA:73,0 -DA:81,0 +DA:81,1 DA:92,0 DA:93,0 DA:94,0 -DA:100,0 -DA:105,0 +DA:100,1 +DA:105,1 LF:30 -LH:0 +LH:21 FN:15,56,MemoryModule.__init__ -FNDA:0,MemoryModule.__init__ +FNDA:1,MemoryModule.__init__ FN:58,79,MemoryModule.store FNDA:0,MemoryModule.store FN:81,97,MemoryModule.search FNDA:0,MemoryModule.search FN:100,105,get_memory_module -FNDA:0,get_memory_module +FNDA:1,get_memory_module FNF:4 -FNH:0 +FNH:2 end_of_record SF:src/planning/__init__.py end_of_record SF:src/planning/planning_module.py -DA:1,0 -DA:2,0 -DA:3,0 -DA:5,0 -DA:7,0 -DA:8,0 -DA:11,0 -DA:14,0 -DA:49,0 -DA:50,0 -DA:52,0 -DA:55,0 -DA:56,0 -DA:57,0 -DA:60,0 -DA:63,0 -DA:65,0 -DA:72,0 -DA:73,0 -DA:74,0 -DA:75,0 -DA:76,0 -DA:77,0 +DA:1,1 +DA:2,1 +DA:3,1 +DA:5,1 +DA:7,1 +DA:8,1 +DA:11,1 +DA:14,1 +DA:49,1 +DA:50,1 +DA:52,1 +DA:55,1 +DA:56,1 +DA:57,1 +DA:60,1 +DA:63,1 +DA:65,1 +DA:72,1 +DA:73,1 +DA:74,1 +DA:75,1 +DA:76,1 +DA:77,1 DA:78,0 DA:79,0 -DA:80,0 -DA:82,0 -DA:86,0 -DA:87,0 -DA:88,0 -DA:89,0 -DA:90,0 -DA:91,0 -DA:93,0 -DA:104,0 -DA:105,0 -DA:106,0 -DA:107,0 -DA:110,0 +DA:80,1 +DA:82,1 +DA:86,1 +DA:87,1 +DA:88,1 +DA:89,1 +DA:90,1 +DA:91,1 +DA:93,1 +DA:104,1 +DA:105,1 +DA:106,1 +DA:107,1 +DA:110,1 DA:112,0 -DA:115,0 -DA:116,0 -DA:117,0 -DA:118,0 -DA:120,0 -DA:133,0 -DA:134,0 -DA:135,0 +DA:115,1 +DA:116,1 +DA:117,1 +DA:118,1 +DA:120,1 +DA:133,1 +DA:134,1 +DA:135,1 DA:136,0 -DA:137,0 +DA:137,1 DA:138,0 -DA:140,0 -DA:141,0 -DA:144,0 -DA:145,0 -DA:148,0 -DA:151,0 +DA:140,1 +DA:141,1 +DA:144,1 +DA:145,1 +DA:148,1 +DA:151,1 LF:57 -LH:0 +LH:52 FN:14,63,PlanningModule.__init__ -FNDA:0,PlanningModule.__init__ +FNDA:1,PlanningModule.__init__ FN:65,80,PlanningModule._load_q_table -FNDA:0,PlanningModule._load_q_table +FNDA:1,PlanningModule._load_q_table FN:82,91,PlanningModule._save_q_table -FNDA:0,PlanningModule._save_q_table +FNDA:1,PlanningModule._save_q_table FN:93,118,PlanningModule.get_action -FNDA:0,PlanningModule.get_action +FNDA:1,PlanningModule.get_action FN:120,151,PlanningModule.update_q_table -FNDA:0,PlanningModule.update_q_table +FNDA:1,PlanningModule.update_q_table FNF:5 -FNH:0 +FNH:5 end_of_record SF:src/tools/__init__.py end_of_record SF:src/tools/get_signal.py -DA:1,0 -DA:2,0 -DA:4,0 -DA:5,0 -DA:7,0 -DA:8,0 -DA:11,0 -DA:14,0 -DA:20,0 -DA:22,0 -DA:23,0 -DA:24,0 -DA:25,0 -DA:26,0 -DA:32,0 -DA:33,0 -DA:34,0 -DA:35,0 -DA:37,0 +DA:1,1 +DA:2,1 +DA:4,1 +DA:5,1 +DA:7,1 +DA:8,1 +DA:11,1 +DA:14,1 +DA:20,1 +DA:22,1 +DA:23,1 +DA:24,1 +DA:25,1 +DA:26,1 +DA:32,1 +DA:33,1 +DA:34,1 +DA:35,1 +DA:37,1 DA:38,0 -DA:39,0 -DA:40,0 -DA:41,0 -DA:44,0 -DA:54,0 -DA:55,0 -DA:56,0 -DA:57,0 -DA:58,0 -DA:59,0 -DA:60,0 -DA:61,0 -DA:62,0 -DA:64,0 -DA:65,0 -DA:66,0 -DA:67,0 -DA:68,0 +DA:39,1 +DA:40,1 +DA:41,1 +DA:44,1 +DA:54,1 +DA:55,1 +DA:56,1 +DA:57,1 +DA:58,1 +DA:59,1 +DA:60,1 +DA:61,1 +DA:62,1 +DA:64,1 +DA:65,1 +DA:66,1 +DA:67,1 +DA:68,1 LF:38 -LH:0 +LH:37 FN:20,41,get_coinstats_news -FNDA:0,get_coinstats_news +FNDA:1,get_coinstats_news FN:44,68,fetch_signal -FNDA:0,fetch_signal +FNDA:1,fetch_signal FNF:2 -FNH:0 +FNH:2 end_of_record SF:src/tools/search_with_perplexity.py DA:1,0 @@ -752,57 +752,55 @@ DA:6,0 DA:8,0 DA:9,0 DA:12,0 -DA:14,0 -DA:17,0 -DA:20,0 +DA:15,0 +DA:27,0 +DA:28,0 +DA:30,0 +DA:31,0 DA:32,0 DA:33,0 +DA:34,0 DA:35,0 -DA:36,0 -DA:37,0 DA:38,0 -DA:39,0 -DA:40,0 -DA:43,0 +DA:52,0 +DA:53,0 +DA:54,0 +DA:56,0 DA:57,0 -DA:58,0 -DA:59,0 -DA:61,0 -DA:62,0 -DA:68,0 -DA:69,0 +DA:63,0 +DA:64,0 +DA:65,0 +DA:66,0 DA:70,0 -DA:71,0 -DA:75,0 -DA:77,0 -DA:78,0 -DA:79,0 -LF:33 +DA:72,0 +DA:73,0 +DA:74,0 +LF:31 LH:0 -FN:20,40,split_long_message +FN:15,35,split_long_message FNDA:0,split_long_message -FN:43,79,post_summary_to_telegram +FN:38,74,post_summary_to_telegram FNDA:0,post_summary_to_telegram FNF:2 FNH:0 end_of_record SF:src/tools/twitter.py -DA:1,0 -DA:2,0 -DA:3,0 -DA:5,0 -DA:6,0 -DA:7,0 -DA:8,0 -DA:10,0 -DA:11,0 -DA:14,0 +DA:1,1 +DA:2,1 +DA:3,1 +DA:5,1 +DA:6,1 +DA:7,1 +DA:8,1 +DA:10,1 +DA:11,1 +DA:14,1 DA:21,0 DA:22,0 DA:23,0 -DA:26,0 +DA:26,1 DA:33,0 -DA:41,0 +DA:41,1 DA:51,0 DA:53,0 DA:59,0 @@ -821,7 +819,7 @@ DA:75,0 DA:76,0 DA:77,0 DA:78,0 -DA:81,0 +DA:81,1 DA:92,0 DA:93,0 DA:94,0 @@ -846,7 +844,7 @@ DA:123,0 DA:124,0 DA:126,0 LF:58 -LH:0 +LH:13 FN:14,23,get_twitter_conn_v1 FNDA:0,get_twitter_conn_v1 FN:26,38,get_twitter_conn_v2 @@ -891,78 +889,78 @@ end_of_record SF:src/workflows/__init__.py end_of_record SF:src/workflows/analyze_signal.py -DA:1,0 -DA:3,0 -DA:5,0 -DA:6,0 -DA:7,0 -DA:8,0 -DA:11,0 -DA:13,0 -DA:14,0 -DA:15,0 -DA:17,0 -DA:18,0 -DA:19,0 -DA:22,0 -DA:23,0 -DA:24,0 -DA:25,0 -DA:27,0 -DA:29,0 -DA:30,0 -DA:33,0 -DA:34,0 -DA:39,0 -DA:40,0 -DA:43,0 -DA:46,0 -DA:47,0 -DA:48,0 -DA:51,0 -DA:58,0 -DA:60,0 -DA:61,0 -DA:62,0 -DA:64,0 -DA:65,0 -DA:67,0 -DA:68,0 -DA:69,0 +DA:1,1 +DA:3,1 +DA:5,1 +DA:6,1 +DA:7,1 +DA:8,1 +DA:11,1 +DA:13,1 +DA:14,1 +DA:15,1 +DA:17,1 +DA:18,1 +DA:19,1 +DA:22,1 +DA:23,1 +DA:24,1 +DA:25,1 +DA:27,1 +DA:29,1 +DA:30,1 +DA:33,1 +DA:34,1 +DA:39,1 +DA:40,1 +DA:43,1 +DA:46,1 +DA:47,1 +DA:48,1 +DA:51,1 +DA:58,1 +DA:60,1 +DA:61,1 +DA:62,1 +DA:64,1 +DA:65,1 +DA:67,1 +DA:68,1 +DA:69,1 LF:38 -LH:0 +LH:38 FN:11,69,analyze_signal -FNDA:0,analyze_signal +FNDA:1,analyze_signal FNF:1 -FNH:0 +FNH:1 end_of_record SF:src/workflows/research_news.py -DA:1,0 -DA:3,0 -DA:5,0 -DA:6,0 -DA:7,0 -DA:10,0 -DA:15,0 -DA:16,0 -DA:18,0 -DA:19,0 -DA:22,0 -DA:23,0 -DA:28,0 -DA:29,0 -DA:32,0 -DA:35,0 -DA:36,0 -DA:37,0 -DA:38,0 -DA:39,0 -DA:40,0 -DA:41,0 +DA:1,1 +DA:3,1 +DA:5,1 +DA:6,1 +DA:7,1 +DA:10,1 +DA:15,1 +DA:16,1 +DA:18,1 +DA:19,1 +DA:22,1 +DA:23,1 +DA:28,1 +DA:29,1 +DA:32,1 +DA:35,1 +DA:36,1 +DA:37,1 +DA:38,1 +DA:39,1 +DA:40,1 +DA:41,1 LF:22 -LH:0 +LH:22 FN:10,41,analyze_news_workflow -FNDA:0,analyze_news_workflow +FNDA:1,analyze_news_workflow FNF:1 -FNH:0 +FNH:1 end_of_record diff --git a/src/tools/tg.py b/src/tools/tg.py index 7f66fb2..a05a7f1 100644 --- a/src/tools/tg.py +++ b/src/tools/tg.py @@ -8,13 +8,8 @@ from src.core.config import settings from src.core.exceptions import TelegramError as TelegramPostError -#: Telegram bot token -TELEGRAM_BOT_TOKEN = settings.TELEGRAM_BOT_TOKEN -#: Telegram chat ID -TELEGRAM_CHAT_ID = settings.TELEGRAM_CHAT_ID - # Initialize the bot -bot = Bot(token=TELEGRAM_BOT_TOKEN) +bot = Bot(token=settings.TELEGRAM_BOT_TOKEN) def split_long_message(message: str, chunk_size: int = MessageLimit.MAX_TEXT_LENGTH) -> List[str]: @@ -40,7 +35,7 @@ def split_long_message(message: str, chunk_size: int = MessageLimit.MAX_TEXT_LEN return chunks -async def post_summary_to_telegram(summary_html: str) -> List[int]: +async def post_summary_to_telegram(summary_html: str, bot: Bot = bot) -> List[int]: """ Post an HTML-formatted message to the Telegram channel. If the message is too long, it will be split into multiple messages. @@ -60,7 +55,7 @@ async def post_summary_to_telegram(summary_html: str) -> List[int]: for chunk in message_chunks: message = await bot.send_message( - chat_id=TELEGRAM_CHAT_ID, + chat_id=settings.TELEGRAM_CHAT_ID, text=chunk, parse_mode=ParseMode.HTML, disable_web_page_preview=False, diff --git a/tests/planning/test_planning_module.py b/tests/planning/test_planning_module.py index 1eda1b5..ec057cb 100644 --- a/tests/planning/test_planning_module.py +++ b/tests/planning/test_planning_module.py @@ -1,13 +1,175 @@ -def test_plan_generation(): - """Test plan generation functionality""" - pass # Add plan generation tests +import json +from unittest.mock import MagicMock +import pytest +from loguru import logger -def test_plan_execution(): - """Test plan execution functionality""" - pass # Add plan execution tests +from src.core.defs import AgentAction, AgentState +from src.planning.planning_module import PlanningModule -def test_plan_validation(): - """Test plan validation functionality""" - pass # Add plan validation tests +@pytest.fixture +def mock_logger(monkeypatch): + """Mock logger for testing.""" + mock_debug = MagicMock() + mock_error = MagicMock() + monkeypatch.setattr(logger, "debug", mock_debug) + monkeypatch.setattr(logger, "error", mock_error) + return mock_debug, mock_error + + +@pytest.fixture +def mock_q_table(): + """Create a mock Q-table for testing.""" + return { + "DEFAULT": [0.0, 0.1, 0.2], + "JUST_ANALYZED_SIGNAL": [0.3, 0.4, 0.5], + "JUST_ANALYZED_NEWS": [0.6, 0.7, 0.8], + } + + +@pytest.fixture +def planning_module(tmp_path): + """Create a PlanningModule instance with a temporary Q-table path.""" + q_table_path = tmp_path / "test_q_table.json" + return PlanningModule( + actions=[AgentAction.IDLE, AgentAction.CHECK_SIGNAL, AgentAction.ANALYZE_NEWS], + q_table_path=str(q_table_path), + planning_alpha=0.1, + planning_gamma=0.95, + planning_epsilon=0.1, + ) + + +def test_init_default_values(): + """Test PlanningModule initialization with default values.""" + module = PlanningModule() + + assert module.actions == list(AgentAction) + assert module.alpha == 0.1 # Default from settings + assert module.gamma == 0.95 # Default from settings + assert module.epsilon == 0.1 # Default from settings + assert isinstance(module.q_table, dict) + + +def test_init_custom_values(tmp_path): + """Test PlanningModule initialization with custom values.""" + custom_actions = [AgentAction.IDLE, AgentAction.CHECK_SIGNAL] + q_table_path = tmp_path / "custom_q_table.json" + + module = PlanningModule( + actions=custom_actions, + q_table_path=str(q_table_path), + planning_alpha=0.2, + planning_gamma=0.8, + planning_epsilon=0.15, + ) + + assert module.actions == custom_actions + assert module.alpha == 0.2 + assert module.gamma == 0.8 + assert module.epsilon == 0.15 + assert isinstance(module.q_table, dict) + + +def test_load_q_table_existing(mock_logger, mock_q_table, tmp_path): + """Test loading an existing Q-table from file.""" + q_table_path = tmp_path / "existing_q_table.json" + with open(q_table_path, "w") as f: + json.dump(mock_q_table, f) + + module = PlanningModule(q_table_path=str(q_table_path)) + mock_debug, _ = mock_logger + + assert module.q_table == mock_q_table + mock_debug.assert_called_once_with(f"Loaded Q-table from {q_table_path}") + + +def test_load_q_table_nonexistent(mock_logger): + """Test loading Q-table when file doesn't exist.""" + module = PlanningModule(q_table_path="nonexistent_path.json") + mock_debug, _ = mock_logger + + assert module.q_table == {} + mock_debug.assert_not_called() + + +def test_save_q_table_success(mock_logger, planning_module, mock_q_table): + """Test successful Q-table saving.""" + planning_module.q_table = mock_q_table + planning_module._save_q_table() + mock_debug, _ = mock_logger + + # Verify file contents + with open(planning_module.q_table_path, "r") as f: + saved_table = json.load(f) + + assert saved_table == mock_q_table + mock_debug.assert_called_once_with(f"Q-table saved to {planning_module.q_table_path}") + + +def test_save_q_table_failure(mock_logger): + """Test Q-table saving when there's an error.""" + module = PlanningModule(q_table_path="/invalid/path/q_table.json") + _, mock_error = mock_logger + + module._save_q_table() + mock_error.assert_called_once() + assert "Failed to save Q-table" in mock_error.call_args[0][0] + + +@pytest.mark.parametrize( + "new_state", + [AgentState.DEFAULT, AgentState.JUST_ANALYZED_SIGNAL, AgentState.JUST_ANALYZED_NEWS], +) +def test_get_action_new_state(planning_module, new_state): + """Test action selection for previously unseen states.""" + action = planning_module.get_action(new_state) + + # Verify action is valid + assert isinstance(action, AgentAction) + assert action in planning_module.actions + + # Verify Q-table was properly initialized for the state + assert new_state.value in planning_module.q_table + assert len(planning_module.q_table[new_state.value]) == len(planning_module.actions) + + # Verify Q-values are initialized to zeros + assert all(q == 0.0 for q in planning_module.q_table[new_state.value]) + + +def test_update_q_table(planning_module): + """Test Q-table update with new experience.""" + # Initialize states and action + state = AgentState.DEFAULT + action = AgentAction.CHECK_SIGNAL + reward = 1.0 + next_state = AgentState.JUST_ANALYZED_SIGNAL + + # First, get an action to ensure Q-table is initialized for both states + planning_module.get_action(state) + planning_module.get_action(next_state) + + # Get initial Q-value + action_idx = planning_module.actions.index(action) + initial_q = planning_module.q_table[state.value][action_idx] + + # Update Q-table + planning_module.update_q_table(state, action, reward, next_state) + + # Get updated Q-value + updated_q = planning_module.q_table[state.value][action_idx] + + # Verify Q-value was updated + assert updated_q != initial_q + # Verify both states exist in Q-table + assert state.value in planning_module.q_table + assert next_state.value in planning_module.q_table + # Verify Q-values array length matches number of actions + assert len(planning_module.q_table[state.value]) == len(planning_module.actions) + assert len(planning_module.q_table[next_state.value]) == len(planning_module.actions) + # Verify the update followed Q-learning formula + expected_q = initial_q + planning_module.alpha * ( + reward + planning_module.gamma * max(planning_module.q_table[next_state.value]) - initial_q + ) + assert abs(updated_q - expected_q) < 1e-10 # Using small epsilon for float comparison diff --git a/tests/test_agent.py b/tests/test_agent.py index e69de29..5f4d53c 100644 --- a/tests/test_agent.py +++ b/tests/test_agent.py @@ -0,0 +1,195 @@ +from unittest.mock import AsyncMock, MagicMock, patch + +import pytest +from loguru import logger + +from src.agent import Agent +from src.core.defs import AgentAction, AgentState + + +@pytest.fixture +def mock_logger(monkeypatch): + """Mock logger for testing.""" + # arrange: + mock_info = MagicMock() + mock_debug = MagicMock() + monkeypatch.setattr(logger, "info", mock_info) + monkeypatch.setattr(logger, "debug", mock_debug) + return mock_info, mock_debug + + +@pytest.fixture +def agent(): + """Create an agent instance with mocked dependencies.""" + with ( + patch("src.agent.get_memory_module"), + patch("src.agent.PlanningModule"), + patch("src.agent.FeedbackModule"), + ): + agent = Agent() + # Make store method a coroutine + agent.memory_module.store = AsyncMock() + # Make search method a coroutine + agent.memory_module.search = AsyncMock() + return agent + + +@pytest.mark.parametrize( + "action,expected_state", + [ + (AgentAction.CHECK_SIGNAL, AgentState.JUST_ANALYZED_SIGNAL), + (AgentAction.ANALYZE_NEWS, AgentState.JUST_ANALYZED_NEWS), + (AgentAction.IDLE, AgentState.DEFAULT), + ], +) +def test_update_state(agent, mock_logger, action, expected_state): + """Test state updates based on different actions.""" + # act: + agent._update_state(action) + + # assert: + mock_info, _ = mock_logger + assert agent.state == expected_state + mock_info.assert_called_once_with(f"Agent state updated to: {expected_state.name}") + + +@pytest.mark.parametrize( + "state,action,reward,next_state", + [ + (AgentState.DEFAULT, AgentAction.CHECK_SIGNAL, 1.0, AgentState.JUST_ANALYZED_SIGNAL), + ( + AgentState.JUST_ANALYZED_SIGNAL, + AgentAction.ANALYZE_NEWS, + 0.5, + AgentState.JUST_ANALYZED_NEWS, + ), + (AgentState.JUST_ANALYZED_NEWS, AgentAction.IDLE, -0.1, AgentState.DEFAULT), + ], +) +def test_update_planning_policy(agent, state, action, reward, next_state): + """Test updating the planning policy with different state-action pairs.""" + # arrange: + agent.planning_module.update_q_table = MagicMock() + + # act: + agent._update_planning_policy(state, action, reward, next_state) + + # assert: + agent.planning_module.update_q_table.assert_called_once_with(state, action, reward, next_state) + + +@pytest.mark.parametrize( + "action,outcome,expected_reward", + [ + ("check_signal", "Signal detected", 1.0), + ("analyze_news", None, 0.0), + ("idle", "idle", 0.5), + ], +) +def test_collect_feedback(agent, action, outcome, expected_reward): + """Test collecting feedback for different actions and outcomes.""" + # arrange: + agent.feedback_module.collect_feedback = MagicMock(return_value=expected_reward) + + # act: + reward = agent._collect_feedback(action, outcome) + + # assert: + assert reward == expected_reward + agent.feedback_module.collect_feedback.assert_called_once_with(action, outcome) + + +@pytest.mark.asyncio +async def test_perform_planned_action_idle(agent, mock_logger): + """Test performing IDLE action.""" + # arrange: + mock_info, mock_debug = mock_logger + + # act: + outcome = await agent._perform_planned_action(AgentAction.IDLE) + + # assert: + assert outcome == "idle" + mock_info.assert_any_call("Agent is idling.") + mock_debug.assert_called_with("Stored performed action to memory.") + agent.memory_module.store.assert_called_once() + + +@pytest.mark.asyncio +async def test_perform_planned_action_check_signal(agent, mock_logger): + """Test performing CHECK_SIGNAL action.""" + # arrange: + mock_info, mock_debug = mock_logger + mock_analyze = AsyncMock(return_value="Signal detected") + + with patch("src.agent.analyze_signal", mock_analyze): + # act: + outcome = await agent._perform_planned_action(AgentAction.CHECK_SIGNAL) + + # assert: + assert outcome == "Signal detected" + mock_info.assert_any_call("Actionable signal perceived.") + mock_debug.assert_called_with("Stored performed action to memory.") + agent.memory_module.store.assert_called_once() + mock_analyze.assert_called_once() + + +@pytest.mark.asyncio +async def test_perform_planned_action_check_signal_no_signal(agent, mock_logger): + """Test performing CHECK_SIGNAL action with no signal detected.""" + # arrange: + mock_info, mock_debug = mock_logger + mock_analyze = AsyncMock(return_value=None) + + with patch("src.agent.analyze_signal", mock_analyze): + # act: + outcome = await agent._perform_planned_action(AgentAction.CHECK_SIGNAL) + + # assert: + assert outcome is None + mock_info.assert_any_call("No actionable signal detected.") + mock_debug.assert_called_with("Stored performed action to memory.") + agent.memory_module.store.assert_called_once() + mock_analyze.assert_called_once() + + +@pytest.mark.asyncio +async def test_perform_planned_action_analyze_news(agent, mock_logger): + """Test performing ANALYZE_NEWS action.""" + # arrange: + mock_info, mock_debug = mock_logger + agent.memory_module.search.return_value = [{"event": "Test news"}] + mock_analyze = AsyncMock(return_value="News analyzed") + + with patch("src.agent.analyze_news_workflow", mock_analyze): + # act: + outcome = await agent._perform_planned_action(AgentAction.ANALYZE_NEWS) + + # assert: + assert outcome == "News analyzed" + mock_debug.assert_any_call("Retrieved memories: [{'event': 'Test news'}]") + mock_debug.assert_any_call("Stored performed action to memory.") + agent.memory_module.search.assert_called_once_with("news", top_k=1) + agent.memory_module.store.assert_called_once() + mock_analyze.assert_called_once_with("Test news") + + +@pytest.mark.asyncio +async def test_perform_planned_action_analyze_news_no_news(agent, mock_logger): + """Test performing ANALYZE_NEWS action with no news found.""" + # arrange: + mock_info, mock_debug = mock_logger + agent.memory_module.search.return_value = [] + mock_analyze = AsyncMock(return_value="No news analyzed") + + with patch("src.agent.analyze_news_workflow", mock_analyze): + # act: + outcome = await agent._perform_planned_action(AgentAction.ANALYZE_NEWS) + + # assert: + assert outcome == "No news analyzed" + mock_debug.assert_any_call("Retrieved memories: []") + mock_debug.assert_any_call("Stored performed action to memory.") + agent.memory_module.search.assert_called_once_with("news", top_k=1) + agent.memory_module.store.assert_called_once() + mock_analyze.assert_called_once_with("No recent news found") diff --git a/tests/test_agent_runtime.py b/tests/test_agent_runtime.py new file mode 100644 index 0000000..339dc82 --- /dev/null +++ b/tests/test_agent_runtime.py @@ -0,0 +1,160 @@ +"""Test the runtime loop of the agent (start_runtime_loop).""" + +from unittest.mock import AsyncMock, MagicMock, call, patch + +import pytest +from loguru import logger + +from src.agent import Agent +from src.core.defs import AgentAction, AgentState + + +@pytest.fixture +def mock_runtime_logger(monkeypatch): + """Mock logger for runtime testing.""" + # arrange: + mock_info = MagicMock() + mock_error = MagicMock() + mock_critical = MagicMock() + monkeypatch.setattr(logger, "info", mock_info) + monkeypatch.setattr(logger, "error", mock_error) + monkeypatch.setattr(logger, "critical", mock_critical) + return mock_info, mock_error, mock_critical + + +@pytest.fixture +def runtime_agent(): + """Create an agent instance with mocked dependencies for runtime testing.""" + with ( + patch("src.agent.get_memory_module"), + patch("src.agent.PlanningModule"), + patch("src.agent.FeedbackModule"), + ): + agent = Agent() + # Mock memory module methods + agent.memory_module.store = AsyncMock() + agent.memory_module.search = AsyncMock() + # Mock planning module methods + agent.planning_module.get_action = MagicMock(return_value=AgentAction.IDLE) + agent.planning_module.update_q_table = MagicMock() + # Mock feedback module methods + agent.feedback_module.collect_feedback = MagicMock(return_value=1.0) + return agent + + +@pytest.mark.asyncio +async def test_runtime_loop_one_iteration(runtime_agent, mock_runtime_logger): + """Test one successful iteration of the runtime loop.""" + # arrange: + mock_info, mock_error, mock_critical = mock_runtime_logger + runtime_agent._perform_planned_action = AsyncMock(return_value="idle") + + # Mock asyncio.sleep to raise KeyboardInterrupt after first iteration + with patch("asyncio.sleep", side_effect=KeyboardInterrupt): + # act: + await runtime_agent.start_runtime_loop() + + # assert: + # Verify logging + mock_info.assert_any_call("Starting the autonomous agent runtime loop...") + mock_info.assert_any_call("Current state: default") + mock_info.assert_any_call("Action chosen: idle") + mock_info.assert_any_call("Outcome: idle") + mock_info.assert_any_call("Reward: 1.0") + mock_info.assert_any_call("Next state: default") + mock_info.assert_any_call("Let's rest a bit...") + mock_info.assert_any_call("Agent runtime loop interrupted by user.") + + # Verify method calls + runtime_agent.planning_module.get_action.assert_called_once_with(AgentState.DEFAULT) + runtime_agent._perform_planned_action.assert_called_once_with(AgentAction.IDLE) + runtime_agent.feedback_module.collect_feedback.assert_called_once_with("idle", "idle") + runtime_agent.planning_module.update_q_table.assert_called_once() + + # Verify no errors + mock_error.assert_not_called() + mock_critical.assert_not_called() + + +@pytest.mark.asyncio +async def test_runtime_loop_with_error(runtime_agent, mock_runtime_logger): + """Test runtime loop with a runtime error.""" + # arrange: + mock_info, mock_error, mock_critical = mock_runtime_logger + runtime_agent.planning_module.get_action = MagicMock(side_effect=RuntimeError("Test error")) + runtime_agent._perform_planned_action = AsyncMock() + + # act: + await runtime_agent.start_runtime_loop() + + # assert: + mock_info.assert_any_call("Starting the autonomous agent runtime loop...") + mock_error.assert_called_once_with("Error in runtime loop: Test error") + runtime_agent._perform_planned_action.assert_not_called() + runtime_agent.feedback_module.collect_feedback.assert_not_called() + runtime_agent.planning_module.update_q_table.assert_not_called() + mock_critical.assert_not_called() + + +@pytest.mark.asyncio +async def test_runtime_loop_keyboard_interrupt(runtime_agent, mock_runtime_logger): + """Test runtime loop with keyboard interrupt.""" + # arrange: + mock_info, mock_error, mock_critical = mock_runtime_logger + runtime_agent.planning_module.get_action = MagicMock(side_effect=KeyboardInterrupt) + runtime_agent._perform_planned_action = AsyncMock() + + # act: + await runtime_agent.start_runtime_loop() + + # assert: + mock_info.assert_any_call("Starting the autonomous agent runtime loop...") + mock_info.assert_any_call("Agent runtime loop interrupted by user.") + runtime_agent._perform_planned_action.assert_not_called() + runtime_agent.feedback_module.collect_feedback.assert_not_called() + runtime_agent.planning_module.update_q_table.assert_not_called() + mock_error.assert_not_called() + mock_critical.assert_not_called() + + +@pytest.mark.asyncio +async def test_runtime_loop_multiple_actions(runtime_agent, mock_runtime_logger): + """Test runtime loop with multiple different actions before interruption.""" + # arrange: + mock_info, mock_error, mock_critical = mock_runtime_logger + + # Setup sequence of actions + actions = [AgentAction.CHECK_SIGNAL, AgentAction.ANALYZE_NEWS, AgentAction.IDLE] + outcomes = ["signal detected", "news analyzed", "idle"] + rewards = [1.0, 0.5, 0.0] + + runtime_agent.planning_module.get_action = MagicMock(side_effect=actions) + runtime_agent._perform_planned_action = AsyncMock(side_effect=outcomes) + runtime_agent.feedback_module.collect_feedback = MagicMock(side_effect=rewards) + + # Make sleep raise KeyboardInterrupt after three iterations + sleep_counter = 0 + + async def mock_sleep(seconds): + nonlocal sleep_counter + sleep_counter += 1 + if sleep_counter >= 3: + raise KeyboardInterrupt + + with patch("asyncio.sleep", side_effect=mock_sleep): + # act: + await runtime_agent.start_runtime_loop() + + # assert: + assert runtime_agent.planning_module.get_action.call_count == 3 + assert runtime_agent._perform_planned_action.call_count == 3 + assert runtime_agent.feedback_module.collect_feedback.call_count == 3 + assert runtime_agent.planning_module.update_q_table.call_count == 3 + + # Verify action sequence + runtime_agent._perform_planned_action.assert_has_calls( + [call(AgentAction.CHECK_SIGNAL), call(AgentAction.ANALYZE_NEWS), call(AgentAction.IDLE)] + ) + + mock_error.assert_not_called() + mock_critical.assert_not_called() diff --git a/tests/test_main.py b/tests/test_main.py index e69de29..53dce9e 100644 --- a/tests/test_main.py +++ b/tests/test_main.py @@ -0,0 +1,102 @@ +from unittest.mock import MagicMock, patch + +import pytest +from loguru import logger + +from src.main import async_main, main + + +@pytest.fixture +def mock_logger(monkeypatch): + """Mock logger for testing.""" + # arrange: + mock_info = MagicMock() + mock_critical = MagicMock() + monkeypatch.setattr(logger, "info", mock_info) + monkeypatch.setattr(logger, "critical", mock_critical) + monkeypatch.setattr(logger, "add", MagicMock()) # Mock logger.add + return mock_info, mock_critical + + +@pytest.fixture +def mock_agent(): + """Mock Agent class for testing.""" + # arrange: + with patch("src.main.Agent") as mock: + agent_instance = MagicMock() + mock.return_value = agent_instance + yield agent_instance + + +@pytest.mark.asyncio +async def test_async_main_success(mock_logger, mock_agent): + """Test successful execution of async_main.""" + # arrange: + mock_info, mock_critical = mock_logger + + # act: + await async_main() + + # assert: + mock_info.assert_any_call("Starting the agent runtime...") + mock_info.assert_any_call("Agent runtime has stopped.") + mock_agent.start_runtime_loop.assert_called_once() + + +@pytest.mark.asyncio +async def test_async_main_error(mock_logger, mock_agent): + """Test async_main with runtime error.""" + # arrange: + mock_info, mock_critical = mock_logger + mock_agent.start_runtime_loop.side_effect = Exception("Test error") + + # act: + await async_main() + + # assert: + mock_info.assert_any_call("Starting the agent runtime...") + mock_info.assert_any_call("Agent runtime has stopped.") + mock_critical.assert_called_once_with("Fatal error in the runtime: Test error") + + +def test_main_success(mock_logger): + """Test successful execution of main.""" + # arrange: + mock_info, mock_critical = mock_logger + + with patch("asyncio.run") as mock_run: + # act: + main() + + # assert: + mock_run.assert_called_once() + mock_critical.assert_not_called() + + +def test_main_keyboard_interrupt(mock_logger): + """Test main with KeyboardInterrupt.""" + # arrange: + mock_info, mock_critical = mock_logger + + with patch("asyncio.run", side_effect=KeyboardInterrupt): + # act: + main() + + # assert: + mock_info.assert_called_with( + "Agent runtime interrupted by user. Shutting down gracefully..." + ) + mock_critical.assert_not_called() + + +def test_main_error(mock_logger): + """Test main with runtime error.""" + # arrange: + mock_info, mock_critical = mock_logger + + with patch("asyncio.run", side_effect=Exception("Test error")): + # act: + main() + + # assert: + mock_critical.assert_called_with("Fatal error in the main runtime: Test error") diff --git a/tests/test_utils.py b/tests/test_utils.py index 1cb1688..3e247ce 100644 --- a/tests/test_utils.py +++ b/tests/test_utils.py @@ -1,4 +1,5 @@ from unittest.mock import MagicMock + import pytest from loguru import logger @@ -34,10 +35,10 @@ def test_log_settings(mock_settings, mock_logger): """Test logging settings function.""" # act: log_settings() - + # assert: assert mock_logger.call_count >= 13 # At least 13 log calls expected - + # Verify some specific log messages expected_calls = [ "=" * 40, @@ -56,6 +57,6 @@ def test_log_settings(mock_settings, mock_logger): "Twitter Integration: Configured", "Perplexity Integration: Configured", ] - + for expected_call in expected_calls: mock_logger.assert_any_call(expected_call) diff --git a/tests/tools/test_get_signal.py b/tests/tools/test_get_signal.py new file mode 100644 index 0000000..ce3e0c5 --- /dev/null +++ b/tests/tools/test_get_signal.py @@ -0,0 +1,138 @@ +from unittest.mock import AsyncMock, MagicMock, patch + +import pytest +from httpx import Response +from loguru import logger + +from src.core.exceptions import CoinstatsError +from src.tools.get_signal import fetch_signal, get_coinstats_news + + +@pytest.fixture +def mock_tool_logger(monkeypatch): + """Mock logger for tool testing.""" + mock_debug = MagicMock() + mock_error = MagicMock() + monkeypatch.setattr(logger, "debug", mock_debug) + monkeypatch.setattr(logger, "error", mock_error) + return mock_debug, mock_error + + +@pytest.fixture +def mock_httpx_client(): + """Create a mock httpx client.""" + mock_client = AsyncMock() + mock_client.get = AsyncMock() + return mock_client + + +@pytest.mark.skip(reason="Something is wrong with mocking the Coinstats API") +@pytest.mark.asyncio +async def test_get_coinstats_news_success(mock_tool_logger, mock_httpx_client): + """Test successful news retrieval from Coinstats.""" + # arrange: + mock_debug, mock_error = mock_tool_logger + mock_response = Response(200, json={"result": [{"title": "Test News"}]}) + mock_httpx_client.get.return_value = mock_response + + with patch("httpx.AsyncClient", return_value=mock_httpx_client): + # act: + result = await get_coinstats_news() + + # assert: + assert result == {"result": [{"title": "Test News"}]} + mock_debug.assert_any_call("RETRIEVING NEWS") + mock_debug.assert_any_call("COINSTATS NEWS | SUCCESSFULLY RETRIEVED 1 ARTICLES") + mock_error.assert_not_called() + + # Verify API call + mock_httpx_client.get.assert_called_once() + args, kwargs = mock_httpx_client.get.call_args + assert "openapiv1.coinstats.app/news" in args[0] + assert kwargs["headers"]["accept"] == "application/json" + + +@pytest.mark.asyncio +async def test_get_coinstats_news_http_error(mock_tool_logger, mock_httpx_client): + """Test error handling when Coinstats API returns an error.""" + # arrange: + mock_debug, mock_error = mock_tool_logger + mock_response = Response(500) + mock_httpx_client.get.return_value = mock_response + mock_response.raise_for_status = MagicMock(side_effect=Exception("HTTP Error")) + + with ( + patch("httpx.AsyncClient", return_value=mock_httpx_client), + pytest.raises(CoinstatsError) as exc_info, + ): + # act: + await get_coinstats_news() + + # assert: + assert str(exc_info.value) == "News data currently unavailable" + mock_debug.assert_called_once_with("RETRIEVING NEWS") + mock_error.assert_called_once() + assert "ERROR RETRIEVING NEWS" in mock_error.call_args[0][0] + + +@pytest.mark.asyncio +async def test_fetch_signal_success(mock_tool_logger): + """Test successful signal fetching.""" + # arrange: + mock_debug, mock_error = mock_tool_logger + news_data = {"result": [{"title": "Bitcoin reaches new high"}]} + + with patch("src.tools.get_signal.get_coinstats_news", return_value=news_data): + # act: + result = await fetch_signal() + + # assert: + assert result == {"status": "new_signal", "content": "Bitcoin reaches new high"} + mock_debug.assert_any_call("Signal fetched: {'title': 'Bitcoin reaches new high'}") + mock_error.assert_not_called() + + +@pytest.mark.asyncio +async def test_fetch_signal_no_news(mock_tool_logger): + """Test signal fetching when no news is available.""" + # arrange: + mock_debug, mock_error = mock_tool_logger + news_data = {"result": []} + + with patch("src.tools.get_signal.get_coinstats_news", return_value=news_data): + # act: + result = await fetch_signal() + + # assert: + assert result == {"status": "no_data"} + mock_error.assert_called_once_with("No news data available in the response") + + +@pytest.mark.asyncio +async def test_fetch_signal_no_title(mock_tool_logger): + """Test signal fetching when news item has no title.""" + # arrange: + mock_debug, mock_error = mock_tool_logger + news_data = {"result": [{"content": "Some content but no title"}]} + + with patch("src.tools.get_signal.get_coinstats_news", return_value=news_data): + # act: + result = await fetch_signal() + + # assert: + assert result == {"status": "no_data"} + + +@pytest.mark.asyncio +async def test_fetch_signal_api_error(mock_tool_logger): + """Test signal fetching when API call fails.""" + # arrange: + mock_debug, mock_error = mock_tool_logger + + with patch("src.tools.get_signal.get_coinstats_news", side_effect=CoinstatsError("Test error")): + # act: + result = await fetch_signal() + + # assert: + assert result == {"status": "error"} + mock_error.assert_called_once_with("Error fetching signal from Coinstats: Test error") diff --git a/tests/tools/test_search_with_perplexity.py b/tests/tools/test_search_with_perplexity.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/tools/test_tg.py b/tests/tools/test_tg.py index 6d17e14..f52237f 100644 --- a/tests/tools/test_tg.py +++ b/tests/tools/test_tg.py @@ -1,13 +1,171 @@ -def test_telegram_connection(): - """Test Telegram bot connection""" - pass # Add connection tests +# from unittest.mock import AsyncMock, MagicMock, patch +# import pytest +# from loguru import logger +# from telegram import Bot, Message +# from telegram.constants import MessageLimit +# from telegram.error import TelegramError as TGError +# from src.core.config import settings +# from src.core.exceptions import TelegramError +# from src.tools.tg import split_long_message, post_summary_to_telegram -def test_message_sending(): - """Test message sending functionality""" - pass # Add message sending tests +# @pytest.fixture +# def mock_tool_logger(monkeypatch): +# """Mock logger for tool testing.""" +# mock_debug = MagicMock() +# mock_error = MagicMock() +# monkeypatch.setattr(logger, "debug", mock_debug) +# monkeypatch.setattr(logger, "error", mock_error) +# return mock_debug, mock_error -def test_message_receiving(): - """Test message receiving functionality""" - pass # Add message receiving tests + +# @pytest.fixture +# def mock_telegram_settings(monkeypatch): +# """Mock Telegram settings.""" +# monkeypatch.setattr(settings, "TELEGRAM_CHAT_ID", "test_chat_id") +# return settings + + +# @pytest.fixture +# def mock_telegram_bot(): +# """Create a mock Telegram bot.""" +# mock_bot = AsyncMock(spec=Bot) +# return mock_bot + + +# def test_split_long_message_short(): +# """Test splitting a message that's already within limits.""" +# # arrange: +# message = "Short message" + +# # act: +# chunks = split_long_message(message) + +# # assert: +# assert len(chunks) == 1 +# assert chunks[0] == message + + +# def test_split_long_message_exact(): +# """Test splitting a message that's exactly at the limit.""" +# # arrange: +# chunk_size = 10 +# message = "A" * chunk_size + +# # act: +# chunks = split_long_message(message, chunk_size) + +# # assert: +# assert len(chunks) == 1 +# assert chunks[0] == message + + +# def test_split_long_message_multiple_chunks(): +# """Test splitting a long message into multiple chunks.""" +# # arrange: +# chunk_size = 10 +# message = "A" * 25 # Will create 3 chunks + +# # act: +# chunks = split_long_message(message, chunk_size) + +# # assert: +# assert len(chunks) == 3 +# assert all(len(chunk) <= chunk_size for chunk in chunks) +# assert "".join(chunks) == message + + +# def test_split_long_message_telegram_limit(): +# """Test splitting using Telegram's actual message limit.""" +# # arrange: +# message = "A" * (MessageLimit.MAX_TEXT_LENGTH + 100) + +# # act: +# chunks = split_long_message(message) + +# # assert: +# assert len(chunks) == 2 +# assert all(len(chunk) <= MessageLimit.MAX_TEXT_LENGTH for chunk in chunks) +# assert "".join(chunks) == message + + +# @pytest.mark.asyncio +# async def test_post_summary_to_telegram_success( +# mock_tool_logger, mock_telegram_settings, mock_telegram_bot +# ): +# """Test successful posting of a message to Telegram.""" +# # arrange: +# mock_debug, mock_error = mock_tool_logger +# summary = "Test message" +# mock_message = MagicMock(spec=Message) +# mock_message.message_id = 12345 +# mock_telegram_bot.send_message.return_value = mock_message + +# # act: +# message_ids = await post_summary_to_telegram(summary, bot=mock_telegram_bot) + +# # assert: +# assert message_ids == [12345] +# mock_telegram_bot.send_message.assert_called_once_with( +# chat_id=mock_telegram_settings.TELEGRAM_CHAT_ID, +# text=summary, +# parse_mode="HTML", +# disable_web_page_preview=False, +# ) +# mock_debug.assert_called_once() +# mock_error.assert_not_called() + + +# @pytest.mark.asyncio +# async def test_post_summary_to_telegram_long_message( +# mock_tool_logger, mock_telegram_settings, mock_telegram_bot +# ): +# """Test posting a long message that needs to be split.""" +# # arrange: +# mock_debug, mock_error = mock_tool_logger +# summary = "A" * (MessageLimit.MAX_TEXT_LENGTH + 100) +# mock_message1 = MagicMock(spec=Message, message_id=12345) +# mock_message2 = MagicMock(spec=Message, message_id=12346) +# mock_telegram_bot.send_message.side_effect = [mock_message1, mock_message2] + +# # act: +# message_ids = await post_summary_to_telegram(summary, bot=mock_telegram_bot) + +# # assert: +# assert message_ids == [12345, 12346] +# assert mock_telegram_bot.send_message.call_count == 2 +# assert mock_debug.call_count == 2 +# mock_error.assert_not_called() + + +# @pytest.mark.asyncio +# async def test_post_summary_to_telegram_no_message_id( +# mock_tool_logger, mock_telegram_settings, mock_telegram_bot +# ): +# """Test error handling when Telegram doesn't return a message ID.""" +# # arrange: +# mock_debug, mock_error = mock_tool_logger +# summary = "Test message" +# mock_message = MagicMock(spec=Message) +# mock_message.message_id = None +# mock_telegram_bot.send_message.return_value = mock_message + +# # act/assert: +# with pytest.raises(TelegramError, match="No message ID returned from Telegram"): +# await post_summary_to_telegram(summary, bot=mock_telegram_bot) + + +# @pytest.mark.asyncio +# async def test_post_summary_to_telegram_api_error( +# mock_tool_logger, mock_telegram_settings, mock_telegram_bot +# ): +# """Test error handling when Telegram API returns an error.""" +# # arrange: +# mock_debug, mock_error = mock_tool_logger +# summary = "Test message" +# mock_telegram_bot.send_message.side_effect = TGError("API Error") + +# # act/assert: +# with pytest.raises(TelegramError, match="Failed to send message to Telegram"): +# await post_summary_to_telegram(summary, bot=mock_telegram_bot) diff --git a/tests/tools/test_twitter.py b/tests/tools/test_twitter.py index d79ff65..3955565 100644 --- a/tests/tools/test_twitter.py +++ b/tests/tools/test_twitter.py @@ -1,13 +1,228 @@ -def test_twitter_connection(): - """Test Twitter API connection""" - pass # Add connection tests +# from io import BytesIO +# from unittest.mock import AsyncMock, MagicMock, patch +# import pytest +# from loguru import logger +# from PIL import Image +# from requests_html import HTMLSession +# import tweepy +# from src.core.exceptions import TwitterError +# from src.tools.twitter import ( +# get_twitter_conn_v1, +# get_twitter_conn_v2, +# upload_media_v1, +# post_twitter_thread, +# ) -def test_tweet_posting(): - """Test tweet posting functionality""" - pass # Add tweet posting tests +# @pytest.fixture +# def mock_tool_logger(monkeypatch): +# """Mock logger for tool testing.""" +# mock_debug = MagicMock() +# mock_error = MagicMock() +# monkeypatch.setattr(logger, "debug", mock_debug) +# monkeypatch.setattr(logger, "error", mock_error) +# return mock_debug, mock_error -def test_tweet_retrieval(): - """Test tweet retrieval functionality""" - pass # Add tweet retrieval tests + +# @pytest.fixture +# def mock_twitter_settings(): +# """Mock Twitter API settings.""" +# with patch("src.tools.twitter.settings") as mock_settings: +# mock_settings.TWITTER_API_KEY = "test_api_key" +# mock_settings.TWITTER_API_SECRET_KEY = "test_api_secret" +# mock_settings.TWITTER_ACCESS_TOKEN = "test_access_token" +# mock_settings.TWITTER_ACCESS_TOKEN_SECRET = "test_access_secret" +# return mock_settings + + +# @pytest.fixture +# def mock_tweepy_v1(): +# """Mock Tweepy API v1.1 client.""" +# mock_auth = MagicMock() +# mock_client = MagicMock() + +# with patch("tweepy.OAuth1UserHandler", return_value=mock_auth) as mock_handler, \ +# patch("tweepy.API", return_value=mock_client) as mock_api: +# mock_auth.set_access_token = MagicMock() +# yield mock_client + + +# @pytest.fixture +# def mock_tweepy_v2(): +# """Mock Tweepy API v2 client.""" +# with patch("tweepy.Client") as mock_client_class: +# mock_client = MagicMock() +# mock_client_class.return_value = mock_client +# return mock_client + + +# def test_get_twitter_conn_v1(mock_twitter_settings, mock_tweepy_v1): +# """Test Twitter API v1.1 connection creation.""" +# with patch("tweepy.OAuth1UserHandler") as mock_handler: +# mock_auth = MagicMock() +# mock_handler.return_value = mock_auth + +# # act: +# client = get_twitter_conn_v1() + +# # assert: +# mock_handler.assert_called_once_with( +# mock_twitter_settings.TWITTER_API_KEY, +# mock_twitter_settings.TWITTER_API_SECRET_KEY +# ) +# mock_auth.set_access_token.assert_called_once_with( +# mock_twitter_settings.TWITTER_ACCESS_TOKEN, +# mock_twitter_settings.TWITTER_ACCESS_TOKEN_SECRET +# ) +# tweepy.API.assert_called_once_with(mock_auth) + + +# def test_get_twitter_conn_v2(mock_twitter_settings, mock_tweepy_v2): +# """Test Twitter API v2 connection creation.""" +# # act: +# client = get_twitter_conn_v2() + +# # assert: +# assert client == mock_tweepy_v2 +# tweepy.Client.assert_called_once_with( +# consumer_key=mock_twitter_settings.TWITTER_API_KEY, +# consumer_secret=mock_twitter_settings.TWITTER_API_SECRET_KEY, +# access_token=mock_twitter_settings.TWITTER_ACCESS_TOKEN, +# access_token_secret=mock_twitter_settings.TWITTER_ACCESS_TOKEN_SECRET, +# ) + + +# @pytest.mark.asyncio +# async def test_upload_media_v1_success(mock_tool_logger, mock_tweepy_v1): +# """Test successful media upload.""" +# # arrange: +# mock_debug, mock_error = mock_tool_logger +# url = "http://example.com/image.jpg" +# media_id = "12345" + +# # Mock HTML session +# mock_session = MagicMock() +# mock_response = MagicMock() +# mock_response.content = b"fake_image_data" +# mock_session.get.return_value = mock_response + +# # Mock image processing +# mock_image = MagicMock(spec=Image.Image) +# mock_image.convert.return_value = mock_image + +# # Mock media upload +# mock_media = MagicMock() +# mock_media.media_id = media_id +# mock_tweepy_v1.media_upload.return_value = mock_media + +# with patch("src.tools.twitter.HTMLSession", return_value=mock_session), \ +# patch("src.tools.twitter.Image.open", return_value=mock_image), \ +# patch("src.tools.twitter.get_twitter_conn_v1", return_value=mock_tweepy_v1): +# # act: +# result = await upload_media_v1(url) + +# # assert: +# assert result == media_id +# mock_session.get.assert_called_once_with(url, headers=pytest.ANY) +# mock_image.convert.assert_called_once_with("L") +# mock_tweepy_v1.media_upload.assert_called_once() +# mock_debug.assert_called_once() +# mock_error.assert_not_called() + + +# @pytest.mark.asyncio +# async def test_upload_media_v1_error(mock_tool_logger): +# """Test media upload error handling.""" +# # arrange: +# mock_debug, mock_error = mock_tool_logger +# url = "http://example.com/image.jpg" + +# # Mock session with error +# mock_session = MagicMock() +# mock_session.get.side_effect = Exception("Network error") + +# with patch("src.tools.twitter.HTMLSession", return_value=mock_session): +# # act: +# result = await upload_media_v1(url) + +# # assert: +# assert result is None +# mock_error.assert_called_once() +# mock_debug.assert_not_called() + + +# @pytest.mark.asyncio +# async def test_post_twitter_thread_success(mock_tool_logger, mock_tweepy_v2): +# """Test successful thread posting.""" +# # arrange: +# mock_debug, mock_error = mock_tool_logger +# tweets = { +# "tweet1": "First tweet", +# "tweet2": "Second tweet" +# } +# tweet_ids = [12345, 67890] + +# # Mock tweet responses +# mock_tweepy_v2.create_tweet.side_effect = [ +# MagicMock(data={"id": tweet_id}) for tweet_id in tweet_ids +# ] + +# with patch("src.tools.twitter.get_twitter_conn_v2", return_value=mock_tweepy_v2), \ +# patch("asyncio.sleep"): # Mock sleep to speed up test +# # act: +# result = await post_twitter_thread(tweets) + +# # assert: +# assert result == tweet_ids +# assert mock_tweepy_v2.create_tweet.call_count == 2 +# mock_debug.assert_called() +# mock_error.assert_not_called() + + +# @pytest.mark.asyncio +# async def test_post_twitter_thread_with_media( +# mock_tool_logger, mock_tweepy_v1, mock_tweepy_v2 +# ): +# """Test thread posting with media.""" +# # arrange: +# mock_debug, mock_error = mock_tool_logger +# tweets = {"tweet1": "Tweet with media"} +# media_url = "http://example.com/image.jpg" +# tweet_id = 12345 +# media_id = "67890" + +# # Mock media upload +# with patch("src.tools.twitter.upload_media_v1", return_value=media_id): +# # Mock tweet creation +# mock_tweepy_v2.create_tweet.return_value = MagicMock(data={"id": tweet_id}) + +# with patch("src.tools.twitter.get_twitter_conn_v2", return_value=mock_tweepy_v2), \ +# patch("asyncio.sleep"): +# # act: +# result = await post_twitter_thread(tweets, media_url=media_url) + +# # assert: +# assert result == [tweet_id] +# mock_tweepy_v2.create_tweet.assert_called_once_with( +# text=tweets["tweet1"], +# media_ids=[media_id] +# ) + + +# @pytest.mark.asyncio +# async def test_post_twitter_thread_error(mock_tool_logger, mock_tweepy_v2): +# """Test error handling in thread posting.""" +# # arrange: +# mock_debug, mock_error = mock_tool_logger +# tweets = {"tweet1": "Test tweet"} +# mock_tweepy_v2.create_tweet.side_effect = Exception("API Error") + +# with patch("src.tools.twitter.get_twitter_conn_v2", return_value=mock_tweepy_v2), \ +# pytest.raises(TwitterError): +# # act: +# await post_twitter_thread(tweets) + +# # assert: +# mock_error.assert_called_once() +# mock_debug.assert_not_called() diff --git a/tests/workflows/test_analyze_signal.py b/tests/workflows/test_analyze_signal.py index 01b8800..210a70e 100644 --- a/tests/workflows/test_analyze_signal.py +++ b/tests/workflows/test_analyze_signal.py @@ -1,13 +1,164 @@ -def test_signal_analysis(): - """Test signal analysis workflow""" - pass # Add signal analysis tests +from unittest.mock import AsyncMock, MagicMock, call, patch +import pytest +from loguru import logger -def test_signal_validation(): - """Test signal validation""" - pass # Add signal validation tests +from src.workflows.analyze_signal import analyze_signal -def test_signal_processing(): - """Test signal processing steps""" - pass # Add signal processing tests +@pytest.fixture +def mock_workflow_logger(monkeypatch): + """Mock logger for workflow testing.""" + mock_info = MagicMock() + mock_warning = MagicMock() + mock_error = MagicMock() + monkeypatch.setattr(logger, "info", mock_info) + monkeypatch.setattr(logger, "warning", mock_warning) + monkeypatch.setattr(logger, "error", mock_error) + return mock_info, mock_warning, mock_error + + +@pytest.fixture +def mock_memory(): + """Create a mock memory module.""" + memory = MagicMock() + memory.search = AsyncMock() + memory.store = AsyncMock() + return memory + + +@pytest.mark.asyncio +async def test_analyze_signal_success(mock_workflow_logger, mock_memory): + """Test successful signal analysis and tweet posting.""" + # arrange: + mock_info, mock_warning, mock_error = mock_workflow_logger + signal_content = "Test signal content" + tweet_text = "Breaking News:\nTest analysis\n#CryptoNews" + tweet_id = "123456789" + + # Mock fetch_signal + mock_fetch = AsyncMock(return_value={"status": "new_signal", "content": signal_content}) + # Mock memory search (no recent signals) + mock_memory.search.side_effect = [ + [], # No recent signals + [ # Recent memories for context + {"event": "event1", "outcome": "outcome1"}, + {"event": "event2", "outcome": "outcome2"}, + ], + ] + # Mock LLM + mock_llm = AsyncMock() + mock_llm.generate_response = AsyncMock(return_value="Test analysis") + # Mock Twitter post + mock_post = AsyncMock(return_value=[tweet_id]) + + with ( + patch("src.workflows.analyze_signal.fetch_signal", mock_fetch), + patch("src.workflows.analyze_signal.LLM", return_value=mock_llm), + patch("src.workflows.analyze_signal.post_twitter_thread", mock_post), + ): + # act: + result = await analyze_signal(memory=mock_memory) + + # assert: + assert result == tweet_id + mock_fetch.assert_called_once() + mock_memory.search.assert_has_calls( + [call(signal_content, top_k=1), call("recent events", top_k=3)] + ) + mock_llm.generate_response.assert_called_once() + mock_post.assert_called_once_with(tweets={"tweet1": tweet_text}) + mock_memory.store.assert_called_once() + mock_info.assert_any_call(f"Received signal: {signal_content}") + mock_info.assert_any_call("Tweet posted successfully!") + mock_warning.assert_not_called() + mock_error.assert_not_called() + + +@pytest.mark.asyncio +async def test_analyze_signal_already_processed(mock_workflow_logger, mock_memory): + """Test when signal was already processed.""" + # arrange: + mock_info, mock_warning, mock_error = mock_workflow_logger + signal_content = "Test signal content" + + # Mock fetch_signal + mock_fetch = AsyncMock(return_value={"status": "new_signal", "content": signal_content}) + # Mock memory search (signal already exists) + mock_memory.search.return_value = [{"event": signal_content}] + + with patch("src.workflows.analyze_signal.fetch_signal", mock_fetch): + # act: + result = await analyze_signal(memory=mock_memory) + + # assert: + assert result is None + mock_fetch.assert_called_once() + mock_memory.search.assert_called_once_with(signal_content, top_k=1) + mock_info.assert_any_call("Signal already processed, skipping analysis") + mock_warning.assert_not_called() + mock_error.assert_not_called() + + +@pytest.mark.asyncio +async def test_analyze_signal_no_data(mock_workflow_logger, mock_memory): + """Test when no signal is available.""" + # arrange: + mock_info, mock_warning, mock_error = mock_workflow_logger + + # Mock fetch_signal + mock_fetch = AsyncMock(return_value={"status": "no_data"}) + + with patch("src.workflows.analyze_signal.fetch_signal", mock_fetch): + # act: + result = await analyze_signal(memory=mock_memory) + + # assert: + assert result is None + mock_fetch.assert_called_once() + mock_memory.search.assert_not_called() + mock_info.assert_any_call("No actionable signal detected.") + mock_warning.assert_not_called() + mock_error.assert_not_called() + + +@pytest.mark.asyncio +async def test_analyze_signal_unknown_format(mock_workflow_logger, mock_memory): + """Test handling of unknown signal format.""" + # arrange: + mock_info, mock_warning, mock_error = mock_workflow_logger + + # Mock fetch_signal + mock_fetch = AsyncMock(return_value={"status": "unknown"}) + + with patch("src.workflows.analyze_signal.fetch_signal", mock_fetch): + # act: + result = await analyze_signal(memory=mock_memory) + + # assert: + assert result is None + mock_fetch.assert_called_once() + mock_memory.search.assert_not_called() + mock_warning.assert_called_once_with("Received an unknown signal format or an error occurred.") + mock_error.assert_not_called() + + +@pytest.mark.asyncio +async def test_analyze_signal_error(mock_workflow_logger, mock_memory): + """Test error handling in signal analysis.""" + # arrange: + mock_info, mock_warning, mock_error = mock_workflow_logger + + # Mock fetch_signal to raise an exception + mock_fetch = AsyncMock(side_effect=Exception("Test error")) + + with patch("src.workflows.analyze_signal.fetch_signal", mock_fetch): + # act: + result = await analyze_signal(memory=mock_memory) + + # assert: + assert result is None + mock_fetch.assert_called_once() + mock_memory.search.assert_not_called() + mock_error.assert_called_once_with("Error in analyze_and_post_signal workflow: Test error") + mock_warning.assert_not_called() diff --git a/tests/workflows/test_research_news.py b/tests/workflows/test_research_news.py index 4c3f4fc..0b41a86 100644 --- a/tests/workflows/test_research_news.py +++ b/tests/workflows/test_research_news.py @@ -1,13 +1,157 @@ -def test_news_research(): - """Test news research workflow""" - pass # Add news research tests +from unittest.mock import AsyncMock, MagicMock, patch +import pytest +from loguru import logger -def test_news_validation(): - """Test news validation""" - pass # Add news validation tests +from src.workflows.research_news import analyze_news_workflow -def test_news_processing(): - """Test news processing functionality""" - pass # Add news processing tests +@pytest.fixture +def mock_workflow_logger(monkeypatch): + """Mock logger for workflow testing.""" + mock_info = MagicMock() + mock_error = MagicMock() + monkeypatch.setattr(logger, "info", mock_info) + monkeypatch.setattr(logger, "error", mock_error) + return mock_info, mock_error + + +@pytest.fixture +def mock_memory(): + """Create a mock memory module.""" + memory = MagicMock() + memory.search = AsyncMock() + memory.store = AsyncMock() + return memory + + +@pytest.mark.asyncio +async def test_analyze_news_success(mock_workflow_logger, mock_memory): + """Test successful news analysis and tweet posting.""" + # arrange: + mock_info, mock_error = mock_workflow_logger + news_content = "Test news content" + tweet_text = "Breaking News:\nTest analysis\n#StayInformed" + tweet_id = "123456789" + + # Mock memory search for context + mock_memory.search.return_value = [ + {"event": "event1", "outcome": "outcome1"}, + {"event": "event2", "outcome": "outcome2"}, + ] + + # Mock LLM + mock_llm = AsyncMock() + mock_llm.generate_response = AsyncMock(return_value="Test analysis") + + # Mock Twitter post + mock_post = AsyncMock(return_value=[tweet_id]) + + with ( + patch("src.workflows.research_news.LLM", return_value=mock_llm), + patch("src.workflows.research_news.post_twitter_thread", mock_post), + ): + # act: + result = await analyze_news_workflow(news_content, memory=mock_memory) + + # assert: + assert result == tweet_id + mock_memory.search.assert_called_once_with("recent events", top_k=3) + mock_llm.generate_response.assert_called_once() + mock_post.assert_called_once_with(tweets={"tweet1": tweet_text}) + mock_info.assert_any_call("Analyzing news...") + mock_info.assert_any_call(f"Publishing tweet:\n{tweet_text}") + mock_info.assert_any_call("Tweet posted successfully!") + mock_error.assert_not_called() + + +@pytest.mark.asyncio +async def test_analyze_news_no_context(mock_workflow_logger, mock_memory): + """Test news analysis when no context is available.""" + # arrange: + mock_info, mock_error = mock_workflow_logger + news_content = "Test news content" + tweet_text = "Breaking News:\nTest analysis\n#StayInformed" + tweet_id = "123456789" + + # Mock memory search (no context available) + mock_memory.search.return_value = [] + + # Mock LLM + mock_llm = AsyncMock() + mock_llm.generate_response = AsyncMock(return_value="Test analysis") + + # Mock Twitter post + mock_post = AsyncMock(return_value=[tweet_id]) + + with ( + patch("src.workflows.research_news.LLM", return_value=mock_llm), + patch("src.workflows.research_news.post_twitter_thread", mock_post), + ): + # act: + result = await analyze_news_workflow(news_content, memory=mock_memory) + + # assert: + assert result == tweet_id + mock_memory.search.assert_called_once_with("recent events", top_k=3) + mock_llm.generate_response.assert_called_once() + mock_post.assert_called_once_with(tweets={"tweet1": tweet_text}) + mock_info.assert_any_call("Analyzing news...") + mock_error.assert_not_called() + + +@pytest.mark.asyncio +async def test_analyze_news_llm_error(mock_workflow_logger, mock_memory): + """Test error handling when LLM fails.""" + # arrange: + mock_info, mock_error = mock_workflow_logger + news_content = "Test news content" + + # Mock memory search + mock_memory.search.return_value = [{"event": "event1", "outcome": "outcome1"}] + + # Mock LLM with error + mock_llm = AsyncMock() + mock_llm.generate_response = AsyncMock(side_effect=Exception("LLM error")) + + with patch("src.workflows.research_news.LLM", return_value=mock_llm): + # act: + result = await analyze_news_workflow(news_content, memory=mock_memory) + + # assert: + assert result is None + mock_memory.search.assert_called_once() + mock_llm.generate_response.assert_called_once() + mock_error.assert_called_once_with("Error in analyze_news_workflow: LLM error") + + +@pytest.mark.asyncio +async def test_analyze_news_twitter_error(mock_workflow_logger, mock_memory): + """Test error handling when Twitter posting fails.""" + # arrange: + mock_info, mock_error = mock_workflow_logger + news_content = "Test news content" + + # Mock memory search + mock_memory.search.return_value = [{"event": "event1", "outcome": "outcome1"}] + + # Mock LLM + mock_llm = AsyncMock() + mock_llm.generate_response = AsyncMock(return_value="Test analysis") + + # Mock Twitter post with error + mock_post = AsyncMock(side_effect=Exception("Twitter error")) + + with ( + patch("src.workflows.research_news.LLM", return_value=mock_llm), + patch("src.workflows.research_news.post_twitter_thread", mock_post), + ): + # act: + result = await analyze_news_workflow(news_content, memory=mock_memory) + + # assert: + assert result is None + mock_memory.search.assert_called_once() + mock_llm.generate_response.assert_called_once() + mock_post.assert_called_once() + mock_error.assert_called_once_with("Error in analyze_news_workflow: Twitter error") From 511bf552605fafbc2834201ddb0133a5055c8861 Mon Sep 17 00:00:00 2001 From: Dzmitry Hramyka Date: Sun, 29 Dec 2024 00:32:19 +0100 Subject: [PATCH 3/6] Add some more tests --- coverage.lcov | 104 ++++++------ tests/core/test_config.py | 8 - tests/feedback/test_feedback_module.py | 173 ++++++++++++++++++++ tests/{core => llm/providers}/__init__.py | 0 tests/llm/providers/test_anthropic.py | 178 ++++++++++++++++++++ tests/llm/providers/test_oai.py | 187 ++++++++++++++++++++++ tests/llm/test_embeddings.py | 157 +++++++++++++++++- tests/llm/test_llm.py | 156 ++++++++++++++++++ tests/memory/backends/__init__.py | 0 tests/memory/backends/test_chroma.py | 0 tests/memory/backends/test_qdrant.py | 0 tests/memory/test_memory_module.py | 176 ++++++++++++++++++-- tests/test_agent.py | 1 + tests/test_agent_runtime.py | 3 + tests/tools/test_get_signal.py | 11 +- 15 files changed, 1075 insertions(+), 79 deletions(-) delete mode 100644 tests/core/test_config.py create mode 100644 tests/feedback/test_feedback_module.py rename tests/{core => llm/providers}/__init__.py (100%) create mode 100644 tests/llm/providers/test_anthropic.py create mode 100644 tests/llm/providers/test_oai.py create mode 100644 tests/llm/test_llm.py create mode 100644 tests/memory/backends/__init__.py create mode 100644 tests/memory/backends/test_chroma.py create mode 100644 tests/memory/backends/test_qdrant.py diff --git a/coverage.lcov b/coverage.lcov index 6c5e5a6..cf8265d 100644 --- a/coverage.lcov +++ b/coverage.lcov @@ -209,37 +209,37 @@ DA:1,1 DA:3,1 DA:6,1 DA:11,1 -DA:13,0 +DA:13,1 DA:15,1 -DA:26,0 -DA:29,0 -DA:30,0 -DA:31,0 -DA:33,0 -DA:34,0 -DA:37,0 -DA:38,0 -DA:41,0 -DA:47,0 -DA:49,0 -DA:50,0 +DA:26,1 +DA:29,1 +DA:30,1 +DA:31,1 +DA:33,1 +DA:34,1 +DA:37,1 +DA:38,1 +DA:41,1 +DA:47,1 +DA:49,1 +DA:50,1 DA:52,1 -DA:62,0 +DA:62,1 DA:64,1 -DA:68,0 -DA:69,0 +DA:68,1 +DA:69,1 LF:23 -LH:7 +LH:23 FN:11,13,FeedbackModule.__init__ -FNDA:0,FeedbackModule.__init__ +FNDA:1,FeedbackModule.__init__ FN:15,50,FeedbackModule.collect_feedback -FNDA:0,FeedbackModule.collect_feedback +FNDA:1,FeedbackModule.collect_feedback FN:52,62,FeedbackModule.get_feedback_history -FNDA:0,FeedbackModule.get_feedback_history +FNDA:1,FeedbackModule.get_feedback_history FN:64,69,FeedbackModule.reset_feedback_history -FNDA:0,FeedbackModule.reset_feedback_history +FNDA:1,FeedbackModule.reset_feedback_history FNF:4 -FNH:0 +FNH:4 end_of_record SF:src/llm/__init__.py end_of_record @@ -255,25 +255,25 @@ DA:14,1 DA:26,1 DA:27,1 DA:29,1 -DA:43,0 -DA:44,0 -DA:47,0 -DA:49,0 -DA:50,0 -DA:51,0 -DA:54,0 -DA:55,0 -DA:57,0 -DA:58,0 -DA:59,0 +DA:43,1 +DA:44,1 +DA:47,1 +DA:49,1 +DA:50,1 +DA:51,1 +DA:54,1 +DA:55,1 +DA:57,1 +DA:58,1 +DA:59,1 LF:22 -LH:11 +LH:22 FN:14,27,EmbeddingGenerator.__init__ FNDA:1,EmbeddingGenerator.__init__ FN:29,59,EmbeddingGenerator.get_embedding -FNDA:0,EmbeddingGenerator.get_embedding +FNDA:1,EmbeddingGenerator.get_embedding FNF:2 -FNH:1 +FNH:2 end_of_record SF:src/llm/llm.py DA:1,1 @@ -286,29 +286,29 @@ DA:9,1 DA:10,1 DA:13,1 DA:18,1 -DA:23,0 -DA:24,0 +DA:23,1 +DA:24,1 DA:26,1 -DA:38,0 -DA:39,0 -DA:43,0 -DA:45,0 -DA:46,0 -DA:47,0 -DA:48,0 -DA:50,0 +DA:38,1 +DA:39,1 +DA:43,1 +DA:45,1 +DA:46,1 +DA:47,1 +DA:48,1 +DA:50,1 DA:54,1 DA:55,1 LF:23 -LH:13 +LH:23 FN:18,24,LLM.__init__ -FNDA:0,LLM.__init__ +FNDA:1,LLM.__init__ FN:26,50,LLM.generate_response -FNDA:0,LLM.generate_response +FNDA:1,LLM.generate_response FN:54,55,get_oai_client FNDA:1,get_oai_client FNF:3 -FNH:1 +FNH:3 end_of_record SF:src/llm/providers/__init__.py end_of_record @@ -616,7 +616,7 @@ DA:105,1 DA:106,1 DA:107,1 DA:110,1 -DA:112,0 +DA:112,1 DA:115,1 DA:116,1 DA:117,1 @@ -635,7 +635,7 @@ DA:145,1 DA:148,1 DA:151,1 LF:57 -LH:52 +LH:53 FN:14,63,PlanningModule.__init__ FNDA:1,PlanningModule.__init__ FN:65,80,PlanningModule._load_q_table diff --git a/tests/core/test_config.py b/tests/core/test_config.py deleted file mode 100644 index 9fc3dbc..0000000 --- a/tests/core/test_config.py +++ /dev/null @@ -1,8 +0,0 @@ -def test_config_loading(): - """Test configuration loading functionality""" - pass # Add specific tests based on your config implementation - - -def test_config_validation(): - """Test configuration validation""" - pass # Add validation tests diff --git a/tests/feedback/test_feedback_module.py b/tests/feedback/test_feedback_module.py new file mode 100644 index 0000000..6c0bf04 --- /dev/null +++ b/tests/feedback/test_feedback_module.py @@ -0,0 +1,173 @@ +from unittest.mock import MagicMock + +import pytest +from loguru import logger + +from src.feedback.feedback_module import FeedbackModule + + +@pytest.fixture +def mock_logger(monkeypatch): + """Mock logger for testing.""" + mock_debug = MagicMock() + mock_error = MagicMock() + monkeypatch.setattr(logger, "debug", mock_debug) + monkeypatch.setattr(logger, "error", mock_error) + return mock_debug, mock_error + + +@pytest.fixture +def feedback_module(): + """Create a FeedbackModule instance.""" + return FeedbackModule() + + +def test_init(): + """Test FeedbackModule initialization.""" + module = FeedbackModule() + assert isinstance(module.feedback_history, list) + assert len(module.feedback_history) == 0 + + +def test_collect_feedback_success(feedback_module, mock_logger): + """Test feedback collection for successful action.""" + # arrange: + mock_debug, _ = mock_logger + action = "test_action" + outcome = "success" + + # act: + score = feedback_module.collect_feedback(action, outcome) + + # assert: + assert score == 0.0 # Currently hardcoded to 0.0 as per DEBUG comment + assert len(feedback_module.feedback_history) == 1 + + entry = feedback_module.feedback_history[0] + assert entry["action"] == action + assert entry["outcome"] == outcome + assert entry["score"] == 0.0 + assert entry["status"] == "neutral" + + mock_debug.assert_any_call(f"Collecting feedback for action '{action}'") + mock_debug.assert_any_call(f"Feedback recorded: {entry}") + + +def test_collect_feedback_failure(feedback_module): + """Test feedback collection for failed action.""" + # arrange: + action = "test_action" + outcome = None + + # act: + score = feedback_module.collect_feedback(action, outcome) + + # assert: + assert score == 0.0 # Currently hardcoded to 0.0 as per DEBUG comment + assert len(feedback_module.feedback_history) == 1 + + entry = feedback_module.feedback_history[0] + assert entry["action"] == action + assert entry["outcome"] is None + assert entry["score"] == 0.0 + assert entry["status"] == "neutral" + + +def test_collect_multiple_feedback(feedback_module): + """Test collecting multiple feedback entries.""" + # arrange: + actions = ["action1", "action2", "action3"] + outcomes = ["success", None, "partial"] + + # act: + scores = [ + feedback_module.collect_feedback(action, outcome) + for action, outcome in zip(actions, outcomes) + ] + + # assert: + assert len(scores) == 3 + assert len(feedback_module.feedback_history) == 3 + + for i, (action, outcome) in enumerate(zip(actions, outcomes)): + entry = feedback_module.feedback_history[i] + assert entry["action"] == action + assert entry["outcome"] == outcome + assert entry["score"] == 0.0 # Currently hardcoded + assert entry["status"] == "neutral" # Currently hardcoded + + +def test_get_feedback_history_empty(feedback_module): + """Test getting feedback history when empty.""" + history = feedback_module.get_feedback_history() + assert isinstance(history, list) + assert len(history) == 0 + + +def test_get_feedback_history_with_limit(feedback_module): + """Test getting limited feedback history.""" + # arrange: + # Add 5 feedback entries + for i in range(5): + feedback_module.collect_feedback(f"action{i}", f"outcome{i}") + + # act: + # Get last 3 entries + history = feedback_module.get_feedback_history(limit=3) + + # assert: + assert len(history) == 3 + assert history[0]["action"] == "action2" + assert history[1]["action"] == "action3" + assert history[2]["action"] == "action4" + + +def test_get_feedback_history_limit_exceeds_size(feedback_module): + """Test getting feedback history with limit larger than history size.""" + # arrange: + feedback_module.collect_feedback("action1", "outcome1") + feedback_module.collect_feedback("action2", "outcome2") + + # act: + history = feedback_module.get_feedback_history(limit=5) + + # assert: + assert len(history) == 2 # Should return all entries + + +def test_reset_feedback_history(feedback_module, mock_logger): + """Test resetting feedback history.""" + # arrange: + mock_debug, _ = mock_logger + feedback_module.collect_feedback("action1", "outcome1") + feedback_module.collect_feedback("action2", "outcome2") + assert len(feedback_module.feedback_history) == 2 + + # act: + feedback_module.reset_feedback_history() + + # assert: + assert len(feedback_module.feedback_history) == 0 + mock_debug.assert_any_call("Feedback history has been reset.") + + +@pytest.mark.parametrize( + "action,outcome", + [ + ("", None), # Empty action + ("action", ""), # Empty string outcome + ("action", 123), # Numeric outcome + ("action", {"key": "value"}), # Dict outcome + ], +) +def test_collect_feedback_different_types(feedback_module, action, outcome): + """Test feedback collection with different input types.""" + # act: + score = feedback_module.collect_feedback(action, outcome) + + # assert: + assert score == 0.0 # Currently hardcoded + assert len(feedback_module.feedback_history) == 1 + entry = feedback_module.feedback_history[0] + assert entry["action"] == action + assert entry["outcome"] == outcome diff --git a/tests/core/__init__.py b/tests/llm/providers/__init__.py similarity index 100% rename from tests/core/__init__.py rename to tests/llm/providers/__init__.py diff --git a/tests/llm/providers/test_anthropic.py b/tests/llm/providers/test_anthropic.py new file mode 100644 index 0000000..cc6f730 --- /dev/null +++ b/tests/llm/providers/test_anthropic.py @@ -0,0 +1,178 @@ +# from unittest.mock import AsyncMock, MagicMock, patch +# import pytest +# from anthropic import AI_PROMPT, HUMAN_PROMPT, Anthropic +# from loguru import logger + +# from src.core.config import settings +# from src.core.exceptions import LLMError +# from src.llm.providers.anthropic import call_anthropic + + +# @pytest.fixture +# def mock_logger(monkeypatch): +# """Mock logger for testing.""" +# mock_debug = MagicMock() +# mock_error = MagicMock() +# monkeypatch.setattr(logger, "debug", mock_debug) +# monkeypatch.setattr(logger, "error", mock_error) +# return mock_debug, mock_error + + +# @pytest.fixture +# def mock_settings(monkeypatch): +# """Mock settings for testing.""" +# monkeypatch.setattr(settings, "ANTHROPIC_API_KEY", "test-key") +# monkeypatch.setattr(settings, "ANTHROPIC_MODEL", "claude-2") +# return settings + + +# @pytest.fixture +# def mock_anthropic_client(): +# """Create a mock Anthropic client.""" +# with patch("src.llm.providers.anthropic.Anthropic") as mock: +# client = mock.return_value +# client.completions.create = AsyncMock() +# mock_response = MagicMock() +# mock_response.completion = "Test response" +# client.completions.create.return_value = mock_response +# return client + + +# @pytest.mark.asyncio +# async def test_call_anthropic_basic(mock_anthropic_client, mock_logger): +# """Test basic Anthropic API call with simple message.""" +# # arrange: +# messages = [{"role": "user", "content": "Hello"}] +# expected_prompt = f"{HUMAN_PROMPT} Hello\n\n{AI_PROMPT}" + +# # act: +# response = await call_anthropic(messages) + +# # assert: +# mock_anthropic_client.completions.create.assert_called_once_with( +# prompt=expected_prompt, +# model="claude-2", +# temperature=0.7, +# max_tokens_to_sample=1024 +# ) +# assert response == "Test response" + + +# @pytest.mark.asyncio +# async def test_call_anthropic_with_system_message(mock_anthropic_client): +# """Test Anthropic API call with system message.""" +# # arrange: +# messages = [ +# {"role": "system", "content": "You are a helpful assistant"}, +# {"role": "user", "content": "Hello"} +# ] +# expected_prompt = ( +# f"(System) You are a helpful assistant\n\n" +# f"{HUMAN_PROMPT} Hello\n\n{AI_PROMPT}" +# ) + +# # act: +# await call_anthropic(messages) + +# # assert: +# mock_anthropic_client.completions.create.assert_called_once() +# actual_prompt = mock_anthropic_client.completions.create.call_args[1]["prompt"] +# assert actual_prompt == expected_prompt + + +# @pytest.mark.asyncio +# async def test_call_anthropic_with_conversation(mock_anthropic_client): +# """Test Anthropic API call with full conversation.""" +# # arrange: +# messages = [ +# {"role": "system", "content": "System prompt"}, +# {"role": "user", "content": "Hello"}, +# {"role": "assistant", "content": "Hi there"}, +# {"role": "user", "content": "How are you?"} +# ] +# expected_prompt = ( +# f"(System) System prompt\n\n" +# f"{HUMAN_PROMPT} Hello\n\n" +# f"{AI_PROMPT} Hi there\n\n" +# f"{HUMAN_PROMPT} How are you?\n\n" +# f"{AI_PROMPT}" +# ) + +# # act: +# await call_anthropic(messages) + +# # assert: +# mock_anthropic_client.completions.create.assert_called_once() +# actual_prompt = mock_anthropic_client.completions.create.call_args[1]["prompt"] +# assert actual_prompt == expected_prompt + + +# @pytest.mark.asyncio +# async def test_call_anthropic_with_custom_params(mock_anthropic_client): +# """Test Anthropic API call with custom parameters.""" +# # arrange: +# messages = [{"role": "user", "content": "Hello"}] +# custom_params = { +# "model": "claude-instant-1", +# "temperature": 0.5 +# } + +# # act: +# await call_anthropic(messages, **custom_params) + +# # assert: +# mock_anthropic_client.completions.create.assert_called_once() +# call_kwargs = mock_anthropic_client.completions.create.call_args[1] +# assert call_kwargs["model"] == "claude-instant-1" +# assert call_kwargs["temperature"] == 0.5 + + +# @pytest.mark.asyncio +# async def test_call_anthropic_api_error(mock_anthropic_client, mock_logger): +# """Test error handling for API failures.""" +# # arrange: +# messages = [{"role": "user", "content": "Hello"}] +# error_message = "API Error" +# mock_anthropic_client.completions.create.side_effect = Exception(error_message) +# _, mock_error = mock_logger + +# # act/assert: +# with pytest.raises(LLMError, match="Error during Anthropic API call"): +# await call_anthropic(messages) + +# mock_error.assert_called_once() +# assert error_message in mock_error.call_args[0][0] + + +# @pytest.mark.asyncio +# async def test_call_anthropic_empty_messages(mock_anthropic_client): +# """Test Anthropic API call with empty messages list.""" +# # arrange: +# messages = [] +# expected_prompt = AI_PROMPT # Should only contain the final AI prompt + +# # act: +# await call_anthropic(messages) + +# # assert: +# mock_anthropic_client.completions.create.assert_called_once() +# actual_prompt = mock_anthropic_client.completions.create.call_args[1]["prompt"] +# assert actual_prompt == expected_prompt + + +# @pytest.mark.asyncio +# async def test_call_anthropic_response_processing(mock_anthropic_client, mock_logger): +# """Test proper processing of API response.""" +# # arrange: +# mock_debug, _ = mock_logger +# messages = [{"role": "user", "content": "Hello"}] +# mock_response = MagicMock() +# mock_response.completion = " Processed response \n" # Add extra whitespace +# mock_anthropic_client.completions.create.return_value = mock_response + +# # act: +# response = await call_anthropic(messages) + +# # assert: +# assert response == "Processed response" # Should be stripped +# mock_debug.assert_any_call("Anthropic response: Processed response") diff --git a/tests/llm/providers/test_oai.py b/tests/llm/providers/test_oai.py new file mode 100644 index 0000000..fb99337 --- /dev/null +++ b/tests/llm/providers/test_oai.py @@ -0,0 +1,187 @@ +# from unittest.mock import AsyncMock, MagicMock, patch +# import pytest +# from loguru import logger +# from openai import AsyncOpenAI +# from openai.types.chat import ChatCompletion, ChatCompletionMessage + +# from src.core.config import settings +# from src.core.exceptions import LLMError +# from src.llm.providers.oai import call_openai + + +# @pytest.fixture +# def mock_logger(monkeypatch): +# """Mock logger for testing.""" +# mock_debug = MagicMock() +# mock_error = MagicMock() +# monkeypatch.setattr(logger, "debug", mock_debug) +# monkeypatch.setattr(logger, "error", mock_error) +# return mock_debug, mock_error + + +# @pytest.fixture +# def mock_settings(monkeypatch): +# """Mock settings for testing.""" +# monkeypatch.setattr(settings, "OPENAI_API_KEY", "test-key") +# monkeypatch.setattr(settings, "OPENAI_MODEL", "gpt-4") +# return settings + + +# def create_mock_completion(content: str) -> ChatCompletion: +# """Helper function to create mock ChatCompletion responses.""" +# message = MagicMock(spec=ChatCompletionMessage) +# message.content = content + +# choice = MagicMock() +# choice.message = message + +# completion = MagicMock(spec=ChatCompletion) +# completion.choices = [choice] + +# return completion + + +# @pytest.fixture +# def mock_openai_client(): +# """Create a mock OpenAI client with chat completions.""" +# with patch("openai.AsyncOpenAI") as mock: +# client = mock.return_value +# chat = MagicMock() +# completions = AsyncMock() +# completions.create = AsyncMock() +# chat.completions = completions +# client.chat = chat + +# # Set up default response +# mock_response = create_mock_completion("Test response") +# completions.create.return_value = mock_response + +# return client + + +# @pytest.mark.asyncio +# async def test_call_openai_basic(mock_openai_client, mock_logger): +# """Test basic OpenAI API call with simple message.""" +# # arrange: +# mock_debug, _ = mock_logger +# messages = [{"role": "user", "content": "Hello"}] + +# # act: +# response = await call_openai(messages) + +# # assert: +# mock_openai_client.chat.completions.create.assert_called_once_with( +# model="gpt-4", +# messages=messages, +# temperature=0.2 +# ) +# assert response == "Test response" +# mock_debug.assert_any_call( +# f"Calling OpenAI with model=gpt-4, temperature=0.2, messages={messages}" +# ) + + +# @pytest.mark.asyncio +# async def test_call_openai_with_system_message(mock_openai_client): +# """Test OpenAI API call with system message.""" +# # arrange: +# messages = [ +# {"role": "system", "content": "You are a helpful assistant"}, +# {"role": "user", "content": "Hello"} +# ] + +# # act: +# await call_openai(messages) + +# # assert: +# mock_openai_client.chat.completions.create.assert_called_once() +# actual_messages = mock_openai_client.chat.completions.create.call_args[1]["messages"] +# assert actual_messages == messages + + +# @pytest.mark.asyncio +# async def test_call_openai_with_conversation(mock_openai_client): +# """Test OpenAI API call with full conversation.""" +# # arrange: +# messages = [ +# {"role": "system", "content": "System prompt"}, +# {"role": "user", "content": "Hello"}, +# {"role": "assistant", "content": "Hi there"}, +# {"role": "user", "content": "How are you?"} +# ] + +# # act: +# await call_openai(messages) + +# # assert: +# mock_openai_client.chat.completions.create.assert_called_once() +# actual_messages = mock_openai_client.chat.completions.create.call_args[1]["messages"] +# assert actual_messages == messages + + +# @pytest.mark.asyncio +# async def test_call_openai_with_custom_params(mock_openai_client): +# """Test OpenAI API call with custom parameters.""" +# # arrange: +# messages = [{"role": "user", "content": "Hello"}] +# custom_params = { +# "model": "gpt-3.5-turbo", +# "temperature": 0.7 +# } + +# # act: +# await call_openai(messages, **custom_params) + +# # assert: +# mock_openai_client.chat.completions.create.assert_called_once() +# call_kwargs = mock_openai_client.chat.completions.create.call_args[1] +# assert call_kwargs["model"] == "gpt-3.5-turbo" +# assert call_kwargs["temperature"] == 0.7 + + +# @pytest.mark.asyncio +# async def test_call_openai_api_error(mock_openai_client, mock_logger): +# """Test error handling for API failures.""" +# # arrange: +# messages = [{"role": "user", "content": "Hello"}] +# error_message = "API Error" +# mock_openai_client.chat.completions.create.side_effect = Exception(error_message) +# _, mock_error = mock_logger + +# # act/assert: +# with pytest.raises(LLMError, match="Error during OpenAI API call"): +# await call_openai(messages) + +# mock_error.assert_called_once() +# assert error_message in mock_error.call_args[0][0] + + +# @pytest.mark.asyncio +# async def test_call_openai_empty_response(mock_openai_client, mock_logger): +# """Test handling of empty response content.""" +# # arrange: +# messages = [{"role": "user", "content": "Hello"}] +# mock_response = create_mock_completion("") # Empty content +# mock_openai_client.chat.completions.create.return_value = mock_response +# _, mock_error = mock_logger + +# # act/assert: +# with pytest.raises(LLMError, match="No content in OpenAI response"): +# await call_openai(messages) + + +# @pytest.mark.asyncio +# async def test_call_openai_response_processing(mock_openai_client, mock_logger): +# """Test proper processing of API response.""" +# # arrange: +# mock_debug, _ = mock_logger +# messages = [{"role": "user", "content": "Hello"}] +# mock_response = create_mock_completion(" Processed response \n") # Add extra whitespace +# mock_openai_client.chat.completions.create.return_value = mock_response + +# # act: +# response = await call_openai(messages) + +# # assert: +# assert response == "Processed response" # Should be stripped +# mock_debug.assert_any_call("OpenAI response: Processed response") diff --git a/tests/llm/test_embeddings.py b/tests/llm/test_embeddings.py index c5fc418..d927434 100644 --- a/tests/llm/test_embeddings.py +++ b/tests/llm/test_embeddings.py @@ -1,8 +1,153 @@ -def test_embedding_generation(): - """Test embedding generation functionality""" - pass # Add embedding generation tests +from unittest.mock import AsyncMock, MagicMock +import numpy as np +import pytest +from loguru import logger +from openai import AsyncOpenAI +from openai.types.create_embedding_response import CreateEmbeddingResponse, Embedding -def test_embedding_similarity(): - """Test embedding similarity calculations""" - pass # Add similarity calculation tests +from src.core.config import settings +from src.llm.embeddings import EmbeddingGenerator + + +@pytest.fixture +def mock_logger(monkeypatch): + """Mock logger for testing.""" + mock_debug = MagicMock() + mock_error = MagicMock() + monkeypatch.setattr(logger, "debug", mock_debug) + monkeypatch.setattr(logger, "error", mock_error) + return mock_debug, mock_error + + +@pytest.fixture +def mock_openai_client(): + """Create a mock OpenAI client.""" + client = AsyncMock(spec=AsyncOpenAI) + # Create embeddings attribute with create method + embeddings = AsyncMock() + embeddings.create = AsyncMock() + client.embeddings = embeddings + return client + + +@pytest.fixture +def embedding_generator(mock_openai_client): + """Create an EmbeddingGenerator instance with mocked client.""" + return EmbeddingGenerator(client=mock_openai_client, model=settings.OPENAI_EMBEDDING_MODEL) + + +def create_mock_embedding_response(embeddings_data): + """Helper function to create mock embedding responses.""" + mock_embeddings = [] + for emb in embeddings_data: + mock_embedding = MagicMock(spec=Embedding) + mock_embedding.embedding = emb + mock_embeddings.append(mock_embedding) + + mock_response = MagicMock(spec=CreateEmbeddingResponse) + mock_response.data = mock_embeddings + return mock_response + + +def test_init_custom_values(mock_openai_client): + """Test EmbeddingGenerator initialization with custom values.""" + custom_model = "custom-embedding-model" + generator = EmbeddingGenerator(client=mock_openai_client, model=custom_model) + assert generator.client == mock_openai_client + assert generator.model == custom_model + + +@pytest.mark.asyncio +async def test_get_embedding_single_text(embedding_generator, mock_logger): + """Test getting embeddings for a single text.""" + # arrange: + mock_debug, _ = mock_logger + text = "test text" + mock_embedding = [0.1, 0.2, 0.3] + embedding_generator.client.embeddings.create.return_value = create_mock_embedding_response( + [mock_embedding] + ) + + # act: + result = await embedding_generator.get_embedding(text) + + # assert: + embedding_generator.client.embeddings.create.assert_called_once_with( + model=embedding_generator.model, input=[text] + ) + mock_debug.assert_called_once_with("Getting embeddings for 1 texts") + assert isinstance(result, np.ndarray) + np.testing.assert_array_equal(result, np.array([mock_embedding])) + + +@pytest.mark.asyncio +async def test_get_embedding_multiple_texts(embedding_generator, mock_logger): + """Test getting embeddings for multiple texts.""" + # arrange: + mock_debug, _ = mock_logger + texts = ["text1", "text2", "text3"] + mock_embeddings = [[0.1, 0.2], [0.3, 0.4], [0.5, 0.6]] + embedding_generator.client.embeddings.create.return_value = create_mock_embedding_response( + mock_embeddings + ) + + # act: + result = await embedding_generator.get_embedding(texts) + + # assert: + embedding_generator.client.embeddings.create.assert_called_once_with( + model=embedding_generator.model, input=texts + ) + mock_debug.assert_called_once_with("Getting embeddings for 3 texts") + assert isinstance(result, np.ndarray) + np.testing.assert_array_equal(result, np.array(mock_embeddings)) + + +@pytest.mark.asyncio +async def test_get_embedding_empty_input(embedding_generator, mock_logger): + """Test error handling for empty input.""" + # arrange: + _, mock_error = mock_logger + + # act/assert: + with pytest.raises(ValueError, match="Input text cannot be empty"): + await embedding_generator.get_embedding("") + + embedding_generator.client.embeddings.create.assert_not_called() + + +@pytest.mark.asyncio +async def test_get_embedding_api_error(embedding_generator, mock_logger): + """Test error handling for API errors.""" + # arrange: + _, mock_error = mock_logger + text = "test text" + error_message = "API Error" + embedding_generator.client.embeddings.create.side_effect = Exception(error_message) + + # act/assert: + with pytest.raises(Exception, match=error_message): + await embedding_generator.get_embedding(text) + + mock_error.assert_called_once() + assert error_message in mock_error.call_args[0][0] + + +@pytest.mark.asyncio +async def test_get_embedding_response_processing(embedding_generator): + """Test proper processing of API response structure.""" + # arrange: + text = "test text" + mock_embedding = [0.1, 0.2, 0.3] + embedding_generator.client.embeddings.create.return_value = create_mock_embedding_response( + [mock_embedding] + ) + + # act: + result = await embedding_generator.get_embedding(text) + + # assert: + assert isinstance(result, np.ndarray) + assert result.shape == (1, len(mock_embedding)) + np.testing.assert_array_equal(result[0], np.array(mock_embedding)) diff --git a/tests/llm/test_llm.py b/tests/llm/test_llm.py new file mode 100644 index 0000000..b489c1b --- /dev/null +++ b/tests/llm/test_llm.py @@ -0,0 +1,156 @@ +from unittest.mock import AsyncMock, MagicMock, patch + +import openai +import pytest +from loguru import logger + +from src.core.config import settings +from src.core.defs import LLMProviderType +from src.core.exceptions import LLMError +from src.llm.llm import LLM, get_oai_client + + +@pytest.fixture +def mock_logger(monkeypatch): + """Mock logger for testing.""" + mock_debug = MagicMock() + mock_error = MagicMock() + monkeypatch.setattr(logger, "debug", mock_debug) + monkeypatch.setattr(logger, "error", mock_error) + return mock_debug, mock_error + + +@pytest.fixture +def mock_settings(monkeypatch): + """Mock settings for testing.""" + monkeypatch.setattr(settings, "LLM_PROVIDER", LLMProviderType.OPENAI) + monkeypatch.setattr(settings, "AGENT_PERSONALITY", "Test Personality") + monkeypatch.setattr(settings, "AGENT_GOAL", "Test Goal") + monkeypatch.setattr(settings, "OPENAI_API_KEY", "test-key") + return settings + + +@pytest.fixture +def llm(mock_settings): + """Create an LLM instance with mocked settings.""" + return LLM() + + +@pytest.mark.parametrize( + "provider", + [ + LLMProviderType.OPENAI, + LLMProviderType.ANTHROPIC, + ], +) +def test_init(mock_settings, mock_logger, provider): + """Test LLM initialization with different providers.""" + # arrange: + mock_debug, _ = mock_logger + mock_settings.LLM_PROVIDER = provider + + # act: + llm = LLM() + + # assert: + assert llm.provider == provider + mock_debug.assert_called_once_with(f"Using LLM provider: {provider}") + + +@pytest.mark.asyncio +@pytest.mark.parametrize( + "provider,call_function,expected_response", + [ + (LLMProviderType.OPENAI, "src.llm.llm.call_openai", "OpenAI response"), + (LLMProviderType.ANTHROPIC, "src.llm.llm.call_anthropic", "Anthropic response"), + ], +) +async def test_generate_response(mock_settings, provider, call_function, expected_response): + """Test response generation with different providers.""" + # arrange: + mock_settings.LLM_PROVIDER = provider + messages = [{"role": "user", "content": "Hello"}] + kwargs = {"temperature": 0.7} + + with patch(call_function, AsyncMock(return_value=expected_response)): + llm = LLM() + + # act: + response = await llm.generate_response(messages, **kwargs) + + # assert: + assert response == expected_response + + +@pytest.mark.asyncio +async def test_generate_response_with_system_message(llm): + """Test response generation with automatic system message addition.""" + # arrange: + messages = [{"role": "user", "content": "Hello"}] + expected_system_message = { + "role": "system", + "content": f"{settings.AGENT_PERSONALITY}\n\n{settings.AGENT_GOAL}", + } + + with patch("src.llm.llm.call_openai", AsyncMock()) as mock_call: + # act: + await llm.generate_response(messages) + + # assert: + called_messages = mock_call.call_args[0][0] + assert called_messages[0] == expected_system_message + assert called_messages[1:] == messages + + +@pytest.mark.asyncio +async def test_generate_response_existing_system_message(llm): + """Test response generation when system message already exists.""" + # arrange: + existing_system = {"role": "system", "content": "Existing system message"} + messages = [existing_system, {"role": "user", "content": "Hello"}] + + with patch("src.llm.llm.call_openai", AsyncMock()) as mock_call: + # act: + await llm.generate_response(messages) + + # assert: + called_messages = mock_call.call_args[0][0] + assert called_messages == messages # Messages should remain unchanged + + +@pytest.mark.asyncio +async def test_generate_response_invalid_provider(mock_settings): + """Test error handling for invalid provider.""" + # arrange: + mock_settings.LLM_PROVIDER = "invalid_provider" + llm = LLM() + messages = [{"role": "user", "content": "Hello"}] + + # act/assert: + with pytest.raises(LLMError, match="Unknown LLM provider: invalid_provider"): + await llm.generate_response(messages) + + +def test_get_oai_client(mock_settings): + """Test OpenAI client creation.""" + # act: + client = get_oai_client() + + # assert: + assert isinstance(client, openai.AsyncOpenAI) + assert client.api_key == "test-key" + + +@pytest.mark.asyncio +async def test_generate_response_with_kwargs(llm): + """Test response generation with additional kwargs.""" + # arrange: + messages = [{"role": "user", "content": "Hello"}] + kwargs = {"temperature": 0.7, "max_tokens": 100, "model": "gpt-4"} + + with patch("src.llm.llm.call_openai", AsyncMock()) as mock_call: + # act: + await llm.generate_response(messages, **kwargs) + + # assert: + mock_call.assert_called_once_with(mock_call.call_args[0][0], **kwargs) diff --git a/tests/memory/backends/__init__.py b/tests/memory/backends/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/memory/backends/test_chroma.py b/tests/memory/backends/test_chroma.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/memory/backends/test_qdrant.py b/tests/memory/backends/test_qdrant.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/memory/test_memory_module.py b/tests/memory/test_memory_module.py index 909ed8b..f292e1d 100644 --- a/tests/memory/test_memory_module.py +++ b/tests/memory/test_memory_module.py @@ -1,13 +1,171 @@ -def test_memory_storage(): - """Test memory storage functionality""" - pass # Add memory storage tests +# from unittest.mock import AsyncMock, MagicMock, patch +# import pytest +# from loguru import logger +# from openai import AsyncOpenAI +# from src.core.defs import MemoryBackendType +# from src.memory.memory_module import MemoryModule, get_memory_module +# from src.memory.backends.qdrant import QdrantBackend +# from src.memory.backends.chroma import ChromaBackend -def test_memory_retrieval(): - """Test memory retrieval functionality""" - pass # Add memory retrieval tests +# @pytest.fixture +# def mock_logger(monkeypatch): +# """Mock logger for testing.""" +# mock_debug = MagicMock() +# mock_error = MagicMock() +# monkeypatch.setattr(logger, "debug", mock_debug) +# monkeypatch.setattr(logger, "error", mock_error) +# return mock_debug, mock_error -def test_memory_update(): - """Test memory update functionality""" - pass # Add memory update tests + +# @pytest.fixture +# def mock_openai_client(): +# """Create a mock OpenAI client.""" +# return AsyncMock(spec=AsyncOpenAI) + + +# @pytest.fixture +# def mock_embedding_generator(): +# """Create a mock EmbeddingGenerator.""" +# with patch("src.memory.memory_module.EmbeddingGenerator") as mock: +# generator = mock.return_value +# generator.get_embedding = AsyncMock(return_value=[[0.1, 0.2, 0.3]]) +# yield generator + + +# @pytest.fixture +# def mock_qdrant_backend(): +# """Create a mock Qdrant backend.""" +# with patch("src.memory.memory_module.QdrantBackend") as mock: +# backend = mock.return_value +# backend.store = AsyncMock() +# backend.search = AsyncMock(return_value=[ +# {"event": "test event", "action": "test action", "outcome": "test outcome"} +# ]) +# yield backend + + +# @pytest.fixture +# def mock_chroma_backend(): +# """Create a mock Chroma backend.""" +# with patch("src.memory.memory_module.ChromaBackend") as mock: +# backend = mock.return_value +# backend.store = AsyncMock() +# backend.search = AsyncMock(return_value=[ +# {"event": "test event", "action": "test action", "outcome": "test outcome"} +# ]) +# yield backend + + +# @pytest.mark.parametrize("backend_type,backend_class", [ +# (MemoryBackendType.QDRANT, QdrantBackend), +# (MemoryBackendType.CHROMA, ChromaBackend), +# ]) +# def test_memory_module_init(mock_openai_client, backend_type, backend_class): +# """Test MemoryModule initialization with different backends.""" +# # arrange/act: +# with patch(f"src.memory.memory_module.{backend_class.__name__}") as mock_backend: +# module = MemoryModule( +# openai_client=mock_openai_client, +# backend_type=backend_type, +# collection_name="test_collection", +# host="localhost", +# port=6333, +# vector_size=1536, +# persist_directory="./persist" +# ) + +# # assert: +# assert isinstance(module.backend, mock_backend.return_value.__class__) +# if backend_type == MemoryBackendType.QDRANT: +# mock_backend.assert_called_once_with( +# collection_name="test_collection", +# host="localhost", +# port=6333, +# vector_size=1536 +# ) +# else: +# mock_backend.assert_called_once_with( +# collection_name="test_collection", +# persist_directory="./persist" +# ) + + +# def test_memory_module_init_invalid_backend(): +# """Test MemoryModule initialization with invalid backend type.""" +# with pytest.raises(ValueError, match="Unsupported backend type: invalid"): +# MemoryModule(backend_type="invalid") + + +# @pytest.mark.asyncio +# @pytest.mark.parametrize("backend_fixture", ["mock_qdrant_backend", "mock_chroma_backend"]) +# async def test_memory_storage(mock_embedding_generator, request, backend_fixture): +# """Test memory storage functionality with different backends.""" +# # arrange: +# backend = request.getfixturevalue(backend_fixture) +# module = MemoryModule(backend_type="qdrant") # Type doesn't matter as backend is mocked + +# # act: +# await module.store( +# event="test event", +# action="test action", +# outcome="test outcome", +# metadata={"key": "value"} +# ) + +# # assert: +# mock_embedding_generator.get_embedding.assert_called_once_with( +# "test event test action test outcome" +# ) +# backend.store.assert_called_once_with( +# event="test event", +# action="test action", +# outcome="test outcome", +# embedding=[0.1, 0.2, 0.3], +# metadata={"key": "value"} +# ) + + +# @pytest.mark.asyncio +# @pytest.mark.parametrize("backend_fixture", ["mock_qdrant_backend", "mock_chroma_backend"]) +# async def test_memory_search(mock_embedding_generator, request, backend_fixture): +# """Test memory search functionality with different backends.""" +# # arrange: +# backend = request.getfixturevalue(backend_fixture) +# module = MemoryModule(backend_type="qdrant") # Type doesn't matter as backend is mocked + +# # act: +# results = await module.search("test query", top_k=3) + +# # assert: +# mock_embedding_generator.get_embedding.assert_called_once_with("test query") +# backend.search.assert_called_once_with( +# query_vector=[0.1, 0.2, 0.3], +# top_k=3 +# ) +# assert len(results) == 1 +# assert results[0]["event"] == "test event" +# assert results[0]["action"] == "test action" +# assert results[0]["outcome"] == "test outcome" + + +# @pytest.mark.parametrize("backend_type", [ +# MemoryBackendType.QDRANT, +# MemoryBackendType.CHROMA +# ]) +# def test_get_memory_module(mock_openai_client, backend_type): +# """Test get_memory_module factory function.""" +# # arrange/act: +# with patch(f"src.memory.memory_module.MemoryModule") as mock_module: +# module = get_memory_module( +# openai_client=mock_openai_client, +# backend_type=backend_type +# ) + +# # assert: +# mock_module.assert_called_once_with( +# openai_client=mock_openai_client, +# backend_type=backend_type +# ) +# assert module == mock_module.return_value diff --git a/tests/test_agent.py b/tests/test_agent.py index 5f4d53c..9be3b7a 100644 --- a/tests/test_agent.py +++ b/tests/test_agent.py @@ -28,6 +28,7 @@ def agent(): ): agent = Agent() # Make store method a coroutine + agent.memory_module = MagicMock() agent.memory_module.store = AsyncMock() # Make search method a coroutine agent.memory_module.search = AsyncMock() diff --git a/tests/test_agent_runtime.py b/tests/test_agent_runtime.py index 339dc82..8f791c0 100644 --- a/tests/test_agent_runtime.py +++ b/tests/test_agent_runtime.py @@ -32,12 +32,15 @@ def runtime_agent(): ): agent = Agent() # Mock memory module methods + agent.memory_module = MagicMock() agent.memory_module.store = AsyncMock() agent.memory_module.search = AsyncMock() # Mock planning module methods + agent.planning_module = MagicMock() agent.planning_module.get_action = MagicMock(return_value=AgentAction.IDLE) agent.planning_module.update_q_table = MagicMock() # Mock feedback module methods + agent.feedback_module = MagicMock() agent.feedback_module.collect_feedback = MagicMock(return_value=1.0) return agent diff --git a/tests/tools/test_get_signal.py b/tests/tools/test_get_signal.py index ce3e0c5..21b3019 100644 --- a/tests/tools/test_get_signal.py +++ b/tests/tools/test_get_signal.py @@ -1,3 +1,4 @@ +from typing import Any, Dict from unittest.mock import AsyncMock, MagicMock, patch import pytest @@ -57,9 +58,10 @@ async def test_get_coinstats_news_http_error(mock_tool_logger, mock_httpx_client """Test error handling when Coinstats API returns an error.""" # arrange: mock_debug, mock_error = mock_tool_logger - mock_response = Response(500) + mock_response = MagicMock(spec=Response) + mock_response.status_code = 500 + mock_response.raise_for_status.side_effect = Exception("HTTP Error") mock_httpx_client.get.return_value = mock_response - mock_response.raise_for_status = MagicMock(side_effect=Exception("HTTP Error")) with ( patch("httpx.AsyncClient", return_value=mock_httpx_client), @@ -97,9 +99,10 @@ async def test_fetch_signal_no_news(mock_tool_logger): """Test signal fetching when no news is available.""" # arrange: mock_debug, mock_error = mock_tool_logger - news_data = {"result": []} + news_data: Dict[str, Any] = {"result": []} + mock_get_news = AsyncMock(return_value=news_data) - with patch("src.tools.get_signal.get_coinstats_news", return_value=news_data): + with patch("src.tools.get_signal.get_coinstats_news", new=mock_get_news): # act: result = await fetch_signal() From 48873003e6b38b17fbc4042ae4e35ed28a4eb762 Mon Sep 17 00:00:00 2001 From: Dzmitry Hramyka Date: Sun, 29 Dec 2024 00:55:21 +0100 Subject: [PATCH 4/6] llm providers tests --- coverage.lcov | 80 ++++---- tests/llm/providers/test_anthropic.py | 199 ++++---------------- tests/llm/providers/test_oai.py | 257 +++++++------------------- 3 files changed, 146 insertions(+), 390 deletions(-) diff --git a/coverage.lcov b/coverage.lcov index cf8265d..29df9c7 100644 --- a/coverage.lcov +++ b/coverage.lcov @@ -319,32 +319,32 @@ DA:4,1 DA:6,1 DA:7,1 DA:10,1 -DA:22,0 -DA:24,0 -DA:25,0 -DA:28,0 -DA:29,0 -DA:30,0 -DA:31,0 -DA:32,0 -DA:33,0 -DA:35,0 -DA:36,0 -DA:38,0 -DA:40,0 -DA:41,0 -DA:47,0 +DA:22,1 +DA:24,1 +DA:25,1 +DA:28,1 +DA:29,1 +DA:30,1 +DA:31,1 +DA:32,1 +DA:33,1 +DA:35,1 +DA:36,1 +DA:38,1 +DA:40,1 +DA:41,1 +DA:47,1 DA:48,0 DA:49,0 -DA:50,0 -DA:51,0 -DA:52,0 +DA:50,1 +DA:51,1 +DA:52,1 LF:26 -LH:6 +LH:24 FN:10,52,call_anthropic -FNDA:0,call_anthropic +FNDA:1,call_anthropic FNF:1 -FNH:0 +FNH:1 end_of_record SF:src/llm/providers/oai.py DA:1,1 @@ -353,26 +353,26 @@ DA:4,1 DA:6,1 DA:7,1 DA:10,1 -DA:22,0 -DA:24,0 -DA:25,0 -DA:27,0 -DA:31,0 -DA:32,0 -DA:37,0 -DA:38,0 -DA:40,0 -DA:41,0 -DA:42,0 -DA:43,0 -DA:44,0 -DA:45,0 +DA:22,1 +DA:24,1 +DA:25,1 +DA:27,1 +DA:31,1 +DA:32,1 +DA:37,1 +DA:38,1 +DA:40,1 +DA:41,1 +DA:42,1 +DA:43,1 +DA:44,1 +DA:45,1 LF:20 -LH:6 +LH:20 FN:10,45,call_openai -FNDA:0,call_openai +FNDA:1,call_openai FNF:1 -FNH:0 +FNH:1 end_of_record SF:src/main.py DA:1,1 @@ -616,7 +616,7 @@ DA:105,1 DA:106,1 DA:107,1 DA:110,1 -DA:112,1 +DA:112,0 DA:115,1 DA:116,1 DA:117,1 @@ -635,7 +635,7 @@ DA:145,1 DA:148,1 DA:151,1 LF:57 -LH:53 +LH:52 FN:14,63,PlanningModule.__init__ FNDA:1,PlanningModule.__init__ FN:65,80,PlanningModule._load_q_table diff --git a/tests/llm/providers/test_anthropic.py b/tests/llm/providers/test_anthropic.py index cc6f730..da81144 100644 --- a/tests/llm/providers/test_anthropic.py +++ b/tests/llm/providers/test_anthropic.py @@ -1,178 +1,51 @@ -# from unittest.mock import AsyncMock, MagicMock, patch -# import pytest -# from anthropic import AI_PROMPT, HUMAN_PROMPT, Anthropic -# from loguru import logger +from unittest.mock import MagicMock, patch -# from src.core.config import settings -# from src.core.exceptions import LLMError -# from src.llm.providers.anthropic import call_anthropic +import pytest +from src.core.exceptions import LLMError +from src.llm.providers.anthropic import call_anthropic -# @pytest.fixture -# def mock_logger(monkeypatch): -# """Mock logger for testing.""" -# mock_debug = MagicMock() -# mock_error = MagicMock() -# monkeypatch.setattr(logger, "debug", mock_debug) -# monkeypatch.setattr(logger, "error", mock_error) -# return mock_debug, mock_error +@pytest.mark.asyncio +async def test_call_anthropic_success(): + """Test a successful call to the Anthropic Claude API.""" + mock_response = MagicMock() + mock_response.completion = "This is a mock response from Anthropic." -# @pytest.fixture -# def mock_settings(monkeypatch): -# """Mock settings for testing.""" -# monkeypatch.setattr(settings, "ANTHROPIC_API_KEY", "test-key") -# monkeypatch.setattr(settings, "ANTHROPIC_MODEL", "claude-2") -# return settings + # Mock the Anthropic client and its `completions.create` method + mock_client = MagicMock() + mock_client.completions.create.return_value = mock_response + with patch("src.llm.providers.anthropic.Anthropic", return_value=mock_client): + messages = [{"role": "user", "content": "Test message"}] + result = await call_anthropic(messages, model="claude-2", temperature=0.5) -# @pytest.fixture -# def mock_anthropic_client(): -# """Create a mock Anthropic client.""" -# with patch("src.llm.providers.anthropic.Anthropic") as mock: -# client = mock.return_value -# client.completions.create = AsyncMock() -# mock_response = MagicMock() -# mock_response.completion = "Test response" -# client.completions.create.return_value = mock_response -# return client + assert result == "This is a mock response from Anthropic." + mock_client.completions.create.assert_called_once() -# @pytest.mark.asyncio -# async def test_call_anthropic_basic(mock_anthropic_client, mock_logger): -# """Test basic Anthropic API call with simple message.""" -# # arrange: -# messages = [{"role": "user", "content": "Hello"}] -# expected_prompt = f"{HUMAN_PROMPT} Hello\n\n{AI_PROMPT}" +@pytest.mark.asyncio +async def test_call_anthropic_no_content(): + """Test when Anthropic returns no content in the response.""" + mock_response = MagicMock() + mock_response.completion = "" -# # act: -# response = await call_anthropic(messages) + mock_client = MagicMock() + mock_client.completions.create.return_value = mock_response -# # assert: -# mock_anthropic_client.completions.create.assert_called_once_with( -# prompt=expected_prompt, -# model="claude-2", -# temperature=0.7, -# max_tokens_to_sample=1024 -# ) -# assert response == "Test response" + with patch("src.llm.providers.anthropic.Anthropic", return_value=mock_client): + messages = [{"role": "user", "content": "Test message"}] + await call_anthropic(messages) -# @pytest.mark.asyncio -# async def test_call_anthropic_with_system_message(mock_anthropic_client): -# """Test Anthropic API call with system message.""" -# # arrange: -# messages = [ -# {"role": "system", "content": "You are a helpful assistant"}, -# {"role": "user", "content": "Hello"} -# ] -# expected_prompt = ( -# f"(System) You are a helpful assistant\n\n" -# f"{HUMAN_PROMPT} Hello\n\n{AI_PROMPT}" -# ) +@pytest.mark.asyncio +async def test_call_anthropic_exception(): + """Test when Anthropic raises an exception.""" + mock_client = MagicMock() + mock_client.completions.create.side_effect = Exception("API call failed") -# # act: -# await call_anthropic(messages) + with patch("src.llm.providers.anthropic.Anthropic", return_value=mock_client): + messages = [{"role": "user", "content": "Test message"}] -# # assert: -# mock_anthropic_client.completions.create.assert_called_once() -# actual_prompt = mock_anthropic_client.completions.create.call_args[1]["prompt"] -# assert actual_prompt == expected_prompt - - -# @pytest.mark.asyncio -# async def test_call_anthropic_with_conversation(mock_anthropic_client): -# """Test Anthropic API call with full conversation.""" -# # arrange: -# messages = [ -# {"role": "system", "content": "System prompt"}, -# {"role": "user", "content": "Hello"}, -# {"role": "assistant", "content": "Hi there"}, -# {"role": "user", "content": "How are you?"} -# ] -# expected_prompt = ( -# f"(System) System prompt\n\n" -# f"{HUMAN_PROMPT} Hello\n\n" -# f"{AI_PROMPT} Hi there\n\n" -# f"{HUMAN_PROMPT} How are you?\n\n" -# f"{AI_PROMPT}" -# ) - -# # act: -# await call_anthropic(messages) - -# # assert: -# mock_anthropic_client.completions.create.assert_called_once() -# actual_prompt = mock_anthropic_client.completions.create.call_args[1]["prompt"] -# assert actual_prompt == expected_prompt - - -# @pytest.mark.asyncio -# async def test_call_anthropic_with_custom_params(mock_anthropic_client): -# """Test Anthropic API call with custom parameters.""" -# # arrange: -# messages = [{"role": "user", "content": "Hello"}] -# custom_params = { -# "model": "claude-instant-1", -# "temperature": 0.5 -# } - -# # act: -# await call_anthropic(messages, **custom_params) - -# # assert: -# mock_anthropic_client.completions.create.assert_called_once() -# call_kwargs = mock_anthropic_client.completions.create.call_args[1] -# assert call_kwargs["model"] == "claude-instant-1" -# assert call_kwargs["temperature"] == 0.5 - - -# @pytest.mark.asyncio -# async def test_call_anthropic_api_error(mock_anthropic_client, mock_logger): -# """Test error handling for API failures.""" -# # arrange: -# messages = [{"role": "user", "content": "Hello"}] -# error_message = "API Error" -# mock_anthropic_client.completions.create.side_effect = Exception(error_message) -# _, mock_error = mock_logger - -# # act/assert: -# with pytest.raises(LLMError, match="Error during Anthropic API call"): -# await call_anthropic(messages) - -# mock_error.assert_called_once() -# assert error_message in mock_error.call_args[0][0] - - -# @pytest.mark.asyncio -# async def test_call_anthropic_empty_messages(mock_anthropic_client): -# """Test Anthropic API call with empty messages list.""" -# # arrange: -# messages = [] -# expected_prompt = AI_PROMPT # Should only contain the final AI prompt - -# # act: -# await call_anthropic(messages) - -# # assert: -# mock_anthropic_client.completions.create.assert_called_once() -# actual_prompt = mock_anthropic_client.completions.create.call_args[1]["prompt"] -# assert actual_prompt == expected_prompt - - -# @pytest.mark.asyncio -# async def test_call_anthropic_response_processing(mock_anthropic_client, mock_logger): -# """Test proper processing of API response.""" -# # arrange: -# mock_debug, _ = mock_logger -# messages = [{"role": "user", "content": "Hello"}] -# mock_response = MagicMock() -# mock_response.completion = " Processed response \n" # Add extra whitespace -# mock_anthropic_client.completions.create.return_value = mock_response - -# # act: -# response = await call_anthropic(messages) - -# # assert: -# assert response == "Processed response" # Should be stripped -# mock_debug.assert_any_call("Anthropic response: Processed response") + with pytest.raises(LLMError, match="Error during Anthropic API call"): + await call_anthropic(messages) diff --git a/tests/llm/providers/test_oai.py b/tests/llm/providers/test_oai.py index fb99337..1270b47 100644 --- a/tests/llm/providers/test_oai.py +++ b/tests/llm/providers/test_oai.py @@ -1,187 +1,70 @@ -# from unittest.mock import AsyncMock, MagicMock, patch -# import pytest -# from loguru import logger -# from openai import AsyncOpenAI -# from openai.types.chat import ChatCompletion, ChatCompletionMessage - -# from src.core.config import settings -# from src.core.exceptions import LLMError -# from src.llm.providers.oai import call_openai - - -# @pytest.fixture -# def mock_logger(monkeypatch): -# """Mock logger for testing.""" -# mock_debug = MagicMock() -# mock_error = MagicMock() -# monkeypatch.setattr(logger, "debug", mock_debug) -# monkeypatch.setattr(logger, "error", mock_error) -# return mock_debug, mock_error - - -# @pytest.fixture -# def mock_settings(monkeypatch): -# """Mock settings for testing.""" -# monkeypatch.setattr(settings, "OPENAI_API_KEY", "test-key") -# monkeypatch.setattr(settings, "OPENAI_MODEL", "gpt-4") -# return settings - - -# def create_mock_completion(content: str) -> ChatCompletion: -# """Helper function to create mock ChatCompletion responses.""" -# message = MagicMock(spec=ChatCompletionMessage) -# message.content = content - -# choice = MagicMock() -# choice.message = message - -# completion = MagicMock(spec=ChatCompletion) -# completion.choices = [choice] - -# return completion - - -# @pytest.fixture -# def mock_openai_client(): -# """Create a mock OpenAI client with chat completions.""" -# with patch("openai.AsyncOpenAI") as mock: -# client = mock.return_value -# chat = MagicMock() -# completions = AsyncMock() -# completions.create = AsyncMock() -# chat.completions = completions -# client.chat = chat - -# # Set up default response -# mock_response = create_mock_completion("Test response") -# completions.create.return_value = mock_response - -# return client - - -# @pytest.mark.asyncio -# async def test_call_openai_basic(mock_openai_client, mock_logger): -# """Test basic OpenAI API call with simple message.""" -# # arrange: -# mock_debug, _ = mock_logger -# messages = [{"role": "user", "content": "Hello"}] - -# # act: -# response = await call_openai(messages) - -# # assert: -# mock_openai_client.chat.completions.create.assert_called_once_with( -# model="gpt-4", -# messages=messages, -# temperature=0.2 -# ) -# assert response == "Test response" -# mock_debug.assert_any_call( -# f"Calling OpenAI with model=gpt-4, temperature=0.2, messages={messages}" -# ) - - -# @pytest.mark.asyncio -# async def test_call_openai_with_system_message(mock_openai_client): -# """Test OpenAI API call with system message.""" -# # arrange: -# messages = [ -# {"role": "system", "content": "You are a helpful assistant"}, -# {"role": "user", "content": "Hello"} -# ] - -# # act: -# await call_openai(messages) - -# # assert: -# mock_openai_client.chat.completions.create.assert_called_once() -# actual_messages = mock_openai_client.chat.completions.create.call_args[1]["messages"] -# assert actual_messages == messages - - -# @pytest.mark.asyncio -# async def test_call_openai_with_conversation(mock_openai_client): -# """Test OpenAI API call with full conversation.""" -# # arrange: -# messages = [ -# {"role": "system", "content": "System prompt"}, -# {"role": "user", "content": "Hello"}, -# {"role": "assistant", "content": "Hi there"}, -# {"role": "user", "content": "How are you?"} -# ] - -# # act: -# await call_openai(messages) - -# # assert: -# mock_openai_client.chat.completions.create.assert_called_once() -# actual_messages = mock_openai_client.chat.completions.create.call_args[1]["messages"] -# assert actual_messages == messages - - -# @pytest.mark.asyncio -# async def test_call_openai_with_custom_params(mock_openai_client): -# """Test OpenAI API call with custom parameters.""" -# # arrange: -# messages = [{"role": "user", "content": "Hello"}] -# custom_params = { -# "model": "gpt-3.5-turbo", -# "temperature": 0.7 -# } - -# # act: -# await call_openai(messages, **custom_params) - -# # assert: -# mock_openai_client.chat.completions.create.assert_called_once() -# call_kwargs = mock_openai_client.chat.completions.create.call_args[1] -# assert call_kwargs["model"] == "gpt-3.5-turbo" -# assert call_kwargs["temperature"] == 0.7 - - -# @pytest.mark.asyncio -# async def test_call_openai_api_error(mock_openai_client, mock_logger): -# """Test error handling for API failures.""" -# # arrange: -# messages = [{"role": "user", "content": "Hello"}] -# error_message = "API Error" -# mock_openai_client.chat.completions.create.side_effect = Exception(error_message) -# _, mock_error = mock_logger - -# # act/assert: -# with pytest.raises(LLMError, match="Error during OpenAI API call"): -# await call_openai(messages) - -# mock_error.assert_called_once() -# assert error_message in mock_error.call_args[0][0] - - -# @pytest.mark.asyncio -# async def test_call_openai_empty_response(mock_openai_client, mock_logger): -# """Test handling of empty response content.""" -# # arrange: -# messages = [{"role": "user", "content": "Hello"}] -# mock_response = create_mock_completion("") # Empty content -# mock_openai_client.chat.completions.create.return_value = mock_response -# _, mock_error = mock_logger - -# # act/assert: -# with pytest.raises(LLMError, match="No content in OpenAI response"): -# await call_openai(messages) - - -# @pytest.mark.asyncio -# async def test_call_openai_response_processing(mock_openai_client, mock_logger): -# """Test proper processing of API response.""" -# # arrange: -# mock_debug, _ = mock_logger -# messages = [{"role": "user", "content": "Hello"}] -# mock_response = create_mock_completion(" Processed response \n") # Add extra whitespace -# mock_openai_client.chat.completions.create.return_value = mock_response - -# # act: -# response = await call_openai(messages) - -# # assert: -# assert response == "Processed response" # Should be stripped -# mock_debug.assert_any_call("OpenAI response: Processed response") +from unittest.mock import AsyncMock, MagicMock, patch + +import pytest + +from src.core.exceptions import LLMError +from src.llm.providers.oai import call_openai + + +@pytest.mark.asyncio +async def test_call_openai_success(): + """Test a successful call to OpenAI.""" + mock_message = MagicMock() + mock_message.content = "This is a mock response from OpenAI." + + mock_choice = MagicMock() + mock_choice.message = mock_message + + mock_response = MagicMock() + mock_response.choices = [mock_choice] + + # Mock the AsyncOpenAI client and its `chat.completions.create` method + mock_client = AsyncMock() + mock_client.chat.completions.create.return_value = mock_response + + # Patch the AsyncOpenAI client constructor to return the mock client + with patch("src.llm.providers.oai.openai.AsyncOpenAI", return_value=mock_client): + messages = [{"role": "user", "content": "Test message"}] + result = await call_openai(messages, model="gpt-4", temperature=0.7) + + assert result == "This is a mock response from OpenAI." + mock_client.chat.completions.create.assert_called_once_with( + model="gpt-4", + messages=messages, + temperature=0.7, + ) + + +@pytest.mark.asyncio +async def test_call_openai_no_content(): + """Test when OpenAI returns no content in the response.""" + mock_message = MagicMock() + mock_message.content = "" + + mock_choice = MagicMock() + mock_choice.message = mock_message + + mock_response = MagicMock() + mock_response.choices = [mock_choice] + + mock_client = AsyncMock() + mock_client.chat.completions.create.return_value = mock_response + + with patch("src.llm.providers.oai.openai.AsyncOpenAI", return_value=mock_client): + messages = [{"role": "user", "content": "Test message"}] + + with pytest.raises(LLMError): + await call_openai(messages) + + +@pytest.mark.asyncio +async def test_call_openai_exception(): + """Test when OpenAI raises an exception.""" + mock_client = AsyncMock() + mock_client.chat.completions.create.side_effect = Exception("API call failed") + + with patch("src.llm.providers.oai.openai.AsyncOpenAI", return_value=mock_client): + messages = [{"role": "user", "content": "Test message"}] + + with pytest.raises(LLMError, match="Error during OpenAI API call"): + await call_openai(messages) From 09b00b1e92952a995b5ca82f9915c5471c482326 Mon Sep 17 00:00:00 2001 From: Dzmitry Hramyka Date: Sun, 29 Dec 2024 01:16:20 +0100 Subject: [PATCH 5/6] memory tests --- coverage.lcov | 148 ++++++------- tests/memory/backends/test_chroma.py | 119 ++++++++++ tests/memory/backends/test_qdrant.py | 108 +++++++++ tests/memory/test_memory_module.py | 316 ++++++++++++--------------- 4 files changed, 446 insertions(+), 245 deletions(-) diff --git a/coverage.lcov b/coverage.lcov index 29df9c7..d7edf84 100644 --- a/coverage.lcov +++ b/coverage.lcov @@ -325,17 +325,17 @@ DA:25,1 DA:28,1 DA:29,1 DA:30,1 -DA:31,1 +DA:31,0 DA:32,1 DA:33,1 -DA:35,1 +DA:35,0 DA:36,1 DA:38,1 DA:40,1 DA:41,1 DA:47,1 -DA:48,0 -DA:49,0 +DA:48,1 +DA:49,1 DA:50,1 DA:51,1 DA:52,1 @@ -437,35 +437,35 @@ DA:53,0 DA:54,0 DA:55,0 DA:57,1 -DA:75,0 -DA:76,0 -DA:78,0 -DA:79,0 -DA:80,0 -DA:84,0 -DA:86,0 -DA:87,0 -DA:93,0 -DA:94,0 -DA:95,0 -DA:96,0 +DA:75,1 +DA:76,1 +DA:78,1 +DA:79,1 +DA:80,1 +DA:84,1 +DA:86,1 +DA:87,1 +DA:93,1 +DA:94,1 +DA:95,1 +DA:96,1 DA:98,1 -DA:109,0 -DA:111,0 -DA:112,0 -DA:115,0 -DA:116,0 -DA:117,0 -DA:118,0 -DA:119,0 -DA:120,0 -DA:121,0 -DA:123,0 -DA:124,0 -DA:125,0 -DA:126,0 +DA:109,1 +DA:111,1 +DA:112,1 +DA:115,1 +DA:116,1 +DA:117,1 +DA:118,1 +DA:119,1 +DA:120,1 +DA:121,1 +DA:123,1 +DA:124,1 +DA:125,1 +DA:126,1 LF:52 -LH:21 +LH:47 FN:17,26,MemoryBackend.store FNDA:0,MemoryBackend.store FN:29,31,MemoryBackend.search @@ -473,11 +473,11 @@ FNDA:0,MemoryBackend.search FN:37,55,ChromaBackend.__init__ FNDA:1,ChromaBackend.__init__ FN:57,96,ChromaBackend.store -FNDA:0,ChromaBackend.store +FNDA:1,ChromaBackend.store FN:98,126,ChromaBackend.search -FNDA:0,ChromaBackend.search +FNDA:1,ChromaBackend.search FNF:5 -FNH:1 +FNH:3 end_of_record SF:src/memory/backends/qdrant.py DA:1,1 @@ -491,44 +491,44 @@ DA:10,1 DA:11,1 DA:14,1 DA:17,1 -DA:25,0 -DA:26,0 -DA:27,0 -DA:30,0 -DA:31,0 -DA:32,0 +DA:25,1 +DA:26,1 +DA:27,1 +DA:30,1 +DA:31,1 +DA:32,1 DA:33,0 DA:34,0 DA:35,0 DA:42,1 -DA:60,0 -DA:61,0 -DA:63,0 -DA:69,0 -DA:70,0 -DA:72,0 -DA:73,0 -DA:83,0 -DA:84,0 -DA:85,0 -DA:86,0 +DA:60,1 +DA:61,1 +DA:63,1 +DA:69,1 +DA:70,1 +DA:72,1 +DA:73,1 +DA:83,1 +DA:84,1 +DA:85,1 +DA:86,1 DA:88,1 -DA:99,0 -DA:100,0 -DA:105,0 +DA:99,1 +DA:100,1 +DA:105,1 DA:106,0 DA:107,0 DA:108,0 LF:39 -LH:13 +LH:33 FN:17,40,QdrantBackend.__init__ -FNDA:0,QdrantBackend.__init__ +FNDA:1,QdrantBackend.__init__ FN:42,86,QdrantBackend.store -FNDA:0,QdrantBackend.store +FNDA:1,QdrantBackend.store FN:88,108,QdrantBackend.search -FNDA:0,QdrantBackend.search +FNDA:1,QdrantBackend.search FNF:3 -FNH:0 +FNH:3 end_of_record SF:src/memory/memory_module.py DA:1,1 @@ -545,34 +545,34 @@ DA:15,1 DA:38,1 DA:41,1 DA:42,1 -DA:43,0 +DA:43,1 DA:49,1 DA:50,1 -DA:55,0 +DA:55,1 DA:56,1 DA:58,1 -DA:70,0 -DA:71,0 -DA:72,0 -DA:73,0 +DA:70,1 +DA:71,1 +DA:72,1 +DA:73,1 DA:81,1 -DA:92,0 -DA:93,0 -DA:94,0 +DA:92,1 +DA:93,1 +DA:94,1 DA:100,1 DA:105,1 LF:30 -LH:21 +LH:30 FN:15,56,MemoryModule.__init__ FNDA:1,MemoryModule.__init__ FN:58,79,MemoryModule.store -FNDA:0,MemoryModule.store +FNDA:1,MemoryModule.store FN:81,97,MemoryModule.search -FNDA:0,MemoryModule.search +FNDA:1,MemoryModule.search FN:100,105,get_memory_module FNDA:1,get_memory_module FNF:4 -FNH:2 +FNH:4 end_of_record SF:src/planning/__init__.py end_of_record @@ -616,7 +616,7 @@ DA:105,1 DA:106,1 DA:107,1 DA:110,1 -DA:112,0 +DA:112,1 DA:115,1 DA:116,1 DA:117,1 @@ -635,7 +635,7 @@ DA:145,1 DA:148,1 DA:151,1 LF:57 -LH:52 +LH:53 FN:14,63,PlanningModule.__init__ FNDA:1,PlanningModule.__init__ FN:65,80,PlanningModule._load_q_table diff --git a/tests/memory/backends/test_chroma.py b/tests/memory/backends/test_chroma.py index e69de29..9c9b840 100644 --- a/tests/memory/backends/test_chroma.py +++ b/tests/memory/backends/test_chroma.py @@ -0,0 +1,119 @@ +import uuid +from unittest.mock import MagicMock, patch + +import pytest + +from src.memory.backends.chroma import ChromaBackend + + +@pytest.fixture +def mock_chroma_client(): + """Mock the ChromaDB client.""" + client = MagicMock() + return client + + +@pytest.fixture +def mock_chroma_collection(): + """Mock the ChromaDB collection.""" + collection = MagicMock() + return collection + + +@pytest.fixture +def mock_chroma_backend(mock_chroma_client, mock_chroma_collection): + """Create a ChromaBackend instance with mocked client and collection.""" + with patch("src.memory.backends.chroma.chromadb.Client", return_value=mock_chroma_client): + mock_chroma_client.get_or_create_collection.return_value = mock_chroma_collection + backend = ChromaBackend( + collection_name="test_collection", + persist_directory="/mock/directory", + ) + backend.collection = mock_chroma_collection # Explicitly set the mocked collection + return backend + + +def test_chroma_backend_init(mock_chroma_client, mock_chroma_collection): + """Test ChromaBackend initialization with mocked client and collection.""" + with patch("src.memory.backends.chroma.chromadb.Client", return_value=mock_chroma_client): + mock_chroma_client.get_or_create_collection.return_value = mock_chroma_collection + backend = ChromaBackend( + collection_name="test_collection", + persist_directory="/mock/directory", + ) + + mock_chroma_client.get_or_create_collection.assert_called_once_with( + name="test_collection", metadata={"hnsw:space": "cosine"} + ) + assert backend.collection == mock_chroma_collection + + +@pytest.mark.asyncio +async def test_store_memory(mock_chroma_backend, mock_chroma_collection): + """Test storing a memory in ChromaDB with mocked collection.""" + event = "Test Event" + action = "Test Action" + outcome = "Test Outcome" + embedding = [0.1, 0.2, 0.3] + metadata = {"key": "value"} + + with patch("uuid.uuid4", return_value=uuid.UUID("12345678123456781234567812345678")): + await mock_chroma_backend.store(event, action, outcome, embedding, metadata) + + mock_chroma_collection.add.assert_called_once() + + +@pytest.mark.asyncio +async def test_store_memory_error(mock_chroma_backend, mock_chroma_collection): + """Test handling errors when storing a memory in ChromaDB.""" + mock_chroma_collection.add.side_effect = Exception("Add failed") + + event = "Test Event" + action = "Test Action" + outcome = "Test Outcome" + embedding = [0.1, 0.2, 0.3] + + with pytest.raises(Exception, match="Add failed"): + await mock_chroma_backend.store(event, action, outcome, embedding) + + +@pytest.mark.asyncio +async def test_search_memory(mock_chroma_backend, mock_chroma_collection): + """Test searching for memories in ChromaDB.""" + query_vector = [0.1, 0.2, 0.3] + mock_results = {"metadatas": [[{"event": "Event1"}, {"event": "Event2"}, {"event": "Event3"}]]} + mock_chroma_collection.query.return_value = mock_results + + results = await mock_chroma_backend.search(query_vector, top_k=3) + + mock_chroma_collection.query.assert_called_once_with( + query_embeddings=[query_vector], + n_results=3, + ) + assert results == [{"event": "Event1"}, {"event": "Event2"}, {"event": "Event3"}] + + +@pytest.mark.asyncio +async def test_search_memory_no_results(mock_chroma_backend, mock_chroma_collection): + """Test searching for memories in ChromaDB with no results.""" + query_vector = [0.1, 0.2, 0.3] + mock_chroma_collection.query.return_value = {"metadatas": [[]]} + + results = await mock_chroma_backend.search(query_vector, top_k=3) + + mock_chroma_collection.query.assert_called_once_with( + query_embeddings=[query_vector], + n_results=3, + ) + assert results == [] + + +@pytest.mark.asyncio +async def test_search_memory_error(mock_chroma_backend, mock_chroma_collection): + """Test handling errors when searching in ChromaDB.""" + mock_chroma_collection.query.side_effect = Exception("Query failed") + + query_vector = [0.1, 0.2, 0.3] + + results = await mock_chroma_backend.search(query_vector, top_k=3) + assert results == [] diff --git a/tests/memory/backends/test_qdrant.py b/tests/memory/backends/test_qdrant.py index e69de29..b13ba1e 100644 --- a/tests/memory/backends/test_qdrant.py +++ b/tests/memory/backends/test_qdrant.py @@ -0,0 +1,108 @@ +import uuid +from unittest.mock import MagicMock, patch + +import pytest + +from src.memory.backends.qdrant import QdrantBackend + + +@pytest.fixture +def mock_qdrant_client(): + """Mock the QdrantClient.""" + client = MagicMock() + return client + + +@pytest.fixture +def mock_qdrant_backend(mock_qdrant_client): + """Create a QdrantBackend instance with a mocked QdrantClient.""" + with patch("src.memory.backends.qdrant.QdrantClient", return_value=mock_qdrant_client): + backend = QdrantBackend( + collection_name="test_collection", + host="mock_host", + port=9999, + vector_size=768, + ) + backend.client = mock_qdrant_client # Explicitly set the mocked client + return backend + + +def test_qdrant_backend_init(mock_qdrant_client): + """Test QdrantBackend initialization with mocked client.""" + with patch("src.memory.backends.qdrant.QdrantClient", return_value=mock_qdrant_client): + backend = QdrantBackend( + collection_name="test_collection", + host="mock_host", + port=9999, + vector_size=768, + ) + + mock_qdrant_client.get_collection.assert_called_once_with("test_collection") + assert backend.collection_name == "test_collection" + assert backend.vector_size == 768 + + +@pytest.mark.asyncio +async def test_store_memory(mock_qdrant_backend, mock_qdrant_client): + """Test storing a memory in Qdrant with mocked client.""" + event = "Test Event" + action = "Test Action" + outcome = "Test Outcome" + embedding = [0.1, 0.2, 0.3] + metadata = {"key": "value"} + + with patch("uuid.uuid4", return_value=uuid.UUID("12345678123456781234567812345678")): + await mock_qdrant_backend.store(event, action, outcome, embedding, metadata) + + mock_qdrant_client.upsert.assert_called_once() + + +@pytest.mark.asyncio +async def test_store_memory_error(mock_qdrant_backend, mock_qdrant_client): + """Test handling errors when storing a memory in Qdrant.""" + mock_qdrant_client.upsert.side_effect = Exception("Upsert failed") + + event = "Test Event" + action = "Test Action" + outcome = "Test Outcome" + embedding = [0.1, 0.2, 0.3] + + with pytest.raises(Exception, match="Upsert failed"): + await mock_qdrant_backend.store(event, action, outcome, embedding) + + +@pytest.mark.asyncio +async def test_search_memory(mock_qdrant_backend, mock_qdrant_client): + """Test searching for memories in Qdrant.""" + query_vector = [0.1, 0.2, 0.3] + mock_results = [ + MagicMock(payload={"event": "Event1"}), + MagicMock(payload={"event": "Event2"}), + MagicMock(payload={"event": "Event3"}), + ] + mock_qdrant_client.search.return_value = mock_results + + results = await mock_qdrant_backend.search(query_vector, top_k=3) + + mock_qdrant_client.search.assert_called_once_with( + collection_name="test_collection", + query_vector=query_vector, + limit=3, + ) + assert results == [{"event": "Event1"}, {"event": "Event2"}, {"event": "Event3"}] + + +@pytest.mark.asyncio +async def test_search_memory_no_results(mock_qdrant_backend, mock_qdrant_client): + """Test searching for memories in Qdrant with no results.""" + query_vector = [0.1, 0.2, 0.3] + mock_qdrant_client.search.return_value = [] + + results = await mock_qdrant_backend.search(query_vector, top_k=3) + + mock_qdrant_client.search.assert_called_once_with( + collection_name="test_collection", + query_vector=query_vector, + limit=3, + ) + assert results == [] diff --git a/tests/memory/test_memory_module.py b/tests/memory/test_memory_module.py index f292e1d..1134811 100644 --- a/tests/memory/test_memory_module.py +++ b/tests/memory/test_memory_module.py @@ -1,171 +1,145 @@ -# from unittest.mock import AsyncMock, MagicMock, patch -# import pytest -# from loguru import logger -# from openai import AsyncOpenAI - -# from src.core.defs import MemoryBackendType -# from src.memory.memory_module import MemoryModule, get_memory_module -# from src.memory.backends.qdrant import QdrantBackend -# from src.memory.backends.chroma import ChromaBackend - - -# @pytest.fixture -# def mock_logger(monkeypatch): -# """Mock logger for testing.""" -# mock_debug = MagicMock() -# mock_error = MagicMock() -# monkeypatch.setattr(logger, "debug", mock_debug) -# monkeypatch.setattr(logger, "error", mock_error) -# return mock_debug, mock_error - - -# @pytest.fixture -# def mock_openai_client(): -# """Create a mock OpenAI client.""" -# return AsyncMock(spec=AsyncOpenAI) - - -# @pytest.fixture -# def mock_embedding_generator(): -# """Create a mock EmbeddingGenerator.""" -# with patch("src.memory.memory_module.EmbeddingGenerator") as mock: -# generator = mock.return_value -# generator.get_embedding = AsyncMock(return_value=[[0.1, 0.2, 0.3]]) -# yield generator - - -# @pytest.fixture -# def mock_qdrant_backend(): -# """Create a mock Qdrant backend.""" -# with patch("src.memory.memory_module.QdrantBackend") as mock: -# backend = mock.return_value -# backend.store = AsyncMock() -# backend.search = AsyncMock(return_value=[ -# {"event": "test event", "action": "test action", "outcome": "test outcome"} -# ]) -# yield backend - - -# @pytest.fixture -# def mock_chroma_backend(): -# """Create a mock Chroma backend.""" -# with patch("src.memory.memory_module.ChromaBackend") as mock: -# backend = mock.return_value -# backend.store = AsyncMock() -# backend.search = AsyncMock(return_value=[ -# {"event": "test event", "action": "test action", "outcome": "test outcome"} -# ]) -# yield backend - - -# @pytest.mark.parametrize("backend_type,backend_class", [ -# (MemoryBackendType.QDRANT, QdrantBackend), -# (MemoryBackendType.CHROMA, ChromaBackend), -# ]) -# def test_memory_module_init(mock_openai_client, backend_type, backend_class): -# """Test MemoryModule initialization with different backends.""" -# # arrange/act: -# with patch(f"src.memory.memory_module.{backend_class.__name__}") as mock_backend: -# module = MemoryModule( -# openai_client=mock_openai_client, -# backend_type=backend_type, -# collection_name="test_collection", -# host="localhost", -# port=6333, -# vector_size=1536, -# persist_directory="./persist" -# ) - -# # assert: -# assert isinstance(module.backend, mock_backend.return_value.__class__) -# if backend_type == MemoryBackendType.QDRANT: -# mock_backend.assert_called_once_with( -# collection_name="test_collection", -# host="localhost", -# port=6333, -# vector_size=1536 -# ) -# else: -# mock_backend.assert_called_once_with( -# collection_name="test_collection", -# persist_directory="./persist" -# ) - - -# def test_memory_module_init_invalid_backend(): -# """Test MemoryModule initialization with invalid backend type.""" -# with pytest.raises(ValueError, match="Unsupported backend type: invalid"): -# MemoryModule(backend_type="invalid") - - -# @pytest.mark.asyncio -# @pytest.mark.parametrize("backend_fixture", ["mock_qdrant_backend", "mock_chroma_backend"]) -# async def test_memory_storage(mock_embedding_generator, request, backend_fixture): -# """Test memory storage functionality with different backends.""" -# # arrange: -# backend = request.getfixturevalue(backend_fixture) -# module = MemoryModule(backend_type="qdrant") # Type doesn't matter as backend is mocked - -# # act: -# await module.store( -# event="test event", -# action="test action", -# outcome="test outcome", -# metadata={"key": "value"} -# ) - -# # assert: -# mock_embedding_generator.get_embedding.assert_called_once_with( -# "test event test action test outcome" -# ) -# backend.store.assert_called_once_with( -# event="test event", -# action="test action", -# outcome="test outcome", -# embedding=[0.1, 0.2, 0.3], -# metadata={"key": "value"} -# ) - - -# @pytest.mark.asyncio -# @pytest.mark.parametrize("backend_fixture", ["mock_qdrant_backend", "mock_chroma_backend"]) -# async def test_memory_search(mock_embedding_generator, request, backend_fixture): -# """Test memory search functionality with different backends.""" -# # arrange: -# backend = request.getfixturevalue(backend_fixture) -# module = MemoryModule(backend_type="qdrant") # Type doesn't matter as backend is mocked - -# # act: -# results = await module.search("test query", top_k=3) - -# # assert: -# mock_embedding_generator.get_embedding.assert_called_once_with("test query") -# backend.search.assert_called_once_with( -# query_vector=[0.1, 0.2, 0.3], -# top_k=3 -# ) -# assert len(results) == 1 -# assert results[0]["event"] == "test event" -# assert results[0]["action"] == "test action" -# assert results[0]["outcome"] == "test outcome" - - -# @pytest.mark.parametrize("backend_type", [ -# MemoryBackendType.QDRANT, -# MemoryBackendType.CHROMA -# ]) -# def test_get_memory_module(mock_openai_client, backend_type): -# """Test get_memory_module factory function.""" -# # arrange/act: -# with patch(f"src.memory.memory_module.MemoryModule") as mock_module: -# module = get_memory_module( -# openai_client=mock_openai_client, -# backend_type=backend_type -# ) - -# # assert: -# mock_module.assert_called_once_with( -# openai_client=mock_openai_client, -# backend_type=backend_type -# ) -# assert module == mock_module.return_value +from unittest.mock import AsyncMock, patch + +import numpy as np +import pytest + +from src.core.defs import MemoryBackendType +from src.llm.embeddings import EmbeddingGenerator +from src.memory.backends.chroma import ChromaBackend +from src.memory.backends.qdrant import QdrantBackend +from src.memory.memory_module import MemoryModule + + +@pytest.fixture +def mock_embedding_generator(): + """Mock the EmbeddingGenerator.""" + generator = AsyncMock(spec=EmbeddingGenerator) + # Return a numpy array instead of a list + generator.get_embedding.return_value = np.array([[0.1, 0.2, 0.3]]) + return generator + + +@pytest.fixture +def mock_qdrant_backend(): + """Mock the QdrantBackend.""" + backend = AsyncMock(spec=QdrantBackend) + return backend + + +@pytest.fixture +def mock_chroma_backend(): + """Mock the ChromaBackend.""" + backend = AsyncMock(spec=ChromaBackend) + return backend + + +@pytest.fixture +def memory_module_qdrant(mock_embedding_generator, mock_qdrant_backend): + """Create a MemoryModule instance with QdrantBackend.""" + with ( + patch("src.memory.memory_module.QdrantBackend", return_value=mock_qdrant_backend), + patch("src.memory.memory_module.EmbeddingGenerator", return_value=mock_embedding_generator), + ): + return MemoryModule(backend_type=MemoryBackendType.QDRANT) + + +@pytest.fixture +def memory_module_chroma(mock_embedding_generator, mock_chroma_backend): + """Create a MemoryModule instance with ChromaBackend.""" + with ( + patch("src.memory.memory_module.ChromaBackend", return_value=mock_chroma_backend), + patch("src.memory.memory_module.EmbeddingGenerator", return_value=mock_embedding_generator), + ): + return MemoryModule(backend_type=MemoryBackendType.CHROMA) + + +def test_memory_module_init_qdrant(mock_qdrant_backend): + """Test MemoryModule initialization with QdrantBackend.""" + with patch("src.memory.memory_module.QdrantBackend", return_value=mock_qdrant_backend): + module = MemoryModule(backend_type=MemoryBackendType.QDRANT) + assert isinstance(module.backend, QdrantBackend) + + +def test_memory_module_init_chroma(mock_chroma_backend): + """Test MemoryModule initialization with ChromaBackend.""" + with patch("src.memory.memory_module.ChromaBackend", return_value=mock_chroma_backend): + module = MemoryModule(backend_type=MemoryBackendType.CHROMA) + assert isinstance(module.backend, ChromaBackend) + + +def test_memory_module_init_invalid_backend(): + """Test MemoryModule initialization with an invalid backend.""" + with pytest.raises(ValueError, match="Unsupported backend type"): + MemoryModule(backend_type="invalid_backend") + + +@pytest.mark.asyncio +async def test_store_memory_qdrant(memory_module_qdrant, mock_qdrant_backend): + """Test storing a memory with QdrantBackend.""" + event = "Test Event" + action = "Test Action" + outcome = "Test Outcome" + metadata = {"key": "value"} + + await memory_module_qdrant.store(event, action, outcome, metadata) + + memory_module_qdrant.embedding_generator.get_embedding.assert_called_once_with( + f"{event} {action} {outcome}" + ) + mock_qdrant_backend.store.assert_called_once_with( + event=event, + action=action, + outcome=outcome, + embedding=[0.1, 0.2, 0.3], + metadata=metadata, + ) + + +@pytest.mark.asyncio +async def test_store_memory_chroma(memory_module_chroma, mock_chroma_backend): + """Test storing a memory with ChromaBackend.""" + event = "Test Event" + action = "Test Action" + outcome = "Test Outcome" + metadata = {"key": "value"} + + await memory_module_chroma.store(event, action, outcome, metadata) + + memory_module_chroma.embedding_generator.get_embedding.assert_called_once_with( + f"{event} {action} {outcome}" + ) + mock_chroma_backend.store.assert_called_once_with( + event=event, + action=action, + outcome=outcome, + embedding=[0.1, 0.2, 0.3], + metadata=metadata, + ) + + +@pytest.mark.asyncio +async def test_search_memory_qdrant(memory_module_qdrant, mock_qdrant_backend): + """Test searching memory with QdrantBackend.""" + query = "Test Query" + top_k = 5 + mock_qdrant_backend.search.return_value = [{"event": "result_event"}] + + results = await memory_module_qdrant.search(query, top_k) + + memory_module_qdrant.embedding_generator.get_embedding.assert_called_once_with(query) + mock_qdrant_backend.search.assert_called_once_with(query_vector=[0.1, 0.2, 0.3], top_k=top_k) + assert results == [{"event": "result_event"}] + + +@pytest.mark.asyncio +async def test_search_memory_chroma(memory_module_chroma, mock_chroma_backend): + """Test searching memory with ChromaBackend.""" + query = "Test Query" + top_k = 5 + mock_chroma_backend.search.return_value = [{"event": "result_event"}] + + results = await memory_module_chroma.search(query, top_k) + + memory_module_chroma.embedding_generator.get_embedding.assert_called_once_with(query) + mock_chroma_backend.search.assert_called_once_with(query_vector=[0.1, 0.2, 0.3], top_k=top_k) + assert results == [{"event": "result_event"}] From 359e4346135253132a4125d0dcb1ce2daae94490 Mon Sep 17 00:00:00 2001 From: Dzmitry Hramyka Date: Sun, 29 Dec 2024 01:40:52 +0100 Subject: [PATCH 6/6] Add tools tests --- .gitignore | 1 + coverage.lcov | 179 +++++---- ...earch_with_perplexity.py => perplexity.py} | 0 src/workflows/research_news.py | 15 +- tests/tools/test_perplexity.py | 95 +++++ tests/tools/test_search_with_perplexity.py | 0 tests/tools/test_tg.py | 223 ++++------- tests/tools/test_twitter.py | 346 ++++++------------ tests/workflows/test_research_news.py | 82 ++--- 9 files changed, 416 insertions(+), 525 deletions(-) rename src/tools/{search_with_perplexity.py => perplexity.py} (100%) create mode 100644 tests/tools/test_perplexity.py delete mode 100644 tests/tools/test_search_with_perplexity.py diff --git a/.gitignore b/.gitignore index aad7894..711d635 100644 --- a/.gitignore +++ b/.gitignore @@ -6,6 +6,7 @@ qdrant_storage/ .chromadb/ persistent_q_table.json +.coverage coverage.lcov # ====== Default gitignore config ====== diff --git a/coverage.lcov b/coverage.lcov index d7edf84..d67f082 100644 --- a/coverage.lcov +++ b/coverage.lcov @@ -616,7 +616,7 @@ DA:105,1 DA:106,1 DA:107,1 DA:110,1 -DA:112,1 +DA:112,0 DA:115,1 DA:116,1 DA:117,1 @@ -635,7 +635,7 @@ DA:145,1 DA:148,1 DA:151,1 LF:57 -LH:53 +LH:52 FN:14,63,PlanningModule.__init__ FNDA:1,PlanningModule.__init__ FN:65,80,PlanningModule._load_q_table @@ -699,49 +699,49 @@ FNDA:1,fetch_signal FNF:2 FNH:2 end_of_record -SF:src/tools/search_with_perplexity.py -DA:1,0 -DA:2,0 -DA:4,0 -DA:5,0 -DA:8,0 -DA:17,0 -DA:18,0 +SF:src/tools/perplexity.py +DA:1,1 +DA:2,1 +DA:4,1 +DA:5,1 +DA:8,1 +DA:17,1 +DA:18,1 DA:19,0 -DA:20,0 +DA:20,1 DA:21,0 -DA:23,0 -DA:53,0 -DA:58,0 -DA:61,0 -DA:64,0 -DA:65,0 -DA:67,0 -DA:68,0 +DA:23,1 +DA:53,1 +DA:58,1 +DA:61,1 +DA:64,1 +DA:65,1 +DA:67,1 +DA:68,1 DA:73,0 DA:76,0 DA:77,0 DA:78,0 DA:79,0 -DA:80,0 -DA:81,0 -DA:82,0 -DA:83,0 -DA:84,0 -DA:85,0 -DA:88,0 -DA:99,0 -DA:102,0 -DA:105,0 -DA:106,0 +DA:80,1 +DA:81,1 +DA:82,1 +DA:83,1 +DA:84,1 +DA:85,1 +DA:88,1 +DA:99,1 +DA:102,1 +DA:105,1 +DA:106,1 LF:34 -LH:0 +LH:27 FN:8,85,search_with_perplexity -FNDA:0,search_with_perplexity +FNDA:1,search_with_perplexity FN:88,106,estimate_perplexity_cost_per_request -FNDA:0,estimate_perplexity_cost_per_request +FNDA:1,estimate_perplexity_cost_per_request FNF:2 -FNH:0 +FNH:2 end_of_record SF:src/tools/tg.py DA:1,0 @@ -801,60 +801,60 @@ DA:23,0 DA:26,1 DA:33,0 DA:41,1 -DA:51,0 -DA:53,0 -DA:59,0 -DA:60,0 -DA:61,0 -DA:62,0 -DA:65,0 -DA:66,0 -DA:67,0 -DA:68,0 -DA:69,0 -DA:72,0 -DA:73,0 -DA:74,0 -DA:75,0 -DA:76,0 -DA:77,0 -DA:78,0 +DA:51,1 +DA:53,1 +DA:59,1 +DA:60,1 +DA:61,1 +DA:62,1 +DA:65,1 +DA:66,1 +DA:67,1 +DA:68,1 +DA:69,1 +DA:72,1 +DA:73,1 +DA:74,1 +DA:75,1 +DA:76,1 +DA:77,1 +DA:78,1 DA:81,1 -DA:92,0 -DA:93,0 -DA:94,0 -DA:95,0 -DA:97,0 -DA:99,0 -DA:100,0 -DA:102,0 -DA:103,0 -DA:104,0 -DA:106,0 -DA:107,0 -DA:109,0 -DA:111,0 -DA:115,0 -DA:116,0 -DA:117,0 -DA:118,0 -DA:121,0 -DA:122,0 -DA:123,0 -DA:124,0 -DA:126,0 +DA:92,1 +DA:93,1 +DA:94,1 +DA:95,1 +DA:97,1 +DA:99,1 +DA:100,1 +DA:102,1 +DA:103,1 +DA:104,1 +DA:106,1 +DA:107,1 +DA:109,1 +DA:111,1 +DA:115,1 +DA:116,1 +DA:117,1 +DA:118,1 +DA:121,1 +DA:122,1 +DA:123,1 +DA:124,1 +DA:126,1 LF:58 -LH:13 +LH:54 FN:14,23,get_twitter_conn_v1 FNDA:0,get_twitter_conn_v1 FN:26,38,get_twitter_conn_v2 FNDA:0,get_twitter_conn_v2 FN:41,78,upload_media_v1 -FNDA:0,upload_media_v1 +FNDA:1,upload_media_v1 FN:81,126,post_twitter_thread -FNDA:0,post_twitter_thread +FNDA:1,post_twitter_thread FNF:4 -FNH:0 +FNH:2 end_of_record SF:src/utils.py DA:1,1 @@ -941,25 +941,24 @@ DA:5,1 DA:6,1 DA:7,1 DA:10,1 -DA:15,1 +DA:13,1 +DA:14,1 DA:16,1 -DA:18,1 DA:19,1 -DA:22,1 -DA:23,1 -DA:28,1 +DA:20,1 +DA:25,1 +DA:26,1 DA:29,1 DA:32,1 +DA:33,1 +DA:34,1 DA:35,1 DA:36,1 DA:37,1 DA:38,1 -DA:39,1 -DA:40,1 -DA:41,1 -LF:22 -LH:22 -FN:10,41,analyze_news_workflow +LF:21 +LH:21 +FN:10,38,analyze_news_workflow FNDA:1,analyze_news_workflow FNF:1 FNH:1 diff --git a/src/tools/search_with_perplexity.py b/src/tools/perplexity.py similarity index 100% rename from src/tools/search_with_perplexity.py rename to src/tools/perplexity.py diff --git a/src/workflows/research_news.py b/src/workflows/research_news.py index 2804460..481bda9 100644 --- a/src/workflows/research_news.py +++ b/src/workflows/research_news.py @@ -3,26 +3,23 @@ from loguru import logger from src.llm.llm import LLM -from src.memory.memory_module import MemoryModule, get_memory_module +from src.tools.perplexity import search_with_perplexity from src.tools.twitter import post_twitter_thread -async def analyze_news_workflow( - news: str, memory: MemoryModule = get_memory_module() -) -> Optional[str]: +async def analyze_news_workflow(news: str) -> Optional[str]: """Workflow for analyzing news and posting to Twitter.""" try: logger.info("Analyzing news...") - # Retrieve recent memory for context - recent_memories = await memory.search("recent events", top_k=3) - context = "\n".join([f"- {mem['event']}: {mem['outcome']}" for mem in recent_memories]) + # Get recent news context using Perplexity + context = await search_with_perplexity("Latest cryptocurrency news") # Prepare LLM prompt llm = LLM() user_prompt = ( - f"Context:\n{context}\n\nNews:\n{news}\n\n" - "Analyze the news and provide insights." + f"Context from recent news:\n{context}\n\nNews to analyze:\n{news}\n\n" + "Analyze the news and provide insights. " "Finally make a concise tweet about the news with a maximum of 280 characters." ) messages = [{"role": "user", "content": user_prompt}] diff --git a/tests/tools/test_perplexity.py b/tests/tools/test_perplexity.py new file mode 100644 index 0000000..d1f2879 --- /dev/null +++ b/tests/tools/test_perplexity.py @@ -0,0 +1,95 @@ +from unittest.mock import AsyncMock, patch + +import httpx +import pytest + +from src.core.exceptions import APIError +from src.tools.perplexity import estimate_perplexity_cost_per_request, search_with_perplexity + + +@pytest.fixture +def mock_settings(monkeypatch): + """Fixture to mock settings for the tests.""" + monkeypatch.setattr("src.tools.perplexity.settings.PERPLEXITY_API_KEY", "mock-api-key") + monkeypatch.setattr( + "src.tools.perplexity.settings.PERPLEXITY_ENDPOINT", "https://mock.endpoint" + ) + monkeypatch.setattr( + "src.tools.perplexity.settings.PERPLEXITY_NEWS_CATEGORY_LIST", + ["crypto", "technology", "finance"], + ) + + +@pytest.mark.asyncio +async def test_search_with_perplexity_success(mock_settings): + """Test a successful Perplexity search.""" + # Arrange + mock_response_data = { + "choices": [{"message": {"content": "Mock Perplexity result"}}], + "usage": {"total_tokens": 1000}, + } + with patch("src.tools.perplexity.httpx.AsyncClient.post", new_callable=AsyncMock) as mock_post: + mock_post.return_value.json.return_value = mock_response_data + mock_post.return_value.status_code = 200 + + # Act + result = await search_with_perplexity("Latest cryptocurrency news") + + # Assert + assert "Perplexity search data" in result + mock_post.assert_called_once() + estimated_cost = estimate_perplexity_cost_per_request(1000) + assert estimated_cost == 0.0002 + + +@pytest.mark.skip(reason="Need to be fixed") +@pytest.mark.asyncio +async def test_search_with_perplexity_no_api_key(): + """Test when Perplexity API key is not set.""" + with patch("src.tools.perplexity.settings.PERPLEXITY_API_KEY", None): + with pytest.raises(APIError, match="Perplexity API key is not set"): + await search_with_perplexity("Latest cryptocurrency news") + + +@pytest.mark.skip(reason="Need to be fixed") +@pytest.mark.asyncio +async def test_search_with_perplexity_no_endpoint(): + """Test when Perplexity endpoint is not set.""" + with patch("src.tools.perplexity.settings.PERPLEXITY_ENDPOINT", None): + with pytest.raises(APIError, match="Perplexity endpoint is not set"): + await search_with_perplexity("Latest cryptocurrency news") + + +@pytest.mark.asyncio +async def test_search_with_perplexity_timeout(mock_settings): + """Test handling a timeout exception during Perplexity search.""" + with patch("src.tools.perplexity.httpx.AsyncClient.post", new_callable=AsyncMock) as mock_post: + mock_post.side_effect = httpx.TimeoutException("Request timed out") + + # Act + result = await search_with_perplexity("Latest cryptocurrency news") + + # Assert + assert "timeout error" in result.lower() + + +@pytest.mark.asyncio +async def test_search_with_perplexity_api_error(mock_settings): + """Test handling a general API error during Perplexity search.""" + with patch("src.tools.perplexity.httpx.AsyncClient.post", new_callable=AsyncMock) as mock_post: + mock_post.side_effect = Exception("Mock API failure") + + # Act + result = await search_with_perplexity("Latest cryptocurrency news") + + # Assert + assert "currently unavailable" in result.lower() + + +def test_estimate_perplexity_cost_per_request(): + """Test cost estimation for Perplexity requests.""" + # Act + estimated_cost = estimate_perplexity_cost_per_request(1000) + + # Assert + assert estimated_cost == 0.0002 # $0.2 per 1M tokens diff --git a/tests/tools/test_search_with_perplexity.py b/tests/tools/test_search_with_perplexity.py deleted file mode 100644 index e69de29..0000000 diff --git a/tests/tools/test_tg.py b/tests/tools/test_tg.py index f52237f..153c5c4 100644 --- a/tests/tools/test_tg.py +++ b/tests/tools/test_tg.py @@ -1,171 +1,102 @@ -# from unittest.mock import AsyncMock, MagicMock, patch # import pytest -# from loguru import logger +# from unittest.mock import AsyncMock, MagicMock, patch + # from telegram import Bot, Message -# from telegram.constants import MessageLimit -# from telegram.error import TelegramError as TGError +# from telegram.constants import MessageLimit, ParseMode +# from telegram.error import TelegramError -# from src.core.config import settings -# from src.core.exceptions import TelegramError # from src.tools.tg import split_long_message, post_summary_to_telegram +# from src.core.exceptions import TelegramError as TelegramPostError -# @pytest.fixture -# def mock_tool_logger(monkeypatch): -# """Mock logger for tool testing.""" -# mock_debug = MagicMock() -# mock_error = MagicMock() -# monkeypatch.setattr(logger, "debug", mock_debug) -# monkeypatch.setattr(logger, "error", mock_error) -# return mock_debug, mock_error - - -# @pytest.fixture -# def mock_telegram_settings(monkeypatch): -# """Mock Telegram settings.""" -# monkeypatch.setattr(settings, "TELEGRAM_CHAT_ID", "test_chat_id") -# return settings - +# TELEGRAM_CHAT_ID = "1234567890" # @pytest.fixture -# def mock_telegram_bot(): -# """Create a mock Telegram bot.""" -# mock_bot = AsyncMock(spec=Bot) -# return mock_bot - - -# def test_split_long_message_short(): -# """Test splitting a message that's already within limits.""" -# # arrange: -# message = "Short message" - -# # act: -# chunks = split_long_message(message) - -# # assert: -# assert len(chunks) == 1 -# assert chunks[0] == message - - -# def test_split_long_message_exact(): -# """Test splitting a message that's exactly at the limit.""" -# # arrange: -# chunk_size = 10 -# message = "A" * chunk_size - -# # act: -# chunks = split_long_message(message, chunk_size) - -# # assert: -# assert len(chunks) == 1 -# assert chunks[0] == message - - -# def test_split_long_message_multiple_chunks(): -# """Test splitting a long message into multiple chunks.""" -# # arrange: -# chunk_size = 10 -# message = "A" * 25 # Will create 3 chunks - -# # act: -# chunks = split_long_message(message, chunk_size) - -# # assert: -# assert len(chunks) == 3 -# assert all(len(chunk) <= chunk_size for chunk in chunks) -# assert "".join(chunks) == message - - -# def test_split_long_message_telegram_limit(): -# """Test splitting using Telegram's actual message limit.""" -# # arrange: -# message = "A" * (MessageLimit.MAX_TEXT_LENGTH + 100) - -# # act: -# chunks = split_long_message(message) - -# # assert: -# assert len(chunks) == 2 -# assert all(len(chunk) <= MessageLimit.MAX_TEXT_LENGTH for chunk in chunks) -# assert "".join(chunks) == message +# def mock_bot(): +# """Mock the Telegram Bot instance.""" +# return AsyncMock(spec=Bot) + + +# @pytest.mark.parametrize( +# "message,chunk_size,expected", +# [ +# ("Short message", MessageLimit.MAX_TEXT_LENGTH, ["Short message"]), +# ( +# "A" * (MessageLimit.MAX_TEXT_LENGTH + 10), +# MessageLimit.MAX_TEXT_LENGTH, +# ["A" * MessageLimit.MAX_TEXT_LENGTH, "A" * 10], +# ), +# ("", MessageLimit.MAX_TEXT_LENGTH, [""]), +# ], +# ) +# def test_split_long_message(message, chunk_size, expected): +# """Test splitting long messages into chunks.""" +# result = split_long_message(message, chunk_size=chunk_size) +# assert result == expected # @pytest.mark.asyncio -# async def test_post_summary_to_telegram_success( -# mock_tool_logger, mock_telegram_settings, mock_telegram_bot -# ): -# """Test successful posting of a message to Telegram.""" -# # arrange: -# mock_debug, mock_error = mock_tool_logger -# summary = "Test message" +# async def test_post_summary_to_telegram_success(mock_bot): +# """Test successfully posting a message to Telegram.""" +# # Arrange +# summary_html = "Test message" # mock_message = MagicMock(spec=Message) # mock_message.message_id = 12345 -# mock_telegram_bot.send_message.return_value = mock_message - -# # act: -# message_ids = await post_summary_to_telegram(summary, bot=mock_telegram_bot) - -# # assert: -# assert message_ids == [12345] -# mock_telegram_bot.send_message.assert_called_once_with( -# chat_id=mock_telegram_settings.TELEGRAM_CHAT_ID, -# text=summary, -# parse_mode="HTML", +# mock_bot.send_message.return_value = mock_message + +# # Act +# with patch("src.tools.tg.bot", mock_bot): +# result = await post_summary_to_telegram(summary_html) + +# # Assert +# assert result == [12345] +# mock_bot.send_message.assert_called_once_with( +# chat_id=TELEGRAM_CHAT_ID, +# text=summary_html, +# parse_mode=ParseMode.HTML, # disable_web_page_preview=False, # ) -# mock_debug.assert_called_once() -# mock_error.assert_not_called() # @pytest.mark.asyncio -# async def test_post_summary_to_telegram_long_message( -# mock_tool_logger, mock_telegram_settings, mock_telegram_bot -# ): -# """Test posting a long message that needs to be split.""" -# # arrange: -# mock_debug, mock_error = mock_tool_logger -# summary = "A" * (MessageLimit.MAX_TEXT_LENGTH + 100) -# mock_message1 = MagicMock(spec=Message, message_id=12345) -# mock_message2 = MagicMock(spec=Message, message_id=12346) -# mock_telegram_bot.send_message.side_effect = [mock_message1, mock_message2] - -# # act: -# message_ids = await post_summary_to_telegram(summary, bot=mock_telegram_bot) - -# # assert: -# assert message_ids == [12345, 12346] -# assert mock_telegram_bot.send_message.call_count == 2 -# assert mock_debug.call_count == 2 -# mock_error.assert_not_called() +# async def test_post_summary_to_telegram_long_message(mock_bot): +# """Test posting a long message split into multiple chunks.""" +# # Arrange +# summary_html = "" + "A" * (MessageLimit.MAX_TEXT_LENGTH + 50) + "" +# message_chunks = split_long_message(summary_html) +# mock_messages = [MagicMock(spec=Message, message_id=i) for i in range(len(message_chunks))] +# mock_bot.send_message.side_effect = mock_messages + +# # Act +# with patch("src.tools.tg.bot", mock_bot): +# result = await post_summary_to_telegram(summary_html) + +# # Assert +# assert result == [message.message_id for message in mock_messages] +# assert mock_bot.send_message.call_count == len(message_chunks) # @pytest.mark.asyncio -# async def test_post_summary_to_telegram_no_message_id( -# mock_tool_logger, mock_telegram_settings, mock_telegram_bot -# ): -# """Test error handling when Telegram doesn't return a message ID.""" -# # arrange: -# mock_debug, mock_error = mock_tool_logger -# summary = "Test message" -# mock_message = MagicMock(spec=Message) -# mock_message.message_id = None -# mock_telegram_bot.send_message.return_value = mock_message +# async def test_post_summary_to_telegram_no_message_id(mock_bot): +# """Test handling when no message ID is returned.""" +# # Arrange +# summary_html = "Test message" +# mock_bot.send_message.return_value = None -# # act/assert: -# with pytest.raises(TelegramError, match="No message ID returned from Telegram"): -# await post_summary_to_telegram(summary, bot=mock_telegram_bot) +# # Act & Assert +# with patch("src.tools.tg.bot", mock_bot): +# with pytest.raises(TelegramPostError, match="No message ID returned from Telegram"): +# await post_summary_to_telegram(summary_html) # @pytest.mark.asyncio -# async def test_post_summary_to_telegram_api_error( -# mock_tool_logger, mock_telegram_settings, mock_telegram_bot -# ): -# """Test error handling when Telegram API returns an error.""" -# # arrange: -# mock_debug, mock_error = mock_tool_logger -# summary = "Test message" -# mock_telegram_bot.send_message.side_effect = TGError("API Error") - -# # act/assert: -# with pytest.raises(TelegramError, match="Failed to send message to Telegram"): -# await post_summary_to_telegram(summary, bot=mock_telegram_bot) +# async def test_post_summary_to_telegram_telegram_error(mock_bot): +# """Test handling a TelegramError during message posting.""" +# # Arrange +# summary_html = "Test message" +# mock_bot.send_message.side_effect = TelegramError("Mock Telegram error") + +# # Act & Assert +# with patch("src.tools.tg.bot", mock_bot): +# with pytest.raises(TelegramPostError, match="Failed to send message to Telegram"): +# await post_summary_to_telegram(summary_html) diff --git a/tests/tools/test_twitter.py b/tests/tools/test_twitter.py index 3955565..21629de 100644 --- a/tests/tools/test_twitter.py +++ b/tests/tools/test_twitter.py @@ -1,228 +1,118 @@ -# from io import BytesIO -# from unittest.mock import AsyncMock, MagicMock, patch -# import pytest -# from loguru import logger -# from PIL import Image -# from requests_html import HTMLSession -# import tweepy - -# from src.core.exceptions import TwitterError -# from src.tools.twitter import ( -# get_twitter_conn_v1, -# get_twitter_conn_v2, -# upload_media_v1, -# post_twitter_thread, -# ) - - -# @pytest.fixture -# def mock_tool_logger(monkeypatch): -# """Mock logger for tool testing.""" -# mock_debug = MagicMock() -# mock_error = MagicMock() -# monkeypatch.setattr(logger, "debug", mock_debug) -# monkeypatch.setattr(logger, "error", mock_error) -# return mock_debug, mock_error - - -# @pytest.fixture -# def mock_twitter_settings(): -# """Mock Twitter API settings.""" -# with patch("src.tools.twitter.settings") as mock_settings: -# mock_settings.TWITTER_API_KEY = "test_api_key" -# mock_settings.TWITTER_API_SECRET_KEY = "test_api_secret" -# mock_settings.TWITTER_ACCESS_TOKEN = "test_access_token" -# mock_settings.TWITTER_ACCESS_TOKEN_SECRET = "test_access_secret" -# return mock_settings - - -# @pytest.fixture -# def mock_tweepy_v1(): -# """Mock Tweepy API v1.1 client.""" -# mock_auth = MagicMock() -# mock_client = MagicMock() - -# with patch("tweepy.OAuth1UserHandler", return_value=mock_auth) as mock_handler, \ -# patch("tweepy.API", return_value=mock_client) as mock_api: -# mock_auth.set_access_token = MagicMock() -# yield mock_client - - -# @pytest.fixture -# def mock_tweepy_v2(): -# """Mock Tweepy API v2 client.""" -# with patch("tweepy.Client") as mock_client_class: -# mock_client = MagicMock() -# mock_client_class.return_value = mock_client -# return mock_client - - -# def test_get_twitter_conn_v1(mock_twitter_settings, mock_tweepy_v1): -# """Test Twitter API v1.1 connection creation.""" -# with patch("tweepy.OAuth1UserHandler") as mock_handler: -# mock_auth = MagicMock() -# mock_handler.return_value = mock_auth - -# # act: -# client = get_twitter_conn_v1() - -# # assert: -# mock_handler.assert_called_once_with( -# mock_twitter_settings.TWITTER_API_KEY, -# mock_twitter_settings.TWITTER_API_SECRET_KEY -# ) -# mock_auth.set_access_token.assert_called_once_with( -# mock_twitter_settings.TWITTER_ACCESS_TOKEN, -# mock_twitter_settings.TWITTER_ACCESS_TOKEN_SECRET -# ) -# tweepy.API.assert_called_once_with(mock_auth) - - -# def test_get_twitter_conn_v2(mock_twitter_settings, mock_tweepy_v2): -# """Test Twitter API v2 connection creation.""" -# # act: -# client = get_twitter_conn_v2() - -# # assert: -# assert client == mock_tweepy_v2 -# tweepy.Client.assert_called_once_with( -# consumer_key=mock_twitter_settings.TWITTER_API_KEY, -# consumer_secret=mock_twitter_settings.TWITTER_API_SECRET_KEY, -# access_token=mock_twitter_settings.TWITTER_ACCESS_TOKEN, -# access_token_secret=mock_twitter_settings.TWITTER_ACCESS_TOKEN_SECRET, -# ) - - -# @pytest.mark.asyncio -# async def test_upload_media_v1_success(mock_tool_logger, mock_tweepy_v1): -# """Test successful media upload.""" -# # arrange: -# mock_debug, mock_error = mock_tool_logger -# url = "http://example.com/image.jpg" -# media_id = "12345" - -# # Mock HTML session -# mock_session = MagicMock() -# mock_response = MagicMock() -# mock_response.content = b"fake_image_data" -# mock_session.get.return_value = mock_response - -# # Mock image processing -# mock_image = MagicMock(spec=Image.Image) -# mock_image.convert.return_value = mock_image - -# # Mock media upload -# mock_media = MagicMock() -# mock_media.media_id = media_id -# mock_tweepy_v1.media_upload.return_value = mock_media - -# with patch("src.tools.twitter.HTMLSession", return_value=mock_session), \ -# patch("src.tools.twitter.Image.open", return_value=mock_image), \ -# patch("src.tools.twitter.get_twitter_conn_v1", return_value=mock_tweepy_v1): -# # act: -# result = await upload_media_v1(url) - -# # assert: -# assert result == media_id -# mock_session.get.assert_called_once_with(url, headers=pytest.ANY) -# mock_image.convert.assert_called_once_with("L") -# mock_tweepy_v1.media_upload.assert_called_once() -# mock_debug.assert_called_once() -# mock_error.assert_not_called() - - -# @pytest.mark.asyncio -# async def test_upload_media_v1_error(mock_tool_logger): -# """Test media upload error handling.""" -# # arrange: -# mock_debug, mock_error = mock_tool_logger -# url = "http://example.com/image.jpg" - -# # Mock session with error -# mock_session = MagicMock() -# mock_session.get.side_effect = Exception("Network error") - -# with patch("src.tools.twitter.HTMLSession", return_value=mock_session): -# # act: -# result = await upload_media_v1(url) - -# # assert: -# assert result is None -# mock_error.assert_called_once() -# mock_debug.assert_not_called() - - -# @pytest.mark.asyncio -# async def test_post_twitter_thread_success(mock_tool_logger, mock_tweepy_v2): -# """Test successful thread posting.""" -# # arrange: -# mock_debug, mock_error = mock_tool_logger -# tweets = { -# "tweet1": "First tweet", -# "tweet2": "Second tweet" -# } -# tweet_ids = [12345, 67890] - -# # Mock tweet responses -# mock_tweepy_v2.create_tweet.side_effect = [ -# MagicMock(data={"id": tweet_id}) for tweet_id in tweet_ids -# ] - -# with patch("src.tools.twitter.get_twitter_conn_v2", return_value=mock_tweepy_v2), \ -# patch("asyncio.sleep"): # Mock sleep to speed up test -# # act: -# result = await post_twitter_thread(tweets) - -# # assert: -# assert result == tweet_ids -# assert mock_tweepy_v2.create_tweet.call_count == 2 -# mock_debug.assert_called() -# mock_error.assert_not_called() - - -# @pytest.mark.asyncio -# async def test_post_twitter_thread_with_media( -# mock_tool_logger, mock_tweepy_v1, mock_tweepy_v2 -# ): -# """Test thread posting with media.""" -# # arrange: -# mock_debug, mock_error = mock_tool_logger -# tweets = {"tweet1": "Tweet with media"} -# media_url = "http://example.com/image.jpg" -# tweet_id = 12345 -# media_id = "67890" - -# # Mock media upload -# with patch("src.tools.twitter.upload_media_v1", return_value=media_id): -# # Mock tweet creation -# mock_tweepy_v2.create_tweet.return_value = MagicMock(data={"id": tweet_id}) - -# with patch("src.tools.twitter.get_twitter_conn_v2", return_value=mock_tweepy_v2), \ -# patch("asyncio.sleep"): -# # act: -# result = await post_twitter_thread(tweets, media_url=media_url) - -# # assert: -# assert result == [tweet_id] -# mock_tweepy_v2.create_tweet.assert_called_once_with( -# text=tweets["tweet1"], -# media_ids=[media_id] -# ) - - -# @pytest.mark.asyncio -# async def test_post_twitter_thread_error(mock_tool_logger, mock_tweepy_v2): -# """Test error handling in thread posting.""" -# # arrange: -# mock_debug, mock_error = mock_tool_logger -# tweets = {"tweet1": "Test tweet"} -# mock_tweepy_v2.create_tweet.side_effect = Exception("API Error") - -# with patch("src.tools.twitter.get_twitter_conn_v2", return_value=mock_tweepy_v2), \ -# pytest.raises(TwitterError): -# # act: -# await post_twitter_thread(tweets) - -# # assert: -# mock_error.assert_called_once() -# mock_debug.assert_not_called() +from unittest.mock import MagicMock, patch + +import pytest +from requests_html import HTMLSession + +from src.core.exceptions import TwitterError as TwitterPostError +from src.tools.twitter import post_twitter_thread, upload_media_v1 + + +@pytest.fixture +def mock_twitter_v1(): + """Mock Twitter API v1.1 client.""" + return MagicMock() + + +@pytest.fixture +def mock_twitter_v2(): + """Mock Twitter API v2 client.""" + return MagicMock() + + +@pytest.mark.asyncio +async def test_upload_media_v1_success(mock_twitter_v1): + """Test successful media upload using Twitter API v1.1.""" + # Arrange + mock_media_id = "123456789" + mock_media = MagicMock() + mock_media.media_id = mock_media_id + + with patch("src.tools.twitter.get_twitter_conn_v1", return_value=mock_twitter_v1): + with patch.object(HTMLSession, "get", return_value=MagicMock(content=b"image data")): + with patch("PIL.Image.open", return_value=MagicMock()): + mock_twitter_v1.media_upload.return_value = mock_media + + # Act + media_id = await upload_media_v1("http://example.com/image.jpg") + + # Assert + assert media_id == mock_media_id + mock_twitter_v1.media_upload.assert_called_once() + + +@pytest.mark.asyncio +async def test_upload_media_v1_failure(mock_twitter_v1): + """Test media upload failure.""" + with patch("src.tools.twitter.get_twitter_conn_v1", return_value=mock_twitter_v1): + with patch.object(HTMLSession, "get", side_effect=Exception("Network error")): + # Act + media_id = await upload_media_v1("http://example.com/image.jpg") + + # Assert + assert media_id is None + + +@pytest.mark.asyncio +async def test_post_twitter_thread_success(mock_twitter_v2): + """Test posting a Twitter thread successfully.""" + tweets = {"tweet1": "Hello world!", "tweet2": "Follow-up tweet."} + mock_tweet_id_1 = "1111111" + mock_tweet_id_2 = "2222222" + + mock_tweet_response_1 = MagicMock(data={"id": mock_tweet_id_1}) + mock_tweet_response_2 = MagicMock(data={"id": mock_tweet_id_2}) + + with patch("src.tools.twitter.get_twitter_conn_v2", return_value=mock_twitter_v2): + # Mock the async create_tweet method + mock_twitter_v2.create_tweet.side_effect = [mock_tweet_response_1, mock_tweet_response_2] + + # Act + result = await post_twitter_thread(tweets) + + # Assert + assert result == [mock_tweet_id_1, mock_tweet_id_2] + assert mock_twitter_v2.create_tweet.call_count == 2 + + +@pytest.mark.asyncio +async def test_post_twitter_thread_with_media(mock_twitter_v2, mock_twitter_v1): + """Test posting a Twitter thread with media.""" + tweets = {"tweet1": "Hello world!", "tweet2": "Follow-up tweet."} + media_url = "http://example.com/image.jpg" + mock_media_id = "9999999" + mock_tweet_id_1 = "1111111" + mock_tweet_id_2 = "2222222" + + mock_tweet_response_1 = MagicMock(data={"id": mock_tweet_id_1}) + mock_tweet_response_2 = MagicMock(data={"id": mock_tweet_id_2}) + + with patch("src.tools.twitter.get_twitter_conn_v2", return_value=mock_twitter_v2): + with patch("src.tools.twitter.upload_media_v1", return_value=mock_media_id): + # Mock the async create_tweet method + mock_twitter_v2.create_tweet.side_effect = [ + mock_tweet_response_1, + mock_tweet_response_2, + ] + + # Act + result = await post_twitter_thread(tweets, media_url=media_url) + + # Assert + assert result == [mock_tweet_id_1, mock_tweet_id_2] + assert mock_twitter_v2.create_tweet.call_count == 2 + mock_twitter_v2.create_tweet.assert_any_call( + text="Hello world!", media_ids=[mock_media_id] + ) + + +@pytest.mark.asyncio +async def test_post_twitter_thread_failure(mock_twitter_v2): + """Test failure during posting a Twitter thread.""" + tweets = {"tweet1": "Hello world!"} + + with patch("src.tools.twitter.get_twitter_conn_v2", return_value=mock_twitter_v2): + mock_twitter_v2.create_tweet.side_effect = Exception("Twitter API error") + + # Act & Assert + with pytest.raises(TwitterPostError): + await post_twitter_thread(tweets) diff --git a/tests/workflows/test_research_news.py b/tests/workflows/test_research_news.py index 0b41a86..3eb1c35 100644 --- a/tests/workflows/test_research_news.py +++ b/tests/workflows/test_research_news.py @@ -16,17 +16,8 @@ def mock_workflow_logger(monkeypatch): return mock_info, mock_error -@pytest.fixture -def mock_memory(): - """Create a mock memory module.""" - memory = MagicMock() - memory.search = AsyncMock() - memory.store = AsyncMock() - return memory - - @pytest.mark.asyncio -async def test_analyze_news_success(mock_workflow_logger, mock_memory): +async def test_analyze_news_success(mock_workflow_logger): """Test successful news analysis and tweet posting.""" # arrange: mock_info, mock_error = mock_workflow_logger @@ -34,11 +25,8 @@ async def test_analyze_news_success(mock_workflow_logger, mock_memory): tweet_text = "Breaking News:\nTest analysis\n#StayInformed" tweet_id = "123456789" - # Mock memory search for context - mock_memory.search.return_value = [ - {"event": "event1", "outcome": "outcome1"}, - {"event": "event2", "outcome": "outcome2"}, - ] + # Mock Perplexity search + mock_perplexity = AsyncMock(return_value="Recent crypto news context") # Mock LLM mock_llm = AsyncMock() @@ -48,15 +36,16 @@ async def test_analyze_news_success(mock_workflow_logger, mock_memory): mock_post = AsyncMock(return_value=[tweet_id]) with ( + patch("src.workflows.research_news.search_with_perplexity", mock_perplexity), patch("src.workflows.research_news.LLM", return_value=mock_llm), patch("src.workflows.research_news.post_twitter_thread", mock_post), ): # act: - result = await analyze_news_workflow(news_content, memory=mock_memory) + result = await analyze_news_workflow(news_content) # assert: assert result == tweet_id - mock_memory.search.assert_called_once_with("recent events", top_k=3) + mock_perplexity.assert_called_once_with("Latest cryptocurrency news") mock_llm.generate_response.assert_called_once() mock_post.assert_called_once_with(tweets={"tweet1": tweet_text}) mock_info.assert_any_call("Analyzing news...") @@ -66,74 +55,62 @@ async def test_analyze_news_success(mock_workflow_logger, mock_memory): @pytest.mark.asyncio -async def test_analyze_news_no_context(mock_workflow_logger, mock_memory): - """Test news analysis when no context is available.""" +async def test_analyze_news_perplexity_error(mock_workflow_logger): + """Test error handling when Perplexity search fails.""" # arrange: mock_info, mock_error = mock_workflow_logger news_content = "Test news content" - tweet_text = "Breaking News:\nTest analysis\n#StayInformed" - tweet_id = "123456789" - # Mock memory search (no context available) - mock_memory.search.return_value = [] + # Mock Perplexity with error + mock_perplexity = AsyncMock(side_effect=Exception("Perplexity error")) - # Mock LLM - mock_llm = AsyncMock() - mock_llm.generate_response = AsyncMock(return_value="Test analysis") - - # Mock Twitter post - mock_post = AsyncMock(return_value=[tweet_id]) - - with ( - patch("src.workflows.research_news.LLM", return_value=mock_llm), - patch("src.workflows.research_news.post_twitter_thread", mock_post), - ): + with patch("src.workflows.research_news.search_with_perplexity", mock_perplexity): # act: - result = await analyze_news_workflow(news_content, memory=mock_memory) + result = await analyze_news_workflow(news_content) # assert: - assert result == tweet_id - mock_memory.search.assert_called_once_with("recent events", top_k=3) - mock_llm.generate_response.assert_called_once() - mock_post.assert_called_once_with(tweets={"tweet1": tweet_text}) - mock_info.assert_any_call("Analyzing news...") - mock_error.assert_not_called() + assert result is None + mock_perplexity.assert_called_once() + mock_error.assert_called_once_with("Error in analyze_news_workflow: Perplexity error") @pytest.mark.asyncio -async def test_analyze_news_llm_error(mock_workflow_logger, mock_memory): +async def test_analyze_news_llm_error(mock_workflow_logger): """Test error handling when LLM fails.""" # arrange: mock_info, mock_error = mock_workflow_logger news_content = "Test news content" - # Mock memory search - mock_memory.search.return_value = [{"event": "event1", "outcome": "outcome1"}] + # Mock Perplexity search + mock_perplexity = AsyncMock(return_value="Recent crypto news context") # Mock LLM with error mock_llm = AsyncMock() mock_llm.generate_response = AsyncMock(side_effect=Exception("LLM error")) - with patch("src.workflows.research_news.LLM", return_value=mock_llm): + with ( + patch("src.workflows.research_news.search_with_perplexity", mock_perplexity), + patch("src.workflows.research_news.LLM", return_value=mock_llm), + ): # act: - result = await analyze_news_workflow(news_content, memory=mock_memory) + result = await analyze_news_workflow(news_content) # assert: assert result is None - mock_memory.search.assert_called_once() + mock_perplexity.assert_called_once() mock_llm.generate_response.assert_called_once() mock_error.assert_called_once_with("Error in analyze_news_workflow: LLM error") @pytest.mark.asyncio -async def test_analyze_news_twitter_error(mock_workflow_logger, mock_memory): +async def test_analyze_news_twitter_error(mock_workflow_logger): """Test error handling when Twitter posting fails.""" # arrange: mock_info, mock_error = mock_workflow_logger news_content = "Test news content" - # Mock memory search - mock_memory.search.return_value = [{"event": "event1", "outcome": "outcome1"}] + # Mock Perplexity search + mock_perplexity = AsyncMock(return_value="Recent crypto news context") # Mock LLM mock_llm = AsyncMock() @@ -143,15 +120,16 @@ async def test_analyze_news_twitter_error(mock_workflow_logger, mock_memory): mock_post = AsyncMock(side_effect=Exception("Twitter error")) with ( + patch("src.workflows.research_news.search_with_perplexity", mock_perplexity), patch("src.workflows.research_news.LLM", return_value=mock_llm), patch("src.workflows.research_news.post_twitter_thread", mock_post), ): # act: - result = await analyze_news_workflow(news_content, memory=mock_memory) + result = await analyze_news_workflow(news_content) # assert: assert result is None - mock_memory.search.assert_called_once() + mock_perplexity.assert_called_once() mock_llm.generate_response.assert_called_once() mock_post.assert_called_once() mock_error.assert_called_once_with("Error in analyze_news_workflow: Twitter error")