From 94652903776efdb5349fff032eea86a18570210c Mon Sep 17 00:00:00 2001
From: Jonathan Granda Acaro
Date: Tue, 6 Aug 2024 20:44:57 -0400
Subject: [PATCH 01/21] Adding utils.py and setting up openai_config.json

---
 reverie/backend_server/utils..py | 20 ++++++++++++++++++++
 1 file changed, 20 insertions(+)
 create mode 100644 reverie/backend_server/utils..py

diff --git a/reverie/backend_server/utils..py b/reverie/backend_server/utils..py
new file mode 100644
index 0000000000..1d105f13fb
--- /dev/null
+++ b/reverie/backend_server/utils..py
@@ -0,0 +1,20 @@
+# Copy and paste your OpenAI API Key
+openai_api_key = ""
+# Put your name
+key_owner = "Jonathan"
+
+maze_assets_loc = "../../environment/frontend_server/static_dirs/assets"
+env_matrix = f"{maze_assets_loc}/the_ville/matrix"
+env_visuals = f"{maze_assets_loc}/the_ville/visuals"
+
+fs_storage = "../../environment/frontend_server/storage"
+fs_temp_storage = "../../environment/frontend_server/temp_storage"
+
+collision_block_id = "32125"
+
+# Verbose
+debug = True
+
+use_openai = True
+# If you're not using OpenAI, define api_model
+# api_model = ""
\ No newline at end of file

From cea0b78fd5b999c2a7405d982996b7bd074da097 Mon Sep 17 00:00:00 2001
From: Jonathan Granda Acaro
Date: Tue, 6 Aug 2024 21:04:05 -0400
Subject: [PATCH 02/21] Changing gpt model to gpt-4-turbo and embedding model to text-davinci-002

---
 README.md | 2 +-
 reverie/backend_server/persona/prompt_template/gpt_structure.py | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/README.md b/README.md
index 35fb138b39..59ff92e159 100644
--- a/README.md
+++ b/README.md
@@ -68,7 +68,7 @@ OpenAI example:
 ```json
 {
     "client": "openai",
-    "model": "gpt-3.5-turbo-0125",
+    "model": "gpt-4-turbo",
     "model-key": "",
     "model-costs": {
         "input": 0.5,

diff --git a/reverie/backend_server/persona/prompt_template/gpt_structure.py b/reverie/backend_server/persona/prompt_template/gpt_structure.py
index f7e4d96eb5..0e43dc9440 100644
--- a/reverie/backend_server/persona/prompt_template/gpt_structure.py
+++ b/reverie/backend_server/persona/prompt_template/gpt_structure.py
@@ -120,7 +120,7 @@ def setup_client(type: str, config: dict):
 def ChatGPT_single_request(prompt):
   temp_sleep()
 
-  completion = client.chat.completions.create(model= "gpt-3.5-turbo" if use_openai else model,
+  completion = client.chat.completions.create(model= "gpt-4-turbo" if use_openai else model,
     messages=[{"role": "user", "content": prompt}])
   return completion.choices[0].message.content

From 113b48d8eb5b279355c75774add320c70f9cb5fd Mon Sep 17 00:00:00 2001
From: Jonathan Granda Acaro
Date: Wed, 7 Aug 2024 13:49:42 -0400
Subject: [PATCH 03/21] Creating 2 separate simulation scenarios: hide-and-seek and search-and-rescue

---
 .../environment/0.json                        | 17 ----
 .../associative_memory/embeddings.json        |  1 -
 .../associative_memory/kw_strength.json       |  2 -
 .../associative_memory/nodes.json             |  1 -
 .../bootstrap_memory/scratch.json             | 51 -----------
 .../bootstrap_memory/spatial_memory.json      | 66 --------------
 .../associative_memory/embeddings.json        |  1 -
 .../associative_memory/kw_strength.json       |  2 -
 .../associative_memory/nodes.json             |  1 -
 .../bootstrap_memory/scratch.json             | 51 -----------
 .../bootstrap_memory/spatial_memory.json      | 86 ------------------
 .../associative_memory/embeddings.json        |  1 -
 .../associative_memory/kw_strength.json       |  2 -
 .../associative_memory/nodes.json             |  1 -
 .../Maria Lopez/bootstrap_memory/scratch.json | 51 -----------
 .../bootstrap_memory/spatial_memory.json      | 87 -------------------
 .../reverie/meta.json                         | 13 ---
 17 files changed, 434 deletions(-)
 delete mode 100644 environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/environment/0.json
 delete mode 100644 environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Isabella Rodriguez/bootstrap_memory/associative_memory/embeddings.json
 delete mode 100644 environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Isabella Rodriguez/bootstrap_memory/associative_memory/kw_strength.json
 delete mode 100644 environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Isabella Rodriguez/bootstrap_memory/associative_memory/nodes.json
 delete mode 100644 environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Isabella Rodriguez/bootstrap_memory/scratch.json
 delete mode 100644 environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Isabella Rodriguez/bootstrap_memory/spatial_memory.json
 delete mode 100644 environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Klaus Mueller/bootstrap_memory/associative_memory/embeddings.json
 delete mode 100644 environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Klaus Mueller/bootstrap_memory/associative_memory/kw_strength.json
 delete mode 100644 environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Klaus Mueller/bootstrap_memory/associative_memory/nodes.json
 delete mode 100644 environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Klaus Mueller/bootstrap_memory/scratch.json
 delete mode 100644 environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Klaus Mueller/bootstrap_memory/spatial_memory.json
 delete mode 100644 environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Maria Lopez/bootstrap_memory/associative_memory/embeddings.json
 delete mode 100644 environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Maria Lopez/bootstrap_memory/associative_memory/kw_strength.json
 delete mode 100644 environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Maria Lopez/bootstrap_memory/associative_memory/nodes.json
 delete mode 100644 environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Maria Lopez/bootstrap_memory/scratch.json
 delete mode 100644 environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Maria Lopez/bootstrap_memory/spatial_memory.json
 delete mode 100644 environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/reverie/meta.json

diff --git a/environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/environment/0.json b/environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/environment/0.json
deleted file mode 100644
index 0b2fb23dc3..0000000000
--- a/environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/environment/0.json
+++ /dev/null
@@ -1,17 +0,0 @@
-{
-  "Isabella Rodriguez": {
-    "maze": "the_ville",
-    "x": 72,
-    "y": 14
-  },
-  "Klaus Mueller": {
-    "maze": "the_ville",
-    "x": 126,
-    "y": 46
-  },
-  "Maria Lopez": {
-    "maze": "the_ville",
-    "x": 123,
-    "y": 57
-  }
-}

diff --git a/environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Isabella Rodriguez/bootstrap_memory/associative_memory/embeddings.json b/environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Isabella Rodriguez/bootstrap_memory/associative_memory/embeddings.json
deleted file mode 100644
index 9e26dfeeb6..0000000000
--- a/environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Isabella Rodriguez/bootstrap_memory/associative_memory/embeddings.json
+++ /dev/null
@@ -1 +0,0 @@
-{}
\ No newline at end of file

diff --git a/environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Isabella Rodriguez/bootstrap_memory/associative_memory/kw_strength.json b/environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Isabella Rodriguez/bootstrap_memory/associative_memory/kw_strength.json
deleted file mode 100644
index 6dc73c1c85..0000000000
--- a/environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Isabella Rodriguez/bootstrap_memory/associative_memory/kw_strength.json
+++ /dev/null
@@ -1,2 +0,0 @@
-{"kw_strength_event": {},
- "kw_strength_thought": {}}
\ No newline at end of file

diff --git a/environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Isabella Rodriguez/bootstrap_memory/associative_memory/nodes.json b/environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Isabella Rodriguez/bootstrap_memory/associative_memory/nodes.json
deleted file mode 100644
index 9e26dfeeb6..0000000000
--- a/environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Isabella Rodriguez/bootstrap_memory/associative_memory/nodes.json
+++ /dev/null
@@ -1 +0,0 @@
-{}
\ No newline at end of file

diff --git a/environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Isabella Rodriguez/bootstrap_memory/scratch.json b/environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Isabella Rodriguez/bootstrap_memory/scratch.json
deleted file mode 100644
index dbed4b705e..0000000000
--- a/environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Isabella Rodriguez/bootstrap_memory/scratch.json
+++ /dev/null
@@ -1,51 +0,0 @@
-{
-  "vision_r": 8,
-  "att_bandwidth": 8,
-  "retention": 8,
-  "curr_time": null,
-  "curr_tile": null,
-  "daily_plan_req": "Isabella Rodriguez opens Hobbs Cafe at 8am everyday, and works at the counter until 8pm, at which point she closes the cafe.",
-  "name": "Isabella Rodriguez",
-  "first_name": "Isabella",
-  "last_name": "Rodriguez",
-  "age": 34,
-  "innate": "friendly, outgoing, hospitable",
-  "learned": "Isabella Rodriguez is a cafe owner of Hobbs Cafe who loves to make people feel welcome. She is always looking for ways to make the cafe a place where people can come to relax and enjoy themselves.",
-  "currently": "Isabella Rodriguez is planning on having a Valentine's Day party at Hobbs Cafe with her customers on February 14th, 2023 at 5pm. She is gathering party material, and is telling everyone to join the party at Hobbs Cafe on February 14th, 2023, from 5pm to 7pm.",
-  "lifestyle": "Isabella Rodriguez goes to bed around 11pm, awakes up around 6am.",
-  "living_area": "the Ville:Isabella Rodriguez's apartment:main room",
-  "concept_forget": 100,
-  "daily_reflection_time": 180,
-  "daily_reflection_size": 5,
-  "overlap_reflect_th": 4,
-  "kw_strg_event_reflect_th": 10,
-  "kw_strg_thought_reflect_th": 9,
-
-  "recency_w": 1,
-  "relevance_w": 1,
-  "importance_w": 1,
-  "recency_decay": 0.995,
-  "importance_trigger_max": 150,
-  "importance_trigger_curr": 150,
-  "importance_ele_n": 0,
-  "thought_count": 5,
-
-  "daily_req": [],
-  "f_daily_schedule": [],
-  "f_daily_schedule_hourly_org": [],
-  "act_address": null,
-  "act_start_time": null,
-  "act_duration": null,
-  "act_description": null,
-  "act_pronunciatio": null,
-  "act_event": ["Isabella Rodriguez", null, null],
-  "act_obj_description": null,
-  "act_obj_pronunciatio": null,
-  "act_obj_event": [null, null, null],
-  "chatting_with": null,
-  "chat": null,
-  "chatting_with_buffer": {},
-  "chatting_end_time": null,
-  "act_path_set": false,
-  "planned_path": []
-}
\ No newline at end of file

diff --git a/environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Isabella Rodriguez/bootstrap_memory/spatial_memory.json b/environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Isabella Rodriguez/bootstrap_memory/spatial_memory.json
deleted file mode 100644
index f881579508..0000000000
--- a/environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Isabella Rodriguez/bootstrap_memory/spatial_memory.json
+++ /dev/null
@@ -1,66 +0,0 @@
-{
-  "the Ville": {
-    "Hobbs Cafe": {
-      "cafe": [
-        "refrigerator",
-        "cafe customer seating",
-        "cooking area",
-        "kitchen sink",
-        "behind the cafe counter",
-        "piano"
-      ]
-    },
-    "Isabella Rodriguez's apartment": {
-      "main room": [
-        "bed",
-        "desk",
-        "refrigerator",
-        "closet",
-        "shelf"
-      ]
-    },
-    "The Rose and Crown Pub": {
-      "pub": [
-        "shelf",
-        "refrigerator",
-        "bar customer seating",
-        "behind the bar counter",
-        "kitchen sink",
-        "cooking area",
-        "microphone"
-      ]
-    },
-    "Harvey Oak Supply Store": {
-      "supply store": [
-        "supply store product shelf",
-        "behind the supply store counter",
-        "supply store counter"
-      ]
-    },
-    "The Willows Market and Pharmacy": {
-      "store": [
-        "behind the pharmacy counter",
-        "pharmacy store shelf",
-        "pharmacy store counter",
-        "grocery store shelf",
-        "behind the grocery counter",
-        "grocery store counter"
-      ]
-    },
-    "Dorm for Oak Hill College": {
-      "garden": [
-        "dorm garden"
-      ],
-      "common room": [
-        "common room sofa",
-        "pool table",
-        "common room table"
-      ]
-    },
-    "Johnson Park": {
-      "park": [
-        "park garden"
-      ]
-    }
-  }
-}
\ No newline at end of file

diff --git a/environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Klaus Mueller/bootstrap_memory/associative_memory/embeddings.json b/environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Klaus Mueller/bootstrap_memory/associative_memory/embeddings.json
deleted file mode 100644
index 9e26dfeeb6..0000000000
--- a/environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Klaus Mueller/bootstrap_memory/associative_memory/embeddings.json
+++ /dev/null
@@ -1 +0,0 @@
-{}
\ No newline at end of file

diff --git a/environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Klaus Mueller/bootstrap_memory/associative_memory/kw_strength.json b/environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Klaus Mueller/bootstrap_memory/associative_memory/kw_strength.json
deleted file mode 100644
index 6dc73c1c85..0000000000
--- a/environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Klaus Mueller/bootstrap_memory/associative_memory/kw_strength.json
+++ /dev/null
@@ -1,2 +0,0 @@
-{"kw_strength_event": {},
- "kw_strength_thought": {}}
\ No newline at end of file

diff --git a/environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Klaus Mueller/bootstrap_memory/associative_memory/nodes.json b/environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Klaus Mueller/bootstrap_memory/associative_memory/nodes.json
deleted file mode 100644
index 9e26dfeeb6..0000000000
--- a/environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Klaus Mueller/bootstrap_memory/associative_memory/nodes.json
+++ /dev/null
@@ -1 +0,0 @@
-{}
\ No newline at end of file

diff --git a/environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Klaus Mueller/bootstrap_memory/scratch.json b/environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Klaus Mueller/bootstrap_memory/scratch.json
deleted file mode 100644
index 7b0ce7d722..0000000000
--- a/environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Klaus Mueller/bootstrap_memory/scratch.json
+++ /dev/null
@@ -1,51 +0,0 @@
-{
-  "vision_r": 8,
-  "att_bandwidth": 8,
-  "retention": 8,
-  "curr_time": null,
-  "curr_tile": null,
-  "daily_plan_req": "Klaus Mueller goes to the library at Oak Hill College early in the morning, spends his days writing, and eats at Hobbs Cafe.",
-  "name": "Klaus Mueller",
-  "first_name": "Klaus",
-  "last_name": "Mueller",
-  "age": 20,
-  "innate": "kind, inquisitive, passionate",
-  "learned": "Klaus Mueller is a student at Oak Hill College studying sociology. He is passionate about social justice and loves to explore different perspectives.",
-  "currently": "Klaus Mueller is writing a research paper on the effects of gentrification in low-income communities.",
-  "lifestyle": "Klaus Mueller goes to bed around 11pm, awakes up around 7am, eats dinner around 5pm.",
-  "living_area": "the Ville:Dorm for Oak Hill College:Klaus Mueller's room",
-  "concept_forget": 100,
-  "daily_reflection_time": 180,
-  "daily_reflection_size": 5,
-  "overlap_reflect_th": 4,
-  "kw_strg_event_reflect_th": 10,
-  "kw_strg_thought_reflect_th": 9,
-
-  "recency_w": 1,
-  "relevance_w": 1,
-  "importance_w": 1,
-  "recency_decay": 0.99,
-  "importance_trigger_max": 150,
-  "importance_trigger_curr": 150,
-  "importance_ele_n": 0,
-  "thought_count": 5,
-
-  "daily_req": [],
-  "f_daily_schedule": [],
-  "f_daily_schedule_hourly_org": [],
-  "act_address": null,
-  "act_start_time": null,
-  "act_duration": null,
-  "act_description": null,
-  "act_pronunciatio": null,
-  "act_event": ["Klaus Mueller", null, null],
-  "act_obj_description": null,
-  "act_obj_pronunciatio": null,
-  "act_obj_event": [null, null, null],
-  "chatting_with": null,
-  "chat": null,
-  "chatting_with_buffer": {},
-  "chatting_end_time": null,
-  "act_path_set": false,
-  "planned_path": []
-}
\ No newline at end of file

diff --git a/environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Klaus Mueller/bootstrap_memory/spatial_memory.json b/environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Klaus Mueller/bootstrap_memory/spatial_memory.json
deleted file mode 100644
index 4f41686772..0000000000
--- a/environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Klaus Mueller/bootstrap_memory/spatial_memory.json
+++ /dev/null
@@ -1,86 +0,0 @@
-{
-  "the Ville": {
-    "Oak Hill College": {
-      "hallway": [],
-      "library": [
-        "library sofa",
-        "library table",
-        "bookshelf"
-      ],
-      "classroom": [
-        "blackboard",
-        "classroom podium",
-        "classroom student seating"
-      ]
-    },
-    "Dorm for Oak Hill College": {
-      "garden": [
-        "dorm garden"
-      ],
-      "Klaus Mueller's room": [
-        "bed",
-        "game console",
-        "closet",
-        "desk"
-      ],
-      "woman's bathroom": [
-        "toilet",
-        "shower",
-        "bathroom sink"
-      ],
-      "common room": [
-        "common room sofa",
-        "pool table",
-        "common room table"
-      ],
-      "man's bathroom": [
-        "shower",
-        "bathroom sink",
-        "toilet"
-      ]
-    },
-    "The Willows Market and Pharmacy": {
-      "store": [
-        "grocery store shelf",
-        "behind the grocery counter",
-        "grocery store counter",
-        "pharmacy store shelf",
-        "pharmacy store counter",
-        "behind the pharmacy counter"
-      ]
-    },
-    "Harvey Oak Supply Store": {
-      "supply store": [
-        "supply store product shelf",
-        "behind the supply store counter",
-        "supply store counter"
-      ]
-    },
-    "Johnson Park": {
-      "park": [
-        "park garden"
-      ]
-    },
-    "The Rose and Crown Pub": {
-      "pub": [
-        "shelf",
-        "refrigerator",
-        "bar customer seating",
-        "behind the bar counter",
-        "kitchen sink",
-        "cooking area",
-        "microphone"
-      ]
-    },
-    "Hobbs Cafe": {
-      "cafe": [
-        "refrigerator",
-        "cafe customer seating",
-        "cooking area",
-        "kitchen sink",
-        "behind the cafe counter",
-        "piano"
-      ]
-    }
-  }
-}
\ No newline at end of file

diff --git a/environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Maria Lopez/bootstrap_memory/associative_memory/embeddings.json b/environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Maria Lopez/bootstrap_memory/associative_memory/embeddings.json
deleted file mode 100644
index 9e26dfeeb6..0000000000
--- a/environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Maria Lopez/bootstrap_memory/associative_memory/embeddings.json
+++ /dev/null
@@ -1 +0,0 @@
-{}
\ No newline at end of file

diff --git a/environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Maria Lopez/bootstrap_memory/associative_memory/kw_strength.json b/environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Maria Lopez/bootstrap_memory/associative_memory/kw_strength.json
deleted file mode 100644
index 6dc73c1c85..0000000000
--- a/environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Maria Lopez/bootstrap_memory/associative_memory/kw_strength.json
+++ /dev/null
@@ -1,2 +0,0 @@
-{"kw_strength_event": {},
- "kw_strength_thought": {}}
\ No newline at end of file

diff --git a/environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Maria Lopez/bootstrap_memory/associative_memory/nodes.json b/environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Maria Lopez/bootstrap_memory/associative_memory/nodes.json
deleted file mode 100644
index 9e26dfeeb6..0000000000
--- a/environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Maria Lopez/bootstrap_memory/associative_memory/nodes.json
+++ /dev/null
@@ -1 +0,0 @@
-{}
\ No newline at end of file

diff --git a/environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Maria Lopez/bootstrap_memory/scratch.json b/environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Maria Lopez/bootstrap_memory/scratch.json
deleted file mode 100644
index c3a304952d..0000000000
--- a/environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Maria Lopez/bootstrap_memory/scratch.json
+++ /dev/null
@@ -1,51 +0,0 @@
-{
-  "vision_r": 8,
-  "att_bandwidth": 8,
-  "retention": 8,
-  "curr_time": null,
-  "curr_tile": null,
-  "daily_plan_req": "Maria Lopez spends at least 3 hours a day Twitch streaming or gaming.",
-  "name": "Maria Lopez",
-  "first_name": "Maria",
-  "last_name": "Lopez",
-  "age": 21,
-  "innate": "energetic, enthusiastic, inquisitive",
-  "learned": "Maria Lopez is a student at Oak Hill College studying physics and a part time Twitch game streamer who loves to connect with people and explore new ideas.",
-  "currently": "Maria Lopez is working on her physics degree and streaming games on Twitch to make some extra money. She visits Hobbs Cafe for studying and eating just about everyday.",
-  "lifestyle": "Maria Lopez goes to bed around 2am, awakes up around 9am, eats dinner around 6pm. She likes to hang out at Hobbs Cafe if it's before 6pm.",
-  "living_area": "the Ville:Dorm for Oak Hill College:Maria Lopez's room",
-  "concept_forget": 100,
-  "daily_reflection_time": 180,
-  "daily_reflection_size": 5,
-  "overlap_reflect_th": 4,
-  "kw_strg_event_reflect_th": 10,
-  "kw_strg_thought_reflect_th": 9,
-
-  "recency_w": 1,
-  "relevance_w": 1,
-  "importance_w": 1,
-  "recency_decay": 0.99,
-  "importance_trigger_max": 150,
-  "importance_trigger_curr": 150,
-  "importance_ele_n": 0,
-  "thought_count": 5,
-
-  "daily_req": [],
-  "f_daily_schedule": [],
-  "f_daily_schedule_hourly_org": [],
-  "act_address": null,
-  "act_start_time": null,
-  "act_duration": null,
-  "act_description": null,
-  "act_pronunciatio": null,
-  "act_event": ["Maria Lopez", null, null],
-  "act_obj_description": null,
-  "act_obj_pronunciatio": null,
-  "act_obj_event": [null, null, null],
-  "chatting_with": null,
-  "chat": null,
-  "chatting_with_buffer": {},
-  "chatting_end_time": null,
-  "act_path_set": false,
-  "planned_path": []
-}
\ No newline at end of file

diff --git a/environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Maria Lopez/bootstrap_memory/spatial_memory.json b/environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Maria Lopez/bootstrap_memory/spatial_memory.json
deleted file mode 100644
index 0a58212bda..0000000000
--- a/environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Maria Lopez/bootstrap_memory/spatial_memory.json
+++ /dev/null
@@ -1,87 +0,0 @@
-{
-  "the Ville": {
-    "Oak Hill College": {
-      "hallway": [],
-      "library": [
-        "library sofa",
-        "library table",
-        "bookshelf"
-      ],
-      "classroom": [
-        "blackboard",
-        "classroom podium",
-        "classroom student seating"
-      ]
-    },
-    "Dorm for Oak Hill College": {
-      "garden": [
-        "dorm garden"
-      ],
-      "Maria Lopez's room": [
-        "closet",
-        "desk",
-        "bed",
-        "computer",
-        "blackboard"
-      ],
-      "woman's bathroom": [
-        "toilet",
-        "shower",
-        "bathroom sink"
-      ],
-      "common room": [
-        "common room sofa",
-        "pool table",
-        "common room table"
-      ],
-      "man's bathroom": [
-        "shower",
-        "bathroom sink",
-        "toilet"
-      ]
-    },
-    "The Willows Market and Pharmacy": {
-      "store": [
-        "grocery store shelf",
-        "behind the grocery counter",
-        "grocery store counter",
-        "pharmacy store shelf",
-        "pharmacy store counter",
-        "behind the pharmacy counter"
-      ]
-    },
-    "Harvey Oak Supply Store": {
-      "supply store": [
-        "supply store product shelf",
-        "behind the supply store counter",
-        "supply store counter"
-      ]
-    },
-    "Johnson Park": {
-      "park": [
-        "park garden"
-      ]
-    },
-    "The Rose and Crown Pub": {
-      "pub": [
-        "shelf",
-        "refrigerator",
-        "bar customer seating",
-        "behind the bar counter",
-        "kitchen sink",
-        "cooking area",
-        "microphone"
-      ]
-    },
-    "Hobbs Cafe": {
-      "cafe": [
-        "refrigerator",
-        "cafe customer seating",
-        "cooking area",
-        "kitchen sink",
-        "behind the cafe counter",
-        "piano"
-      ]
-    }
-  }
-}
\ No newline at end of file

diff --git a/environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/reverie/meta.json b/environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/reverie/meta.json
deleted file mode 100644
index 1e81ec12d2..0000000000
--- a/environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/reverie/meta.json
+++ /dev/null
@@ -1,13 +0,0 @@
-{
-  "fork_sim_code": "base_the_ville_isabella_maria_klaus",
-  "start_date": "February 13, 2023",
-  "curr_time": "February 13, 2023, 00:00:00",
-  "sec_per_step": 10,
-  "maze_name": "the_ville",
-  "persona_names": [
-    "Isabella Rodriguez",
-    "Maria Lopez",
-    "Klaus Mueller"
-  ],
-  "step": 0
-}
\ No newline at end of file

From 56e4f285a508de768d984ef057b0fb8b2abfa2a3 Mon Sep 17 00:00:00 2001
From: Jonathan Granda Acaro
Date: Wed, 7 Aug 2024 13:55:46 -0400
Subject: [PATCH 04/21] updating .gitignore to not send the utils.py file to the remote branch

---
 .gitignore | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.gitignore b/.gitignore
index 951aec0fc5..44be01dd81 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,6 +1,6 @@
 # OpenAI API key
 openai_config.json
-reverie/backend_server/utils.py
+./reverie/backend_server/utils.py
 
 environment/frontend_server/storage/*
 environment/frontend_server/temp_storage*/*

From f498f8ada7d2b78308e99fa4db1713eb26027991 Mon Sep 17 00:00:00 2001
From: Jonathan Granda Acaro
Date: Wed, 7 Aug 2024 17:39:16 -0400
Subject: [PATCH 05/21] Setting up spawning locations for agents and personas for the search and rescue scenario

---
 .../personas/Klaus Mueller/bootstrap_memory/scratch.json | 2 +-
 .../personas/Klaus Mueller/bootstrap_memory/scratch.json | 2 +-
 reverie/backend_server/test.py                           | 4 ++--
 reverie/backend_server/{utils..py => utils.py}           | 0
 run_backend.sh                                           | 2 +-
 run_backend_automatic.sh                                 | 2 +-
 6 files changed, 6 insertions(+), 6 deletions(-)
 rename reverie/backend_server/{utils..py => utils.py} (100%)

diff --git a/environment/frontend_server/storage/base_the_ville_smol_elections_5_voters/personas/Klaus Mueller/bootstrap_memory/scratch.json b/environment/frontend_server/storage/base_the_ville_smol_elections_5_voters/personas/Klaus Mueller/bootstrap_memory/scratch.json
index 204d18a1fa..50666d6853 100644
--- a/environment/frontend_server/storage/base_the_ville_smol_elections_5_voters/personas/Klaus Mueller/bootstrap_memory/scratch.json
+++ b/environment/frontend_server/storage/base_the_ville_smol_elections_5_voters/personas/Klaus Mueller/bootstrap_memory/scratch.json
@@ -11,7 +11,7 @@
   "age": 20,
   "innate": "kind, inquisitive, passionate",
   "learned": "Klaus Mueller is a student at Oak Hill College studying sociology. He is passionate about social justice and loves to explore different perspectives.",
-  "currently": "Klaus Mueller is writing a research paper on the effects of gentrification in low-income communities. Klauss is thinking about whom to vote for in the upcoming town election. He knows that Adam Smith and Isabella Rodriguez are running for the office of town mayor. The election takes place next week.",
+  "currently": "Klaus Mueller is writing a research paper on the effects of gentrification in low-income communities. Klaus is thinking about whom to vote for in the upcoming town election. He knows that Adam Smith and Isabella Rodriguez are running for the office of town mayor. The election takes place next week.",
   "lifestyle": "Klaus Mueller goes to bed around 11pm, awakes up around 7am, eats dinner around 5pm.",
   "living_area": "the Ville:Dorm for Oak Hill College:Klaus Mueller's room",
   "concept_forget": 100,

diff --git a/environment/frontend_server/storage/base_the_ville_smol_elections_5_voters_swapped_personalities/personas/Klaus Mueller/bootstrap_memory/scratch.json b/environment/frontend_server/storage/base_the_ville_smol_elections_5_voters_swapped_personalities/personas/Klaus Mueller/bootstrap_memory/scratch.json
index 204d18a1fa..50666d6853 100644
--- a/environment/frontend_server/storage/base_the_ville_smol_elections_5_voters_swapped_personalities/personas/Klaus Mueller/bootstrap_memory/scratch.json
+++ b/environment/frontend_server/storage/base_the_ville_smol_elections_5_voters_swapped_personalities/personas/Klaus Mueller/bootstrap_memory/scratch.json
@@ -11,7 +11,7 @@
   "age": 20,
   "innate": "kind, inquisitive, passionate",
   "learned": "Klaus Mueller is a student at Oak Hill College studying sociology. He is passionate about social justice and loves to explore different perspectives.",
-  "currently": "Klaus Mueller is writing a research paper on the effects of gentrification in low-income communities. Klauss is thinking about whom to vote for in the upcoming town election. He knows that Adam Smith and Isabella Rodriguez are running for the office of town mayor. The election takes place next week.",
+  "currently": "Klaus Mueller is writing a research paper on the effects of gentrification in low-income communities. Klaus is thinking about whom to vote for in the upcoming town election. He knows that Adam Smith and Isabella Rodriguez are running for the office of town mayor. The election takes place next week.",
   "lifestyle": "Klaus Mueller goes to bed around 11pm, awakes up around 7am, eats dinner around 5pm.",
   "living_area": "the Ville:Dorm for Oak Hill College:Klaus Mueller's room",
   "concept_forget": 100,

diff --git a/reverie/backend_server/test.py b/reverie/backend_server/test.py
index f1ac176bff..c5fbc11d23 100644
--- a/reverie/backend_server/test.py
+++ b/reverie/backend_server/test.py
@@ -7,11 +7,11 @@
 import json
 import random
 from openai import OpenAI
-
+from utils import *
 client = OpenAI(api_key=openai_api_key)
 
 import time
-from utils import *
+
 
 def ChatGPT_request(prompt):
   """

diff --git a/reverie/backend_server/utils..py b/reverie/backend_server/utils.py
similarity index 100%
rename from reverie/backend_server/utils..py
rename to reverie/backend_server/utils.py

diff --git a/run_backend.sh b/run_backend.sh
index 61dfc8be86..0dfc135ab0 100755
--- a/run_backend.sh
+++ b/run_backend.sh
@@ -7,7 +7,7 @@ LOGS_PATH="../../logs"
 
 echo "Running backend server at: http://127.0.0.1:8000/simulator_home"
 cd ${BACKEND_SCRIPT_PATH}
-source /home/${USER}/anaconda3/bin/activate ${CONDA_ENV}
+source /home/${USER}/miniconda3/bin/activate ${CONDA_ENV}
 
 timestamp=$(date +"%Y-%m-%d_%H-%M-%S")
 echo "Timestamp: ${timestamp}"

diff --git a/run_backend_automatic.sh b/run_backend_automatic.sh
index 513ef637ae..97b6d1b901 100755
--- a/run_backend_automatic.sh
+++ b/run_backend_automatic.sh
@@ -7,7 +7,7 @@ LOGS_PATH="../../logs"
 FILE_NAME="Bash-Script"
 
 cd ${BACKEND_SCRIPT_PATH}
-source /home/${USER}/anaconda3/bin/activate ${CONDA_ENV}
+source /home/${USER}/miniconda3/bin/activate ${CONDA_ENV}
 
 ARGS=""
 while [[ $# -gt 0 ]]; do

From 7c8d7be9d5d4c961e0bae0887edf5a00f4dda45f Mon Sep 17 00:00:00 2001
From: Henry Lemersal
Date: Sun, 22 Sep 2024 15:58:52 -0400
Subject: [PATCH 06/21] Initial branch commit

---
 .../temp_storage/curr_sim_code.json |  4 +--
 requirements.txt                    | 32 ++++++++++++------
 2 files changed, 23 insertions(+), 13 deletions(-)

diff --git a/environment/frontend_server/temp_storage/curr_sim_code.json b/environment/frontend_server/temp_storage/curr_sim_code.json
index 310d4314bc..6ada21177c 100644
--- a/environment/frontend_server/temp_storage/curr_sim_code.json
+++ b/environment/frontend_server/temp_storage/curr_sim_code.json
@@ -1,3 +1,3 @@
 {
-  "sim_code": "33"
-}
+  "sim_code": "test-simulation-s-0-0-200"
+}
\ No newline at end of file

diff --git a/requirements.txt b/requirements.txt
index 87183ed63c..672610deda 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,12 +1,13 @@
 aiohttp==3.8.3
 aiosignal==1.3.1
-annotated-types==0.7.0
+annotated-types==0.6.0
 anyio==4.2.0
 asgiref==3.5.2
+astroid==3.2.2
 async-generator==1.10
 async-timeout==4.0.2
 attrs==22.2.0
-boto==2.49.0
+#boto3==1.29.43
 botocore==1.29.43
 certifi==2021.10.8
 cfgv==3.4.0
@@ -17,7 +18,8 @@ contourpy==1.1.0
 cssbeautifier==1.14.11
 cycler==0.11.0
 dataclasses-json==0.5.14
-diskcache==5.6.3
+dill==0.3.8
+#diskcache==5.6.3
 distlib==0.3.8
 distro==1.9.0
 dj-database-url==0.5.0
@@ -42,6 +44,8 @@ identify==2.5.33
 idna==3.3
 importlib-metadata==4.8.2
 importlib-resources==6.0.1
+isort==5.13.2
+jiter==0.5.0
 jmespath==1.0.1
 joblib==1.3.2
 jsbeautifier==1.14.11
@@ -49,16 +53,17 @@ json5==0.9.14
 kiwisolver==1.4.4
 langchain==0.0.273
 langsmith==0.0.41
-llama_cpp_python==0.2.11
+#llama_cpp_python==0.2.11
 marshmallow==3.20.1
 matplotlib==3.7.2
+mccabe==0.7.0
 multidict==6.0.4
 mypy-extensions==1.0.0
 nltk==3.6.5
 nodeenv==1.8.0
-numexpr==2.8.7
-numpy==1.25.2
-openai==1.13.3
+#numexpr==2.8.7
+#numpy==1.25.2
+openai==1.41.1
 openai-cost-logger==0.4.1
 outcome==1.2.0
 packaging==23.0
@@ -66,11 +71,13 @@ pandas==2.0.3
 pathspec==0.12.1
 patsy==0.5.3
 Pillow==8.4.0
+pip==24.0
 platformdirs==4.1.0
 psycopg2-binary==2.9.5
 pycparser==2.21
-pydantic==1.10.8
-pydantic_core==2.20.1
+pydantic==2.5.3
+pydantic_core==2.14.6
+pylint==3.2.2
 pyparsing==3.0.6
 PySocks==1.7.1
 python-dateutil==2.8.2
@@ -81,9 +88,10 @@ requests==2.26.0
 s3transfer==0.6.0
 scikit-learn==1.3.0
 scikit-posthocs==0.7.0
-scipy==1.11.1
+#scipy==1.11.1
 seaborn==0.12.2
 selenium==4.8.2
+setuptools==69.5.1
 six==1.16.0
 sklearn==0.0
 smart-open==5.2.1
@@ -95,15 +103,17 @@ statsmodels==0.13.5
 tenacity==8.2.3
 threadpoolctl==3.0.0
 tomli==2.0.1
+tomlkit==0.11.1
 tqdm==4.62.3
 trio==0.22.0
 trio-websocket==0.9.2
 trueskill==0.4.5
-typing-inspect==0.9.0
 typing_extensions==4.11.0
+typing-inspect==0.9.0
 tzdata==2023.3
 urllib3==1.26.7
 virtualenv==20.25.0
+wheel>=0.41.2
 wsproto==1.2.0
 yarl==1.8.2
 yellowbrick==1.5

From 68d216069c856c299a70ca493339c59c47560017 Mon Sep 17 00:00:00 2001
From: Henry Lemersal
Date: Thu, 26 Sep 2024 15:39:07 -0400
Subject: [PATCH 07/21] Initial commit for henry branch

---
 README.md                                     | 12 ++--
 environment/frontend_server/manage.py         |  1 -
 environment/frontend_server/requirements.txt  | 68 ------------------
 .../matrix/special_blocks/arena_blocks.csv    |  2 +-
 .../special_blocks/game_object_blocks.csv     |  6 +-
 .../matrix/special_blocks/sector_blocks.csv   |  2 +-
 .../temp_storage/curr_sim_code.json           |  2 +-
 nlp/openai_convo_summary.py                   |  2 +-
 reverie/backend_server/survey.ipynb           |  2 +-
 9 files changed, 14 insertions(+), 83 deletions(-)
 delete mode 100644 environment/frontend_server/requirements.txt

diff --git a/README.md b/README.md
index 5752d0aee2..d1f639cb0d 100644
--- a/README.md
+++ b/README.md
@@ -34,7 +34,7 @@ Azure example:
 ```json
 {
     "client": "azure",
-    "model": "gpt-35-turbo-0125",
+    "model": "gpt-4-turbo",
     "model-key": "",
     "model-endpoint": "",
     "model-api-version": "",
@@ -59,7 +59,7 @@ OpenAI example:
 ```json
 {
     "client": "openai",
-    "model": "gpt-3.5-turbo-0125",
+    "model": "gpt-4-turbo",
     "model-key": "",
    "model-costs": {
         "input": 0.5,
@@ -151,7 +151,7 @@ See all the details of your expenses using the notebook "[cost_viz.ipynb](https:
 
 ### 1. base_the_ville_isabella_maria_klaus
 
-- **Model**: "gpt-3.5-turbo-0125"
+- **Model**: "gpt-4-turbo"
 - **Embeddings**: "text-embedding-3-small"
 - **N. Agents**: 3
 - **Steps**: ~5000
@@ -160,7 +160,7 @@
 ### 2. base_the_ville_n25
 
 - See the simulation saved: [skip-morning-s-14](https://github.com/drudilorenzo/generative_agents/tree/fix-and-improve/environment/frontend_server/storage/skip-morning-s-14)
-- **Model**: "gpt-3.5-turbo-0125"
+- **Model**: "gpt-4-turbo"
 - **Embeddings**: "text-embedding-3-small"
 - **N. Agents**: 25
 - **Steps**: ~3000 (until ~8 a.m.)
@@ -168,8 +168,8 @@
 ### 3. base_the_ville_n25
 
-- **Model**: "gpt-3.5-turbo-0125"
+- **Model**: "gpt-4-turbo"
 - **Embeddings**: "text-embedding-3-small"
-- **N. Agents**: 25 
+- **N. Agents**: 25
 - **Steps**: ~8650 (full day)
 - **Final Cost**: ~18.5 USD

diff --git a/environment/frontend_server/manage.py b/environment/frontend_server/manage.py
index 30edeba553..dd9d9e68a3 100644
--- a/environment/frontend_server/manage.py
+++ b/environment/frontend_server/manage.py
@@ -3,7 +3,6 @@
 import os
 import sys
 
-
 def main():
     os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'frontend_server.settings')
     try:

diff --git a/environment/frontend_server/requirements.txt b/environment/frontend_server/requirements.txt
deleted file mode 100644
index c7679c58b5..0000000000
--- a/environment/frontend_server/requirements.txt
+++ /dev/null
@@ -1,68 +0,0 @@
-aiohttp==3.8.3
-aiosignal==1.3.1
-asgiref==3.5.2
-async-generator==1.10
-async-timeout==4.0.2
-attrs==22.2.0
-boto==2.49.0
-botocore==1.29.43
-certifi==2021.10.8
-charset-normalizer==2.0.12
-click==8.0.3
-cycler==0.11.0
-dj-database-url==0.5.0
-Django==2.2
-django-cors-headers==2.5.3
-django-storages-redux==1.3.3
-exceptiongroup==1.1.0
-frozenlist==1.3.3
-gensim==3.8.0
-gunicorn==20.1.0
-h11==0.14.0
-idna==3.3
-importlib-metadata==4.8.2
-jmespath==1.0.1
-joblib==1.1.0
-kiwisolver==1.3.1
-matplotlib==3.3.4
-multidict==6.0.4
-nltk==3.6.5
-numpy==1.19.5
-openai==0.27.0
-outcome==1.2.0
-packaging==23.0
-pandas==1.1.5
-patsy==0.5.3
-Pillow==8.4.0
-psycopg2-binary==2.9.5
-pycparser==2.21
-pyparsing==3.0.6
-PySocks==1.7.1
-python-dateutil==2.8.2
-pytz==2021.3
-regex==2021.11.10
-requests==2.26.0
-s3transfer==0.6.0
-scikit-learn==0.24.2
-scikit-posthocs==0.7.0
-scipy==1.5.4
-seaborn==0.12.2
-selenium==4.8.2
-six==1.16.0
-sklearn==0.0
-smart-open==5.2.1
-sniffio==1.3.0
-sortedcontainers==2.4.0
-sqlparse==0.4.3
-statsmodels==0.13.5
-threadpoolctl==3.0.0
-tqdm==4.62.3
-trio==0.22.0
-trio-websocket==0.9.2
-trueskill==0.4.5
-typing-extensions==4.0.0
-urllib3==1.26.7
-wsproto==1.2.0
-yarl==1.8.2
-yellowbrick==1.3.post1
-zipp==3.6.0

diff --git a/environment/frontend_server/static_dirs/assets/the_ville/matrix/special_blocks/arena_blocks.csv b/environment/frontend_server/static_dirs/assets/the_ville/matrix/special_blocks/arena_blocks.csv
index c92e0c6ab2..6b02781cde 100644
--- a/environment/frontend_server/static_dirs/assets/the_ville/matrix/special_blocks/arena_blocks.csv
+++ b/environment/frontend_server/static_dirs/assets/the_ville/matrix/special_blocks/arena_blocks.csv
@@ -26,7 +26,7 @@
 32191, the Ville, Oak Hill College, library
 32201, the Ville, Oak Hill College, hallway
 32142, the Ville, Johnson Park, park
-32152, the Ville, Harvey Oak Supply Store, supply store
+32152, the Ville, Fire station, fire station
 32162, the Ville, The Willows Market and Pharmacy, store
 32193, the Ville, Adam Smith's house, main room
 32203, the Ville, Adam Smith's house, bathroom

diff --git a/environment/frontend_server/static_dirs/assets/the_ville/matrix/special_blocks/game_object_blocks.csv b/environment/frontend_server/static_dirs/assets/the_ville/matrix/special_blocks/game_object_blocks.csv
index 4afc74b7a8..36c3f0ed65 100644
--- a/environment/frontend_server/static_dirs/assets/the_ville/matrix/special_blocks/game_object_blocks.csv
+++ b/environment/frontend_server/static_dirs/assets/the_ville/matrix/special_blocks/game_object_blocks.csv
@@ -34,9 +34,9 @@
 32241, the Ville, , grocery store shelf
 32251, the Ville, , pharmacy store counter
 32261, the Ville, , grocery store counter
-32271, the Ville, , supply store product shelf
-32281, the Ville, , behind the supply store counter
-32212, the Ville, , supply store counter
+32271, the Ville, , fire truck
+32281, the Ville, , bunks
+32212, the Ville, , common area
 32222, the Ville, , dorm garden
 32232, the Ville, , house garden
 32242, the Ville, , garden chair

diff --git a/environment/frontend_server/static_dirs/assets/the_ville/matrix/special_blocks/sector_blocks.csv b/environment/frontend_server/static_dirs/assets/the_ville/matrix/special_blocks/sector_blocks.csv
index ba09c4c353..87d9f5852e 100644
--- a/environment/frontend_server/static_dirs/assets/the_ville/matrix/special_blocks/sector_blocks.csv
+++ b/environment/frontend_server/static_dirs/assets/the_ville/matrix/special_blocks/sector_blocks.csv
@@ -8,7 +8,7 @@
 32136, the Ville, Hobbs Cafe
 32146, the Ville, Oak Hill College
 32156, the Ville, Johnson Park
-32166, the Ville, Harvey Oak Supply Store
+32166, the Ville, Fire station
 32176, the Ville, The Willows Market and Pharmacy
 32186, the Ville, Adam Smith's house
 32196, the Ville, Yuriko Yamamoto's house

diff --git a/environment/frontend_server/temp_storage/curr_sim_code.json b/environment/frontend_server/temp_storage/curr_sim_code.json
index 6ada21177c..418697ec24 100644
--- a/environment/frontend_server/temp_storage/curr_sim_code.json
+++ b/environment/frontend_server/temp_storage/curr_sim_code.json
@@ -1,3 +1,3 @@
 {
-  "sim_code": "test-simulation-s-0-0-200"
+  "sim_code": "test_1-s-1-3-4"
 }
\ No newline at end of file

diff --git a/nlp/openai_convo_summary.py b/nlp/openai_convo_summary.py
index c051514385..ad9d45fbf0 100644
--- a/nlp/openai_convo_summary.py
+++ b/nlp/openai_convo_summary.py
@@ -34,7 +34,7 @@ def main():
     print(file_chunks[0])
 
     response = client.chat.completions.create(
-        model="gpt-4",
+        model="gpt-4-turbo",
         messages=[
             {
                 "role": "system",

diff --git a/reverie/backend_server/survey.ipynb b/reverie/backend_server/survey.ipynb
index 209f209d74..3dc4928ff1 100644
--- a/reverie/backend_server/survey.ipynb
+++ b/reverie/backend_server/survey.ipynb
@@ -277,7 +277,7 @@
     " with open(path, \"rb\") as f:\n",
     " return pickle.load(f)\n",
     "\n",
-    "gpt_param = {\"engine\": \"gpt-35-turbo-0125\", \"max_tokens\": 250, \n",
+    "gpt_param = {\"engine\": \"gpt-4-turbo\", \"max_tokens\": 250, \n",
     " \"temperature\": 0, \"top_p\": 1, \"stream\": False,\n",
     " \"frequency_penalty\": 0, \"presence_penalty\": 0, \"stop\": None}\n",
     "\n",

From 9d947c74abe2410314e5c3f4618e066001423d9d Mon Sep 17 00:00:00 2001
From: Jonathan Granda Acaro
Date: Fri, 27 Sep 2024 15:26:28 -0400
Subject: [PATCH 08/21] Pulled Connor's branch, updated to gpt-4o-mini, added type hinting to run_gpt_prompt.py using List[str] throughout the file, installed the new requirements.txt, added new gpt-key

---
 .../frontend_server/temp_storage/curr_sim_code.json |  4 ++--
 nlp/openai_convo_summary.py                         |  2 +-
 output.txt                                          |  7 +++++++
 .../persona/prompt_template/run_gpt_prompt.py       |  9 +++++----
 reverie/backend_server/test.py                      |  2 +-
 5 files changed, 16 insertions(+), 8 deletions(-)
 create mode 100644 output.txt

diff --git a/environment/frontend_server/temp_storage/curr_sim_code.json b/environment/frontend_server/temp_storage/curr_sim_code.json
index 310d4314bc..02cf0fbc6a 100644
--- a/environment/frontend_server/temp_storage/curr_sim_code.json
+++ b/environment/frontend_server/temp_storage/curr_sim_code.json
@@ -1,3 +1,3 @@
 {
-  "sim_code": "33"
-}
+  "sim_code": "ssar-10-s-0-0-200"
+}
\ No newline at end of file

diff --git a/nlp/openai_convo_summary.py b/nlp/openai_convo_summary.py
index c051514385..76226232c8 100644
--- a/nlp/openai_convo_summary.py
+++ b/nlp/openai_convo_summary.py
@@ -34,7 +34,7 @@ def main():
     print(file_chunks[0])
 
     response = client.chat.completions.create(
-        model="gpt-4",
+        model="gpt-4o-mini",
         messages=[
             {
                 "role": "system",

diff --git a/output.txt b/output.txt
new file mode 100644
index 0000000000..3e3298f424
--- /dev/null
+++ b/output.txt
@@ -0,0 +1,7 @@
+Work sessions 9/27/24
+Changes
+- pulled Connor's branch
+- updated to gpt-4o-mini
+- added type hinting to run_gpt_prompt.py using List[str] rather than list[str] and other places in there
+- installed the new requirement.txt
+- added new gpt-key
\ No newline at end of file

diff --git a/reverie/backend_server/persona/prompt_template/run_gpt_prompt.py b/reverie/backend_server/persona/prompt_template/run_gpt_prompt.py
index 9448710a8e..97b87a742e 100644
--- a/reverie/backend_server/persona/prompt_template/run_gpt_prompt.py
+++ b/reverie/backend_server/persona/prompt_template/run_gpt_prompt.py
@@ -18,6 +18,7 @@
 from typing import Tuple
 import traceback
 from pydantic import BaseModel
+from typing import List
 
 sys.path.append('../../')
 
@@ -113,7 +114,7 @@ def get_fail_safe():
 
 
 class DailyPlan(BaseModel):
-  daily_plan: List[str]
+  daily_plan: List[str]
 
 def run_gpt_prompt_daily_plan(persona,
                               wake_up_hour,
@@ -191,7 +192,7 @@ class Activity(BaseModel):
   activity: str
 
 class HourlySchedule(BaseModel):
-  hourly_schedule: list[Activity]
+  hourly_schedule: List[Activity]
 
 def run_gpt_prompt_generate_hourly_schedule(
   persona,
@@ -337,7 +338,7 @@ class Subtask(BaseModel):
   minutes_left: int
 
 class TaskDecomposition(BaseModel):
-  subtasks: list[Subtask]
+  subtasks: List[Subtask]
 
 def run_gpt_prompt_task_decomp(persona,
                                task,
@@ -1117,7 +1118,7 @@ class NewActivity(BaseModel):
   subtask: str
 
 class NewSchedule(BaseModel):
-  schedule: list[NewActivity]
+  schedule: List[NewActivity]
 
 def run_gpt_prompt_new_decomp_schedule(persona,
                                        main_act_dur,

diff --git a/reverie/backend_server/test.py b/reverie/backend_server/test.py
index c5fbc11d23..239c4dadda 100644
--- a/reverie/backend_server/test.py
+++ b/reverie/backend_server/test.py
@@ -27,7 +27,7 @@ def ChatGPT_request(prompt):
   """
   # temp_sleep()
   try:
-    completion = client.chat.completions.create(model="gpt-4-0125-preview",
+    completion = client.chat.completions.create(model="gpt-4o-mini",
       messages=[{"role": "user", "content": prompt}])
     return completion.choices[0].message.content

From 53c6667c2d5efd8fdeb3c8ff6453daadf45b0aa1 Mon Sep 17 00:00:00 2001
From: Jonathan Granda Acaro
Date: Fri, 27 Sep 2024 17:50:57 -0400
Subject: [PATCH 09/21] Initial test simulation with modifications to Connor's branch

---
 environment/frontend_server/temp_storage/curr_sim_code.json | 2 +-
 requirements.txt                                            | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/environment/frontend_server/temp_storage/curr_sim_code.json b/environment/frontend_server/temp_storage/curr_sim_code.json
index 02cf0fbc6a..ee100cbabc 100644
--- a/environment/frontend_server/temp_storage/curr_sim_code.json
+++ b/environment/frontend_server/temp_storage/curr_sim_code.json
@@ -1,3 +1,3 @@
 {
-  "sim_code": "ssar-10-s-0-0-200"
+  "sim_code": "ssar-12-s-7-1399-1440"
 }
\ No newline at end of file

diff --git a/requirements.txt b/requirements.txt
index e09c5d5a32..d421a7de92 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -76,7 +76,7 @@ platformdirs==4.1.0
 psycopg2-binary==2.9.5
 pycparser==2.21
 pydantic==2.5.3
-pydantic_core==2.18.2
+pydantic_core==2.14.6
 pylint==3.2.2
 pyparsing==3.0.6
 PySocks==1.7.1

From fc553563b2436e2aa42f03e98094f06799642eac Mon Sep 17 00:00:00 2001
From: Henry Lemersal
Date: Thu, 10 Oct 2024 00:09:10 -0400
Subject: [PATCH 10/21] convo parse fixed

---
 README.md                                    | 10 +++---
 .../temp_storage/curr_sim_code.json          |  2 +-
 nlp/openai_convo_summary.py                  |  2 +-
 print_conversations.py                       | 36 ++++++++++++++-----
 requirements.txt                             | 15 ++++----
 .../persona/prompt_template/gpt_structure.py |  1 +
 reverie/backend_server/survey.ipynb          |  2 +-
 7 files changed, 45 insertions(+), 23 deletions(-)

diff --git a/README.md b/README.md
index 7212d48046..6fd881d077 100644
--- a/README.md
+++ b/README.md
@@ -34,7 +34,7 @@ Azure example:
 ```json
 {
     "client": "azure",
-    "model": "gpt-4-turbo",
+    "model": "gpt-4o-mini",
     "model-key": "",
     "model-endpoint": "",
     "model-api-version": "",
@@ -59,7 +59,7 @@ OpenAI example:
 ```json
 {
     "client": "openai",
-    "model": "gpt-4-turbo",
+    "model": "gpt-4o-mini",
     "model-key": "",
     "model-costs": {
         "input": 0.5,
@@ -153,7 +153,7 @@ See all the details of your expenses using the notebook "[cost_viz.ipynb](https:
 
 ### 1. base_the_ville_isabella_maria_klaus
 
-- **Model**: "gpt-4-turbo"
+- **Model**: "gpt-4o-mini"
 - **Embeddings**: "text-embedding-3-small"
 - **N. Agents**: 3
 - **Steps**: ~5000
@@ -162,7 +162,7 @@
 ### 2. base_the_ville_n25
 
 - See the simulation saved: [skip-morning-s-14](https://github.com/drudilorenzo/generative_agents/tree/fix-and-improve/environment/frontend_server/storage/skip-morning-s-14)
-- **Model**: "gpt-4-turbo"
+- **Model**: "gpt-4o-mini"
 - **Embeddings**: "text-embedding-3-small"
 - **N. Agents**: 25
 - **Steps**: ~3000 (until ~8 a.m.)
@@ -170,7 +170,7 @@
 ### 3. base_the_ville_n25
 
-- **Model**: "gpt-4-turbo"
+- **Model**: "gpt-4o-mini"
 - **Embeddings**: "text-embedding-3-small"
 - **N. Agents**: 25
 - **Steps**: ~8650 (full day)

diff --git a/environment/frontend_server/temp_storage/curr_sim_code.json b/environment/frontend_server/temp_storage/curr_sim_code.json
index 418697ec24..cec10e0c16 100644
--- a/environment/frontend_server/temp_storage/curr_sim_code.json
+++ b/environment/frontend_server/temp_storage/curr_sim_code.json
@@ -1,3 +1,3 @@
 {
-  "sim_code": "test_1-s-1-3-4"
+  "sim_code": "ssar-32-s-0-0-200"
 }
\ No newline at end of file

diff --git a/nlp/openai_convo_summary.py b/nlp/openai_convo_summary.py
index ad9d45fbf0..76226232c8 100644
--- a/nlp/openai_convo_summary.py
+++ b/nlp/openai_convo_summary.py
@@ -34,7 +34,7 @@ def main():
     print(file_chunks[0])
 
     response = client.chat.completions.create(
-        model="gpt-4-turbo",
+        model="gpt-4o-mini",
        messages=[
             {
                 "role": "system",

diff --git a/print_conversations.py b/print_conversations.py
index ff7cf382d3..eb46405550 100644
--- a/print_conversations.py
+++ b/print_conversations.py
@@ -1,31 +1,35 @@
 import os
+import glob
+import os
 import sys
 import json
 
-
 def get_unique_conversations(simulation_name):
-    step_folder = f"environment/frontend_server/storage/{simulation_name}/movement"
+    step_folder = "environment/frontend_server/storage"
+
+    # Use glob to find all files that start with the simulation_name
+    search_pattern = os.path.join(step_folder, f"{simulation_name}*/movement/*")
+    filepaths = glob.glob(search_pattern)
 
     observed_conversations = set()
     file_contents = []
 
-    # Iterate over all files in the simulation folder
-    for filename in os.listdir(step_folder):
-        filepath = os.path.join(step_folder, filename)
-
+    # Iterate over all matching files
+    for filepath in filepaths:
         with open(filepath, "r") as file:
             data = json.load(file)
             personas = data.get("persona", {})
 
             # Loop over all personas except one, since conversations are always
             # between two people
             for name in list(personas.keys())[:-1]:
                 persona = personas[name]
                 chat = persona.get("chat")
 
                 if chat:
                     chat_string = str(chat)
+
+                    # Add unique conversations
                     if chat_string not in observed_conversations:
                         observed_conversations.add(chat_string)
                         file_contents.append(data)
                     break
 
     return file_contents
 
+def write_conversations_to_file(conversations, simulation_name):
+    output_directory = "logs/conversations"
+    if not os.path.exists(output_directory):
+        os.makedirs(output_directory)
+    output_filename = f"{simulation_name}_highlights.json"
+    full_path = os.path.join(output_directory, output_filename);
+    with open(full_path, "w") as file:
+        for conversation in conversations:
+            json.dump(conversation, file, indent=4)
 
 if __name__ == "__main__":
     if len(sys.argv) < 2:
         print("Please provide the simulation name as a command line argument.")
         sys.exit(1)
 
     simulation_name = sys.argv[1]
     unique_conversations = get_unique_conversations(simulation_name)
-    print(json.dumps(unique_conversations, indent=2))
+
+    if unique_conversations:
+        write_conversations_to_file(unique_conversations, simulation_name)
+        print(f"Unique conversations written to {simulation_name}_highlights.txt")
+    else:
+        print("No unique conversations found.")
\ No newline at end of file

diff --git a/requirements.txt b/requirements.txt
index 6a4b532aec..c989c0789a 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -7,7 +7,7 @@ astroid==3.2.2
 async-generator==1.10
 async-timeout==4.0.2
 attrs==22.2.0
-#boto3==1.29.43
+boto==2.49.0
 botocore==1.29.43
 certifi==2021.10.8
 cfgv==3.4.0
@@ -18,6 +18,7 @@ contourpy==1.1.0
 cssbeautifier==1.14.11
cycler==0.11.0 dataclasses-json==0.5.14 +dill==0.3.8 diskcache==5.6.3 distlib==0.3.8 distro==1.9.0 @@ -52,7 +53,7 @@ json5==0.9.14 kiwisolver==1.4.4 langchain==0.0.273 langsmith==0.0.41 -#llama_cpp_python==0.2.11 +llama_cpp_python==0.2.11 marshmallow==3.20.1 matplotlib==3.7.2 mccabe==0.7.0 @@ -62,7 +63,7 @@ nltk==3.6.5 nodeenv==1.8.0 numexpr==2.8.7 numpy==1.25.2 -openai==1.13.3 +openai==1.41.1 openai-cost-logger==0.4.1 outcome==1.2.0 packaging==23.0 @@ -74,8 +75,9 @@ pip==24.0 platformdirs==4.1.0 psycopg2-binary==2.9.5 pycparser==2.21 -pydantic==1.10.8 -pydantic_core==2.20.1 +#pydantic==2.5.3 +#pydantic_core==2.18.2 +pylint==3.2.2 pyparsing==3.0.6 PySocks==1.7.1 python-dateutil==2.8.2 @@ -86,7 +88,7 @@ requests==2.26.0 s3transfer==0.6.0 scikit-learn==1.3.0 scikit-posthocs==0.7.0 -#scipy==1.11.1 +scipy==1.11.1 seaborn==0.12.2 selenium==4.8.2 setuptools==69.5.1 @@ -111,6 +113,7 @@ typing-inspect==0.9.0 tzdata==2023.3 urllib3==1.26.7 virtualenv==20.25.0 +wheel==0.43.0 wsproto==1.2.0 yarl==1.8.2 yellowbrick==1.5 diff --git a/reverie/backend_server/persona/prompt_template/gpt_structure.py b/reverie/backend_server/persona/prompt_template/gpt_structure.py index dea98569e3..6515e71e11 100644 --- a/reverie/backend_server/persona/prompt_template/gpt_structure.py +++ b/reverie/backend_server/persona/prompt_template/gpt_structure.py @@ -266,6 +266,7 @@ def ChatGPT_safe_generate_response( curr_gpt_response = chatgpt_response.strip() end_index = curr_gpt_response.rfind("}") + 1 curr_gpt_response = curr_gpt_response[:end_index] + print(curr_gpt_response) curr_gpt_response = json.loads(curr_gpt_response)["output"] if verbose: diff --git a/reverie/backend_server/survey.ipynb b/reverie/backend_server/survey.ipynb index 3dc4928ff1..6255ad8e5e 100644 --- a/reverie/backend_server/survey.ipynb +++ b/reverie/backend_server/survey.ipynb @@ -277,7 +277,7 @@ " with open(path, \"rb\") as f:\n", " return pickle.load(f)\n", "\n", - "gpt_param = {\"engine\": \"gpt-4-turbo\", \"max_tokens\": 250, \n", + "gpt_param = {\"engine\": \"gpt-4o-mini\", \"max_tokens\": 250, \n", " \"temperature\": 0, \"top_p\": 1, \"stream\": False,\n", " \"frequency_penalty\": 0, \"presence_penalty\": 0, \"stop\": None}\n", "\n", From 24629e9b9c0edfc6f249a251137a9eef87d71603 Mon Sep 17 00:00:00 2001 From: Henry Lemersal Date: Fri, 11 Oct 2024 14:21:47 -0400 Subject: [PATCH 11/21] Added structured_output outline --- .gitignore | 1 + .../temp_storage/curr_sim_code.json | 2 +- .../prompt_template/structured_output.py | 22 +++++++++++++++++++ 3 files changed, 24 insertions(+), 1 deletion(-) create mode 100644 reverie/backend_server/persona/prompt_template/structured_output.py diff --git a/.gitignore b/.gitignore index afa589f2a9..2028544833 100644 --- a/.gitignore +++ b/.gitignore @@ -126,3 +126,4 @@ ENV/ .ropeproject .history/ +environment/frontend_server/temp_storage/curr_sim_code.json diff --git a/environment/frontend_server/temp_storage/curr_sim_code.json b/environment/frontend_server/temp_storage/curr_sim_code.json index cec10e0c16..590e468ebd 100644 --- a/environment/frontend_server/temp_storage/curr_sim_code.json +++ b/environment/frontend_server/temp_storage/curr_sim_code.json @@ -1,3 +1,3 @@ { - "sim_code": "ssar-32-s-0-0-200" + "sim_code": "ssar-47-s-0-0-200" } \ No newline at end of file diff --git a/reverie/backend_server/persona/prompt_template/structured_output.py b/reverie/backend_server/persona/prompt_template/structured_output.py new file mode 100644 index 0000000000..dfc2757c71 --- /dev/null +++ 
b/reverie/backend_server/persona/prompt_template/structured_output.py @@ -0,0 +1,22 @@ +from pydantic import BaseModel +from openai import OpenAI + +client = OpenAI() + +class Movements(BaseModel): + x_pos: int + y_pos: int + message: str + +class Person(BaseModel): + name: str + actions: list[Movements] + +completion = client.beta.chat.completions.parse( + model="gpt-4o-2024-08-06", + messages=[ {"role": "system", "content": "You are overseeing a town of people interacting with one another, pulling data from what they see and do"}, + {"role": "user", "content": "Give the daily plan of these people"} + ], + response_format=Person,) + +math_reasoning = completion.choices[0].message.parsed \ No newline at end of file From c2a8c0dcb6f2da5ca9126ab38394d1cf1af99ee5 Mon Sep 17 00:00:00 2001 From: jgranda1999 Date: Fri, 11 Oct 2024 17:30:15 -0400 Subject: [PATCH 12/21] Replaced chatgpt_safe_generate_resposne with structured output version of it --- .../temp_storage/curr_sim_code.json | 2 +- .../persona/prompt_template/gpt_structure.py | 93 ++++++++++++++++--- test.py | 48 ++++++++++ 3 files changed, 131 insertions(+), 12 deletions(-) create mode 100644 test.py diff --git a/environment/frontend_server/temp_storage/curr_sim_code.json b/environment/frontend_server/temp_storage/curr_sim_code.json index ee100cbabc..04c2e7867c 100644 --- a/environment/frontend_server/temp_storage/curr_sim_code.json +++ b/environment/frontend_server/temp_storage/curr_sim_code.json @@ -1,3 +1,3 @@ { - "sim_code": "ssar-12-s-7-1399-1440" + "sim_code": "ssar-2-s-7-1399-1440" } \ No newline at end of file diff --git a/reverie/backend_server/persona/prompt_template/gpt_structure.py b/reverie/backend_server/persona/prompt_template/gpt_structure.py index dea98569e3..69e874f036 100644 --- a/reverie/backend_server/persona/prompt_template/gpt_structure.py +++ b/reverie/backend_server/persona/prompt_template/gpt_structure.py @@ -234,7 +234,6 @@ def ChatGPT_request(prompt): # return False - def ChatGPT_safe_generate_response( prompt, example_output, @@ -246,21 +245,45 @@ def ChatGPT_safe_generate_response( verbose=False, ): if func_validate and func_clean_up: - # prompt = 'GPT-3 Prompt:\n"""\n' + prompt + '\n"""\n' - prompt = '"""\n' + prompt + '\n"""\n' - prompt += ( - f"Output the response to the prompt above in json. 
{special_instruction}\n" - ) - prompt += "Example output json:\n" - prompt += '{"output": "' + str(example_output) + '"}' + # Constructing the new prompt using the structured output format + prompt_structure = { + "model": "gpt-4o-2024-08-06", + "messages": [ + { + "role": "system", + "content": special_instruction + }, + { + "role": "user", + "content": prompt + } + ], + "response_format": { + "type": "json_schema", + "json_schema": { + "name": "output_response", + "strict": True, + "schema": { + "type": "object", + "properties": { + "output": { + "type": "string" + } + }, + "required": ["output"], + "additionalProperties": False + } + } + } + } if verbose: - print("LLM PROMPT") - print(prompt) + print("LLM PROMPT STRUCTURE") + print(json.dumps(prompt_structure, indent=2)) for i in range(repeat): try: - chatgpt_response = ChatGPT_request(prompt) + chatgpt_response = ChatGPT_request(json.dumps(prompt_structure)) if not chatgpt_response: raise Exception("No valid response from LLM.") curr_gpt_response = chatgpt_response.strip() @@ -283,6 +306,54 @@ def ChatGPT_safe_generate_response( return fail_safe_response +# def ChatGPT_safe_generate_response( +# prompt, +# example_output, +# special_instruction, +# repeat=3, +# fail_safe_response="error", +# func_validate=None, +# func_clean_up=None, +# verbose=False, +# ): +# if func_validate and func_clean_up: +# # prompt = 'GPT-3 Prompt:\n"""\n' + prompt + '\n"""\n' +# prompt = '"""\n' + prompt + '\n"""\n' +# prompt += ( +# f"Output the response to the prompt above in json. {special_instruction}\n" +# ) +# prompt += "Example output json:\n" +# prompt += '{"output": "' + str(example_output) + '"}' + +# if verbose: +# print("LLM PROMPT") +# print(prompt) + +# for i in range(repeat): +# try: +# chatgpt_response = ChatGPT_request(prompt) +# if not chatgpt_response: +# raise Exception("No valid response from LLM.") +# curr_gpt_response = chatgpt_response.strip() +# end_index = curr_gpt_response.rfind("}") + 1 +# curr_gpt_response = curr_gpt_response[:end_index] +# curr_gpt_response = json.loads(curr_gpt_response)["output"] + +# if verbose: +# print("---- repeat count:", i) +# print("~~~~ curr_gpt_response:") +# print(curr_gpt_response) +# print("~~~~") + +# if func_validate(curr_gpt_response, prompt=prompt): +# return func_clean_up(curr_gpt_response, prompt=prompt) + +# except Exception as e: +# print("ERROR:", e) +# traceback.print_exc() + +# return fail_safe_response + def ChatGPT_safe_generate_response_OLD(prompt, repeat=3, diff --git a/test.py b/test.py new file mode 100644 index 0000000000..cb5cf91305 --- /dev/null +++ b/test.py @@ -0,0 +1,48 @@ +def ChatGPT_safe_generate_response( + prompt, + example_output, + special_instruction, + repeat=3, + fail_safe_response="error", + func_validate=None, + func_clean_up=None, + verbose=False, +): + + if func_validate and func_clean_up: + # prompt = 'GPT-3 Prompt:\n"""\n' + prompt + '\n"""\n' + prompt = '"""\n' + prompt + '\n"""\n' + prompt += ( + f"Output the response to the prompt above in json. 
{special_instruction}\n" + ) + prompt += "Example output json:\n" + prompt += '{"output": "' + str(example_output) + '"}' + + if verbose: + print("LLM PROMPT") + print(prompt) + + for i in range(repeat): + try: + chatgpt_response = ChatGPT_request(prompt) + if not chatgpt_response: + raise Exception("No valid response from LLM.") + curr_gpt_response = chatgpt_response.strip() + end_index = curr_gpt_response.rfind("}") + 1 + curr_gpt_response = curr_gpt_response[:end_index] + curr_gpt_response = json.loads(curr_gpt_response)["output"] + + if verbose: + print("---- repeat count:", i) + print("~~~~ curr_gpt_response:") + print(curr_gpt_response) + print("~~~~") + + if func_validate(curr_gpt_response, prompt=prompt): + return func_clean_up(curr_gpt_response, prompt=prompt) + + except Exception as e: + print("ERROR:", e) + traceback.print_exc() + + return fail_safe_response \ No newline at end of file From 2bf67d0c42dc04874ef55a63e7354f3d3c7db62d Mon Sep 17 00:00:00 2001 From: jgranda1999 Date: Fri, 11 Oct 2024 17:40:39 -0400 Subject: [PATCH 13/21] Added conversation parser --- convo_parser.py | 61 +++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 61 insertions(+) create mode 100644 convo_parser.py diff --git a/convo_parser.py b/convo_parser.py new file mode 100644 index 0000000000..eb46405550 --- /dev/null +++ b/convo_parser.py @@ -0,0 +1,61 @@ +import os +import glob +import sys +import json + +def get_unique_conversations(simulation_name): + step_folder = "environment/frontend_server/storage" + + # Use glob to find all files that start with the simulation_name + search_pattern = os.path.join(step_folder, f"{simulation_name}*/movement/*") + filepaths = glob.glob(search_pattern) + + observed_conversations = set() + file_contents = [] + + # Iterate over all matching files + for filepath in filepaths: + with open(filepath, "r") as file: + data = json.load(file) + personas = data.get("persona", {}) + + # Loop over all personas except one, since conversations are always + # between two people + for name in list(personas.keys())[:-1]: + persona = personas[name] + chat = persona.get("chat") + + if chat: + chat_string = str(chat) + + # Add unique conversations + if chat_string not in observed_conversations: + observed_conversations.add(chat_string) + file_contents.append(data) + break + + return file_contents + +def write_conversations_to_file(conversations, simulation_name): + output_directory = "logs/conversations" + if not os.path.exists(output_directory): + os.makedirs(output_directory) + output_filename = f"{simulation_name}_highlights.json" + full_path = os.path.join(output_directory, output_filename) + with open(full_path, "w") as file: + for conversation in conversations: + json.dump(conversation, file, indent=4) + +if __name__ == "__main__": + if len(sys.argv) < 2: + print("Please provide the simulation name as a command line argument.") + sys.exit(1) + + simulation_name = sys.argv[1] + unique_conversations = get_unique_conversations(simulation_name) + + if unique_conversations: + write_conversations_to_file(unique_conversations, simulation_name) + print(f"Unique conversations written to {simulation_name}_highlights.json") + else: + print("No unique conversations found.") \ No newline at end of file From 99cbf7faf8f679a818fbe85b05810224336647c6 Mon Sep 17 00:00:00 2001 From: denialj12 Date: Fri, 11 Oct 2024 17:57:04 -0400 Subject: [PATCH 14/21] Parsers convert simulation output into txt files --- print_all_sim.py | 59 ++++++++++++++++++++++++++++++++++++++++++
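Both the conversation parser above and the printers in this patch walk the per-step movement JSON that the frontend server stores. A minimal, self-contained sketch of that file shape and of flattening the chat turns in one step follows; the sample data is invented for illustration, and the real files live under environment/frontend_server/storage/<sim_code>/movement/:

```python
import json

# Shape inferred from the parsers in these patches: each movement/<step>.json
# maps persona names to state dicts whose "chat" entry is either None or a
# list of [speaker, utterance] pairs.
sample_step = {
    "persona": {
        "Isabella Rodriguez": {
            "movement": [72, 14],
            "chat": [
                ["Isabella Rodriguez", "Good morning!"],
                ["Klaus Mueller", "Morning, Isabella."],
            ],
        },
        "Klaus Mueller": {"movement": [126, 46], "chat": None},
    }
}

def extract_chat_lines(step: dict) -> list[str]:
    """Flatten every non-empty chat in one movement step into 'speaker: line' strings."""
    lines = []
    for persona in step.get("persona", {}).values():
        for speaker, utterance in persona.get("chat") or []:
            lines.append(f"{speaker}: {utterance}")
    return lines

print(json.dumps(extract_chat_lines(sample_step), indent=2))
```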
print_conversations.py | 56 ++++++++++++++++++++++----------------- 2 files changed, 91 insertions(+), 24 deletions(-) create mode 100644 print_all_sim.py diff --git a/print_all_sim.py b/print_all_sim.py new file mode 100644 index 0000000000..b0c0c71f05 --- /dev/null +++ b/print_all_sim.py @@ -0,0 +1,59 @@ +import os +import json +import re +import sys + +def get_unique_conversations(simulation_name): + sim_folder = os.path.join("environment", "frontend_server", "storage", "base_search_and_rescue") + + regex_name = re.compile(re.escape(simulation_name + '-')) + for file_name in os.listdir(sim_folder): + step=0 + output = [] + if regex_name.search(file_name): + step_folder = os.path.join(sim_folder, file_name, "movement") + for filename in os.listdir(step_folder): + filepath = os.path.join(step_folder, filename) + output.append(f"Step {str(step)}:") + try: + with open(filepath, "r") as file: + data = json.load(file) + for k, v in data.items(): + output.append(k) + if k == 'persona': + for key, value in v.items(): + output.append(f' {key}') + for attribute, val in value.items(): + if attribute != 'chat' or (attribute == 'chat' and val is None): + output.append(f' {attribute}: {val}') + else: + output.append(f' {attribute}:') + for convo in val: + output.append(f' {convo[0]}: {convo[1]}') + else: + for key, value in v.items(): + output.append(f' {key}: {value}') + output.append('\n') + except json.JSONDecodeError: + continue + except Exception as e: + print(f"Error processing file {filename}: {e}") + continue + step+=1 + + output_filename = os.path.join(sim_folder, file_name, f"output_0-{file_name.split('-')[5]}.txt", ) + with open(output_filename, "w") as output_file: + output_file.write('\n'.join(output)) + + +if __name__ == "__main__": + if len(sys.argv) < 2: + print("Please provide the simulation name as a command line argument.") + sys.exit(1) + + simulation_name = sys.argv[1] + ''' + unique_conversations = get_unique_conversations(simulation_name) + print(json.dumps(unique_conversations, indent=2)) + ''' + get_unique_conversations(simulation_name) diff --git a/print_conversations.py b/print_conversations.py index ff7cf382d3..77db0625fb 100644 --- a/print_conversations.py +++ b/print_conversations.py @@ -1,37 +1,42 @@ import os import sys import json +import re def get_unique_conversations(simulation_name): - step_folder = f"environment/frontend_server/storage/{simulation_name}/movement" - - observed_conversations = set() - file_contents = [] - + + step_folder = f"environment/frontend_server/storage/base_search_and_rescue/{simulation_name}/movement" + print(os.listdir()) # Iterate over all files in the simulation folder for filename in os.listdir(step_folder): filepath = os.path.join(step_folder, filename) + try: + with open(filepath, "r") as file: + data = json.load(file) + for k, v in data.items(): + print(k) + if(k=='persona'): + for key, value in v.items(): + print(f' {key}') + for attribute, val in value.items(): + if attribute!='chat' or attribute=='chat' and val is None: + print(f' {attribute}: {val}') + else: + print(f' {attribute}:') + for convo in val: + print(f' {convo[0]}: {convo[1]}') + + else: + for key,value in v.items(): + print(f' {key}: {value}') + print('\n') + except json.JSONDecodeError: + print("Failed to decode JSON. 
Please check the file format.") + except Exception as e: + print(f"An error occurred: {e}") + - with open(filepath, "r") as file: - data = json.load(file) - personas = data["persona"] - - # Loop over all personas except one, since conversations are always - # between two people - for name in list(personas.keys())[:-1]: - persona = personas[name] - chat = persona["chat"] - - if chat: - chat_string = str(chat) - - if chat_string not in observed_conversations: - observed_conversations.add(chat_string) - file_contents.append(data) - break - - return file_contents if __name__ == "__main__": @@ -40,5 +45,8 @@ def get_unique_conversations(simulation_name): sys.exit(1) simulation_name = sys.argv[1] + ''' unique_conversations = get_unique_conversations(simulation_name) print(json.dumps(unique_conversations, indent=2)) + ''' + get_unique_conversations(simulation_name) From b0ba4ac3cb57bef14a163c62daab09e7855a3f34 Mon Sep 17 00:00:00 2001 From: Henry Lemersal Date: Mon, 21 Oct 2024 17:38:29 -0700 Subject: [PATCH 15/21] commit changes --- .../persona/prompt_template/run_gpt_prompt.py | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/reverie/backend_server/persona/prompt_template/run_gpt_prompt.py b/reverie/backend_server/persona/prompt_template/run_gpt_prompt.py index 02f983cbf2..0b8437ecb6 100644 --- a/reverie/backend_server/persona/prompt_template/run_gpt_prompt.py +++ b/reverie/backend_server/persona/prompt_template/run_gpt_prompt.py @@ -2721,6 +2721,8 @@ def get_fail_safe(): return output, [output, prompt, gpt_param, prompt_input, fail_safe] +class Convo_Takeaways(BaseModel): + takeaway: str def run_gpt_prompt_memo_on_convo(persona, all_utt, test_input=None, verbose=False): def create_prompt_input(persona, all_utt, test_input=None): @@ -2765,8 +2767,17 @@ def __chat_func_validate(gpt_response, prompt=""): ############ example_output = 'Jane Doe was interesting to talk to.' 
######## special_instruction = 'The output should ONLY contain a string that summarizes anything interesting that the agent may have noticed' ######## fail_safe = get_fail_safe() ######## - output = ChatGPT_safe_generate_response(prompt, example_output, special_instruction, 3, fail_safe, - __chat_func_validate, __chat_func_clean_up, True) + + #output = ChatGPT_safe_generate_response(prompt, example_output, special_instruction, 3, fail_safe, __chat_func_validate, __chat_func_clean_up, True) + output = generate_structured_response( + prompt, + gpt_param, + Convo_Takeaways, + 5, + fail_safe, + __func_validate, + __func_clean_up + ) if output != False: return output, [output, prompt, gpt_param, prompt_input, fail_safe] # ChatGPT Plugin =========================================================== From 54b9703ebb57173de26e00f63a6d4c5dc41f5023 Mon Sep 17 00:00:00 2001 From: Henry Lemersal Date: Wed, 23 Oct 2024 01:26:45 -0700 Subject: [PATCH 16/21] add structured output to my share of functions --- .../temp_storage/curr_sim_code.json | 6 +- .../persona/prompt_template/gpt_structure.py | 83 ++++++++++++++ .../persona/prompt_template/run_gpt_prompt.py | 107 ++++++++++++------ 3 files changed, 155 insertions(+), 41 deletions(-) diff --git a/environment/frontend_server/temp_storage/curr_sim_code.json b/environment/frontend_server/temp_storage/curr_sim_code.json index 16f265bc5a..9ed7bf3470 100644 --- a/environment/frontend_server/temp_storage/curr_sim_code.json +++ b/environment/frontend_server/temp_storage/curr_sim_code.json @@ -1,7 +1,3 @@ { -<<<<<<< HEAD - "sim_code": "ssar-47-s-0-0-200" -======= - "sim_code": "33" ->>>>>>> origin/dev + "sim_code": "so-9-s-2-399-599" } \ No newline at end of file diff --git a/reverie/backend_server/persona/prompt_template/gpt_structure.py b/reverie/backend_server/persona/prompt_template/gpt_structure.py index 7d4364e076..298dd593dc 100644 --- a/reverie/backend_server/persona/prompt_template/gpt_structure.py +++ b/reverie/backend_server/persona/prompt_template/gpt_structure.py @@ -188,7 +188,40 @@ def ChatGPT_request(prompt): traceback.print_exc() return "LLM ERROR" +def ChatGPT_structured_request(prompt, response_format): + """ + Given a prompt and a Pydantic response format, make a structured-output + request to the OpenAI server and return the response. + ARGS: + prompt: a str prompt + response_format: a Pydantic model describing the structure that + the model's reply must follow; passed through to + the chat completions call. + RETURNS: + a str of the model's JSON response. 
+ """ + # temp_sleep() + print("--- ChatGPT_request() ---") + print("Prompt:", prompt) + try: + completion = client.chat.completions.create( + model=openai_config["model"], + response_format=response_format, + messages=[{"role": "user", "content": prompt}] + ) + content = completion.choices[0].message.content + print("Response content:", content) + cost_logger.update_cost( + completion, input_cost=openai_config["model-costs"]["input"], output_cost=openai_config["model-costs"]["output"] + ) + return content + + except Exception as e: + print(f"Error: {e}") + traceback.print_exc() + return "LLM ERROR" + # def GPT4_safe_generate_response( # prompt, # example_output, @@ -284,6 +317,56 @@ def ChatGPT_safe_generate_response( return fail_safe_response +def ChatGPT_generate_structured_response( + prompt, + response_format, + example_output, + special_instruction, + repeat=3, + fail_safe_response="error", + func_validate=None, + func_clean_up=None, + verbose=False, +): + if func_validate and func_clean_up: + # prompt = 'GPT-3 Prompt:\n"""\n' + prompt + '\n"""\n' + prompt = '"""\n' + prompt + '\n"""\n' + prompt += ( + f"Output the response to the prompt above in json. {special_instruction}\n" + ) + prompt += "Example output json:\n" + prompt += '{"output": "' + str(example_output) + '"}' + + if verbose: + print("LLM PROMPT") + print(prompt) + + for i in range(repeat): + try: + chatgpt_response = ChatGPT_structured_request(prompt, response_format) + if not chatgpt_response: + raise Exception("No valid response from LLM.") + curr_gpt_response = chatgpt_response.strip() + end_index = curr_gpt_response.rfind("}") + 1 + curr_gpt_response = curr_gpt_response[:end_index] + print(curr_gpt_response) + curr_gpt_response = json.loads(curr_gpt_response)["output"] + + if verbose: + print("---- repeat count:", i) + print("~~~~ curr_gpt_response:") + print(curr_gpt_response) + print("~~~~") + + if func_validate(curr_gpt_response, prompt=prompt) and not isinstance(curr_gpt_response, str): + return func_clean_up(curr_gpt_response, prompt=prompt) + + except Exception as e: + print("ERROR:", e) + traceback.print_exc() + + return fail_safe_response + def ChatGPT_safe_generate_response_OLD(prompt, repeat=3, diff --git a/reverie/backend_server/persona/prompt_template/run_gpt_prompt.py b/reverie/backend_server/persona/prompt_template/run_gpt_prompt.py index 50302cb68c..2ba55d5741 100644 --- a/reverie/backend_server/persona/prompt_template/run_gpt_prompt.py +++ b/reverie/backend_server/persona/prompt_template/run_gpt_prompt.py @@ -26,6 +26,7 @@ generate_prompt, safe_generate_response, generate_structured_response, + ChatGPT_generate_structured_response, ) from persona.prompt_template.print_prompt import print_run_prompts @@ -1271,6 +1272,8 @@ def get_fail_safe(main_act_dur, truncated_act_dur): return output, [output, prompt, gpt_param, prompt_input, fail_safe] +class Decide_to_Talk(BaseModel): + decision: str def run_gpt_prompt_decide_to_talk(persona, target_persona, retrieved,test_input=None, verbose=False): @@ -1349,7 +1352,7 @@ def __func_clean_up(gpt_response, prompt=""): def get_fail_safe(): fs = "yes" return fs - + gpt_param = {"engine": openai_config["model"], "max_tokens": 20, @@ -1361,9 +1364,17 @@ def get_fail_safe(): prompt = generate_prompt(prompt_input, prompt_template) fail_safe = get_fail_safe() - output = safe_generate_response(prompt, gpt_param, 5, fail_safe, - __func_validate, __func_clean_up) - + #output = safe_generate_response(prompt, gpt_param, 5, fail_safe, + # __func_validate, __func_clean_up) + 
output = generate_structured_response( + prompt, + gpt_param, + Conversation_Topic, + 5, + fail_safe, + __func_validate, + __func_clean_up + ) if debug or verbose: print_run_prompts(prompt_template, persona, gpt_param, prompt_input, prompt, output) @@ -1463,7 +1474,8 @@ def get_fail_safe(): return output, [output, prompt, gpt_param, prompt_input, fail_safe] - +class Conversation_Topic(BaseModel): + convo_topic: str def run_gpt_prompt_create_conversation(persona, target_persona, curr_loc, test_input=None, verbose=False): def create_prompt_input(init_persona, target_persona, curr_loc, @@ -1580,9 +1592,17 @@ def get_fail_safe(init_persona, target_persona): prompt = generate_prompt(prompt_input, prompt_template) fail_safe = get_fail_safe(persona, target_persona) - output = safe_generate_response(prompt, gpt_param, 5, fail_safe, - __func_validate, __func_clean_up) - + #output = safe_generate_response(prompt, gpt_param, 5, fail_safe, + # __func_validate, __func_clean_up) + output = generate_structured_response( + prompt, + gpt_param, + Conversation_Topic, + 5, + fail_safe, + __func_validate, + __func_clean_up + ) if debug or verbose: print_run_prompts(prompt_template, persona, gpt_param, prompt_input, prompt, output) @@ -1639,6 +1659,8 @@ def __chat_func_validate(gpt_response, prompt=""): ############ fail_safe = get_fail_safe() ######## output = ChatGPT_safe_generate_response(prompt, example_output, special_instruction, 3, fail_safe, __chat_func_validate, __chat_func_clean_up, True) + + if output != False: return output, [output, prompt, gpt_param, prompt_input, fail_safe] # ChatGPT Plugin =========================================================== @@ -1661,6 +1683,10 @@ def __chat_func_validate(gpt_response, prompt=""): ############ # return output, [output, prompt, gpt_param, prompt_input, fail_safe] +class Keywords(BaseModel): + emotive_keywords: list[str] + factual_keywords: list[str] + all_keywords: list[list] def run_gpt_prompt_extract_keywords(persona, description, test_input=None, verbose=False): def create_prompt_input(description, test_input=None): @@ -1705,9 +1731,17 @@ def get_fail_safe(): prompt = generate_prompt(prompt_input, prompt_template) fail_safe = get_fail_safe() - output = safe_generate_response(prompt, gpt_param, 5, fail_safe, - __func_validate, __func_clean_up) + #output = safe_generate_response(prompt, gpt_param, 5, fail_safe, __func_validate, __func_clean_up) + output = generate_structured_response( + prompt, + gpt_param, + Keywords, + 5, + fail_safe, + __func_validate, + __func_clean_up + ) if debug or verbose: print_run_prompts(prompt_template, persona, gpt_param, prompt_input, prompt, output) @@ -2130,6 +2164,8 @@ def get_fail_safe(n): return output, [output, prompt, gpt_param, prompt_input, fail_safe] +class Idea_Summary(BaseModel): + idea_summary: str def run_gpt_prompt_agent_chat_summarize_ideas(persona, target_persona, statements, curr_context, test_input=None, verbose=False): def create_prompt_input(persona, target_persona, statements, curr_context, test_input=None): @@ -2173,8 +2209,8 @@ def __chat_func_validate(gpt_response, prompt=""): ############ example_output = 'Jane Doe is working on a project' ######## special_instruction = 'The output should be a string that responds to the question.' 
######## fail_safe = get_fail_safe() ######## - output = ChatGPT_safe_generate_response(prompt, example_output, special_instruction, 3, fail_safe, - __chat_func_validate, __chat_func_clean_up, True) + output = ChatGPT_generate_structured_response(prompt, Idea_Summary, example_output, special_instruction, 3, fail_safe, __chat_func_validate, __chat_func_clean_up, True) + if output != False: return output, [output, prompt, gpt_param, prompt_input, fail_safe] # ChatGPT Plugin =========================================================== @@ -2449,7 +2485,8 @@ def __chat_func_validate(gpt_response, prompt=""): ############ # return output, [output, prompt, gpt_param, prompt_input, fail_safe] - +class Next_Conversation_Line(BaseModel): + next_conversation_line: str def run_gpt_prompt_generate_next_convo_line(persona, interlocutor_desc, prev_convo, retrieved_summary, test_input=None, verbose=False): def create_prompt_input(persona, interlocutor_desc, prev_convo, retrieved_summary, test_input=None): prompt_input = [persona.scratch.name, @@ -2511,9 +2548,17 @@ def get_fail_safe(): prompt = generate_prompt(prompt_input, prompt_template) fail_safe = get_fail_safe() - output = safe_generate_response(prompt, gpt_param, 5, fail_safe, - __func_validate, __func_clean_up) - + #output = safe_generate_response(prompt, gpt_param, 5, fail_safe, + # __func_validate, __func_clean_up) + output = generate_structured_response( + prompt, + gpt_param, + Next_Conversation_Line, + 5, + fail_safe, + __func_validate, + __func_clean_up + ) if debug or verbose: print_run_prompts(prompt_template, persona, gpt_param, prompt_input, prompt, output) @@ -2557,6 +2602,8 @@ def get_fail_safe(): return output, [output, prompt, gpt_param, prompt_input, fail_safe] +class Planning_Thought(BaseModel): + planning_thought: str def run_gpt_prompt_planning_thought_on_convo(persona, all_utt, test_input=None, verbose=False): def create_prompt_input(persona, all_utt, test_input=None): @@ -2585,8 +2632,9 @@ def get_fail_safe(): prompt = generate_prompt(prompt_input, prompt_template) fail_safe = get_fail_safe() - output = safe_generate_response(prompt, gpt_param, 5, fail_safe, - __func_validate, __func_clean_up) + #output = safe_generate_response(prompt, gpt_param, 5, fail_safe, + # __func_validate, __func_clean_up) + output = generate_structured_response(prompt, gpt_param, Planning_Thought, 5, fail_safe, __func_validate,__func_clean_up) if debug or verbose: print_run_prompts(prompt_template, persona, gpt_param, @@ -2595,12 +2643,8 @@ def get_fail_safe(): return output, [output, prompt, gpt_param, prompt_input, fail_safe] -<<<<<<< HEAD class Convo_Takeaways(BaseModel): takeaway: str - -======= ->>>>>>> origin/dev def run_gpt_prompt_memo_on_convo(persona, all_utt, test_input=None, verbose=False): def create_prompt_input(persona, all_utt, test_input=None): prompt_input = [all_utt, persona.scratch.name, persona.scratch.name, persona.scratch.name] @@ -2643,16 +2687,8 @@ def __chat_func_validate(gpt_response, prompt=""): ############ special_instruction = 'The output should ONLY contain a string that summarizes anything interesting that the agent may have noticed' ######## fail_safe = get_fail_safe() ######## - #output = ChatGPT_safe_generate_response(prompt, example_output, special_instruction, 3, fail_safe, __chat_func_validate, __chat_func_clean_up, True) - output = generate_structured_response( - prompt, - gpt_param, - Convo_Takeaways, - 5, - fail_safe, - __func_validate, - __func_clean_up - ) + output = ChatGPT_generate_structured_response(prompt, 
Convo_Takeaways,example_output, special_instruction, 3, fail_safe, __chat_func_validate, __chat_func_clean_up, True) + if output != False: return output, [output, prompt, gpt_param, prompt_input, fail_safe] # ChatGPT Plugin =========================================================== @@ -2665,15 +2701,14 @@ def __chat_func_validate(gpt_response, prompt=""): ############ prompt = generate_prompt(prompt_input, prompt_template) fail_safe = get_fail_safe() - output = safe_generate_response(prompt, gpt_param, 5, fail_safe, - __func_validate, __func_clean_up) - + #output = safe_generate_response(prompt, gpt_param, 5, fail_safe, __func_validate, __func_clean_up) + output = generate_structured_response(prompt, gpt_param, Convo_Takeaways, 5, fail_safe, __func_validate,__func_clean_up) if debug or verbose: print_run_prompts(prompt_template, persona, gpt_param, prompt_input, prompt, output) return output, [output, prompt, gpt_param, prompt_input, fail_safe] - + def run_gpt_generate_safety_score(persona, comment, test_input=None, verbose=False): def create_prompt_input(comment, test_input=None): From 1546c80ca9564bb45de03c4ea5f90d6af2670591 Mon Sep 17 00:00:00 2001 From: Henry Lemersal Date: Wed, 23 Oct 2024 02:33:38 -0700 Subject: [PATCH 17/21] fix parsing error --- environment/frontend_server/temp_storage/curr_sim_code.json | 2 +- reverie/backend_server/persona/prompt_template/gpt_structure.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/environment/frontend_server/temp_storage/curr_sim_code.json b/environment/frontend_server/temp_storage/curr_sim_code.json index 9ed7bf3470..f7ad328190 100644 --- a/environment/frontend_server/temp_storage/curr_sim_code.json +++ b/environment/frontend_server/temp_storage/curr_sim_code.json @@ -1,3 +1,3 @@ { - "sim_code": "so-9-s-2-399-599" + "sim_code": "so-11-s-23-4599-4799" } \ No newline at end of file diff --git a/reverie/backend_server/persona/prompt_template/gpt_structure.py b/reverie/backend_server/persona/prompt_template/gpt_structure.py index 298dd593dc..661be8b71c 100644 --- a/reverie/backend_server/persona/prompt_template/gpt_structure.py +++ b/reverie/backend_server/persona/prompt_template/gpt_structure.py @@ -205,7 +205,7 @@ def ChatGPT_structured_request(prompt, response_format): print("Prompt:", prompt) try: - completion = client.chat.completions.create( + completion = client.beta.chat.completions.parse( model=openai_config["model"], response_format=response_format, messages=[{"role": "user", "content": prompt}] From 71c2b99c17815186af03cdd72a485fe72993c7dd Mon Sep 17 00:00:00 2001 From: Henry Lemersal Date: Sun, 27 Oct 2024 14:49:13 -0500 Subject: [PATCH 18/21] fix structured output functions --- .../temp_storage/curr_sim_code.json | 2 +- .../persona/cognitive_modules/converse.py | 2 +- .../persona/prompt_template/gpt_structure.py | 5 ++- .../persona/prompt_template/run_gpt_prompt.py | 38 ++++++++++--------- 4 files changed, 26 insertions(+), 21 deletions(-) diff --git a/environment/frontend_server/temp_storage/curr_sim_code.json b/environment/frontend_server/temp_storage/curr_sim_code.json index f7ad328190..afaa39cca2 100644 --- a/environment/frontend_server/temp_storage/curr_sim_code.json +++ b/environment/frontend_server/temp_storage/curr_sim_code.json @@ -1,3 +1,3 @@ { - "sim_code": "so-11-s-23-4599-4799" + "sim_code": "so-38-s-55-10397-10597" } \ No newline at end of file diff --git a/reverie/backend_server/persona/cognitive_modules/converse.py b/reverie/backend_server/persona/cognitive_modules/converse.py index 
a491bf7133..4162031a44 100644 --- a/reverie/backend_server/persona/cognitive_modules/converse.py +++ b/reverie/backend_server/persona/cognitive_modules/converse.py @@ -176,7 +176,7 @@ def agent_chat_v2(maze, init_persona, target_persona): focal_points = [f"{target_persona.scratch.name}"] retrieved = new_retrieve(init_persona, focal_points, 50) relationship = generate_summarize_agent_relationship(init_persona, target_persona, retrieved) - print ("-------- relationshopadsjfhkalsdjf", relationship) + print ("-------- relationship", relationship) last_chat = "" for i in curr_chat[-4:]: last_chat += ": ".join(i) + "\n" diff --git a/reverie/backend_server/persona/prompt_template/gpt_structure.py b/reverie/backend_server/persona/prompt_template/gpt_structure.py index 661be8b71c..13a4ba8bdc 100644 --- a/reverie/backend_server/persona/prompt_template/gpt_structure.py +++ b/reverie/backend_server/persona/prompt_template/gpt_structure.py @@ -575,8 +575,9 @@ def generate_structured_response( prompt=prompt ): return func_clean_up(curr_gpt_response, prompt=prompt) - print("Response validation failed.") - except: + print("Response validation failed.", func_validate(curr_gpt_response, + prompt=prompt), curr_gpt_response.decision) + except Exception as e: print("Could not process response.") if verbose: print("---- repeat count: ", i, curr_gpt_response) diff --git a/reverie/backend_server/persona/prompt_template/run_gpt_prompt.py b/reverie/backend_server/persona/prompt_template/run_gpt_prompt.py index 2ba55d5741..7bcab3b3b5 100644 --- a/reverie/backend_server/persona/prompt_template/run_gpt_prompt.py +++ b/reverie/backend_server/persona/prompt_template/run_gpt_prompt.py @@ -1337,25 +1337,24 @@ def create_prompt_input(init_persona, target_persona, retrieved, prompt_input += [target_persona.name] return prompt_input + def __func_clean_up(gpt_response, prompt=""): + return gpt_response.decision + def __func_validate(gpt_response, prompt=""): try: - if gpt_response.split("Answer in yes or no:")[-1].strip().lower() in ["yes", "no"]: + if __func_clean_up(gpt_response, prompt) in ["yes", "no"]: return True + print(__func_clean_up(gpt_response, prompt)) return False except: traceback.print_exc() return False - - def __func_clean_up(gpt_response, prompt=""): - return gpt_response.split("Answer in yes or no:")[-1].strip().lower() - + def get_fail_safe(): fs = "yes" - return fs - + return fs - - gpt_param = {"engine": openai_config["model"], "max_tokens": 20, + gpt_param = {"engine": openai_config["model"], "max_tokens": 2000, "temperature": 0, "top_p": 1, "stream": False, "frequency_penalty": 0, "presence_penalty": 0, "stop": None} prompt_template = "persona/prompt_template/v2/decide_to_talk_v2.txt" @@ -1369,12 +1368,14 @@ def get_fail_safe(): output = generate_structured_response( prompt, gpt_param, - Conversation_Topic, + Decide_to_Talk, 5, fail_safe, __func_validate, __func_clean_up ) + print("decide_to_talk") + if debug or verbose: print_run_prompts(prompt_template, persona, gpt_param, prompt_input, prompt, output) @@ -2174,7 +2175,7 @@ def create_prompt_input(persona, target_persona, statements, curr_context, test_ return prompt_input def __func_clean_up(gpt_response, prompt=""): - return gpt_response.split('"')[0].strip() + return gpt_response.idea_summary def __func_validate(gpt_response, prompt=""): try: @@ -2210,7 +2211,7 @@ def __chat_func_validate(gpt_response, prompt=""): ############ special_instruction = 'The output should be a string that responds to the question.' 
######## fail_safe = get_fail_safe() ######## output = ChatGPT_generate_structured_response(prompt, Idea_Summary, example_output, special_instruction, 3, fail_safe, __chat_func_validate, __chat_func_clean_up, True) - + print("agent_chat_summarize_ideas") if output != False: return output, [output, prompt, gpt_param, prompt_input, fail_safe] # ChatGPT Plugin =========================================================== @@ -2500,7 +2501,7 @@ def create_prompt_input(persona, interlocutor_desc, prev_convo, retrieved_summar return prompt_input def __func_clean_up(gpt_response, prompt=""): - return gpt_response.split('"')[0].strip() + return gpt_response.next_conversation_line def __func_validate(gpt_response, prompt=""): try: @@ -2559,6 +2560,7 @@ def get_fail_safe(): __func_validate, __func_clean_up ) + print("generate_next_convo_line") if debug or verbose: print_run_prompts(prompt_template, persona, gpt_param, prompt_input, prompt, output) @@ -2611,7 +2613,7 @@ def create_prompt_input(persona, all_utt, test_input=None): return prompt_input def __func_clean_up(gpt_response, prompt=""): - return gpt_response.split('"')[0].strip() + return gpt_response.planning_thought def __func_validate(gpt_response, prompt=""): try: @@ -2635,6 +2637,7 @@ def get_fail_safe(): #output = safe_generate_response(prompt, gpt_param, 5, fail_safe, # __func_validate, __func_clean_up) output = generate_structured_response(prompt, gpt_param, Planning_Thought, 5, fail_safe, __func_validate,__func_clean_up) + print("planning_thought_on_convo") if debug or verbose: print_run_prompts(prompt_template, persona, gpt_param, @@ -2651,7 +2654,7 @@ def create_prompt_input(persona, all_utt, test_input=None): return prompt_input def __func_clean_up(gpt_response, prompt=""): - return gpt_response.split('"')[0].strip() + return gpt_response.takeaway def __func_validate(gpt_response, prompt=""): try: @@ -2666,7 +2669,7 @@ def get_fail_safe(): # ChatGPT Plugin =========================================================== def __chat_func_clean_up(gpt_response, prompt=""): ############ - return gpt_response.strip() + return gpt_response.takeaway def __chat_func_validate(gpt_response, prompt=""): ############ try: @@ -2688,7 +2691,7 @@ def __chat_func_validate(gpt_response, prompt=""): ############ fail_safe = get_fail_safe() ######## output = ChatGPT_generate_structured_response(prompt, Convo_Takeaways,example_output, special_instruction, 3, fail_safe, __chat_func_validate, __chat_func_clean_up, True) - + print("memo_on_convo") if output != False: return output, [output, prompt, gpt_param, prompt_input, fail_safe] # ChatGPT Plugin =========================================================== @@ -2703,6 +2706,7 @@ def __chat_func_validate(gpt_response, prompt=""): ############ fail_safe = get_fail_safe() #output = safe_generate_response(prompt, gpt_param, 5, fail_safe, __func_validate, __func_clean_up) output = generate_structured_response(prompt, gpt_param, Convo_Takeaways, 5, fail_safe, __func_validate,__func_clean_up) + print("memo_on_convo") if debug or verbose: print_run_prompts(prompt_template, persona, gpt_param, prompt_input, prompt, output) From 72671906e64f4892f6dcd3e7f0719705676864b1 Mon Sep 17 00:00:00 2001 From: denialj12 Date: Sun, 27 Oct 2024 19:37:52 -0400 Subject: [PATCH 19/21] Structured Output Revisions --- .../persona/prompt_template/run_gpt_prompt.py | 172 +++++++++++------- 1 file changed, 106 insertions(+), 66 deletions(-) diff --git a/reverie/backend_server/persona/prompt_template/run_gpt_prompt.py 
b/reverie/backend_server/persona/prompt_template/run_gpt_prompt.py index 61adb20c82..5f4afbe42c 100644 --- a/reverie/backend_server/persona/prompt_template/run_gpt_prompt.py +++ b/reverie/backend_server/persona/prompt_template/run_gpt_prompt.py @@ -26,8 +26,7 @@ ChatGPT_safe_generate_response_OLD, generate_prompt, safe_generate_response, - generate_structured_response, - ChatGPT_generate_structured_response, + generate_structured_response ) from persona.prompt_template.print_prompt import print_run_prompts @@ -759,8 +758,9 @@ def get_fail_safe(): return output, [output, prompt, gpt_param, prompt_input, fail_safe] - - +class GameObject(BaseModel): + object:str + def run_gpt_prompt_action_game_object(action_description, persona, maze, @@ -776,8 +776,7 @@ def create_prompt_input(action_description, action_description = action_description.split("(")[-1][:-1] prompt_input += [action_description] - prompt_input += [persona - .s_mem.get_str_accessible_arena_game_objects(temp_address)] + prompt_input += [persona.s_mem.get_str_accessible_arena_game_objects(temp_address)] return prompt_input def __func_validate(gpt_response, prompt=""): @@ -785,8 +784,8 @@ def __func_validate(gpt_response, prompt=""): return False return True - def __func_clean_up(gpt_response, prompt=""): - return ''.join(gpt_response.split("---")[0]).strip() + def __func_clean_up(gpt_response:GameObject, prompt=""): + return ''.join(gpt_response.object.split("---")[0]).strip() def get_fail_safe(): fs = ("bed") @@ -816,6 +815,9 @@ def get_fail_safe(): return output, [output, prompt, gpt_param, prompt_input, fail_safe] +#TODO +class Pronunciatio(BaseModel): + emoji: str def run_gpt_prompt_pronunciatio(action_description, persona, verbose=False): def create_prompt_input(action_description): @@ -823,20 +825,6 @@ def create_prompt_input(action_description): action_description = action_description.split("(")[-1].split(")")[0] prompt_input = [action_description] return prompt_input - - # def __func_clean_up(gpt_response, prompt=""): - # cr = gpt_response.strip() - # if len(cr) > 3: - # cr = cr[:3] - # return cr - - # def __func_validate(gpt_response, prompt=""): - # try: - # __func_clean_up(gpt_response, prompt="") - # if len(gpt_response) == 0: - # return False - # except: return False - # return True def get_fail_safe(): fs = "😋" @@ -844,17 +832,17 @@ def get_fail_safe(): # ChatGPT Plugin =========================================================== - def __chat_func_clean_up(gpt_response, prompt=""): ############ + def __func_clean_up(gpt_response:Pronunciatio, prompt=""): ############ pattern = r'[\U0001F600-\U0001F64F\U0001F300-\U0001F5FF\U0001F680-\U0001F6FF\U0001F700-\U0001F77F\U0001F780-\U0001F7FF\U0001F800-\U0001F8FF\U0001F900-\U0001F9FF\U0001FA00-\U0001FA6F\U0001FA70-\U0001FAFF]' - result = re.search(pattern, gpt_response) + result = re.search(pattern, gpt_response.emoji) if result: return result.group() raise ValueError("No emoji found in the response.") - def __chat_func_validate(gpt_response, prompt=""): ############ + def __func_validate(gpt_response:Pronunciatio, prompt=""): ############ try: - __chat_func_clean_up(gpt_response, prompt="") - if len(gpt_response) == 0: + __func_clean_up(gpt_response, prompt="") + if len(gpt_response.emoji) == 0: return False except: traceback.print_exc() @@ -871,8 +859,16 @@ def __chat_func_validate(gpt_response, prompt=""): ############ example_output = "🛁🧖‍♀️" ######## special_instruction = "The value for the output must ONLY contain the emojis." 
######## fail_safe = get_fail_safe() - output = ChatGPT_safe_generate_response(prompt, example_output, special_instruction, 3, fail_safe, - __chat_func_validate, __chat_func_clean_up, True) + output = ChatGPT_safe_generate_response( + prompt, + gpt_param, + Pronunciatio, + 3, + fail_safe, + __func_validate, + __func_clean_up, + True + ) if verbose: print_run_prompts( @@ -1611,7 +1607,10 @@ def get_fail_safe(init_persona, target_persona): return output, [output, prompt, gpt_param, prompt_input, fail_safe] - +#TODO +class Conversation(BaseModel): + log:str + def run_gpt_prompt_summarize_conversation(persona, conversation, test_input=None, verbose=False): def create_prompt_input(conversation, test_input=None): convo_str = "" @@ -1621,8 +1620,8 @@ def create_prompt_input(conversation, test_input=None): prompt_input = [convo_str] return prompt_input - def __func_clean_up(gpt_response, prompt=""): - ret = "conversing about " + gpt_response.strip() + def __func_clean_up(gpt_response:Conversation, prompt=""): + ret = "conversing about " + gpt_response.log.strip() return ret def __func_validate(gpt_response, prompt=""): @@ -1636,18 +1635,6 @@ def __func_validate(gpt_response, prompt=""): def get_fail_safe(): return "conversing with a housemate about morning greetings" - # ChatGPT Plugin =========================================================== - def __chat_func_clean_up(gpt_response, prompt=""): ############ - ret = "conversing about " + gpt_response.strip() - return ret - - def __chat_func_validate(gpt_response, prompt=""): ############ - try: - __func_clean_up(gpt_response, prompt) - return True - except: - traceback.print_exc() - return False print ("DEBUG 11") ######## gpt_param = {"engine": openai_config["model"], "max_tokens": 15, @@ -1659,8 +1646,16 @@ def __chat_func_validate(gpt_response, prompt=""): ############ example_output = "conversing about what to eat for lunch" ######## special_instruction = "The output must continue the sentence above by filling in the tag. Don't start with 'this is a conversation about...' Just finish the sentence but do not miss any important details (including who are chatting)." 
######## fail_safe = get_fail_safe() ######## - output = ChatGPT_safe_generate_response(prompt, example_output, special_instruction, 3, fail_safe, - __chat_func_validate, __chat_func_clean_up, True) + output = ChatGPT_safe_generate_response( + prompt, + gpt_param, + Conversation, + 3, + fail_safe, + __func_validate, + __func_clean_up, + True + ) if output != False: @@ -1841,6 +1836,9 @@ def get_fail_safe(): return output, [output, prompt, gpt_param, prompt_input, fail_safe] +class Poignancy(BaseModel): + number:int + def run_gpt_prompt_event_poignancy(persona, event_description, test_input=None, verbose=False): def create_prompt_input(persona, event_description, test_input=None): prompt_input = [persona.scratch.name, @@ -1849,8 +1847,8 @@ def create_prompt_input(persona, event_description, test_input=None): event_description] return prompt_input - def __func_clean_up(gpt_response, prompt=""): - gpt_response = int(gpt_response.strip()) + def __func_clean_up(gpt_response:Poignancy, prompt=""): + gpt_response = gpt_response.number return gpt_response def __func_validate(gpt_response, prompt=""): @@ -1865,14 +1863,14 @@ def get_fail_safe(): return 4 # ChatGPT Plugin =========================================================== - def __chat_func_clean_up(gpt_response, prompt=""): ############ - gpt_response = int(gpt_response) + def __chat_func_clean_up(gpt_response:Poignancy, prompt=""): ############ + gpt_response = gpt_response.number return gpt_response def __chat_func_validate(gpt_response, prompt=""): ############ try: - __func_clean_up(gpt_response, prompt) - return True + poignancy = __func_clean_up(gpt_response, prompt) + return poignancy is not None except: traceback.print_exc() return False @@ -1887,8 +1885,16 @@ def __chat_func_validate(gpt_response, prompt=""): ############ example_output = "5" ######## special_instruction = "The output should ONLY contain ONE integer value on the scale of 1 to 10." ######## fail_safe = get_fail_safe() ######## - output = ChatGPT_safe_generate_response(prompt, example_output, special_instruction, 3, fail_safe, - __chat_func_validate, __chat_func_clean_up, True) + output = generate_structured_response( + prompt, + gpt_param, + Poignancy, + 3, + fail_safe, + __func_validate, + __func_clean_up, + True + ) if output != False: return output, [output, prompt, gpt_param, prompt_input, fail_safe] # ChatGPT Plugin =========================================================== @@ -2211,7 +2217,17 @@ def __chat_func_validate(gpt_response, prompt=""): ############ example_output = 'Jane Doe is working on a project' ######## special_instruction = 'The output should be a string that responds to the question.' 
######## fail_safe = get_fail_safe() ######## - output = ChatGPT_generate_structured_response(prompt, Idea_Summary, example_output, special_instruction, 3, fail_safe, __chat_func_validate, __chat_func_clean_up, True) + output = generate_structured_response( + prompt, + Idea_Summary, + example_output, + special_instruction, + 3, + fail_safe, + __func_validate, + __func_clean_up, + True) + print("agent_chat_summarize_ideas") if output != False: return output, [output, prompt, gpt_param, prompt_input, fail_safe] @@ -2422,22 +2438,27 @@ def __chat_func_validate(gpt_response, prompt=""): ############ # ======================= # ======================= - +#TODO +class Idea_Summary(BaseModel): + idea_summary: str + def run_gpt_prompt_summarize_ideas(persona, statements, question, test_input=None, verbose=False): def create_prompt_input(persona, statements, question, test_input=None): prompt_input = [statements, persona.scratch.name, question] return prompt_input - def __func_clean_up(gpt_response, prompt=""): - return gpt_response.split('"')[0].strip() + def __func_clean_up(gpt_response:Idea_Summary, prompt=""): + return gpt_response.idea_summary.strip() def __func_validate(gpt_response, prompt=""): try: - __func_clean_up(gpt_response, prompt) - return True + gpt_response = __func_clean_up(gpt_response, prompt) + if gpt_response is None: + return False except: traceback.print_exc() return False + return True def get_fail_safe(): return "..." @@ -2464,8 +2485,16 @@ def __chat_func_validate(gpt_response, prompt=""): ############ example_output = 'Jane Doe is working on a project' ######## special_instruction = 'The output should be a string that responds to the question.' ######## fail_safe = get_fail_safe() ######## - output = ChatGPT_safe_generate_response(prompt, example_output, special_instruction, 3, fail_safe, - __chat_func_validate, __chat_func_clean_up, True) + output = ChatGPT_safe_generate_response( + prompt, + gpt_param, + Idea_Summary, + 3, + fail_safe, + __func_validate, + __func_clean_up, + True + ) if output != False: return output, [output, prompt, gpt_param, prompt_input, fail_safe] # ChatGPT Plugin =========================================================== @@ -2568,14 +2597,17 @@ def get_fail_safe(): return output, [output, prompt, gpt_param, prompt_input, fail_safe] +#TODO +class Inner_Thought(BaseModel): + thought: str def run_gpt_prompt_generate_whisper_inner_thought(persona, whisper, test_input=None, verbose=False): def create_prompt_input(persona, whisper, test_input=None): prompt_input = [persona.scratch.name, whisper] return prompt_input - def __func_clean_up(gpt_response, prompt=""): - return gpt_response.split('"')[0].strip() + def __func_clean_up(gpt_response:Inner_Thought, prompt=""): + return gpt_response.thought.split('"')[0].strip() def __func_validate(gpt_response, prompt=""): try: @@ -2596,8 +2628,16 @@ def get_fail_safe(): prompt = generate_prompt(prompt_input, prompt_template) fail_safe = get_fail_safe() - output = safe_generate_response(prompt, gpt_param, 5, fail_safe, - __func_validate, __func_clean_up) + output = safe_generate_response( + prompt, + gpt_param, + Inner_Thought, + 5, + fail_safe, + __func_validate, + __func_clean_up, + True + ) if debug or verbose: print_run_prompts(prompt_template, persona, gpt_param, From ce9039b534787d99fb6ad28ae949b346b3c1aa46 Mon Sep 17 00:00:00 2001 From: chowington Date: Tue, 29 Oct 2024 13:31:48 -0400 Subject: [PATCH 20/21] Clean PR --- .gitignore | 9 +- README.md | 85 +----------------- 
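The pattern the preceding patches converge on — a Pydantic schema handed to the parse endpoint inside a validate-and-retry loop — reduces to a short standalone sketch. The wrapper below is illustrative rather than the repo's exact helper (which also reads openai_config.json and logs costs), and the model name is an assumption:

```python
from openai import OpenAI
from pydantic import BaseModel

client = OpenAI()

class ConvoTakeaways(BaseModel):
    takeaway: str

def structured_request(prompt: str, schema: type[BaseModel], repeat: int = 3, fail_safe: str = "error"):
    """Ask for a schema-constrained reply; retry a few times before falling back."""
    for attempt in range(repeat):
        try:
            completion = client.beta.chat.completions.parse(
                model="gpt-4o-2024-08-06",  # assumption; the repo reads the model from openai_config.json
                messages=[{"role": "user", "content": prompt}],
                response_format=schema,
            )
            parsed = completion.choices[0].message.parsed
            if parsed is not None:  # parsed is None on refusal or parse failure
                return parsed
        except Exception as exc:
            print(f"attempt {attempt} failed: {exc}")
    return fail_safe

print(structured_request("One-line takeaway: Klaus told Maria about his research paper.", ConvoTakeaways))
```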
environment/frontend_server/manage.py | 1 + environment/frontend_server/requirements.txt | 68 +++++++++++++++ .../environment/0.json | 17 ++++ .../associative_memory/embeddings.json | 1 + .../associative_memory/kw_strength.json | 2 + .../associative_memory/nodes.json | 1 + .../bootstrap_memory/scratch.json | 51 +++++++++++ .../bootstrap_memory/spatial_memory.json | 66 ++++++++++++++ .../associative_memory/embeddings.json | 1 + .../associative_memory/kw_strength.json | 2 + .../associative_memory/nodes.json | 1 + .../bootstrap_memory/scratch.json | 51 +++++++++++ .../bootstrap_memory/spatial_memory.json | 86 ++++++++++++++++++ .../associative_memory/embeddings.json | 1 + .../associative_memory/kw_strength.json | 2 + .../associative_memory/nodes.json | 1 + .../Maria Lopez/bootstrap_memory/scratch.json | 51 +++++++++++ .../bootstrap_memory/spatial_memory.json | 87 +++++++++++++++++++ .../reverie/meta.json | 13 +++ output.txt | 7 -- print_conversations.py | 47 +--------- .../persona/prompt_template/run_gpt_prompt.py | 9 +- .../prompt_template/structured_output.py | 22 ----- reverie/backend_server/survey.ipynb | 2 +- reverie/backend_server/test.py | 4 +- reverie/backend_server/utils.py | 20 ----- run_backend.sh | 2 +- run_backend_automatic.sh | 2 +- test.py | 48 ---------- 31 files changed, 517 insertions(+), 243 deletions(-) create mode 100644 environment/frontend_server/requirements.txt create mode 100644 environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/environment/0.json create mode 100644 environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Isabella Rodriguez/bootstrap_memory/associative_memory/embeddings.json create mode 100644 environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Isabella Rodriguez/bootstrap_memory/associative_memory/kw_strength.json create mode 100644 environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Isabella Rodriguez/bootstrap_memory/associative_memory/nodes.json create mode 100644 environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Isabella Rodriguez/bootstrap_memory/scratch.json create mode 100644 environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Isabella Rodriguez/bootstrap_memory/spatial_memory.json create mode 100644 environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Klaus Mueller/bootstrap_memory/associative_memory/embeddings.json create mode 100644 environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Klaus Mueller/bootstrap_memory/associative_memory/kw_strength.json create mode 100644 environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Klaus Mueller/bootstrap_memory/associative_memory/nodes.json create mode 100644 environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Klaus Mueller/bootstrap_memory/scratch.json create mode 100644 environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Klaus Mueller/bootstrap_memory/spatial_memory.json create mode 100644 environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Maria Lopez/bootstrap_memory/associative_memory/embeddings.json create mode 100644 environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Maria Lopez/bootstrap_memory/associative_memory/kw_strength.json create mode 100644 environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Maria 
Lopez/bootstrap_memory/associative_memory/nodes.json create mode 100644 environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Maria Lopez/bootstrap_memory/scratch.json create mode 100644 environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Maria Lopez/bootstrap_memory/spatial_memory.json create mode 100644 environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/reverie/meta.json delete mode 100644 output.txt delete mode 100644 reverie/backend_server/persona/prompt_template/structured_output.py delete mode 100644 reverie/backend_server/utils.py delete mode 100644 test.py diff --git a/.gitignore b/.gitignore index e3f9ee44f8..aeb19ab758 100644 --- a/.gitignore +++ b/.gitignore @@ -14,7 +14,7 @@ logs/* !logs/skip-morning_2024-05-15_13-54-44.txt ### Add simulations to keep here ### -!environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/* +!environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/ !environment/frontend_server/storage/base_the_ville_n25/* !environment/frontend_server/storage/July1_the_ville_isabella_maria_klaus-step-3-*/* !environment/frontend_server/storage/skip-morning-s-14/ @@ -124,9 +124,4 @@ ENV/ # Rope project settings .ropeproject -<<<<<<< HEAD -.history/ -environment/frontend_server/temp_storage/curr_sim_code.json -======= -.history/ ->>>>>>> origin/dev +.history/ \ No newline at end of file diff --git a/README.md b/README.md index c6f7bb2862..1464593686 100644 --- a/README.md +++ b/README.md @@ -52,81 +52,7 @@ Create a file called `openai_config.json` in the root directory. "cost-upperbound": 10 } ``` -<<<<<<< HEAD -OpenAI example: -```json -{ - "client": "openai", - "model": "gpt-4-turbo", - "model-key": "", - "model-costs": { - "input": 0.5, - "output": 1.5 - }, - "embeddings-client": "openai", - "embeddings": "text-embedding-3-small", - "embeddings-key": "", - "embeddings-costs": { - "input": 0.02, - "output": 0.0 - }, - "experiment-name": "simulacra-test", - "cost-upperbound": 10 -} -``` -======= ->>>>>>> origin/chowington-search-and-rescue -<<<<<<< HEAD -Azure example: -```json -{ - "client": "azure", - "model": "gpt-4o-mini", - "model-key": "", - "model-endpoint": "", - "model-api-version": "", - "model-costs": { - "input": 0.5, - "output": 1.5 - }, - "embeddings-client": "azure", - "embeddings": "text-embedding-3-small", - "embeddings-key": "", - "embeddings-endpoint": "", - "embeddings-api-version": "", - "embeddings-costs": { - "input": 0.02, - "output": 0.0 - }, - "experiment-name": "simulacra-test", - "cost-upperbound": 10 -} -``` -OpenAI example: -```json -{ - "client": "openai", - "model": "gpt-4o-mini", - "model-key": "", - "model-costs": { - "input": 0.5, - "output": 1.5 - }, - "embeddings-client": "openai", - "embeddings": "text-embedding-3-small", - "embeddings-key": "", - "embeddings-costs": { - "input": 0.02, - "output": 0.0 - }, - "experiment-name": "simulacra-test", - "cost-upperbound": 10 -} -``` - -======= ->>>>>>> origin/dev Feel free to change and test also other models (and change accordingly the input and output costs). Note that this repo uses OpenAI's Structured Outputs feature, which is currently only available for certain models, like the GPT-4o series. Check the OpenAI docs for more info. 
\ The generation and the embedding models are configured separately to be able to use different clients.\ Change also the `cost-upperbound` according to your needs (the cost computation is done using "[openai-cost-logger](https://github.com/drudilorenzo/openai-cost-logger)" and the costs are specified per million tokens). @@ -209,7 +135,7 @@ See all the details of your expenses using the notebook "[cost_viz.ipynb](cost_v ### 1. base_the_ville_isabella_maria_klaus -- **Model**: "gpt-4o-mini" +- **Model**: "gpt-3.5-turbo-0125" - **Embeddings**: "text-embedding-3-small" - **N. Agents**: 3 - **Steps**: ~5000 @@ -217,13 +143,8 @@ See all the details of your expenses using the notebook "[cost_viz.ipynb](cost_v ### 2. base_the_ville_n25 -<<<<<<< HEAD -- See the simulation saved: [skip-morning-s-14](https://github.com/drudilorenzo/generative_agents/tree/fix-and-improve/environment/frontend_server/storage/skip-morning-s-14) -- **Model**: "gpt-4o-mini" -======= - See the simulation saved: [skip-morning-s-14](environment/frontend_server/storage/skip-morning-s-14) - **Model**: "gpt-3.5-turbo-0125" ->>>>>>> origin/dev - **Embeddings**: "text-embedding-3-small" - **N. Agents**: 25 - **Steps**: ~3000 (until ~8 a.m.) @@ -231,9 +152,9 @@ See all the details of your expenses using the notebook "[cost_viz.ipynb](cost_v ### 3. base_the_ville_n25 -- **Model**: "gpt-4o-mini" +- **Model**: "gpt-3.5-turbo-0125" - **Embeddings**: "text-embedding-3-small" -- **N. Agents**: 25 +- **N. Agents**: 25 - **Steps**: ~8650 (full day) - **Final Cost**: ~18.5 USD diff --git a/environment/frontend_server/manage.py b/environment/frontend_server/manage.py index dd9d9e68a3..30edeba553 100644 --- a/environment/frontend_server/manage.py +++ b/environment/frontend_server/manage.py @@ -3,6 +3,7 @@ import os import sys + def main(): os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'frontend_server.settings') try: diff --git a/environment/frontend_server/requirements.txt b/environment/frontend_server/requirements.txt new file mode 100644 index 0000000000..61b15a9c3d --- /dev/null +++ b/environment/frontend_server/requirements.txt @@ -0,0 +1,68 @@ +aiohttp==3.8.3 +aiosignal==1.3.1 +asgiref==3.5.2 +async-generator==1.10 +async-timeout==4.0.2 +attrs==22.2.0 +boto==2.49.0 +botocore==1.29.43 +certifi==2021.10.8 +charset-normalizer==2.0.12 +click==8.0.3 +cycler==0.11.0 +dj-database-url==0.5.0 +Django==2.2 +django-cors-headers==2.5.3 +django-storages-redux==1.3.3 +exceptiongroup==1.1.0 +frozenlist==1.3.3 +gensim==3.8.0 +gunicorn==20.1.0 +h11==0.14.0 +idna==3.3 +importlib-metadata==4.8.2 +jmespath==1.0.1 +joblib==1.1.0 +kiwisolver==1.3.1 +matplotlib==3.3.4 +multidict==6.0.4 +nltk==3.6.5 +numpy==1.19.5 +openai==0.27.0 +outcome==1.2.0 +packaging==23.0 +pandas==1.1.5 +patsy==0.5.3 +Pillow==8.4.0 +psycopg2-binary==2.9.5 +pycparser==2.21 +pyparsing==3.0.6 +PySocks==1.7.1 +python-dateutil==2.8.2 +pytz==2021.3 +regex==2021.11.10 +requests==2.26.0 +s3transfer==0.6.0 +scikit-learn==0.24.2 +scikit-posthocs==0.7.0 +scipy==1.5.4 +seaborn==0.12.2 +selenium==4.8.2 +six==1.16.0 +sklearn==0.0 +smart-open==5.2.1 +sniffio==1.3.0 +sortedcontainers==2.4.0 +sqlparse==0.4.3 +statsmodels==0.13.5 +threadpoolctl==3.0.0 +tqdm==4.62.3 +trio==0.22.0 +trio-websocket==0.9.2 +trueskill==0.4.5 +typing-extensions==4.0.0 +urllib3==1.26.7 +wsproto==1.2.0 +yarl==1.8.2 +yellowbrick==1.3.post1 +zipp==3.6.0 \ No newline at end of file diff --git a/environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/environment/0.json 
b/environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/environment/0.json
new file mode 100644
index 0000000000..0b2fb23dc3
--- /dev/null
+++ b/environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/environment/0.json
@@ -0,0 +1,17 @@
+{
+  "Isabella Rodriguez": {
+    "maze": "the_ville",
+    "x": 72,
+    "y": 14
+  },
+  "Klaus Mueller": {
+    "maze": "the_ville",
+    "x": 126,
+    "y": 46
+  },
+  "Maria Lopez": {
+    "maze": "the_ville",
+    "x": 123,
+    "y": 57
+  }
+}
diff --git a/environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Isabella Rodriguez/bootstrap_memory/associative_memory/embeddings.json b/environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Isabella Rodriguez/bootstrap_memory/associative_memory/embeddings.json
new file mode 100644
index 0000000000..9e26dfeeb6
--- /dev/null
+++ b/environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Isabella Rodriguez/bootstrap_memory/associative_memory/embeddings.json
@@ -0,0 +1 @@
+{}
\ No newline at end of file
diff --git a/environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Isabella Rodriguez/bootstrap_memory/associative_memory/kw_strength.json b/environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Isabella Rodriguez/bootstrap_memory/associative_memory/kw_strength.json
new file mode 100644
index 0000000000..6dc73c1c85
--- /dev/null
+++ b/environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Isabella Rodriguez/bootstrap_memory/associative_memory/kw_strength.json
@@ -0,0 +1,2 @@
+{"kw_strength_event": {},
+ "kw_strength_thought": {}}
\ No newline at end of file
diff --git a/environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Isabella Rodriguez/bootstrap_memory/associative_memory/nodes.json b/environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Isabella Rodriguez/bootstrap_memory/associative_memory/nodes.json
new file mode 100644
index 0000000000..9e26dfeeb6
--- /dev/null
+++ b/environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Isabella Rodriguez/bootstrap_memory/associative_memory/nodes.json
@@ -0,0 +1 @@
+{}
\ No newline at end of file
diff --git a/environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Isabella Rodriguez/bootstrap_memory/scratch.json b/environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Isabella Rodriguez/bootstrap_memory/scratch.json
new file mode 100644
index 0000000000..dbed4b705e
--- /dev/null
+++ b/environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Isabella Rodriguez/bootstrap_memory/scratch.json
@@ -0,0 +1,51 @@
+{
+  "vision_r": 8,
+  "att_bandwidth": 8,
+  "retention": 8,
+  "curr_time": null,
+  "curr_tile": null,
+  "daily_plan_req": "Isabella Rodriguez opens Hobbs Cafe at 8am every day, and works at the counter until 8pm, at which point she closes the cafe.",
+  "name": "Isabella Rodriguez",
+  "first_name": "Isabella",
+  "last_name": "Rodriguez",
+  "age": 34,
+  "innate": "friendly, outgoing, hospitable",
+  "learned": "Isabella Rodriguez is a cafe owner of Hobbs Cafe who loves to make people feel welcome. She is always looking for ways to make the cafe a place where people can come to relax and enjoy themselves.",
+  "currently": "Isabella Rodriguez is planning on having a Valentine's Day party at Hobbs Cafe with her customers on February 14th, 2023 at 5pm. She is gathering party material, and is telling everyone to join the party at Hobbs Cafe on February 14th, 2023, from 5pm to 7pm.",
+  "lifestyle": "Isabella Rodriguez goes to bed around 11pm, wakes up around 6am.",
+  "living_area": "the Ville:Isabella Rodriguez's apartment:main room",
+  "concept_forget": 100,
+  "daily_reflection_time": 180,
+  "daily_reflection_size": 5,
+  "overlap_reflect_th": 4,
+  "kw_strg_event_reflect_th": 10,
+  "kw_strg_thought_reflect_th": 9,
+
+  "recency_w": 1,
+  "relevance_w": 1,
+  "importance_w": 1,
+  "recency_decay": 0.995,
+  "importance_trigger_max": 150,
+  "importance_trigger_curr": 150,
+  "importance_ele_n": 0,
+  "thought_count": 5,
+
+  "daily_req": [],
+  "f_daily_schedule": [],
+  "f_daily_schedule_hourly_org": [],
+  "act_address": null,
+  "act_start_time": null,
+  "act_duration": null,
+  "act_description": null,
+  "act_pronunciatio": null,
+  "act_event": ["Isabella Rodriguez", null, null],
+  "act_obj_description": null,
+  "act_obj_pronunciatio": null,
+  "act_obj_event": [null, null, null],
+  "chatting_with": null,
+  "chat": null,
+  "chatting_with_buffer": {},
+  "chatting_end_time": null,
+  "act_path_set": false,
+  "planned_path": []
+}
\ No newline at end of file
diff --git a/environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Isabella Rodriguez/bootstrap_memory/spatial_memory.json b/environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Isabella Rodriguez/bootstrap_memory/spatial_memory.json
new file mode 100644
index 0000000000..f881579508
--- /dev/null
+++ b/environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Isabella Rodriguez/bootstrap_memory/spatial_memory.json
@@ -0,0 +1,66 @@
+{
+  "the Ville": {
+    "Hobbs Cafe": {
+      "cafe": [
+        "refrigerator",
+        "cafe customer seating",
+        "cooking area",
+        "kitchen sink",
+        "behind the cafe counter",
+        "piano"
+      ]
+    },
+    "Isabella Rodriguez's apartment": {
+      "main room": [
+        "bed",
+        "desk",
+        "refrigerator",
+        "closet",
+        "shelf"
+      ]
+    },
+    "The Rose and Crown Pub": {
+      "pub": [
+        "shelf",
+        "refrigerator",
+        "bar customer seating",
+        "behind the bar counter",
+        "kitchen sink",
+        "cooking area",
+        "microphone"
+      ]
+    },
+    "Harvey Oak Supply Store": {
+      "supply store": [
+        "supply store product shelf",
+        "behind the supply store counter",
+        "supply store counter"
+      ]
+    },
+    "The Willows Market and Pharmacy": {
+      "store": [
+        "behind the pharmacy counter",
+        "pharmacy store shelf",
+        "pharmacy store counter",
+        "grocery store shelf",
+        "behind the grocery counter",
+        "grocery store counter"
+      ]
+    },
+    "Dorm for Oak Hill College": {
+      "garden": [
+        "dorm garden"
+      ],
+      "common room": [
+        "common room sofa",
+        "pool table",
+        "common room table"
+      ]
+    },
+    "Johnson Park": {
+      "park": [
+        "park garden"
+      ]
+    }
+  }
+}
\ No newline at end of file
diff --git a/environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Klaus Mueller/bootstrap_memory/associative_memory/embeddings.json b/environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Klaus Mueller/bootstrap_memory/associative_memory/embeddings.json
new file mode 100644
index 0000000000..9e26dfeeb6
--- /dev/null
+++ b/environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Klaus Mueller/bootstrap_memory/associative_memory/embeddings.json
@@ -0,0 +1 @@
+{}
\ No newline at end of file
diff --git a/environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Klaus
Mueller/bootstrap_memory/associative_memory/kw_strength.json b/environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Klaus Mueller/bootstrap_memory/associative_memory/kw_strength.json new file mode 100644 index 0000000000..6dc73c1c85 --- /dev/null +++ b/environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Klaus Mueller/bootstrap_memory/associative_memory/kw_strength.json @@ -0,0 +1,2 @@ +{"kw_strength_event": {}, + "kw_strength_thought": {}} \ No newline at end of file diff --git a/environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Klaus Mueller/bootstrap_memory/associative_memory/nodes.json b/environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Klaus Mueller/bootstrap_memory/associative_memory/nodes.json new file mode 100644 index 0000000000..9e26dfeeb6 --- /dev/null +++ b/environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Klaus Mueller/bootstrap_memory/associative_memory/nodes.json @@ -0,0 +1 @@ +{} \ No newline at end of file diff --git a/environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Klaus Mueller/bootstrap_memory/scratch.json b/environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Klaus Mueller/bootstrap_memory/scratch.json new file mode 100644 index 0000000000..7b0ce7d722 --- /dev/null +++ b/environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Klaus Mueller/bootstrap_memory/scratch.json @@ -0,0 +1,51 @@ +{ + "vision_r": 8, + "att_bandwidth": 8, + "retention": 8, + "curr_time": null, + "curr_tile": null, + "daily_plan_req": "Klaus Mueller goes to the library at Oak Hill College early in the morning, spends his days writing, and eats at Hobbs Cafe.", + "name": "Klaus Mueller", + "first_name": "Klaus", + "last_name": "Mueller", + "age": 20, + "innate": "kind, inquisitive, passionate", + "learned": "Klaus Mueller is a student at Oak Hill College studying sociology. 
He is passionate about social justice and loves to explore different perspectives.",
+  "currently": "Klaus Mueller is writing a research paper on the effects of gentrification in low-income communities.",
+  "lifestyle": "Klaus Mueller goes to bed around 11pm, wakes up around 7am, eats dinner around 5pm.",
+  "living_area": "the Ville:Dorm for Oak Hill College:Klaus Mueller's room",
+  "concept_forget": 100,
+  "daily_reflection_time": 180,
+  "daily_reflection_size": 5,
+  "overlap_reflect_th": 4,
+  "kw_strg_event_reflect_th": 10,
+  "kw_strg_thought_reflect_th": 9,
+
+  "recency_w": 1,
+  "relevance_w": 1,
+  "importance_w": 1,
+  "recency_decay": 0.99,
+  "importance_trigger_max": 150,
+  "importance_trigger_curr": 150,
+  "importance_ele_n": 0,
+  "thought_count": 5,
+
+  "daily_req": [],
+  "f_daily_schedule": [],
+  "f_daily_schedule_hourly_org": [],
+  "act_address": null,
+  "act_start_time": null,
+  "act_duration": null,
+  "act_description": null,
+  "act_pronunciatio": null,
+  "act_event": ["Klaus Mueller", null, null],
+  "act_obj_description": null,
+  "act_obj_pronunciatio": null,
+  "act_obj_event": [null, null, null],
+  "chatting_with": null,
+  "chat": null,
+  "chatting_with_buffer": {},
+  "chatting_end_time": null,
+  "act_path_set": false,
+  "planned_path": []
+}
\ No newline at end of file
diff --git a/environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Klaus Mueller/bootstrap_memory/spatial_memory.json b/environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Klaus Mueller/bootstrap_memory/spatial_memory.json
new file mode 100644
index 0000000000..4f41686772
--- /dev/null
+++ b/environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Klaus Mueller/bootstrap_memory/spatial_memory.json
@@ -0,0 +1,86 @@
+{
+  "the Ville": {
+    "Oak Hill College": {
+      "hallway": [],
+      "library": [
+        "library sofa",
+        "library table",
+        "bookshelf"
+      ],
+      "classroom": [
+        "blackboard",
+        "classroom podium",
+        "classroom student seating"
+      ]
+    },
+    "Dorm for Oak Hill College": {
+      "garden": [
+        "dorm garden"
+      ],
+      "Klaus Mueller's room": [
+        "bed",
+        "game console",
+        "closet",
+        "desk"
+      ],
+      "woman's bathroom": [
+        "toilet",
+        "shower",
+        "bathroom sink"
+      ],
+      "common room": [
+        "common room sofa",
+        "pool table",
+        "common room table"
+      ],
+      "man's bathroom": [
+        "shower",
+        "bathroom sink",
+        "toilet"
+      ]
+    },
+    "The Willows Market and Pharmacy": {
+      "store": [
+        "grocery store shelf",
+        "behind the grocery counter",
+        "grocery store counter",
+        "pharmacy store shelf",
+        "pharmacy store counter",
+        "behind the pharmacy counter"
+      ]
+    },
+    "Harvey Oak Supply Store": {
+      "supply store": [
+        "supply store product shelf",
+        "behind the supply store counter",
+        "supply store counter"
+      ]
+    },
+    "Johnson Park": {
+      "park": [
+        "park garden"
+      ]
+    },
+    "The Rose and Crown Pub": {
+      "pub": [
+        "shelf",
+        "refrigerator",
+        "bar customer seating",
+        "behind the bar counter",
+        "kitchen sink",
+        "cooking area",
+        "microphone"
+      ]
+    },
+    "Hobbs Cafe": {
+      "cafe": [
+        "refrigerator",
+        "cafe customer seating",
+        "cooking area",
+        "kitchen sink",
+        "behind the cafe counter",
+        "piano"
+      ]
+    }
+  }
+}
\ No newline at end of file
diff --git a/environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Maria Lopez/bootstrap_memory/associative_memory/embeddings.json b/environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Maria Lopez/bootstrap_memory/associative_memory/embeddings.json
new file mode 100644
index 0000000000..9e26dfeeb6
--- /dev/null
+++ b/environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Maria Lopez/bootstrap_memory/associative_memory/embeddings.json
@@ -0,0 +1 @@
+{}
\ No newline at end of file
diff --git a/environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Maria Lopez/bootstrap_memory/associative_memory/kw_strength.json b/environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Maria Lopez/bootstrap_memory/associative_memory/kw_strength.json
new file mode 100644
index 0000000000..6dc73c1c85
--- /dev/null
+++ b/environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Maria Lopez/bootstrap_memory/associative_memory/kw_strength.json
@@ -0,0 +1,2 @@
+{"kw_strength_event": {},
+ "kw_strength_thought": {}}
\ No newline at end of file
diff --git a/environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Maria Lopez/bootstrap_memory/associative_memory/nodes.json b/environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Maria Lopez/bootstrap_memory/associative_memory/nodes.json
new file mode 100644
index 0000000000..9e26dfeeb6
--- /dev/null
+++ b/environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Maria Lopez/bootstrap_memory/associative_memory/nodes.json
@@ -0,0 +1 @@
+{}
\ No newline at end of file
diff --git a/environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Maria Lopez/bootstrap_memory/scratch.json b/environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Maria Lopez/bootstrap_memory/scratch.json
new file mode 100644
index 0000000000..c3a304952d
--- /dev/null
+++ b/environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Maria Lopez/bootstrap_memory/scratch.json
@@ -0,0 +1,51 @@
+{
+  "vision_r": 8,
+  "att_bandwidth": 8,
+  "retention": 8,
+  "curr_time": null,
+  "curr_tile": null,
+  "daily_plan_req": "Maria Lopez spends at least 3 hours a day Twitch streaming or gaming.",
+  "name": "Maria Lopez",
+  "first_name": "Maria",
+  "last_name": "Lopez",
+  "age": 21,
+  "innate": "energetic, enthusiastic, inquisitive",
+  "learned": "Maria Lopez is a student at Oak Hill College studying physics and a part-time Twitch game streamer who loves to connect with people and explore new ideas.",
+  "currently": "Maria Lopez is working on her physics degree and streaming games on Twitch to make some extra money. She visits Hobbs Cafe for studying and eating just about every day.",
+  "lifestyle": "Maria Lopez goes to bed around 2am, wakes up around 9am, eats dinner around 6pm.
She likes to hang out at Hobbs Cafe if it's before 6pm.", + "living_area": "the Ville:Dorm for Oak Hill College:Maria Lopez's room", + "concept_forget": 100, + "daily_reflection_time": 180, + "daily_reflection_size": 5, + "overlap_reflect_th": 4, + "kw_strg_event_reflect_th": 10, + "kw_strg_thought_reflect_th": 9, + + "recency_w": 1, + "relevance_w": 1, + "importance_w": 1, + "recency_decay": 0.99, + "importance_trigger_max": 150, + "importance_trigger_curr": 150, + "importance_ele_n": 0, + "thought_count": 5, + + "daily_req": [], + "f_daily_schedule": [], + "f_daily_schedule_hourly_org": [], + "act_address": null, + "act_start_time": null, + "act_duration": null, + "act_description": null, + "act_pronunciatio": null, + "act_event": ["Maria Lopez", null, null], + "act_obj_description": null, + "act_obj_pronunciatio": null, + "act_obj_event": [null, null, null], + "chatting_with": null, + "chat": null, + "chatting_with_buffer": {}, + "chatting_end_time": null, + "act_path_set": false, + "planned_path": [] +} \ No newline at end of file diff --git a/environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Maria Lopez/bootstrap_memory/spatial_memory.json b/environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Maria Lopez/bootstrap_memory/spatial_memory.json new file mode 100644 index 0000000000..0a58212bda --- /dev/null +++ b/environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Maria Lopez/bootstrap_memory/spatial_memory.json @@ -0,0 +1,87 @@ +{ + "the Ville": { + "Oak Hill College": { + "hallway": [], + "library": [ + "library sofa", + "library table", + "bookshelf" + ], + "classroom": [ + "blackboard", + "classroom podium", + "classroom student seating" + ] + }, + "Dorm for Oak Hill College": { + "garden": [ + "dorm garden" + ], + "Maria Lopez's room": [ + "closet", + "desk", + "bed", + "computer", + "blackboard" + ], + "woman's bathroom": [ + "toilet", + "shower", + "bathroom sink" + ], + "common room": [ + "common room sofa", + "pool table", + "common room table" + ], + "man's bathroom": [ + "shower", + "bathroom sink", + "toilet" + ] + }, + "The Willows Market and Pharmacy": { + "store": [ + "grocery store shelf", + "behind the grocery counter", + "grocery store counter", + "pharmacy store shelf", + "pharmacy store counter", + "behind the pharmacy counter" + ] + }, + "Harvey Oak Supply Store": { + "supply store": [ + "supply store product shelf", + "behind the supply store counter", + "supply store counter" + ] + }, + "Johnson Park": { + "park": [ + "park garden" + ] + }, + "The Rose and Crown Pub": { + "pub": [ + "shelf", + "refrigerator", + "bar customer seating", + "behind the bar counter", + "kitchen sink", + "cooking area", + "microphone" + ] + }, + "Hobbs Cafe": { + "cafe": [ + "refrigerator", + "cafe customer seating", + "cooking area", + "kitchen sink", + "behind the cafe counter", + "piano" + ] + } + } +} \ No newline at end of file diff --git a/environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/reverie/meta.json b/environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/reverie/meta.json new file mode 100644 index 0000000000..1e81ec12d2 --- /dev/null +++ b/environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/reverie/meta.json @@ -0,0 +1,13 @@ +{ + "fork_sim_code": "base_the_ville_isabella_maria_klaus", + "start_date": "February 13, 2023", + "curr_time": "February 13, 2023, 00:00:00", + "sec_per_step": 10, + "maze_name": "the_ville", + 
"persona_names": [ + "Isabella Rodriguez", + "Maria Lopez", + "Klaus Mueller" + ], + "step": 0 +} \ No newline at end of file diff --git a/output.txt b/output.txt deleted file mode 100644 index 3e3298f424..0000000000 --- a/output.txt +++ /dev/null @@ -1,7 +0,0 @@ -Work sessions 9/27/24 -Changes -- pulled Connor's branch -- updated to gpt-4o-mini -- added type hinting to run_gpt_prompt.py using List[str] rather than list[str] and other places in there -- installed the new requirement.txt -- added new gpt-key \ No newline at end of file diff --git a/print_conversations.py b/print_conversations.py index 675c730300..34de32d23f 100644 --- a/print_conversations.py +++ b/print_conversations.py @@ -1,46 +1,9 @@ import glob import os -import glob -import os import sys import json -import re def get_unique_conversations(simulation_name): -<<<<<<< HEAD - - step_folder = f"environment/frontend_server/storage/base_search_and_rescue/{simulation_name}/movement" - print(os.listdir()) - # Iterate over all files in the simulation folder - for filename in os.listdir(step_folder): - filepath = os.path.join(step_folder, filename) - try: - with open(filepath, "r") as file: - data = json.load(file) - for k, v in data.items(): - print(k) - if(k=='persona'): - for key, value in v.items(): - print(f' {key}') - for attribute, val in value.items(): - if attribute!='chat' or attribute=='chat' and val is None: - print(f' {attribute}: {val}') - else: - print(f' {attribute}:') - for convo in val: - print(f' {convo[0]}: {convo[1]}') - - else: - for key,value in v.items(): - print(f' {key}: {value}') - print('\n') - except json.JSONDecodeError: - print("Failed to decode JSON. Please check the file format.") - except Exception as e: - print(f"An error occurred: {e}") - - -======= step_folder = "environment/frontend_server/storage" # Use glob to find all files that start with the simulation_name @@ -72,7 +35,6 @@ def get_unique_conversations(simulation_name): break return file_contents ->>>>>>> origin/henry-dev-rebase def write_conversations_to_file(conversations, simulation_name): output_directory = "logs/conversations" @@ -90,17 +52,10 @@ def write_conversations_to_file(conversations, simulation_name): sys.exit(1) simulation_name = sys.argv[1] - ''' unique_conversations = get_unique_conversations(simulation_name) -<<<<<<< HEAD - print(json.dumps(unique_conversations, indent=2)) - ''' - get_unique_conversations(simulation_name) -======= if unique_conversations: write_conversations_to_file(unique_conversations, simulation_name) print(f"Unique conversations written to {simulation_name}_highlights.txt") else: - print("No unique conversations found.") ->>>>>>> origin/henry-dev-rebase + print("No unique conversations found.") \ No newline at end of file diff --git a/reverie/backend_server/persona/prompt_template/run_gpt_prompt.py b/reverie/backend_server/persona/prompt_template/run_gpt_prompt.py index 5f4afbe42c..307a65d804 100644 --- a/reverie/backend_server/persona/prompt_template/run_gpt_prompt.py +++ b/reverie/backend_server/persona/prompt_template/run_gpt_prompt.py @@ -16,7 +16,6 @@ import string import traceback from pydantic import BaseModel -from typing import List import sys sys.path.append('../../') @@ -118,7 +117,7 @@ def get_fail_safe(): class DailyPlan(BaseModel): - daily_plan: List[str] + daily_plan: list[str] def run_gpt_prompt_daily_plan(persona, wake_up_hour, @@ -196,7 +195,7 @@ class Activity(BaseModel): activity: str class HourlySchedule(BaseModel): - hourly_schedule: List[Activity] + hourly_schedule: 
list[Activity] def run_gpt_prompt_generate_hourly_schedule( persona, @@ -342,7 +341,7 @@ class Subtask(BaseModel): minutes_left: int class TaskDecomposition(BaseModel): - subtasks: List[Subtask] + subtasks: list[Subtask] def run_gpt_prompt_task_decomp(persona, task, @@ -1117,7 +1116,7 @@ class NewActivity(BaseModel): subtask: str class NewSchedule(BaseModel): - schedule: List[NewActivity] + schedule: list[NewActivity] def run_gpt_prompt_new_decomp_schedule(persona, main_act_dur, diff --git a/reverie/backend_server/persona/prompt_template/structured_output.py b/reverie/backend_server/persona/prompt_template/structured_output.py deleted file mode 100644 index dfc2757c71..0000000000 --- a/reverie/backend_server/persona/prompt_template/structured_output.py +++ /dev/null @@ -1,22 +0,0 @@ -from pydantic import BaseModel -from openai import OpenAI - -client = OpenAI() - -class Movements(BaseModel): - x_pos: int - y_pos: int - message: str - -class Person(BaseModel): - name: str - actions: list[Movements] - -completion = client.beta.chat.completions.parse( - model="gpt-4o-2024-08-06", - messages=[ {"role": "system", "content": "You are overseeing a town of people interacting with one another, pulling data from what they see and do"}, - {"role": "user", "content": "Give the daily plan of these people"} - ], - response_format=Person,) - -math_reasoning = completion.choices[0].message.parsed \ No newline at end of file diff --git a/reverie/backend_server/survey.ipynb b/reverie/backend_server/survey.ipynb index 6255ad8e5e..209f209d74 100644 --- a/reverie/backend_server/survey.ipynb +++ b/reverie/backend_server/survey.ipynb @@ -277,7 +277,7 @@ " with open(path, \"rb\") as f:\n", " return pickle.load(f)\n", "\n", - "gpt_param = {\"engine\": \"gpt-4o-mini\", \"max_tokens\": 250, \n", + "gpt_param = {\"engine\": \"gpt-35-turbo-0125\", \"max_tokens\": 250, \n", " \"temperature\": 0, \"top_p\": 1, \"stream\": False,\n", " \"frequency_penalty\": 0, \"presence_penalty\": 0, \"stop\": None}\n", "\n", diff --git a/reverie/backend_server/test.py b/reverie/backend_server/test.py index 60924c2ad6..f7663c3a5f 100644 --- a/reverie/backend_server/test.py +++ b/reverie/backend_server/test.py @@ -5,11 +5,9 @@ Description: Wrapper functions for calling OpenAI APIs. 
""" from openai import OpenAI +from utils import openai_api_key client = OpenAI(api_key=openai_api_key) -import time - -from utils import * def ChatGPT_request(prompt): """ diff --git a/reverie/backend_server/utils.py b/reverie/backend_server/utils.py deleted file mode 100644 index 1d105f13fb..0000000000 --- a/reverie/backend_server/utils.py +++ /dev/null @@ -1,20 +0,0 @@ -# Copy and paste your OpenAI API Key -openai_api_key = "" -# Put your name -key_owner = "Jonathan" - -maze_assets_loc = "../../environment/frontend_server/static_dirs/assets" -env_matrix = f"{maze_assets_loc}/the_ville/matrix" -env_visuals = f"{maze_assets_loc}/the_ville/visuals" - -fs_storage = "../../environment/frontend_server/storage" -fs_temp_storage = "../../environment/frontend_server/temp_storage" - -collision_block_id = "32125" - -# Verbose -debug = True - -use_openai = True -# If you're not using OpenAI, define api_model -# api_model = "" \ No newline at end of file diff --git a/run_backend.sh b/run_backend.sh index 0dfc135ab0..61dfc8be86 100755 --- a/run_backend.sh +++ b/run_backend.sh @@ -7,7 +7,7 @@ LOGS_PATH="../../logs" echo "Running backend server at: http://127.0.0.1:8000/simulator_home" cd ${BACKEND_SCRIPT_PATH} -source /home/${USER}/miniconda3/bin/activate ${CONDA_ENV} +source /home/${USER}/anaconda3/bin/activate ${CONDA_ENV} timestamp=$(date +"%Y-%m-%d_%H-%M-%S") echo "Timestamp: ${timestamp}" diff --git a/run_backend_automatic.sh b/run_backend_automatic.sh index 97b6d1b901..513ef637ae 100755 --- a/run_backend_automatic.sh +++ b/run_backend_automatic.sh @@ -7,7 +7,7 @@ LOGS_PATH="../../logs" FILE_NAME="Bash-Script" cd ${BACKEND_SCRIPT_PATH} -source /home/${USER}/miniconda3/bin/activate ${CONDA_ENV} +source /home/${USER}/anaconda3/bin/activate ${CONDA_ENV} ARGS="" while [[ $# -gt 0 ]]; do diff --git a/test.py b/test.py deleted file mode 100644 index cb5cf91305..0000000000 --- a/test.py +++ /dev/null @@ -1,48 +0,0 @@ -def ChatGPT_safe_generate_response( - prompt, - example_output, - special_instruction, - repeat=3, - fail_safe_response="error", - func_validate=None, - func_clean_up=None, - verbose=False, -): - - if func_validate and func_clean_up: - # prompt = 'GPT-3 Prompt:\n"""\n' + prompt + '\n"""\n' - prompt = '"""\n' + prompt + '\n"""\n' - prompt += ( - f"Output the response to the prompt above in json. 
{special_instruction}\n" - ) - prompt += "Example output json:\n" - prompt += '{"output": "' + str(example_output) + '"}' - - if verbose: - print("LLM PROMPT") - print(prompt) - - for i in range(repeat): - try: - chatgpt_response = ChatGPT_request(prompt) - if not chatgpt_response: - raise Exception("No valid response from LLM.") - curr_gpt_response = chatgpt_response.strip() - end_index = curr_gpt_response.rfind("}") + 1 - curr_gpt_response = curr_gpt_response[:end_index] - curr_gpt_response = json.loads(curr_gpt_response)["output"] - - if verbose: - print("---- repeat count:", i) - print("~~~~ curr_gpt_response:") - print(curr_gpt_response) - print("~~~~") - - if func_validate(curr_gpt_response, prompt=prompt): - return func_clean_up(curr_gpt_response, prompt=prompt) - - except Exception as e: - print("ERROR:", e) - traceback.print_exc() - - return fail_safe_response \ No newline at end of file From 8d1c9912225756c5f13b111ba7a62c36e4998b68 Mon Sep 17 00:00:00 2001 From: chowington Date: Tue, 29 Oct 2024 13:33:21 -0400 Subject: [PATCH 21/21] More PR cleaning --- environment/frontend_server/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/environment/frontend_server/requirements.txt b/environment/frontend_server/requirements.txt index 61b15a9c3d..c7679c58b5 100644 --- a/environment/frontend_server/requirements.txt +++ b/environment/frontend_server/requirements.txt @@ -65,4 +65,4 @@ urllib3==1.26.7 wsproto==1.2.0 yarl==1.8.2 yellowbrick==1.3.post1 -zipp==3.6.0 \ No newline at end of file +zipp==3.6.0
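
As a quick sanity check on the simulation metadata introduced above: with `meta.json`'s `"sec_per_step": 10`, the step counts quoted in the README line up with simulated wall-clock time. A sketch, assuming the step size stays constant for the whole run:
```python
# Relating meta.json's "sec_per_step": 10 to the step counts quoted in the README.
sec_per_step = 10                           # from reverie/meta.json
steps_full_day = 24 * 3600 // sec_per_step  # 8640, consistent with the README's "~8650 (full day)"
steps_until_8am = 8 * 3600 // sec_per_step  # 2880, consistent with "~3000 (until ~8 a.m.)"
print(steps_full_day, steps_until_8am)      # simulations start at 00:00:00 per meta.json
```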