diff --git a/LowCodeLLM/README.md b/LowCodeLLM/README.md
index 60ae826c..57f12134 100644
--- a/LowCodeLLM/README.md
+++ b/LowCodeLLM/README.md
@@ -4,7 +4,7 @@
 See our paper: [Low-code LLM: Visual Programming over LLMs](https://arxiv.org/abs/2304.08103)
 
-In the future, [TaskMatrix.AI](https://arxiv.org/abs/2304.08103) can enhance task automation by breaking down tasks more effectively and utilizing existing foundation models and APIs of other AI models and systems to achieve diversified tasks in both digital and physical domains. And the low-code human-LLM interaction pattern can enhance user's experience on controling over the process and expressing their preference.
+In the future, [TaskMatrix.AI](https://arxiv.org/abs/2304.08103) can enhance task automation by breaking down tasks more effectively and utilizing existing foundation models and APIs of other AI models and systems to achieve diversified tasks in both digital and physical domains. The low-code human-LLM interaction pattern can also improve the user's experience of controlling the process and expressing their preferences.
 
 ## Video Demo
 
 https://user-images.githubusercontent.com/43716920/233937121-cd057f04-dec8-45b8-9c52-a9e9594eec80.mp4
diff --git a/LowCodeLLM/src/executingLLM.py b/LowCodeLLM/src/executingLLM.py
index bc4da92d..1442d3ac 100644
--- a/LowCodeLLM/src/executingLLM.py
+++ b/LowCodeLLM/src/executingLLM.py
@@ -5,7 +5,7 @@
 EXECUTING_LLM_PREFIX = """Executing LLM is designed to provide outstanding responses.
-Executing LLM will be given a overall task as the background of the conversation between the Executing LLM and human.
-When providing response, Executing LLM MUST STICTLY follow the provided standard operating procedure (SOP).
+Executing LLM will be given an overall task as the background of the conversation between the Executing LLM and human.
+When providing a response, Executing LLM MUST STRICTLY follow the provided standard operating procedure (SOP).
 the SOP is formatted as:
 '''
 STEP 1: [step name][step descriptions][[[if 'condition1'][Jump to STEP]], [[if 'condition2'][Jump to STEP]], ...]
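For orientation on the SOP line format quoted in `EXECUTING_LLM_PREFIX`: the workflow steps in this repo's test-case JSON carry `stepId`, `stepName`, `stepDescription`, and `jumpLogic` fields, and a step of that shape can be rendered into the SOP line as sketched below. `render_sop_step` is a hypothetical illustration, not code from this patch.

```python
# Hypothetical helper, for illustration only: renders one workflow step
# (using the field names from execute_test_cases.json) into the SOP line
# format quoted in EXECUTING_LLM_PREFIX.
def render_sop_step(step: dict) -> str:
    jumps = ", ".join(
        f"[[{jump['Condition']}][Jump to {jump['Target']}]]"
        for jump in step.get("jumpLogic", [])
    )
    return f"{step['stepId']}: [{step['stepName']}][{step['stepDescription']}][{jumps}]"

step = {
    "stepId": "STEP 3",
    "stepName": "Programming",
    "stepDescription": "Write the code for the game",
    "jumpLogic": [{"Condition": "if 'game mechanics are too complex'", "Target": "STEP 1"}],
}
print(render_sop_step(step))
# STEP 3: [Programming][Write the code for the game][[[if 'game mechanics are too complex'][Jump to STEP 1]]]
```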
diff --git a/LowCodeLLM/src/test/testcases/execute_test_cases.json b/LowCodeLLM/src/test/testcases/execute_test_cases.json
index a2d62b8e..4e755786 100644
--- a/LowCodeLLM/src/test/testcases/execute_test_cases.json
+++ b/LowCodeLLM/src/test/testcases/execute_test_cases.json
@@ -14,7 +14,7 @@
     {
         "task_prompt": "I want to write a two-player battle game.",
         "confirmed_workflow": "[{\"stepId\": \"STEP 1\", \"stepName\": \"Game Concept\", \"stepDescription\": \"Decide on the game concept and mechanics\", \"jumpLogic\": [], \"extension\": []}, {\"stepId\": \"STEP 2\", \"stepName\": \"Game Design\", \"stepDescription\": \"Create a rough sketch of the game, including the game board, characters, and rules\", \"jumpLogic\": [], \"extension\": []}, {\"stepId\": \"STEP 3\", \"stepName\": \"Programming\", \"stepDescription\": \"Write the code for the game\", \"jumpLogic\": [{\"Condition\": \"if 'game mechanics are too complex'\", \"Target\": \"STEP 1\"}], \"extension\": []}, {\"stepId\": \"STEP 4\", \"stepName\": \"Testing\", \"stepDescription\": \"Test the game for bugs and glitches\", \"jumpLogic\": [{\"Condition\": \"if 'gameplay is not balanced'\", \"Target\": \"STEP 2\"}], \"extension\": []}, {\"stepId\": \"STEP 5\", \"stepName\": \"Polishing\", \"stepDescription\": \"Add finishing touches to the game, including graphics and sound effects\", \"jumpLogic\": [], \"extension\": []}, {\"stepId\": \"STEP 6\", \"stepName\": \"Release\", \"stepDescription\": \"Publish the game for players to enjoy\", \"jumpLogic\": [], \"extension\": []}]",
-        "history": [{"role": "asistant", "content": "sure, I can write it for you, do you want me show you the code?"}],
+        "history": [{"role": "assistant", "content": "sure, I can write it for you, do you want me to show you the code?"}],
         "curr_input": "Sure."
     }
 ]
\ No newline at end of file
diff --git a/visual_chatgpt.py b/visual_chatgpt.py
index 17dc4def..2ac7d764 100644
--- a/visual_chatgpt.py
+++ b/visual_chatgpt.py
@@ -831,7 +831,7 @@ def show_mask(self, mask: np.ndarray,image: np.ndarray,
         Outputs:
             np.ndarray: A 3D array of shape (H, W, 3) with the mask visualized on top of the image.
-            transparenccy: the transparency of the segmentation mask
+            transparency: the transparency of the segmentation mask
         """
         if random_color:
@@ -958,7 +958,7 @@ def segment_image_with_coordinate(self, img, is_positive: bool,
             result_mask (numpy.ndarray): the result mask, shape: H x W
         Other parameters:
-            transparency (float): the transparenccy of the mask
-            to control he degree of transparency after the mask is superimposed.
+            transparency (float): the transparency of the mask
+            to control the degree of transparency after the mask is superimposed.
             if transparency=1, then the masked part will be completely replaced with other colors.
         '''
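Both docstring fixes above concern the same `transparency` parameter. As a rough sketch of the blending those docstrings describe (my own illustration of the stated semantics, not the repository's implementation):

```python
import numpy as np

def overlay_mask(image: np.ndarray, mask: np.ndarray,
                 color=(30, 144, 255), transparency: float = 0.7) -> np.ndarray:
    """Illustrative alpha blend: paint `color` over the masked pixels.

    image: H x W x 3 uint8 array; mask: H x W boolean array.
    With transparency=1 the masked region is fully replaced by `color`,
    matching the behavior the corrected docstring describes.
    """
    out = image.astype(np.float32)
    color_arr = np.asarray(color, dtype=np.float32)
    out[mask] = (1 - transparency) * out[mask] + transparency * color_arr
    return out.astype(np.uint8)
```

At `transparency=0.7` the original pixels still show through; at `1.0` the masked part is replaced outright, as the corrected docstring says.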
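Returning to the execute_test_cases.json hunk above: the misspelled role is the kind of fixture error a small schema check catches before a test silently runs against bad data. A minimal sketch, assuming the usual system/user/assistant role set; the helper itself is hypothetical, not part of the repository:

```python
import json

ALLOWED_ROLES = {"system", "user", "assistant"}

def check_history_roles(test_cases: list) -> None:
    """Raise if any chat-history entry uses an unknown role.

    This would have flagged the misspelled "asistant" role in
    execute_test_cases.json before any test consumed it.
    """
    for i, case in enumerate(test_cases):
        for turn in case.get("history", []):
            if turn["role"] not in ALLOWED_ROLES:
                raise ValueError(f"test case {i}: unknown role {turn['role']!r}")

with open("LowCodeLLM/src/test/testcases/execute_test_cases.json") as f:
    check_history_roles(json.load(f))
```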
@@ -1163,7 +1163,7 @@ def inference(self, inputs):
         updated_image = image_with_box.resize(size)
         updated_image.save(updated_image_path)
         print(
-            f"\nProcessed ObejectDetecting, Input Image: {image_path}, Object to be Detect {det_prompt}, "
+            f"\nProcessed ObjectDetecting, Input Image: {image_path}, Object to be Detected: {det_prompt}, "
             f"Output Image: {updated_image_path}")
         return updated_image_path
@@ -1292,7 +1292,7 @@ def __init__(self, Text2Box:Text2Box, Segmenting:Segmenting):
                  description="useful when you only want to segment the certain objects in the picture"
                              "according to the given text"
                              "like: segment the cat,"
-                             "or can you segment an obeject for me"
+                             "or can you segment an object for me"
                              "The input to this tool should be a comma separated string of two, "
                              "representing the image_path, the text description of the object to be found")
     def inference(self, inputs):
@@ -1303,7 +1303,7 @@ def inference(self, inputs):
         boxes_filt, pred_phrases = self.grounding.get_grounding_boxes(image, det_prompt)
         updated_image_path = self.sam.segment_image_with_boxes(image_pil,image_path,boxes_filt,pred_phrases)
         print(
-            f"\nProcessed ObejectSegmenting, Input Image: {image_path}, Object to be Segment {det_prompt}, "
+            f"\nProcessed ObjectSegmenting, Input Image: {image_path}, Object to be Segmented: {det_prompt}, "
             f"Output Image: {updated_image_path}")
         return updated_image_path
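One observation beyond the typo fixes, on lines this patch does not change: Python joins adjacent string literals with no separator, so most fragments of the `description` above run together at runtime ("...in the pictureaccording to the given text..."). Each fragment needs its own trailing space, as the last two already have. A quick demonstration:

```python
# Adjacent string literals are concatenated with no separator,
# so fragments need explicit trailing spaces or the words run together.
broken = ("useful when you only want to segment the certain objects in the picture"
          "according to the given text")
fixed = ("useful when you only want to segment the certain objects in the picture "
         "according to the given text")
assert "pictureaccording" in broken   # words fused
assert "picture according" in fixed   # separated as intended
```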