Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

fix some typo errors #445

Open
wants to merge 1 commit into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion LowCodeLLM/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,7 @@

See our paper: [Low-code LLM: Visual Programming over LLMs](https://arxiv.org/abs/2304.08103)

In the future, [TaskMatrix.AI](https://arxiv.org/abs/2304.08103) can enhance task automation by breaking down tasks more effectively and utilizing existing foundation models and APIs of other AI models and systems to achieve diversified tasks in both digital and physical domains. And the low-code human-LLM interaction pattern can enhance user's experience on controling over the process and expressing their preference.
In the future, [TaskMatrix.AI](https://arxiv.org/abs/2304.08103) can enhance task automation by breaking down tasks more effectively and utilizing existing foundation models and APIs of other AI models and systems to achieve diversified tasks in both digital and physical domains. And the low-code human-LLM interaction pattern can enhance user's experience on controlling over the process and expressing their preference.

## Video Demo
https://user-images.githubusercontent.com/43716920/233937121-cd057f04-dec8-45b8-9c52-a9e9594eec80.mp4
Expand Down
2 changes: 1 addition & 1 deletion LowCodeLLM/src/executingLLM.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@

EXECUTING_LLM_PREFIX = """Executing LLM is designed to provide outstanding responses.
Executing LLM will be given an overall task as the background of the conversation between the Executing LLM and human.
When providing response, Executing LLM MUST STICTLY follow the provided standard operating procedure (SOP).
When providing response, Executing LLM MUST STRICTLY follow the provided standard operating procedure (SOP).
the SOP is formatted as:
'''
STEP 1: [step name][step descriptions][[[if 'condition1'][Jump to STEP]], [[if 'condition2'][Jump to STEP]], ...]
Expand Down
2 changes: 1 addition & 1 deletion LowCodeLLM/src/test/testcases/execute_test_cases.json
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,7 @@
{
"task_prompt": "I want to write a two-player battle game.",
"confirmed_workflow": "[{\"stepId\": \"STEP 1\", \"stepName\": \"Game Concept\", \"stepDescription\": \"Decide on the game concept and mechanics\", \"jumpLogic\": [], \"extension\": []}, {\"stepId\": \"STEP 2\", \"stepName\": \"Game Design\", \"stepDescription\": \"Create a rough sketch of the game, including the game board, characters, and rules\", \"jumpLogic\": [], \"extension\": []}, {\"stepId\": \"STEP 3\", \"stepName\": \"Programming\", \"stepDescription\": \"Write the code for the game\", \"jumpLogic\": [{\"Condition\": \"if 'game mechanics are too complex'\", \"Target\": \"STEP 1\"}], \"extension\": []}, {\"stepId\": \"STEP 4\", \"stepName\": \"Testing\", \"stepDescription\": \"Test the game for bugs and glitches\", \"jumpLogic\": [{\"Condition\": \"if 'gameplay is not balanced'\", \"Target\": \"STEP 2\"}], \"extension\": []}, {\"stepId\": \"STEP 5\", \"stepName\": \"Polishing\", \"stepDescription\": \"Add finishing touches to the game, including graphics and sound effects\", \"jumpLogic\": [], \"extension\": []}, {\"stepId\": \"STEP 6\", \"stepName\": \"Release\", \"stepDescription\": \"Publish the game for players to enjoy\", \"jumpLogic\": [], \"extension\": []}]",
"history": [{"role": "asistant", "content": "sure, I can write it for you, do you want me show you the code?"}],
"history": [{"role": "assistant", "content": "sure, I can write it for you, do you want me show you the code?"}],
"curr_input": "Sure."
}
]
10 changes: 5 additions & 5 deletions visual_chatgpt.py
Original file line number Diff line number Diff line change
Expand Up @@ -831,7 +831,7 @@ def show_mask(self, mask: np.ndarray,image: np.ndarray,
Outputs:
np.ndarray: A 3D array of shape (H, W, 3) with the mask
visualized on top of the image.
transparenccy: the transparency of the segmentation mask
transparency: the transparency of the segmentation mask
"""

if random_color:
Expand Down Expand Up @@ -958,7 +958,7 @@ def segment_image_with_coordinate(self, img, is_positive: bool,
result_mask (numpy.ndarray): the result mask, shape: H x W

Other parameters:
transparency (float): the transparenccy of the mask
transparency (float): the transparency of the mask
to control the degree of transparency after the mask is superimposed.
if transparency=1, then the masked part will be completely replaced with other colors.
'''
Expand Down Expand Up @@ -1163,7 +1163,7 @@ def inference(self, inputs):
updated_image = image_with_box.resize(size)
updated_image.save(updated_image_path)
print(
f"\nProcessed ObejectDetecting, Input Image: {image_path}, Object to be Detect {det_prompt}, "
f"\nProcessed ObjectDetecting, Input Image: {image_path}, Object to be Detect {det_prompt}, "
f"Output Image: {updated_image_path}")
return updated_image_path

Expand Down Expand Up @@ -1292,7 +1292,7 @@ def __init__(self, Text2Box:Text2Box, Segmenting:Segmenting):
description="useful when you only want to segment the certain objects in the picture"
"according to the given text"
"like: segment the cat,"
"or can you segment an obeject for me"
"or can you segment an object for me"
"The input to this tool should be a comma separated string of two, "
"representing the image_path, the text description of the object to be found")
def inference(self, inputs):
Expand All @@ -1303,7 +1303,7 @@ def inference(self, inputs):
boxes_filt, pred_phrases = self.grounding.get_grounding_boxes(image, det_prompt)
updated_image_path = self.sam.segment_image_with_boxes(image_pil,image_path,boxes_filt,pred_phrases)
print(
f"\nProcessed ObejectSegmenting, Input Image: {image_path}, Object to be Segment {det_prompt}, "
f"\nProcessed ObjectSegmenting, Input Image: {image_path}, Object to be Segment {det_prompt}, "
f"Output Image: {updated_image_path}")
return updated_image_path

Expand Down