fix: async invocation

VinciGit00 committed Oct 13, 2024
1 parent 0e4ff09 commit c2179ab
Showing 15 changed files with 23 additions and 23 deletions.
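Every hunk in this commit is the same one-token change: the async `ainvoke` call is replaced by its synchronous counterpart `invoke`. All of these call sites are ordinary synchronous functions, so the coroutine returned by `ainvoke` was never awaited and the raw coroutine object, not the model output, ended up in the graph state. The sketch below reproduces the failure mode; the chain itself is hypothetical (it assumes `langchain-openai` is installed and `OPENAI_API_KEY` is set), but the `invoke`/`ainvoke` semantics are standard LangChain.

# Minimal sketch of the bug this commit fixes; the model binding is an assumption.
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import PromptTemplate
from langchain_openai import ChatOpenAI  # hypothetical model choice

chain = (
    PromptTemplate.from_template("Answer briefly: {question}")
    | ChatOpenAI()
    | StrOutputParser()
)

# Before: the async variant called from sync code returns a coroutine.
broken = chain.ainvoke({"question": "What does robots.txt do?"})
print(type(broken))  # <class 'coroutine'>, the LLM was never actually called
broken.close()       # discard the never-awaited coroutine cleanly

# After: the sync variant blocks and returns the parsed string.
answer = chain.invoke({"question": "What does robots.txt do?"})
print(answer)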
2 changes: 1 addition & 1 deletion scrapegraphai/builders/graph_builder.py
@@ -119,7 +119,7 @@ def build_graph(self):
         Returns:
             dict: A JSON representation of the graph configuration.
         """
-        return self.chain.ainvoke(self.prompt)
+        return self.chain.invoke(self.prompt)
 
     @staticmethod
     def convert_json_to_graphviz(json_data, format: str = 'pdf'):
2 changes: 1 addition & 1 deletion scrapegraphai/nodes/generate_answer_csv_node.py
@@ -126,7 +126,7 @@ def execute(self, state):
         )
 
         chain = prompt | self.llm_model | output_parser
-        answer = chain.ainvoke({"question": user_prompt})
+        answer = chain.invoke({"question": user_prompt})
         state.update({self.output[0]: answer})
         return state
 
2 changes: 1 addition & 1 deletion scrapegraphai/nodes/generate_answer_node_k_level.py
@@ -143,7 +143,7 @@ def execute(self, state: dict) -> dict:
         merge_chain = merge_prompt | self.llm_model
         if output_parser:
             merge_chain = merge_chain | output_parser
-        answer = merge_chain.ainvoke({"context": batch_results, "question": user_prompt})
+        answer = merge_chain.invoke({"context": batch_results, "question": user_prompt})
 
         state["answer"] = answer
 
4 changes: 2 additions & 2 deletions scrapegraphai/nodes/generate_answer_omni_node.py
@@ -117,7 +117,7 @@ def execute(self, state: dict) -> dict:
         )
 
         chain = prompt | self.llm_model | output_parser
-        answer = chain.ainvoke({"question": user_prompt})
+        answer = chain.invoke({"question": user_prompt})
 
         state.update({self.output[0]: answer})
         return state
@@ -149,7 +149,7 @@ def execute(self, state: dict) -> dict:
         )
 
         merge_chain = merge_prompt | self.llm_model | output_parser
-        answer = merge_chain.ainvoke({"context": batch_results, "question": user_prompt})
+        answer = merge_chain.invoke({"context": batch_results, "question": user_prompt})
 
         state.update({self.output[0]: answer})
         return state
4 changes: 2 additions & 2 deletions scrapegraphai/nodes/generate_code_node.py
@@ -325,7 +325,7 @@ def generate_initial_code(self, state: dict) -> str:
         output_parser = StrOutputParser()
 
         chain = prompt | self.llm_model | output_parser
-        generated_code = chain.ainvoke({})
+        generated_code = chain.invoke({})
         return generated_code
 
     def semantic_comparison(self, generated_result: Any, reference_result: Any) -> Dict[str, Any]:
@@ -368,7 +368,7 @@ def semantic_comparison(self, generated_result: Any, reference_result: Any) -> Dict[str, Any]:
         )
 
         chain = prompt | self.llm_model | output_parser
-        return chain.ainvoke({
+        return chain.invoke({
             "generated_result": json.dumps(generated_result, indent=2),
             "reference_result": json.dumps(reference_result_dict, indent=2)
         })
2 changes: 1 addition & 1 deletion scrapegraphai/nodes/generate_scraper_node.py
@@ -130,7 +130,7 @@ def execute(self, state: dict) -> dict:
         )
         map_chain = prompt | self.llm_model | StrOutputParser()
 
-        answer = map_chain.ainvoke({"question": user_prompt})
+        answer = map_chain.invoke({"question": user_prompt})
 
         state.update({self.output[0]: answer})
         return state
2 changes: 1 addition & 1 deletion scrapegraphai/nodes/html_analyzer_node.py
@@ -93,7 +93,7 @@ def execute(self, state: dict) -> dict:
         output_parser = StrOutputParser()
 
         chain = prompt | self.llm_model | output_parser
-        html_analysis = chain.ainvoke({})
+        html_analysis = chain.invoke({})
 
         state.update({self.output[0]: html_analysis, self.output[1]: reduced_html})
         return state
2 changes: 1 addition & 1 deletion scrapegraphai/nodes/merge_answers_node.py
@@ -95,7 +95,7 @@ def execute(self, state: dict) -> dict:
         )
 
         merge_chain = prompt_template | self.llm_model | output_parser
-        answer = merge_chain.ainvoke({"user_prompt": user_prompt})
+        answer = merge_chain.invoke({"user_prompt": user_prompt})
         answer["sources"] = state.get("urls", [])
 
         state.update({self.output[0]: answer})
2 changes: 1 addition & 1 deletion scrapegraphai/nodes/merge_generated_scripts_node.py
@@ -74,7 +74,7 @@ def execute(self, state: dict) -> dict:
         )
 
         merge_chain = prompt_template | self.llm_model | StrOutputParser()
-        answer = merge_chain.ainvoke({"user_prompt": user_prompt})
+        answer = merge_chain.invoke({"user_prompt": user_prompt})
 
         state.update({self.output[0]: answer})
         return state
2 changes: 1 addition & 1 deletion scrapegraphai/nodes/prompt_refiner_node.py
@@ -96,7 +96,7 @@ def execute(self, state: dict) -> dict:
         output_parser = StrOutputParser()
 
         chain = prompt | self.llm_model | output_parser
-        refined_prompt = chain.ainvoke({})
+        refined_prompt = chain.invoke({})
 
         state.update({self.output[0]: refined_prompt})
         return state
2 changes: 1 addition & 1 deletion scrapegraphai/nodes/reasoning_node.py
@@ -91,7 +91,7 @@ def execute(self, state: dict) -> dict:
         output_parser = StrOutputParser()
 
         chain = prompt | self.llm_model | output_parser
-        refined_prompt = chain.ainvoke({})
+        refined_prompt = chain.invoke({})
 
         state.update({self.output[0]: refined_prompt})
         return state
2 changes: 1 addition & 1 deletion scrapegraphai/nodes/robots_node.py
@@ -108,7 +108,7 @@ def execute(self, state: dict) -> dict:
         )
 
         chain = prompt | self.llm_model | output_parser
-        is_scrapable = chain.ainvoke({"path": source})[0]
+        is_scrapable = chain.invoke({"path": source})[0]
 
         if "no" in is_scrapable:
             self.logger.warning(
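This call site shows why the bug could not fail silently here: the chain result is subscripted immediately, and a coroutine object is not subscriptable, so the old `ainvoke` version raised a TypeError at runtime instead of quietly storing a coroutine. A self-contained sketch, using a stand-in async function since the real chain needs a live LLM:

import asyncio

async def fake_chain_ainvoke(inputs: dict) -> list:
    # Stand-in for (prompt | llm_model | output_parser).ainvoke(...)
    return ["no"]

coro = fake_chain_ainvoke({"path": "https://example.com"})
try:
    coro[0]  # what `chain.ainvoke({"path": source})[0]` effectively did
except TypeError as exc:
    print(exc)  # 'coroutine' object is not subscriptable
coro.close()  # discard the never-awaited coroutine cleanly

# Awaiting (or, as in this commit, using the sync `invoke`) restores the list:
is_scrapable = asyncio.run(fake_chain_ainvoke({"path": "https://example.com"}))[0]
print("no" in is_scrapable)  # True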
2 changes: 1 addition & 1 deletion scrapegraphai/nodes/search_link_node.py
@@ -142,7 +142,7 @@ def execute(self, state: dict) -> dict:
                 input_variables=["content", "user_prompt"],
             )
             merge_chain = merge_prompt | self.llm_model | output_parser
-            answer = merge_chain.ainvoke(
+            answer = merge_chain.invoke(
                 {"content": chunk.page_content}
             )
             relevant_links += answer
8 changes: 4 additions & 4 deletions scrapegraphai/utils/code_error_analysis.py
@@ -33,7 +33,7 @@ def syntax_focused_analysis(state: dict, llm_model) -> str:
     prompt = PromptTemplate(template=TEMPLATE_SYNTAX_ANALYSIS,
                             input_variables=["generated_code", "errors"])
     chain = prompt | llm_model | StrOutputParser()
-    return chain.ainvoke({
+    return chain.invoke({
         "generated_code": state["generated_code"],
         "errors": state["errors"]["syntax"]
     })
@@ -53,7 +53,7 @@ def execution_focused_analysis(state: dict, llm_model) -> str:
                             input_variables=["generated_code", "errors",
                                              "html_code", "html_analysis"])
     chain = prompt | llm_model | StrOutputParser()
-    return chain.ainvoke({
+    return chain.invoke({
         "generated_code": state["generated_code"],
         "errors": state["errors"]["execution"],
         "html_code": state["html_code"],
@@ -76,7 +76,7 @@ def validation_focused_analysis(state: dict, llm_model) -> str:
                             input_variables=["generated_code", "errors",
                                              "json_schema", "execution_result"])
     chain = prompt | llm_model | StrOutputParser()
-    return chain.ainvoke({
+    return chain.invoke({
         "generated_code": state["generated_code"],
         "errors": state["errors"]["validation"],
         "json_schema": state["json_schema"],
@@ -100,7 +100,7 @@ def semantic_focused_analysis(state: dict, comparison_result: Dict[str, Any], llm_model) -> str:
                             input_variables=["generated_code",
                                              "differences", "explanation"])
     chain = prompt | llm_model | StrOutputParser()
-    return chain.ainvoke({
+    return chain.invoke({
         "generated_code": state["generated_code"],
         "differences": json.dumps(comparison_result["differences"], indent=2),
         "explanation": comparison_result["explanation"]
8 changes: 4 additions & 4 deletions scrapegraphai/utils/code_error_correction.py
@@ -32,7 +32,7 @@ def syntax_focused_code_generation(state: dict, analysis: str, llm_model) -> str:
     prompt = PromptTemplate(template=TEMPLATE_SYNTAX_CODE_GENERATION,
                             input_variables=["analysis", "generated_code"])
    chain = prompt | llm_model | StrOutputParser()
-    return chain.ainvoke({
+    return chain.invoke({
         "analysis": analysis,
         "generated_code": state["generated_code"]
     })
@@ -52,7 +52,7 @@ def execution_focused_code_generation(state: dict, analysis: str, llm_model) -> str:
     prompt = PromptTemplate(template=TEMPLATE_EXECUTION_CODE_GENERATION,
                             input_variables=["analysis", "generated_code"])
     chain = prompt | llm_model | StrOutputParser()
-    return chain.ainvoke({
+    return chain.invoke({
         "analysis": analysis,
         "generated_code": state["generated_code"]
     })
@@ -72,7 +72,7 @@ def validation_focused_code_generation(state: dict, analysis: str, llm_model) -> str:
     prompt = PromptTemplate(template=TEMPLATE_VALIDATION_CODE_GENERATION,
                             input_variables=["analysis", "generated_code", "json_schema"])
     chain = prompt | llm_model | StrOutputParser()
-    return chain.ainvoke({
+    return chain.invoke({
         "analysis": analysis,
         "generated_code": state["generated_code"],
         "json_schema": state["json_schema"]
@@ -93,7 +93,7 @@ def semantic_focused_code_generation(state: dict, analysis: str, llm_model) -> str:
     prompt = PromptTemplate(template=TEMPLATE_SEMANTIC_CODE_GENERATION,
                             input_variables=["analysis", "generated_code", "generated_result", "reference_result"])
     chain = prompt | llm_model | StrOutputParser()
-    return chain.ainvoke({
+    return chain.invoke({
         "analysis": analysis,
         "generated_code": state["generated_code"],
         "generated_result": json.dumps(state["execution_result"], indent=2),
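The opposite fix would also have been coherent: keep `ainvoke` and turn each caller into a coroutine, which pays off only if the surrounding pipeline can actually await it. A sketch of that alternative for one of the utils helpers; the async wrapper and its template string are hypothetical stand-ins, not part of ScrapeGraphAI:

from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import PromptTemplate

async def syntax_focused_analysis_async(state: dict, llm_model) -> str:
    # Hypothetical async variant; the template below is a placeholder for
    # the real TEMPLATE_SYNTAX_ANALYSIS, and the actual helper is synchronous.
    prompt = PromptTemplate(
        template="Analyze these syntax errors:\n{errors}\n\nCode:\n{generated_code}",
        input_variables=["generated_code", "errors"],
    )
    chain = prompt | llm_model | StrOutputParser()
    return await chain.ainvoke({
        "generated_code": state["generated_code"],
        "errors": state["errors"]["syntax"],
    })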
