Skip to content

Commit c2179ab

Browse files
committed
fix: async invocation
1 parent 0e4ff09 commit c2179ab

15 files changed

+23
-23
lines changed

scrapegraphai/builders/graph_builder.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -119,7 +119,7 @@ def build_graph(self):
119119
Returns:
120120
dict: A JSON representation of the graph configuration.
121121
"""
122-
return self.chain.ainvoke(self.prompt)
122+
return self.chain.invoke(self.prompt)
123123

124124
@staticmethod
125125
def convert_json_to_graphviz(json_data, format: str = 'pdf'):

scrapegraphai/nodes/generate_answer_csv_node.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -126,7 +126,7 @@ def execute(self, state):
126126
)
127127

128128
chain = prompt | self.llm_model | output_parser
129-
answer = chain.ainvoke({"question": user_prompt})
129+
answer = chain.invoke({"question": user_prompt})
130130
state.update({self.output[0]: answer})
131131
return state
132132

scrapegraphai/nodes/generate_answer_node_k_level.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -143,7 +143,7 @@ def execute(self, state: dict) -> dict:
143143
merge_chain = merge_prompt | self.llm_model
144144
if output_parser:
145145
merge_chain = merge_chain | output_parser
146-
answer = merge_chain.ainvoke({"context": batch_results, "question": user_prompt})
146+
answer = merge_chain.invoke({"context": batch_results, "question": user_prompt})
147147

148148
state["answer"] = answer
149149

scrapegraphai/nodes/generate_answer_omni_node.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -117,7 +117,7 @@ def execute(self, state: dict) -> dict:
117117
)
118118

119119
chain = prompt | self.llm_model | output_parser
120-
answer = chain.ainvoke({"question": user_prompt})
120+
answer = chain.invoke({"question": user_prompt})
121121

122122
state.update({self.output[0]: answer})
123123
return state
@@ -149,7 +149,7 @@ def execute(self, state: dict) -> dict:
149149
)
150150

151151
merge_chain = merge_prompt | self.llm_model | output_parser
152-
answer = merge_chain.ainvoke({"context": batch_results, "question": user_prompt})
152+
answer = merge_chain.invoke({"context": batch_results, "question": user_prompt})
153153

154154
state.update({self.output[0]: answer})
155155
return state

scrapegraphai/nodes/generate_code_node.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -325,7 +325,7 @@ def generate_initial_code(self, state: dict) -> str:
325325
output_parser = StrOutputParser()
326326

327327
chain = prompt | self.llm_model | output_parser
328-
generated_code = chain.ainvoke({})
328+
generated_code = chain.invoke({})
329329
return generated_code
330330

331331
def semantic_comparison(self, generated_result: Any, reference_result: Any) -> Dict[str, Any]:
@@ -368,7 +368,7 @@ def semantic_comparison(self, generated_result: Any, reference_result: Any) -> D
368368
)
369369

370370
chain = prompt | self.llm_model | output_parser
371-
return chain.ainvoke({
371+
return chain.invoke({
372372
"generated_result": json.dumps(generated_result, indent=2),
373373
"reference_result": json.dumps(reference_result_dict, indent=2)
374374
})

scrapegraphai/nodes/generate_scraper_node.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -130,7 +130,7 @@ def execute(self, state: dict) -> dict:
130130
)
131131
map_chain = prompt | self.llm_model | StrOutputParser()
132132

133-
answer = map_chain.ainvoke({"question": user_prompt})
133+
answer = map_chain.invoke({"question": user_prompt})
134134

135135
state.update({self.output[0]: answer})
136136
return state

scrapegraphai/nodes/html_analyzer_node.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -93,7 +93,7 @@ def execute(self, state: dict) -> dict:
9393
output_parser = StrOutputParser()
9494

9595
chain = prompt | self.llm_model | output_parser
96-
html_analysis = chain.ainvoke({})
96+
html_analysis = chain.invoke({})
9797

9898
state.update({self.output[0]: html_analysis, self.output[1]: reduced_html})
9999
return state

scrapegraphai/nodes/merge_answers_node.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -95,7 +95,7 @@ def execute(self, state: dict) -> dict:
9595
)
9696

9797
merge_chain = prompt_template | self.llm_model | output_parser
98-
answer = merge_chain.ainvoke({"user_prompt": user_prompt})
98+
answer = merge_chain.invoke({"user_prompt": user_prompt})
9999
answer["sources"] = state.get("urls", [])
100100

101101
state.update({self.output[0]: answer})

scrapegraphai/nodes/merge_generated_scripts_node.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -74,7 +74,7 @@ def execute(self, state: dict) -> dict:
7474
)
7575

7676
merge_chain = prompt_template | self.llm_model | StrOutputParser()
77-
answer = merge_chain.ainvoke({"user_prompt": user_prompt})
77+
answer = merge_chain.invoke({"user_prompt": user_prompt})
7878

7979
state.update({self.output[0]: answer})
8080
return state

scrapegraphai/nodes/prompt_refiner_node.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -96,7 +96,7 @@ def execute(self, state: dict) -> dict:
9696
output_parser = StrOutputParser()
9797

9898
chain = prompt | self.llm_model | output_parser
99-
refined_prompt = chain.ainvoke({})
99+
refined_prompt = chain.invoke({})
100100

101101
state.update({self.output[0]: refined_prompt})
102102
return state

0 commit comments

Comments (0)