import os
import re
from typing import List, Union

import requests
import streamlit as st
from PIL import Image

from langchain import LLMChain, OpenAI
from langchain.agents import AgentExecutor, AgentOutputParser, LLMSingleActionAgent, Tool
from langchain.callbacks import StreamlitCallbackHandler
from langchain.memory import ConversationBufferMemory
from langchain.memory.chat_message_histories import StreamlitChatMessageHistory
from langchain.prompts import StringPromptTemplate
from langchain.schema import AgentAction, AgentFinish
from langchain.tools import BaseTool, DuckDuckGoSearchRun
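
# Note (assumption, not pinned by this repo): the imports above target the pre-0.1
# "langchain" package layout; a typical environment would be roughly
#   pip install streamlit langchain openai duckduckgo-search requests pillow
# and the app is launched with `streamlit run test.py`.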
st.set_page_config(page_title="Fashion Outfit Generator", page_icon="🔮")
# st.title("🔮 DaVinci Dresser")
openai_api_key = st.sidebar.text_input("Please insert your OpenAI API Key", type="password")
st.sidebar.write("error will go away once you enter your OpenAI API key")
radio_btn = st.sidebar.radio(
"Choose model",
('Curie Matcher','DaVinci Dresser'))
st.sidebar.info('Both our DaVinci Dresser and Curi Matcher are build using [langchain](https://www.langchain.com/) and are based on [MRKL](https://arxiv.org/pdf/2205.00445.pdf) and [ReAct](https://ai.googleblog.com/2022/11/react-synergizing-reasoning-and-acting.html). Both of our AI are capable of understanding all the fashion trends, knows what is trending on social media and also knows the user past purchase history/most viewed item and based on that assist user with there query. ')
st.sidebar.write("made with ❤️ by rythmn")
if radio_btn == "DaVinci Dresser":
    st.title("🔮 DaVinci Dresser")
    with st.expander("DaVinci Dresser is a smart AI that can recommend fashion products for any given query"):
        image = Image.open('ss.PNG')
        st.image(image, caption='process')

    # Streamlit-backed chat history, wrapped in a conversation buffer
    msgs = StreamlitChatMessageHistory()
    memory = ConversationBufferMemory(
        chat_memory=msgs, return_messages=True, memory_key="chat_history", output_key="output"
    )
    if len(msgs.messages) == 0 or st.sidebar.button("Reset chat history"):
        msgs.clear()
        msgs.add_ai_message("Your personal fashion assistant is here to help you find the perfect outfit!")
        st.session_state.steps = {}
    avatars = {"human": "user", "ai": "assistant"}
    for idx, msg in enumerate(msgs.messages):
        with st.chat_message(avatars[msg.type]):
            # Render intermediate steps if any were saved
            for step in st.session_state.steps.get(str(idx), []):
                if step[0].tool == "_Exception":
                    continue
                with st.expander(f"✅ **{step[0].tool}**: {step[0].tool_input}"):
                    st.write(step[0].log)
                    st.write(f"**{step[1]}**")
            st.write(msg.content)
    if prompt121 := st.chat_input(placeholder="Show me something dark academia aesthetic"):
        st.chat_message("user").write(prompt121)

        if not openai_api_key:
            st.info("Please add your OpenAI API key to continue.")
            st.stop()

        # setting up the OpenAI API key
        os.environ["OPENAI_API_KEY"] = openai_api_key
        # setting up a custom tool for product search
        class flipkart_search(BaseTool):
            name = "product_link_generator"
            description = "useful when you need to return the link of a particular product. Input should be a product name"

            def _run(self, item_name: str, num_items=3):
                # Query the (unofficial) Flipkart scraper API and return up to num_items product links
                api_base_url = "https://flipkart-scraper-api.dvishal485.workers.dev/search/"
                api_url = f"{api_base_url}{item_name}"
                response = requests.get(api_url)
                if response.status_code == 200:
                    product_data = response.json()
                    if 'result' in product_data:
                        result_list = product_data['result']
                        num_items = min(num_items, len(result_list))
                        product_links = [result_list[i].get('link') for i in range(num_items)]
                        return product_links
                    else:
                        return None
                else:
                    return None

            async def _arun(self, item_name: str, num_items=3):
                # Some langchain releases declare _arun as abstract, so a stub is needed
                raise NotImplementedError("product_link_generator does not support async")

        product_link_generator = flipkart_search()
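        # A minimal, standalone sanity check for the scraper tool (illustrative only; the
        # query string and the assumed response shape {"result": [{"link": ...}, ...]} are
        # not guaranteed by the third-party API):
        #   links = flipkart_search()._run("black oversized hoodie", num_items=2)
        #   print(links)  # -> list of product URLs, or None if the request/shape fails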
        search = DuckDuckGoSearchRun()

        # defining the agent tools
        tools = [
            Tool(
                name="Search",
                func=search.run,
                description="useful for when you need to answer questions about current events"
            ),
            Tool(
                name="product_link_generator",
                func=product_link_generator.run,
                description="useful when you need to return the link of a particular product. Input should be a product name"
            )
        ]
        # setting up a custom ReAct-style prompt template
        template = """Answer the following question as best you can. You are a fashion outfit and link generator bot for the Indian e-commerce company Flipkart. When the user gives any fashion-related query, first search for what the user wants, then search for which fashion items fall under that, and you COMPULSORILY need to check whether those products are on Flipkart; finally, give the links at the end using the tool product_link_generator. If the user asks for a trend-related outfit, first search for what falls under that trend and then use product_link_generator to give the links. REMEMBER two important things: you COMPULSORILY need to provide the product links, and the product links should come last. If the user asks something other than a fashion query, do not answer it. You have access to the following tools:

{tools}

Use the following format:

Question: the input question you must answer
Thought: you should always think about what to do
Action: the action to take, should be one of [{tool_names}]
Action Input: the input to the action
Observation: the result of the action
... (this Thought/Action/Action Input/Observation can repeat N times)
Thought: I now know the final answer
Final Answer: the final answer to the original input question

Begin! Remember to answer as a compassionate fashion outfit and accessories suggestion bot when giving your final answer.

Question: {input}
{agent_scratchpad}"""
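        # Illustrative trace of the format above for the sample query in the chat input
        # placeholder (assumed, not a recorded run):
        #   Question: Show me something dark academia aesthetic
        #   Thought: I should first find out which items fall under the dark academia trend
        #   Action: Search
        #   Action Input: dark academia fashion items
        #   Observation: tweed blazers, pleated skirts, oxford shoes, ...
        #   Thought: Now I need Flipkart links for these items
        #   Action: product_link_generator
        #   Action Input: tweed blazer
        #   Observation: ['https://www.flipkart.com/...']
        #   Thought: I now know the final answer
        #   Final Answer: an outfit suggestion with the product links at the end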
        # Setting up a prompt template that injects the tool descriptions and the agent scratchpad
        class CustomPromptTemplate(StringPromptTemplate):
            template: str
            tools: List[Tool]

            def format(self, **kwargs) -> str:
                # Rebuild the scratchpad from the (action, observation) pairs gathered so far
                intermediate_steps = kwargs.pop("intermediate_steps")
                thoughts = ""
                for action, observation in intermediate_steps:
                    thoughts += action.log
                    thoughts += f"\nObservation: {observation}\nThought: "
                kwargs["agent_scratchpad"] = thoughts
                kwargs["tools"] = "\n".join([f"{tool.name}: {tool.description}" for tool in self.tools])
                kwargs["tool_names"] = ", ".join([tool.name for tool in self.tools])
                return self.template.format(**kwargs)

        prompt = CustomPromptTemplate(
            template=template,
            tools=tools,
            input_variables=["input", "intermediate_steps"]
        )
        # setting up the output parser
        class CustomOutputParser(AgentOutputParser):
            def parse(self, llm_output: str) -> Union[AgentAction, AgentFinish]:
                # If the model has produced a final answer, finish the run
                if "Final Answer:" in llm_output:
                    return AgentFinish(
                        return_values={"output": llm_output.split("Final Answer:")[-1].strip()},
                        log=llm_output,
                    )
                # Parse out the action and action input
                regex = r"Action\s*\d*\s*:(.*?)\nAction\s*\d*\s*Input\s*\d*\s*:[\s]*(.*)"
                match = re.search(regex, llm_output, re.DOTALL)
                if not match:
                    raise ValueError(f"Could not parse LLM output: `{llm_output}`")
                action = match.group(1).strip()
                action_input = match.group(2)
                # Return the action and action input
                return AgentAction(tool=action, tool_input=action_input.strip(" ").strip('"'), log=llm_output)

        output_parser = CustomOutputParser()
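        # Illustrative only: given a completion such as
        #   "Thought: I should look up the trend first\nAction: Search\nAction Input: dark academia outfits"
        # parse() returns AgentAction(tool="Search", tool_input="dark academia outfits"),
        # while any completion containing "Final Answer:" ends the run via AgentFinish.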
        llm = OpenAI(temperature=0, model_name='gpt-3.5-turbo')
        # llm = OpenAI(temperature=0)

        # LLM chain consisting of the LLM and a prompt
        llm_chain = LLMChain(llm=llm, prompt=prompt)

        tool_names = [tool.name for tool in tools]
        agent = LLMSingleActionAgent(
            llm_chain=llm_chain,
            output_parser=output_parser,
            stop=["\nObservation:"],
            allowed_tools=tool_names
        )
        agent_executor = AgentExecutor.from_agent_and_tools(
            agent=agent,
            tools=tools,
            return_intermediate_steps=True,
            verbose=True
        )
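        # Because return_intermediate_steps=True, calling the executor returns a dict like
        # {"output": "<final answer>", "intermediate_steps": [(AgentAction(...), "<observation>"), ...]},
        # which is what the rendering code below relies on.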
        # run the agent and stream its reasoning into the Streamlit container
        # agent_executor.run(prompt121)
        with st.chat_message("assistant"):
            st_cb = StreamlitCallbackHandler(st.container(), expand_new_thoughts=False)
            response = agent_executor(prompt121, callbacks=[st_cb])
            st.write(response["output"])
            st.session_state.steps[str(len(msgs.messages) - 1)] = response["intermediate_steps"]
else:
    st.title("🧬 Curie Matcher")
    with st.expander("Confused about what to wear with which outfit? Ask Curie Matcher."):
        st.write("Our AI-powered fashion outfit generator")

    msgs = StreamlitChatMessageHistory()
    memory = ConversationBufferMemory(
        chat_memory=msgs, return_messages=True, memory_key="chat_history", output_key="output"
    )
    if len(msgs.messages) == 0 or st.sidebar.button("Reset chat history"):
        msgs.clear()
        msgs.add_ai_message("Your personal fashion assistant is here to help you find the perfect outfit!")
        st.session_state.steps = {}
    avatars = {"human": "user", "ai": "assistant"}
    for idx, msg in enumerate(msgs.messages):
        with st.chat_message(avatars[msg.type]):
            # Render intermediate steps if any were saved
            for step in st.session_state.steps.get(str(idx), []):
                if step[0].tool == "_Exception":
                    continue
                with st.expander(f"✅ **{step[0].tool}**: {step[0].tool_input}"):
                    st.write(step[0].log)
                    st.write(f"**{step[1]}**")
            st.write(msg.content)
    if prompt121 := st.chat_input(placeholder="Show me something dark academia aesthetic"):
        st.chat_message("user").write(prompt121)

        if not openai_api_key:
            st.info("Please add your OpenAI API key to continue.")
            st.stop()

        # setting up the OpenAI API key
        os.environ["OPENAI_API_KEY"] = openai_api_key
        # setting up a custom tool for product search
        class flipkart_search(BaseTool):
            name = "product_link_generator"
            description = "useful when you need to return the link of a particular product. Input should be a product name"

            def _run(self, item_name: str, num_items=3):
                api_base_url = "https://flipkart-scraper-api.dvishal485.workers.dev/search/"
                api_url = f"{api_base_url}{item_name}"
                response = requests.get(api_url)
                if response.status_code == 200:
                    product_data = response.json()
                    if 'result' in product_data:
                        result_list = product_data['result']
                        num_items = min(num_items, len(result_list))
                        product_links = [result_list[i].get('link') for i in range(num_items)]
                        return product_links
                    else:
                        return None
                else:
                    return None

            async def _arun(self, item_name: str, num_items=3):
                # Some langchain releases declare _arun as abstract, so a stub is needed
                raise NotImplementedError("product_link_generator does not support async")

        # Instantiated here, but not registered in the Curie Matcher tool list below
        product_link_generator = flipkart_search()
        search = DuckDuckGoSearchRun()

        # defining the agent tools (only web search is exposed to Curie Matcher)
        tools = [
            Tool(
                name="Search",
                func=search.run,
                description="useful for when you need to answer questions about current events"
            )
        ]
        # setting up a custom ReAct-style prompt template
        template = """Answer the following question as best you can. You are a fashion outfit generator: the user can ask what they can wear with a particular piece of clothing or other fashion accessories, and you need to find fashion items that go with their clothes. They can also ask about a particular trend, and you need to tell them what kind of outfit they can wear for that trend. You have access to the following tools:

{tools}

Use the following format:

Question: the input question you must answer
Thought: you should always think about what to do
Action: the action to take, should be one of [{tool_names}]
Action Input: the input to the action
Observation: the result of the action
... (this Thought/Action/Action Input/Observation can repeat N times)
Thought: I now know the final answer
Final Answer: the final answer to the original input question

Begin! Remember to answer as a compassionate fashion outfit and accessories suggestion bot when giving your final answer.

Question: {input}
{agent_scratchpad}"""
        # Setting up a prompt template
        class CustomPromptTemplate(StringPromptTemplate):
            template: str
            tools: List[Tool]

            def format(self, **kwargs) -> str:
                intermediate_steps = kwargs.pop("intermediate_steps")
                thoughts = ""
                for action, observation in intermediate_steps:
                    thoughts += action.log
                    thoughts += f"\nObservation: {observation}\nThought: "
                kwargs["agent_scratchpad"] = thoughts
                kwargs["tools"] = "\n".join([f"{tool.name}: {tool.description}" for tool in self.tools])
                kwargs["tool_names"] = ", ".join([tool.name for tool in self.tools])
                return self.template.format(**kwargs)

        prompt = CustomPromptTemplate(
            template=template,
            tools=tools,
            input_variables=["input", "intermediate_steps"]
        )
        # setting up the output parser
        class CustomOutputParser(AgentOutputParser):
            def parse(self, llm_output: str) -> Union[AgentAction, AgentFinish]:
                if "Final Answer:" in llm_output:
                    return AgentFinish(
                        return_values={"output": llm_output.split("Final Answer:")[-1].strip()},
                        log=llm_output,
                    )
                # Parse out the action and action input
                regex = r"Action\s*\d*\s*:(.*?)\nAction\s*\d*\s*Input\s*\d*\s*:[\s]*(.*)"
                match = re.search(regex, llm_output, re.DOTALL)
                if not match:
                    raise ValueError(f"Could not parse LLM output: `{llm_output}`")
                action = match.group(1).strip()
                action_input = match.group(2)
                # Return the action and action input
                return AgentAction(tool=action, tool_input=action_input.strip(" ").strip('"'), log=llm_output)

        output_parser = CustomOutputParser()
        llm = OpenAI(temperature=0)

        # LLM chain consisting of the LLM and a prompt
        llm_chain = LLMChain(llm=llm, prompt=prompt)

        tool_names = [tool.name for tool in tools]
        agent = LLMSingleActionAgent(
            llm_chain=llm_chain,
            output_parser=output_parser,
            stop=["\nObservation:"],
            allowed_tools=tool_names
        )
        agent_executor = AgentExecutor.from_agent_and_tools(
            agent=agent,
            tools=tools,
            return_intermediate_steps=True,
            verbose=True
        )
        # run the agent and stream its reasoning into the Streamlit container
        with st.chat_message("assistant"):
            st_cb = StreamlitCallbackHandler(st.container(), expand_new_thoughts=False)
            response = agent_executor(prompt121, callbacks=[st_cb])
            st.write(response["output"])
            st.session_state.steps[str(len(msgs.messages) - 1)] = response["intermediate_steps"]