Using ChatMistralAI with structured output : Pydantic model with a datetime.date value using json_schema
raises a 400 bad request
#29604
Labels
🤖:bug
Related to a bug, vulnerability, unexpected error with an existing feature
Checked other resources
Example Code
Error Message and Stack Trace (if applicable)
HTTPStatusError Traceback (most recent call last)
Cell In[124], line 12
7 date: date
10 llm = ChatMistralAI(api_key=api_key, model='mistral-small-latest',temperature=0).with_structured_output(DummyClass, method='json_schema')
---> 12 result: DummyClass = llm.invoke('Answer me with a date. When was the first man on the moon ?')
File ~/rag-project/rag-sandbox/.venv/lib/python3.12/site-packages/langchain_core/runnables/base.py:3014, in RunnableSequence.invoke(self, input, config, **kwargs)
3012 context.run(_set_config_context, config)
3013 if i == 0:
-> 3014 input = context.run(step.invoke, input, config, **kwargs)
3015 else:
3016 input = context.run(step.invoke, input, config)
File ~/rag-project/rag-sandbox/.venv/lib/python3.12/site-packages/langchain_core/runnables/base.py:5352, in RunnableBindingBase.invoke(self, input, config, **kwargs)
5346 def invoke(
5347 self,
5348 input: Input,
5349 config: Optional[RunnableConfig] = None,
5350 **kwargs: Optional[Any],
5351 ) -> Output:
-> 5352 return self.bound.invoke(
5353 input,
5354 self._merge_configs(config),
5355 **{**self.kwargs, **kwargs},
5356 )
File ~/rag-project/rag-sandbox/.venv/lib/python3.12/site-packages/langchain_core/language_models/chat_models.py:284, in BaseChatModel.invoke(self, input, config, stop, **kwargs)
273 def invoke(
274 self,
275 input: LanguageModelInput,
(...)
279 **kwargs: Any,
280 ) -> BaseMessage:
281 config = ensure_config(config)
282 return cast(
283 ChatGeneration,
--> 284 self.generate_prompt(
285 [self._convert_input(input)],
286 stop=stop,
287 callbacks=config.get("callbacks"),
288 tags=config.get("tags"),
289 metadata=config.get("metadata"),
290 run_name=config.get("run_name"),
291 run_id=config.pop("run_id", None),
292 **kwargs,
293 ).generations[0][0],
294 ).message
File ~/rag-project/rag-sandbox/.venv/lib/python3.12/site-packages/langchain_core/language_models/chat_models.py:860, in BaseChatModel.generate_prompt(self, prompts, stop, callbacks, **kwargs)
852 def generate_prompt(
853 self,
854 prompts: list[PromptValue],
(...)
857 **kwargs: Any,
858 ) -> LLMResult:
859 prompt_messages = [p.to_messages() for p in prompts]
--> 860 return self.generate(prompt_messages, stop=stop, callbacks=callbacks, **kwargs)
File ~/rag-project/rag-sandbox/.venv/lib/python3.12/site-packages/langchain_core/language_models/chat_models.py:690, in BaseChatModel.generate(self, messages, stop, callbacks, tags, metadata, run_name, run_id, **kwargs)
687 for i, m in enumerate(messages):
688 try:
689 results.append(
--> 690 self._generate_with_cache(
691 m,
692 stop=stop,
693 run_manager=run_managers[i] if run_managers else None,
694 **kwargs,
695 )
696 )
697 except BaseException as e:
698 if run_managers:
File ~/rag-project/rag-sandbox/.venv/lib/python3.12/site-packages/langchain_core/language_models/chat_models.py:925, in BaseChatModel._generate_with_cache(self, messages, stop, run_manager, **kwargs)
923 else:
924 if inspect.signature(self._generate).parameters.get("run_manager"):
--> 925 result = self._generate(
926 messages, stop=stop, run_manager=run_manager, **kwargs
927 )
928 else:
929 result = self._generate(messages, stop=stop, **kwargs)
File ~/rag-project/rag-sandbox/.venv/lib/python3.12/site-packages/langchain_mistralai/chat_models.py:547, in ChatMistralAI._generate(self, messages, stop, run_manager, stream, **kwargs)
545 message_dicts, params = self._create_message_dicts(messages, stop)
546 params = {**params, **kwargs}
--> 547 response = self.completion_with_retry(
548 messages=message_dicts, run_manager=run_manager, **params
549 )
550 return self._create_chat_result(response)
File ~/rag-project/rag-sandbox/.venv/lib/python3.12/site-packages/langchain_mistralai/chat_models.py:466, in ChatMistralAI.completion_with_retry(self, run_manager, **kwargs)
463 _raise_on_error(response)
464 return response.json()
--> 466 rtn = _completion_with_retry(**kwargs)
467 return rtn
File ~/rag-project/rag-sandbox/.venv/lib/python3.12/site-packages/langchain_mistralai/chat_models.py:463, in ChatMistralAI.completion_with_retry.._completion_with_retry(**kwargs)
461 else:
462 response = self.client.post(url="/chat/completions", json=kwargs)
--> 463 _raise_on_error(response)
464 return response.json()
File ~/rag-project/rag-sandbox/.venv/lib/python3.12/site-packages/langchain_mistralai/chat_models.py:170, in _raise_on_error(response)
168 if httpx.codes.is_error(response.status_code):
169 error_message = response.read().decode("utf-8")
--> 170 raise httpx.HTTPStatusError(
171 f"Error response {response.status_code} "
172 f"while fetching {response.url}: {error_message}",
173 request=response.request,
174 response=response,
175 )
HTTPStatusError: Error response 400 while fetching https://api.mistral.ai/v1/chat/completions: {"object":"error","message":"Received unsupported keyword
format
in schema.","type":"invalid_request_error","param":null,"code":null}

Description
I am trying to use langchain to identify dates for downstream filtering. I used the
with_structured_output
and it seemed to work out-of-the-box, but I encountered some issues with the method='function_calling'
approach (sometimes the model was not properly following the pydantic schema), so I tried using the method='json_schema'
to constrain it another way. I expected
json_schema
to work the same or better, but it did not; I got the stack trace above. I followed the problem down to the
_convert_pydantic_to_openai_function
method. Using pydantic's
model_json_schema
returns the following: The problem lies in the
format
key. This key is not supported by Mistral, nor by OpenAI, as documented here. Deleting this
format
key and providing the following description instead makes the call succeed.
I believe that the
_rm_titles
function should be extended to remove all keys that are unsupported per the OpenAI documentation.

System Info
System Information
Package Information
Optional packages not installed
Other Dependencies
The text was updated successfully, but these errors were encountered: