Open
Labels: bug (Something isn't working)
Description
Confirm this is an issue with the Python library and not an underlying OpenAI API
- This is an issue with the Python library
Describe the bug
When running this minimal example code:
```python
from langgraph.graph import StateGraph, START, END
from langgraph.graph.message import add_messages
from langchain_core.messages import AnyMessage, HumanMessage
from langchain.tools import tool
from langgraph.prebuilt import ToolNode
from pydantic import BaseModel, Field
from typing import Literal, Optional, List, TypedDict, Dict, Annotated
from models import model


class AddOneSchema(BaseModel):
    n: int = Field(..., description="Number to add one to.")


class ExtractTextSchema(BaseModel):
    text: str = Field(..., description="Text to extract substring from.")
    i: int = Field(..., description="Initial index.")
    j: int = Field(..., description="Final index.")


@tool(args_schema=AddOneSchema)
def add_one(n):
    """add one to number"""
    return n + 1


@tool(args_schema=ExtractTextSchema)
def extract_text(text, i, j):
    """Extract a substring from text between two integers"""
    return text[i:j]


class Answer(BaseModel):
    response: str = Field(..., description="Standard response")
    reasoning: str = Field(..., description="Reasoning")
    question_type: Literal["geography", "history", "art", "maths"]


class TestState(TypedDict):
    messages: Annotated[List[AnyMessage], add_messages]


def agent_node(state: TestState) -> Dict:
    llm = model["gpt5_low_reasoning"].bind_tools(
        strict=True,
        response_format=Answer,
        tools=[add_one, extract_text],
    )
    response = llm.invoke(state.get("messages", []))
    return {"messages": [response]}


graph = StateGraph(TestState)
graph.add_node("agent", agent_node)
graph.add_node("tools", ToolNode([add_one, extract_text]))
graph.add_edge(START, "agent")


def routing_logic(state: TestState):
    last_message = state["messages"][-1] if state["messages"] else {}
    if last_message.tool_calls:
        return "tools"
    else:
        return END


graph.add_conditional_edges(
    "agent",
    routing_logic,
    {
        "tools": "tools",
        END: END,
    },
)
graph.add_edge("tools", "agent")
graph.add_edge("agent", END)
graph = graph.compile()

test = graph.invoke(
    {
        "messages": """
        Add one to two using your tools
        """
    }
)
```
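The `models` module is not included in the snippet; `model` is a dict of pre-configured chat models keyed by name. A minimal sketch of what the `gpt5_low_reasoning` entry is assumed to look like (deployment name, API version, and endpoint are placeholders, not taken from the report; the `input[...]` parameter in the error below suggests the Responses API is in use):

```python
# Hypothetical models.py -- a sketch of the assumed `model` dict, not the reporter's actual config.
import os

from langchain_openai import AzureChatOpenAI

model = {
    "gpt5_low_reasoning": AzureChatOpenAI(
        azure_deployment="gpt-5",          # placeholder deployment name
        api_version="2025-03-01-preview",  # placeholder API version
        azure_endpoint=os.environ["AZURE_OPENAI_ENDPOINT"],
        api_key=os.environ["AZURE_OPENAI_API_KEY"],
        reasoning_effort="low",
        # use_responses_api=True,  # possibly set, given the Responses-style 'input[2]' param in the error
    )
}
```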
This leverages langchain/langgraph with AzureChatOpenAI. I get the following error:
```
BadRequestError: Error code: 400 - {'error': {'message': "Unknown parameter: 'input[2].parsed_arguments'.", 'type': 'invalid_request_error', 'param': 'input[2].parsed_arguments', 'code': 'unknown_parameter'}}
[Trace ID: 00-a6b4f6bb62b988a668f48fddeaadb350-fbb90fc452cfdf3f-00]

File <command-7705379453879500>, line 84
     81 graph.add_edge("agent", END)
     82 graph = graph.compile()
---> 84 test = graph.invoke(
     85     {
     86         "messages": """
     87         Add one to two using your tools
     88         """
     89     }
     90 )

BadRequestError: Error code: 400 - {'error': {'message': "Unknown parameter: 'input[2].parsed_arguments'.", 'type': 'invalid_request_error', 'param': 'input[2].parsed_arguments', 'code': 'unknown_parameter'}}
```
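Not part of the original report, but one way to narrow this down is to run a single model turn with the same binding used in `agent_node` and inspect the `AIMessage` that LangChain replays to the API on the next turn; this may show where the rejected `parsed_arguments` field originates:

```python
# Hypothetical diagnostic, reusing the `llm` binding from agent_node above.
from langchain_core.messages import HumanMessage

ai_msg = llm.invoke([HumanMessage(content="Add one to two using your tools")])
print(ai_msg.tool_calls)         # normalized tool calls
print(ai_msg.additional_kwargs)  # raw provider payload; check here (and ai_msg.content) for parsed_arguments
```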
To Reproduce
- Using langchain == 1.0.2, langgraph == 1.0.3, langchain-openai == 1.0.1, openai == 2.6.1 (and mlflow == 3.5.1, unsure if this is related).
- Execute a standard ReAct agent with tools (see the sketch below)
- See error
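For reference, a rough sketch of what "a standard ReAct agent" refers to here, using LangGraph's prebuilt helper instead of the hand-rolled graph above (assumed, not taken from the report, to exercise the same code path; on recent versions this helper may be superseded by `langchain.agents.create_agent`):

```python
# Hypothetical equivalent repro with the prebuilt ReAct agent (same model dict and tools as above).
from langgraph.prebuilt import create_react_agent

agent = create_react_agent(
    model["gpt5_low_reasoning"],
    tools=[add_one, extract_text],
    response_format=Answer,
)
agent.invoke({"messages": "Add one to two using your tools"})
```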
Code snippets
OS
Databricks Linux
Python version
3.12.3
Library version
2.6.1