class Agent:
def __init__(self, model, tools, system=""):
self.system = system
graph = StateGraph(AgentState)
graph.add_node("llm", self.call_openai)
graph.add_node("action", self.take_action)
graph.add_conditional_edges(
"llm",
self.exists_action,
{True: "action", False: END}
)
graph.add_edge("action", "llm")
graph.set_entry_point("llm")
self.graph = graph.compile()
self.tools = {t.name: t for t in tools}
self.model = model.bind_tools(tools)
def exists_action(self, state: AgentState):
result = state['messages'][-1]
return len(result.tool_calls) > 0
def call_openai(self, state: AgentState):
messages = state['messages']
if self.system:
messages = [SystemMessage(content=self.system)] + messages
message = self.model.invoke(messages)
return {'messages': [message]}
def take_action(self, state: AgentState):
tool_calls = state['messages'][-1].tool_calls
results = []
for t in tool_calls:
print(f"Calling: {t}")
print("TOOL ARG" , t['args'])
if not t['name'] in self.tools: # check for bad tool name from LLM
print("\n ....bad tool name....")
result = "bad tool name, retry" # instruct LLM to retry if bad
else:
search = t['args']['query']
print('QUERY:' , search)
print(type(search))
print(type(t['args']))
result = self.tools[t['name']].invoke({'query': search})
results.append(ToolMessage(tool_call_id=t['id'], name=t['name'], content=str(result)))
print("Back to the model!")
return {'messages': results}
prompt = """You are a smart research assistant. Use the search engine to look up information. \
You are allowed to make multiple calls (either together or in sequence). \
Only look up information when you are sure of what you want. \
If you need to look up some information before asking a follow up question, you are allowed to do that!
"""
model = ChatOpenAI(model="Meta-Llama-3.3-70B-Instruct", base_url="https://api.sambanova.ai/v1") #reduce inference cost
abot = Agent(model, [tool], system=prompt)
query = "Who won the super bowl in 2024? What is the longest river"
messages = [HumanMessage(content = query)]
result = abot.graph.invoke({"messages": messages})
I run this code. But there is an error in the final line. The error means that in part I invoke the tool, the input is not dict, str. How can I solve it? Help me!
ValueError: {‘code’: None, ‘message’: ‘ModelMetaclass object argument
after ** must be a mapping, not str’, ‘model_output’: ‘{“name”:
“tavily_search_results_json”, “parameters”: “{\“query\”: \“longest
river\”}”}’, ‘param’: None, ‘type’: ‘Invalid function calling
output.’}