update mrkl agents

pull/1763/head
Henry 2024-02-20 18:23:39 +08:00
parent d1fdd8b3bd
commit 15afb8a2dd
4 changed files with 345 additions and 105 deletions

View File

@ -1,12 +1,13 @@
import { flatten } from 'lodash' import { flatten } from 'lodash'
import { AgentExecutor, createReactAgent } from 'langchain/agents' import { AgentExecutor } from 'langchain/agents'
import { pull } from 'langchain/hub' import { pull } from 'langchain/hub'
import { Tool } from '@langchain/core/tools' import { Tool } from '@langchain/core/tools'
import type { PromptTemplate } from '@langchain/core/prompts' import type { PromptTemplate } from '@langchain/core/prompts'
import { BaseChatModel } from '@langchain/core/language_models/chat_models' import { BaseChatModel } from '@langchain/core/language_models/chat_models'
import { additionalCallbacks } from '../../../src/handler' import { additionalCallbacks } from '../../../src/handler'
import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface' import { FlowiseMemory, ICommonObject, IMessage, INode, INodeData, INodeParams } from '../../../src/Interface'
import { getBaseClasses } from '../../../src/utils' import { getBaseClasses } from '../../../src/utils'
import { createReactAgent } from '../../../src/agents'
class MRKLAgentChat_Agents implements INode { class MRKLAgentChat_Agents implements INode {
label: string label: string
@ -18,11 +19,12 @@ class MRKLAgentChat_Agents implements INode {
category: string category: string
baseClasses: string[] baseClasses: string[]
inputs: INodeParams[] inputs: INodeParams[]
sessionId?: string
constructor() { constructor(fields?: { sessionId?: string }) {
this.label = 'ReAct Agent for Chat Models' this.label = 'ReAct Agent for Chat Models'
this.name = 'mrklAgentChat' this.name = 'mrklAgentChat'
this.version = 2.0 this.version = 3.0
this.type = 'AgentExecutor' this.type = 'AgentExecutor'
this.category = 'Agents' this.category = 'Agents'
this.icon = 'agent.svg' this.icon = 'agent.svg'
@ -39,8 +41,14 @@ class MRKLAgentChat_Agents implements INode {
label: 'Chat Model', label: 'Chat Model',
name: 'model', name: 'model',
type: 'BaseChatModel' type: 'BaseChatModel'
},
{
label: 'Memory',
name: 'memory',
type: 'BaseChatMemory'
} }
] ]
this.sessionId = fields?.sessionId
} }
async init(): Promise<any> { async init(): Promise<any> {
@ -48,6 +56,7 @@ class MRKLAgentChat_Agents implements INode {
} }
async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string> { async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string> {
const memory = nodeData.inputs?.memory as FlowiseMemory
const model = nodeData.inputs?.model as BaseChatModel const model = nodeData.inputs?.model as BaseChatModel
let tools = nodeData.inputs?.tools as Tool[] let tools = nodeData.inputs?.tools as Tool[]
tools = flatten(tools) tools = flatten(tools)
@ -68,10 +77,25 @@ class MRKLAgentChat_Agents implements INode {
const callbacks = await additionalCallbacks(nodeData, options) const callbacks = await additionalCallbacks(nodeData, options)
const result = await executor.invoke({ const prevChatHistory = options.chatHistory
input, const chatHistory = ((await memory.getChatMessages(this.sessionId, false, prevChatHistory)) as IMessage[]) ?? []
callbacks const chatHistoryString = chatHistory.map((hist) => hist.message).join('\\n')
})
const result = await executor.invoke({ input, chat_history: chatHistoryString }, { callbacks })
await memory.addChatMessages(
[
{
text: input,
type: 'userMessage'
},
{
text: result?.output,
type: 'apiMessage'
}
],
this.sessionId
)
return result?.output return result?.output
} }

View File

@ -1,5 +1,5 @@
import { flatten } from 'lodash' import { flatten } from 'lodash'
import { AgentExecutor, createReactAgent } from 'langchain/agents' import { AgentExecutor } from 'langchain/agents'
import { pull } from 'langchain/hub' import { pull } from 'langchain/hub'
import { Tool } from '@langchain/core/tools' import { Tool } from '@langchain/core/tools'
import type { PromptTemplate } from '@langchain/core/prompts' import type { PromptTemplate } from '@langchain/core/prompts'
@ -7,6 +7,7 @@ import { BaseLanguageModel } from 'langchain/base_language'
import { additionalCallbacks } from '../../../src/handler' import { additionalCallbacks } from '../../../src/handler'
import { getBaseClasses } from '../../../src/utils' import { getBaseClasses } from '../../../src/utils'
import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface' import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
import { createReactAgent } from '../../../src/agents'
class MRKLAgentLLM_Agents implements INode { class MRKLAgentLLM_Agents implements INode {
label: string label: string
@ -68,10 +69,7 @@ class MRKLAgentLLM_Agents implements INode {
const callbacks = await additionalCallbacks(nodeData, options) const callbacks = await additionalCallbacks(nodeData, options)
const result = await executor.invoke({ const result = await executor.invoke({ input }, { callbacks })
input,
callbacks
})
return result?.output return result?.output
} }

View File

@ -3,12 +3,23 @@ import { ChainValues } from '@langchain/core/utils/types'
import { AgentStep, AgentAction } from '@langchain/core/agents' import { AgentStep, AgentAction } from '@langchain/core/agents'
import { BaseMessage, FunctionMessage, AIMessage } from '@langchain/core/messages' import { BaseMessage, FunctionMessage, AIMessage } from '@langchain/core/messages'
import { OutputParserException } from '@langchain/core/output_parsers' import { OutputParserException } from '@langchain/core/output_parsers'
import { BaseLanguageModel } from '@langchain/core/language_models/base'
import { CallbackManager, CallbackManagerForChainRun, Callbacks } from '@langchain/core/callbacks/manager' import { CallbackManager, CallbackManagerForChainRun, Callbacks } from '@langchain/core/callbacks/manager'
import { ToolInputParsingException, Tool } from '@langchain/core/tools' import { ToolInputParsingException, Tool, StructuredToolInterface } from '@langchain/core/tools'
import { Runnable } from '@langchain/core/runnables' import { Runnable, RunnableSequence, RunnablePassthrough } from '@langchain/core/runnables'
import { Serializable } from '@langchain/core/load/serializable' import { Serializable } from '@langchain/core/load/serializable'
import { renderTemplate } from '@langchain/core/prompts'
import { BaseChain, SerializedLLMChain } from 'langchain/chains' import { BaseChain, SerializedLLMChain } from 'langchain/chains'
import { AgentExecutorInput, BaseSingleActionAgent, BaseMultiActionAgent, RunnableAgent, StoppingMethod } from 'langchain/agents' import {
CreateReactAgentParams,
AgentExecutorInput,
AgentActionOutputParser,
BaseSingleActionAgent,
BaseMultiActionAgent,
RunnableAgent,
StoppingMethod
} from 'langchain/agents'
import { formatLogToString } from 'langchain/agents/format_scratchpad/log'
export const SOURCE_DOCUMENTS_PREFIX = '\n\n----FLOWISE_SOURCE_DOCUMENTS----\n\n' export const SOURCE_DOCUMENTS_PREFIX = '\n\n----FLOWISE_SOURCE_DOCUMENTS----\n\n'
type AgentFinish = { type AgentFinish = {
@ -647,3 +658,110 @@ export const formatAgentSteps = (steps: AgentStep[]): BaseMessage[] =>
return [new AIMessage(action.log)] return [new AIMessage(action.log)]
} }
}) })
/**
 * Render the plain-text tool listing that the ReAct prompt's `{tools}`
 * placeholder expects: one `name: description` entry per line.
 */
const renderTextDescription = (tools: StructuredToolInterface[]): string => {
    const entries: string[] = []
    for (const tool of tools) {
        entries.push(`${tool.name}: ${tool.description}`)
    }
    return entries.join('\n')
}
/**
 * Build a ReAct single-action agent as a runnable pipeline:
 * scratchpad formatting -> partially-applied prompt -> LLM (stopped at
 * "\nObservation:") -> ReAct output parser.
 *
 * Throws if the supplied prompt is missing any of the placeholders the
 * ReAct template requires (`tools`, `tool_names`, `agent_scratchpad`).
 */
export const createReactAgent = async ({ llm, tools, prompt }: CreateReactAgentParams) => {
    // Fail fast when the prompt cannot host the ReAct placeholders.
    const absent = ['tools', 'tool_names', 'agent_scratchpad'].filter((variable) => !prompt.inputVariables.includes(variable))
    if (absent.length > 0) {
        throw new Error(`Provided prompt is missing required input variables: ${JSON.stringify(absent)}`)
    }

    const names = tools.map((tool) => tool.name)

    // Bake the tool listing and tool names into the prompt up front; only
    // `input` and `agent_scratchpad` remain to be filled per invocation.
    const boundPrompt = await prompt.partial({
        tools: renderTextDescription(tools),
        tool_names: names.join(', ')
    })

    // TODO: Add .bind to core runnable interface.
    // Stop generation before the model hallucinates an Observation itself.
    const stoppedLlm = (llm as BaseLanguageModel).bind({
        stop: ['\nObservation:']
    })

    return RunnableSequence.from([
        RunnablePassthrough.assign({
            //@ts-ignore
            agent_scratchpad: (input: { steps: AgentStep[] }) => formatLogToString(input.steps)
        }),
        boundPrompt,
        stoppedLlm,
        new ReActSingleInputOutputParser({ toolNames: names })
    ])
}
/**
 * Output parser for ReAct-style single-action agents.
 *
 * Interprets raw LLM text as either an AgentAction ("Action:" /
 * "Action Input:" pair) or an AgentFinish ("Final Answer:"). Unlike the
 * upstream parser, unparseable text is returned as a finish instead of
 * throwing, so a rambling model reply still surfaces to the user.
 */
class ReActSingleInputOutputParser extends AgentActionOutputParser {
    lc_namespace = ['langchain', 'agents', 'react']
    private toolNames: string[]
    private FINAL_ANSWER_ACTION = 'Final Answer:'
    private FINAL_ANSWER_AND_PARSABLE_ACTION_ERROR_MESSAGE = 'Parsing LLM output produced both a final answer and a parse-able action:'
    private FORMAT_INSTRUCTIONS = `Use the following format:

Question: the input question you must answer
Thought: you should always think about what to do
Action: the action to take, should be one of [{tool_names}]
Action Input: the input to the action
Observation: the result of the action
... (this Thought/Action/Action Input/Observation can repeat N times)
Thought: I now know the final answer
Final Answer: the final answer to the original input question`

    constructor(fields: { toolNames: string[] }) {
        // Pass fields explicitly instead of relying on the non-idiomatic
        // `super(...arguments)` spread of the implicit arguments object.
        super(fields)
        this.toolNames = fields.toolNames
    }

    /**
     * Parses the given text into an AgentAction or AgentFinish object. If an
     * output fixing parser is defined, uses it to parse the text.
     * @param text Text to parse.
     * @returns Promise that resolves to an AgentAction or AgentFinish object.
     */
    async parse(text: string): Promise<AgentAction | AgentFinish> {
        const includesAnswer = text.includes(this.FINAL_ANSWER_ACTION)
        // `s` (dotAll) flag so a multi-line Action Input (e.g. JSON spanning
        // several lines) is captured in full instead of being cut at the
        // first newline — matches upstream LangChain's ReAct parser.
        const regex = /Action\s*\d*\s*:[\s]*(.*?)[\s]*Action\s*\d*\s*Input\s*\d*\s*:[\s]*(.*)/s
        const actionMatch = text.match(regex)
        if (actionMatch) {
            // A reply containing both an action and a final answer is ambiguous.
            if (includesAnswer) {
                throw new Error(`${this.FINAL_ANSWER_AND_PARSABLE_ACTION_ERROR_MESSAGE}: ${text}`)
            }

            const action = actionMatch[1]
            const actionInput = actionMatch[2]
            // Strip only surrounding quotes; removing every '"' (as before)
            // corrupted JSON-shaped tool inputs.
            const toolInput = actionInput.trim().replace(/^"+|"+$/g, '')

            return {
                tool: action,
                toolInput,
                log: text
            }
        }

        if (includesAnswer) {
            const finalAnswerText = text.split(this.FINAL_ANSWER_ACTION)[1].trim()
            return {
                returnValues: {
                    output: finalAnswerText
                },
                log: text
            }
        }

        // Instead of throwing Error, we return a AgentFinish object
        return { returnValues: { output: text }, log: text }
    }

    /**
     * Returns the format instructions as a string. If the 'raw' option is
     * true, returns the raw FORMAT_INSTRUCTIONS.
     * @param options Options for getting the format instructions.
     * @returns Format instructions as a string.
     */
    getFormatInstructions(): string {
        return renderTemplate(this.FORMAT_INSTRUCTIONS, 'f-string', {
            tool_names: this.toolNames.join(', ')
        })
    }
}

View File

@ -5,11 +5,11 @@
"nodes": [ "nodes": [
{ {
"width": 300, "width": 300,
"height": 143, "height": 142,
"id": "calculator_1", "id": "calculator_1",
"position": { "position": {
"x": 664.1366474718458, "x": 466.86432329033937,
"y": 123.16419000640141 "y": 230.0825123205457
}, },
"type": "customNode", "type": "customNode",
"data": { "data": {
@ -36,66 +36,171 @@
"selected": false "selected": false
}, },
"positionAbsolute": { "positionAbsolute": {
"x": 664.1366474718458, "x": 466.86432329033937,
"y": 123.16419000640141 "y": 230.0825123205457
}, },
"selected": false, "selected": false,
"dragging": false "dragging": false
}, },
{ {
"width": 300, "id": "mrklAgentChat_0",
"height": 277,
"id": "serper_0",
"position": { "position": {
"x": 330.964079024626, "x": 905.8535326018256,
"y": 109.83185250619351 "y": 388.58312223652564
}, },
"type": "customNode", "type": "customNode",
"data": { "data": {
"id": "serper_0", "id": "mrklAgentChat_0",
"label": "Serper", "label": "ReAct Agent for Chat Models",
"version": 1, "version": 3,
"name": "serper", "name": "mrklAgentChat",
"type": "Serper", "type": "AgentExecutor",
"baseClasses": ["Serper", "Tool", "StructuredTool"], "baseClasses": ["AgentExecutor", "BaseChain", "Runnable"],
"category": "Tools", "category": "Agents",
"description": "Wrapper around Serper.dev - Google Search API", "description": "Agent that uses the ReAct logic to decide what action to take, optimized to be used with Chat Models",
"inputParams": [ "inputParams": [],
"inputAnchors": [
{ {
"label": "Connect Credential", "label": "Allowed Tools",
"name": "credential", "name": "tools",
"type": "credential", "type": "Tool",
"credentialNames": ["serperApi"], "list": true,
"id": "serper_0-input-credential-credential" "id": "mrklAgentChat_0-input-tools-Tool"
},
{
"label": "Chat Model",
"name": "model",
"type": "BaseChatModel",
"id": "mrklAgentChat_0-input-model-BaseChatModel"
},
{
"label": "Memory",
"name": "memory",
"type": "BaseChatMemory",
"id": "mrklAgentChat_0-input-memory-BaseChatMemory"
} }
], ],
"inputAnchors": [], "inputs": {
"inputs": {}, "tools": ["{{calculator_1.data.instance}}", "{{serper_0.data.instance}}"],
"model": "{{chatOpenAI_0.data.instance}}",
"memory": "{{RedisBackedChatMemory_0.data.instance}}"
},
"outputAnchors": [ "outputAnchors": [
{ {
"id": "serper_0-output-serper-Serper|Tool|StructuredTool", "id": "mrklAgentChat_0-output-mrklAgentChat-AgentExecutor|BaseChain|Runnable",
"name": "serper", "name": "mrklAgentChat",
"label": "Serper", "label": "AgentExecutor",
"type": "Serper | Tool | StructuredTool" "description": "Agent that uses the ReAct logic to decide what action to take, optimized to be used with Chat Models",
"type": "AgentExecutor | BaseChain | Runnable"
} }
], ],
"outputs": {}, "outputs": {},
"selected": false "selected": false
}, },
"width": 300,
"height": 330,
"selected": false, "selected": false,
"positionAbsolute": { "positionAbsolute": {
"x": 330.964079024626, "x": 905.8535326018256,
"y": 109.83185250619351 "y": 388.58312223652564
}, },
"dragging": false "dragging": false
}, },
{ {
"id": "RedisBackedChatMemory_0",
"position": {
"x": 473.108799702029,
"y": 401.8098683245926
},
"type": "customNode",
"data": {
"id": "RedisBackedChatMemory_0",
"label": "Redis-Backed Chat Memory",
"version": 2,
"name": "RedisBackedChatMemory",
"type": "RedisBackedChatMemory",
"baseClasses": ["RedisBackedChatMemory", "BaseChatMemory", "BaseMemory"],
"category": "Memory",
"description": "Summarizes the conversation and stores the memory in Redis server",
"inputParams": [
{
"label": "Connect Credential",
"name": "credential",
"type": "credential",
"optional": true,
"credentialNames": ["redisCacheApi", "redisCacheUrlApi"],
"id": "RedisBackedChatMemory_0-input-credential-credential"
},
{
"label": "Session Id",
"name": "sessionId",
"type": "string",
"description": "If not specified, a random id will be used. Learn <a target=\"_blank\" href=\"https://docs.flowiseai.com/memory/long-term-memory#ui-and-embedded-chat\">more</a>",
"default": "",
"additionalParams": true,
"optional": true,
"id": "RedisBackedChatMemory_0-input-sessionId-string"
},
{
"label": "Session Timeouts",
"name": "sessionTTL",
"type": "number",
"description": "Omit this parameter to make sessions never expire",
"additionalParams": true,
"optional": true,
"id": "RedisBackedChatMemory_0-input-sessionTTL-number"
},
{
"label": "Memory Key",
"name": "memoryKey",
"type": "string",
"default": "chat_history",
"additionalParams": true,
"id": "RedisBackedChatMemory_0-input-memoryKey-string"
},
{
"label": "Window Size",
"name": "windowSize",
"type": "number",
"description": "Window of size k to surface the last k back-and-forth to use as memory.",
"additionalParams": true,
"optional": true,
"id": "RedisBackedChatMemory_0-input-windowSize-number"
}
],
"inputAnchors": [],
"inputs": {
"sessionId": "",
"sessionTTL": "",
"memoryKey": "chat_history",
"windowSize": ""
},
"outputAnchors": [
{
"id": "RedisBackedChatMemory_0-output-RedisBackedChatMemory-RedisBackedChatMemory|BaseChatMemory|BaseMemory",
"name": "RedisBackedChatMemory",
"label": "RedisBackedChatMemory",
"description": "Summarizes the conversation and stores the memory in Redis server",
"type": "RedisBackedChatMemory | BaseChatMemory | BaseMemory"
}
],
"outputs": {},
"selected": false
},
"width": 300, "width": 300,
"height": 574, "height": 328,
"selected": false,
"positionAbsolute": {
"x": 473.108799702029,
"y": 401.8098683245926
},
"dragging": false
},
{
"id": "chatOpenAI_0", "id": "chatOpenAI_0",
"position": { "position": {
"x": -27.71074046118335, "x": 81.2222202723384,
"y": 243.62715178281059 "y": 59.395597724017364
}, },
"type": "customNode", "type": "customNode",
"data": { "data": {
@ -282,73 +387,69 @@
"id": "chatOpenAI_0-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel|Runnable", "id": "chatOpenAI_0-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel|Runnable",
"name": "chatOpenAI", "name": "chatOpenAI",
"label": "ChatOpenAI", "label": "ChatOpenAI",
"description": "Wrapper around OpenAI large language models that use the Chat endpoint",
"type": "ChatOpenAI | BaseChatModel | BaseLanguageModel | Runnable" "type": "ChatOpenAI | BaseChatModel | BaseLanguageModel | Runnable"
} }
], ],
"outputs": {}, "outputs": {},
"selected": false "selected": false
}, },
"width": 300,
"height": 573,
"selected": false, "selected": false,
"positionAbsolute": { "positionAbsolute": {
"x": -27.71074046118335, "x": 81.2222202723384,
"y": 243.62715178281059 "y": 59.395597724017364
}, },
"dragging": false "dragging": false
}, },
{ {
"width": 300, "id": "serper_0",
"height": 280,
"id": "mrklAgentChat_0",
"position": { "position": {
"x": 1090.2058867451212, "x": 466.4499611299051,
"y": 423.2174695788541 "y": -67.74721119468873
}, },
"type": "customNode", "type": "customNode",
"data": { "data": {
"id": "mrklAgentChat_0", "id": "serper_0",
"label": "ReAct Agent for Chat Models", "label": "Serper",
"version": 1, "version": 1,
"name": "mrklAgentChat", "name": "serper",
"type": "AgentExecutor", "type": "Serper",
"baseClasses": ["AgentExecutor", "BaseChain", "Runnable"], "baseClasses": ["Serper", "Tool", "StructuredTool", "Runnable"],
"category": "Agents", "category": "Tools",
"description": "Agent that uses the ReAct logic to decide what action to take, optimized to be used with Chat Models", "description": "Wrapper around Serper.dev - Google Search API",
"inputParams": [], "inputParams": [
"inputAnchors": [
{ {
"label": "Allowed Tools", "label": "Connect Credential",
"name": "tools", "name": "credential",
"type": "Tool", "type": "credential",
"list": true, "credentialNames": ["serperApi"],
"id": "mrklAgentChat_0-input-tools-Tool" "id": "serper_0-input-credential-credential"
},
{
"label": "Language Model",
"name": "model",
"type": "BaseLanguageModel",
"id": "mrklAgentChat_0-input-model-BaseLanguageModel"
} }
], ],
"inputs": { "inputAnchors": [],
"tools": ["{{calculator_1.data.instance}}", "{{serper_0.data.instance}}"], "inputs": {},
"model": "{{chatOpenAI_0.data.instance}}"
},
"outputAnchors": [ "outputAnchors": [
{ {
"id": "mrklAgentChat_0-output-mrklAgentChat-AgentExecutor|BaseChain|Runnable", "id": "serper_0-output-serper-Serper|Tool|StructuredTool|Runnable",
"name": "mrklAgentChat", "name": "serper",
"label": "AgentExecutor", "label": "Serper",
"type": "AgentExecutor | BaseChain | Runnable" "description": "Wrapper around Serper.dev - Google Search API",
"type": "Serper | Tool | StructuredTool | Runnable"
} }
], ],
"outputs": {}, "outputs": {},
"selected": false "selected": false
}, },
"width": 300,
"height": 276,
"selected": false,
"positionAbsolute": { "positionAbsolute": {
"x": 1090.2058867451212, "x": 466.4499611299051,
"y": 423.2174695788541 "y": -67.74721119468873
}, },
"selected": false "dragging": false
} }
], ],
"edges": [ "edges": [
@ -358,32 +459,31 @@
"target": "mrklAgentChat_0", "target": "mrklAgentChat_0",
"targetHandle": "mrklAgentChat_0-input-tools-Tool", "targetHandle": "mrklAgentChat_0-input-tools-Tool",
"type": "buttonedge", "type": "buttonedge",
"id": "calculator_1-calculator_1-output-calculator-Calculator|Tool|StructuredTool|BaseLangChain-mrklAgentChat_0-mrklAgentChat_0-input-tools-Tool", "id": "calculator_1-calculator_1-output-calculator-Calculator|Tool|StructuredTool|BaseLangChain-mrklAgentChat_0-mrklAgentChat_0-input-tools-Tool"
"data": {
"label": ""
}
}, },
{ {
"source": "serper_0", "source": "RedisBackedChatMemory_0",
"sourceHandle": "serper_0-output-serper-Serper|Tool|StructuredTool", "sourceHandle": "RedisBackedChatMemory_0-output-RedisBackedChatMemory-RedisBackedChatMemory|BaseChatMemory|BaseMemory",
"target": "mrklAgentChat_0", "target": "mrklAgentChat_0",
"targetHandle": "mrklAgentChat_0-input-tools-Tool", "targetHandle": "mrklAgentChat_0-input-memory-BaseChatMemory",
"type": "buttonedge", "type": "buttonedge",
"id": "serper_0-serper_0-output-serper-Serper|Tool|StructuredTool-mrklAgentChat_0-mrklAgentChat_0-input-tools-Tool", "id": "RedisBackedChatMemory_0-RedisBackedChatMemory_0-output-RedisBackedChatMemory-RedisBackedChatMemory|BaseChatMemory|BaseMemory-mrklAgentChat_0-mrklAgentChat_0-input-memory-BaseChatMemory"
"data": {
"label": ""
}
}, },
{ {
"source": "chatOpenAI_0", "source": "chatOpenAI_0",
"sourceHandle": "chatOpenAI_0-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel|Runnable", "sourceHandle": "chatOpenAI_0-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel|Runnable",
"target": "mrklAgentChat_0", "target": "mrklAgentChat_0",
"targetHandle": "mrklAgentChat_0-input-model-BaseLanguageModel", "targetHandle": "mrklAgentChat_0-input-model-BaseChatModel",
"type": "buttonedge", "type": "buttonedge",
"id": "chatOpenAI_0-chatOpenAI_0-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel|Runnable-mrklAgentChat_0-mrklAgentChat_0-input-model-BaseLanguageModel", "id": "chatOpenAI_0-chatOpenAI_0-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel|Runnable-mrklAgentChat_0-mrklAgentChat_0-input-model-BaseChatModel"
"data": { },
"label": "" {
} "source": "serper_0",
"sourceHandle": "serper_0-output-serper-Serper|Tool|StructuredTool|Runnable",
"target": "mrklAgentChat_0",
"targetHandle": "mrklAgentChat_0-input-tools-Tool",
"type": "buttonedge",
"id": "serper_0-serper_0-output-serper-Serper|Tool|StructuredTool|Runnable-mrklAgentChat_0-mrklAgentChat_0-input-tools-Tool"
} }
] ]
} }