diff --git a/packages/server/marketplaces/agentflows/Multi Agents.json b/packages/server/marketplaces/agentflows/Multi Agents.json new file mode 100644 index 00000000..8b930509 --- /dev/null +++ b/packages/server/marketplaces/agentflows/Multi Agents.json @@ -0,0 +1,1717 @@ +{ + "description": "Multi agents with supervisor and agents, constructed using Sequential Agents nodes", + "framework": ["Langchain"], + "usecases": ["Reflective Agent"], + "nodes": [ + { + "id": "seqStart_0", + "position": { + "x": 41.85333333333335, + "y": 89.63333333333333 + }, + "type": "customNode", + "data": { + "id": "seqStart_0", + "label": "Start", + "version": 1, + "name": "seqStart", + "type": "Start", + "baseClasses": ["Start"], + "category": "Sequential Agents", + "description": "Starting point of the conversation", + "inputParams": [], + "inputAnchors": [ + { + "label": "Chat Model", + "name": "model", + "type": "BaseChatModel", + "description": "Only compatible with models that are capable of function calling: ChatOpenAI, ChatMistral, ChatAnthropic, ChatGoogleGenerativeAI, ChatVertexAI, GroqChat", + "id": "seqStart_0-input-model-BaseChatModel" + }, + { + "label": "Agent Memory", + "name": "agentMemory", + "type": "BaseCheckpointSaver", + "description": "Save the state of the agent", + "optional": true, + "id": "seqStart_0-input-agentMemory-BaseCheckpointSaver" + }, + { + "label": "State", + "name": "state", + "type": "State", + "description": "State is an object that is updated by nodes in the graph, passing from one node to another. Agent Memory must be connected when using State. 
By default, state contains \"messages\" that got updated with each message sent and received.", + "optional": true, + "id": "seqStart_0-input-state-State" + }, + { + "label": "Input Moderation", + "description": "Detect text that could generate harmful output and prevent it from being sent to the language model", + "name": "inputModeration", + "type": "Moderation", + "optional": true, + "list": true, + "id": "seqStart_0-input-inputModeration-Moderation" + } + ], + "inputs": { + "model": "{{chatOpenAI_0.data.instance}}", + "agentMemory": "{{agentMemory_0.data.instance}}", + "state": "{{seqState_0.data.instance}}", + "inputModeration": "" + }, + "outputAnchors": [ + { + "id": "seqStart_0-output-seqStart-Start", + "name": "seqStart", + "label": "Start", + "description": "Starting point of the conversation", + "type": "Start" + } + ], + "outputs": {}, + "selected": false + }, + "width": 300, + "height": 382, + "selected": false, + "positionAbsolute": { + "x": 41.85333333333335, + "y": 89.63333333333333 + }, + "dragging": false + }, + { + "id": "seqLLMNode_0", + "position": { + "x": 410.6133428124564, + "y": 60.16965318723166 + }, + "type": "customNode", + "data": { + "id": "seqLLMNode_0", + "label": "LLM Node", + "version": 1, + "name": "seqLLMNode", + "type": "LLMNode", + "baseClasses": ["LLMNode"], + "category": "Sequential Agents", + "description": "Run Chat Model and return the output", + "inputParams": [ + { + "label": "Name", + "name": "llmNodeName", + "type": "string", + "placeholder": "LLM", + "id": "seqLLMNode_0-input-llmNodeName-string" + }, + { + "label": "System Prompt", + "name": "systemMessagePrompt", + "type": "string", + "rows": 4, + "optional": true, + "additionalParams": true, + "id": "seqLLMNode_0-input-systemMessagePrompt-string" + }, + { + "label": "Human Prompt", + "name": "humanMessagePrompt", + "type": "string", + "description": "This prompt will be added at the end of the messages as human message", + "rows": 4, + "optional": true, + 
"additionalParams": true, + "id": "seqLLMNode_0-input-humanMessagePrompt-string" + }, + { + "label": "Format Prompt Values", + "name": "promptValues", + "description": "Assign values to the prompt variables. You can also use $flow.state. to get the state value", + "type": "json", + "optional": true, + "acceptVariable": true, + "list": true, + "additionalParams": true, + "id": "seqLLMNode_0-input-promptValues-json" + }, + { + "label": "JSON Structured Output", + "name": "llmStructuredOutput", + "type": "datagrid", + "description": "Instruct the LLM to give output in a JSON structured schema", + "datagrid": [ + { + "field": "key", + "headerName": "Key", + "editable": true + }, + { + "field": "type", + "headerName": "Type", + "type": "singleSelect", + "valueOptions": ["String", "String Array", "Number", "Boolean", "Enum"], + "editable": true + }, + { + "field": "enumValues", + "headerName": "Enum Values", + "editable": true + }, + { + "field": "description", + "headerName": "Description", + "flex": 1, + "editable": true + } + ], + "optional": true, + "additionalParams": true, + "id": "seqLLMNode_0-input-llmStructuredOutput-datagrid" + }, + { + "label": "Update State", + "name": "updateStateMemory", + "type": "tabs", + "tabIdentifier": "selectedUpdateStateMemoryTab", + "default": "updateStateMemoryUI", + "additionalParams": true, + "tabs": [ + { + "label": "Update State (Table)", + "name": "updateStateMemoryUI", + "type": "datagrid", + "hint": { + "label": "How to use", + "value": "\n1. Key and value pair to be updated. For example: if you have the following State:\n | Key | Operation | Default Value |\n |-----------|---------------|-------------------|\n | user | Replace | |\n\n You can update the \"user\" value with the following:\n | Key | Value |\n |-----------|-----------|\n | user | john doe |\n\n2. 
If you want to use the agent's output as the value to update state, it is available as available as `$flow.output` with the following structure:\n ```json\n {\n \"content\": 'Hello! How can I assist you today?',\n \"name\": \"\",\n \"additional_kwargs\": {},\n \"response_metadata\": {},\n \"tool_calls\": [],\n \"invalid_tool_calls\": [],\n \"usage_metadata\": {}\n }\n ```\n\n For example, if the output `content` is the value you want to update the state with, you can do the following:\n | Key | Value |\n |-----------|---------------------------|\n | user | `$flow.output.content` |\n\n3. You can get default flow config, including the current \"state\":\n - `$flow.sessionId`\n - `$flow.chatId`\n - `$flow.chatflowId`\n - `$flow.input`\n - `$flow.state`\n\n4. You can get custom variables: `$vars.`\n\n" + }, + "description": "This is only applicable when you have a custom State at the START node. After agent execution, you might want to update the State values", + "datagrid": [ + { + "field": "key", + "headerName": "Key", + "type": "asyncSingleSelect", + "loadMethod": "loadStateKeys", + "flex": 0.5, + "editable": true + }, + { + "field": "value", + "headerName": "Value", + "type": "freeSolo", + "valueOptions": [ + { + "label": "LLM Node Output (string)", + "value": "$flow.output.content" + }, + { + "label": "LLM JSON Output Key (string)", + "value": "$flow.output." + }, + { + "label": "Global variable (string)", + "value": "$vars." + }, + { + "label": "Input Question (string)", + "value": "$flow.input" + }, + { + "label": "Session Id (string)", + "value": "$flow.sessionId" + }, + { + "label": "Chat Id (string)", + "value": "$flow.chatId" + }, + { + "label": "Chatflow Id (string)", + "value": "$flow.chatflowId" + } + ], + "editable": true, + "flex": 1 + } + ], + "optional": true, + "additionalParams": true + }, + { + "label": "Update State (Code)", + "name": "updateStateMemoryCode", + "type": "code", + "hint": { + "label": "How to use", + "value": "\n1. 
Return the key value JSON object. For example: if you have the following State:\n ```json\n {\n \"user\": null\n }\n ```\n\n You can update the \"user\" value by returning the following:\n ```js\n return {\n \"user\": \"john doe\"\n }\n ```\n\n2. If you want to use the LLM Node's output as the value to update state, it is available as `$flow.output` with the following structure:\n ```json\n {\n \"content\": 'Hello! How can I assist you today?',\n \"name\": \"\",\n \"additional_kwargs\": {},\n \"response_metadata\": {},\n \"tool_calls\": [],\n \"invalid_tool_calls\": [],\n \"usage_metadata\": {}\n }\n ```\n\n For example, if the output `content` is the value you want to update the state with, you can return the following:\n ```js\n return {\n \"user\": $flow.output.content\n }\n ```\n\n3. You can also get default flow config, including the current \"state\":\n - `$flow.sessionId`\n - `$flow.chatId`\n - `$flow.chatflowId`\n - `$flow.input`\n - `$flow.state`\n\n4. You can get custom variables: `$vars.`\n\n" + }, + "description": "This is only applicable when you have a custom State at the START node. After agent execution, you might want to update the State values. 
Must return an object representing the state", + "hideCodeExecute": true, + "codeExample": "const result = $flow.output;\n\n/* Suppose we have a custom State schema like this:\n* {\n aggregate: {\n value: (x, y) => x.concat(y),\n default: () => []\n }\n }\n*/\n\nreturn {\n aggregate: [result.content]\n};", + "optional": true, + "additionalParams": true + } + ], + "id": "seqLLMNode_0-input-updateStateMemory-tabs" + } + ], + "inputAnchors": [ + { + "label": "Start | Agent | LLM | Tool Node", + "name": "sequentialNode", + "type": "Start | Agent | LLMNode | ToolNode", + "list": true, + "id": "seqLLMNode_0-input-sequentialNode-Start | Agent | LLMNode | ToolNode" + }, + { + "label": "Chat Model", + "name": "model", + "type": "BaseChatModel", + "optional": true, + "description": "Overwrite model to be used for this node", + "id": "seqLLMNode_0-input-model-BaseChatModel" + } + ], + "inputs": { + "llmNodeName": "supervisor", + "systemMessagePrompt": "You are a supervisor tasked with managing a conversation between the following workers:\n- agent1: software engineer\n- agent2: code reviewer\n\nGiven the following user request, respond with the worker to act next.\nEach worker will perform a task and respond with their results and status.\nWhen finished, respond with FINISH.\nSelect strategically to minimize the number of steps taken.", + "humanMessagePrompt": "Given the conversation above, who should act next? Or should we FINISH? 
Select one of: agent1, agent2", + "sequentialNode": ["{{seqStart_0.data.instance}}"], + "model": "", + "promptValues": "", + "llmStructuredOutput": "[{\"key\":\"next\",\"type\":\"Enum\",\"enumValues\":\"FINISH, agent1, agent2\",\"description\":\"\",\"actions\":\"\",\"id\":0},{\"key\":\"instructions\",\"type\":\"String\",\"enumValues\":\"The specific instructions of the sub-task the next role should accomplish.\",\"description\":\"\",\"actions\":\"\",\"id\":1},{\"key\":\"reasoning\",\"type\":\"String\",\"enumValues\":\"\",\"description\":\"\",\"actions\":\"\",\"id\":2}]", + "updateStateMemory": "updateStateMemoryUI", + "updateStateMemoryUI": "[{\"key\":\"next\",\"value\":\"$flow.output.next\",\"actions\":\"\",\"id\":1}]" + }, + "outputAnchors": [ + { + "id": "seqLLMNode_0-output-seqLLMNode-LLMNode", + "name": "seqLLMNode", + "label": "LLMNode", + "description": "Run Chat Model and return the output", + "type": "LLMNode" + } + ], + "outputs": {}, + "selected": false + }, + "width": 300, + "height": 431, + "selected": false, + "positionAbsolute": { + "x": 410.6133428124564, + "y": 60.16965318723166 + }, + "dragging": false + }, + { + "id": "seqAgent_0", + "position": { + "x": 1572.857390926285, + "y": 37.72211705823145 + }, + "type": "customNode", + "data": { + "id": "seqAgent_0", + "label": "Agent", + "version": 1, + "name": "seqAgent", + "type": "Agent", + "baseClasses": ["Agent"], + "category": "Sequential Agents", + "description": "Agent that can execute tools", + "inputParams": [ + { + "label": "Agent Name", + "name": "agentName", + "type": "string", + "placeholder": "Agent", + "id": "seqAgent_0-input-agentName-string" + }, + { + "label": "System Prompt", + "name": "systemMessagePrompt", + "type": "string", + "rows": 4, + "optional": true, + "default": "You are a research assistant who can search for up-to-date info using search engine.", + "id": "seqAgent_0-input-systemMessagePrompt-string" + }, + { + "label": "Human Prompt", + "name": "humanMessagePrompt", + 
"type": "string", + "description": "This prompt will be added at the end of the messages as human message", + "rows": 4, + "optional": true, + "additionalParams": true, + "id": "seqAgent_0-input-humanMessagePrompt-string" + }, + { + "label": "Format Prompt Values", + "name": "promptValues", + "description": "Assign values to the prompt variables. You can also use $flow.state. to get the state value", + "type": "json", + "optional": true, + "acceptVariable": true, + "list": true, + "id": "seqAgent_0-input-promptValues-json" + }, + { + "label": "Update State", + "name": "updateStateMemory", + "type": "tabs", + "tabIdentifier": "selectedUpdateStateMemoryTab", + "additionalParams": true, + "default": "updateStateMemoryUI", + "tabs": [ + { + "label": "Update State (Table)", + "name": "updateStateMemoryUI", + "type": "datagrid", + "hint": { + "label": "How to use", + "value": "\n1. Key and value pair to be updated. For example: if you have the following State:\n | Key | Operation | Default Value |\n |-----------|---------------|-------------------|\n | user | Replace | |\n\n You can update the \"user\" value with the following:\n | Key | Value |\n |-----------|-----------|\n | user | john doe |\n\n2. If you want to use the agent's output as the value to update state, it is available as available as `$flow.output` with the following structure:\n ```json\n {\n \"output\": \"Hello! How can I assist you today?\",\n \"usedTools\": [\n {\n \"tool\": \"tool-name\",\n \"toolInput\": \"{foo: var}\",\n \"toolOutput\": \"This is the tool's output\"\n }\n ],\n \"sourceDocuments\": [\n {\n \"pageContent\": \"This is the page content\",\n \"metadata\": \"{foo: var}\",\n }\n ],\n }\n ```\n\n For example, if the `toolOutput` is the value you want to update the state with, you can do the following:\n | Key | Value |\n |-----------|-------------------------------------------|\n | user | `$flow.output.usedTools[0].toolOutput` |\n\n3. 
You can get default flow config, including the current \"state\":\n - `$flow.sessionId`\n - `$flow.chatId`\n - `$flow.chatflowId`\n - `$flow.input`\n - `$flow.state`\n\n4. You can get custom variables: `$vars.`\n\n" + }, + "description": "This is only applicable when you have a custom State at the START node. After agent execution, you might want to update the State values", + "datagrid": [ + { + "field": "key", + "headerName": "Key", + "type": "asyncSingleSelect", + "loadMethod": "loadStateKeys", + "flex": 0.5, + "editable": true + }, + { + "field": "value", + "headerName": "Value", + "type": "freeSolo", + "valueOptions": [ + { + "label": "Agent Output (string)", + "value": "$flow.output.content" + }, + { + "label": "Used Tools (array)", + "value": "$flow.output.usedTools" + }, + { + "label": "First Tool Output (string)", + "value": "$flow.output.usedTools[0].toolOutput" + }, + { + "label": "Source Documents (array)", + "value": "$flow.output.sourceDocuments" + }, + { + "label": "Global variable (string)", + "value": "$vars." + }, + { + "label": "Input Question (string)", + "value": "$flow.input" + }, + { + "label": "Session Id (string)", + "value": "$flow.sessionId" + }, + { + "label": "Chat Id (string)", + "value": "$flow.chatId" + }, + { + "label": "Chatflow Id (string)", + "value": "$flow.chatflowId" + } + ], + "editable": true, + "flex": 1 + } + ], + "optional": true, + "additionalParams": true + }, + { + "label": "Update State (Code)", + "name": "updateStateMemoryCode", + "type": "code", + "hint": { + "label": "How to use", + "value": "\n1. Return the key value JSON object. For example: if you have the following State:\n ```json\n {\n \"user\": null\n }\n ```\n\n You can update the \"user\" value by returning the following:\n ```js\n return {\n \"user\": \"john doe\"\n }\n ```\n\n2. If you want to use the agent's output as the value to update state, it is available as `$flow.output` with the following structure:\n ```json\n {\n \"content\": \"Hello! 
How can I assist you today?\",\n \"usedTools\": [\n {\n \"tool\": \"tool-name\",\n \"toolInput\": \"{foo: var}\",\n \"toolOutput\": \"This is the tool's output\"\n }\n ],\n \"sourceDocuments\": [\n {\n \"pageContent\": \"This is the page content\",\n \"metadata\": \"{foo: var}\",\n }\n ],\n }\n ```\n\n For example, if the `toolOutput` is the value you want to update the state with, you can return the following:\n ```js\n return {\n \"user\": $flow.output.usedTools[0].toolOutput\n }\n ```\n\n3. You can also get default flow config, including the current \"state\":\n - `$flow.sessionId`\n - `$flow.chatId`\n - `$flow.chatflowId`\n - `$flow.input`\n - `$flow.state`\n\n4. You can get custom variables: `$vars.`\n\n" + }, + "description": "This is only applicable when you have a custom State at the START node. After agent execution, you might want to update the State values. Must return an object representing the state", + "hideCodeExecute": true, + "codeExample": "const result = $flow.output;\n\n/* Suppose we have a custom State schema like this:\n* {\n aggregate: {\n value: (x, y) => x.concat(y),\n default: () => []\n }\n }\n*/\n\nreturn {\n aggregate: [result.content]\n};", + "optional": true, + "additionalParams": true + } + ], + "id": "seqAgent_0-input-updateStateMemory-tabs" + }, + { + "label": "Max Iterations", + "name": "maxIterations", + "type": "number", + "optional": true, + "additionalParams": true, + "id": "seqAgent_0-input-maxIterations-number" + } + ], + "inputAnchors": [ + { + "label": "Tools", + "name": "tools", + "type": "Tool", + "list": true, + "id": "seqAgent_0-input-tools-Tool" + }, + { + "label": "Start | Agent | LLM | Tool Node", + "name": "sequentialNode", + "type": "Start | Agent | LLMNode | ToolNode", + "list": true, + "id": "seqAgent_0-input-sequentialNode-Start | Agent | LLMNode | ToolNode" + }, + { + "label": "Chat Model", + "name": "model", + "type": "BaseChatModel", + "optional": true, + "description": "Overwrite model to be used for this 
agent", + "id": "seqAgent_0-input-model-BaseChatModel" + } + ], + "inputs": { + "agentName": "agent2", + "systemMessagePrompt": "As a Quality Assurance Engineer at {company}, you are an integral part of our development team, ensuring that our software products are of the highest quality. Your meticulous attention to detail and expertise in testing methodologies are crucial in identifying defects and ensuring that our code meets the highest standards.\n\nYour goal is to ensure the delivery of high-quality software through thorough code review and testing.\n\nReview the codebase for the new feature designed and implemented by the Senior Software Engineer. Your expertise goes beyond mere code inspection; you are adept at ensuring that developments not only function as intended but also adhere to the team's coding standards, enhance maintainability, and seamlessly integrate with existing systems. \n\nWith a deep appreciation for collaborative development, you provide constructive feedback, guiding contributors towards best practices and fostering a culture of continuous improvement. 
Your meticulous approach to reviewing code, coupled with your ability to foresee potential issues and recommend proactive solutions, ensures the delivery of high-quality software that is robust, scalable, and aligned with the team's strategic goals.\n\nAlways pass back the review and feedback to Senior Software Engineer.", + "humanMessagePrompt": "", + "tools": "", + "sequentialNode": ["{{seqCondition_0.data.instance}}"], + "model": "", + "promptValues": "{\"company\":\"FlowiseAI Inc\"}", + "updateStateMemory": "updateStateMemoryUI", + "maxIterations": "" + }, + "outputAnchors": [ + { + "id": "seqAgent_0-output-seqAgent-Agent", + "name": "seqAgent", + "label": "Agent", + "description": "Agent that can execute tools", + "type": "Agent" + } + ], + "outputs": {}, + "selected": false + }, + "width": 300, + "height": 762, + "selected": false, + "positionAbsolute": { + "x": 1572.857390926285, + "y": 37.72211705823145 + }, + "dragging": false + }, + { + "id": "seqAgent_1", + "position": { + "x": 1194.601416892626, + "y": -733.5332098804579 + }, + "type": "customNode", + "data": { + "id": "seqAgent_1", + "label": "Agent", + "version": 1, + "name": "seqAgent", + "type": "Agent", + "baseClasses": ["Agent"], + "category": "Sequential Agents", + "description": "Agent that can execute tools", + "inputParams": [ + { + "label": "Agent Name", + "name": "agentName", + "type": "string", + "placeholder": "Agent", + "id": "seqAgent_1-input-agentName-string" + }, + { + "label": "System Prompt", + "name": "systemMessagePrompt", + "type": "string", + "rows": 4, + "optional": true, + "default": "You are a research assistant who can search for up-to-date info using search engine.", + "id": "seqAgent_1-input-systemMessagePrompt-string" + }, + { + "label": "Human Prompt", + "name": "humanMessagePrompt", + "type": "string", + "description": "This prompt will be added at the end of the messages as human message", + "rows": 4, + "optional": true, + "additionalParams": true, + "id": 
"seqAgent_1-input-humanMessagePrompt-string" + }, + { + "label": "Format Prompt Values", + "name": "promptValues", + "description": "Assign values to the prompt variables. You can also use $flow.state. to get the state value", + "type": "json", + "optional": true, + "acceptVariable": true, + "list": true, + "id": "seqAgent_1-input-promptValues-json" + }, + { + "label": "Update State", + "name": "updateStateMemory", + "type": "tabs", + "tabIdentifier": "selectedUpdateStateMemoryTab", + "additionalParams": true, + "default": "updateStateMemoryUI", + "tabs": [ + { + "label": "Update State (Table)", + "name": "updateStateMemoryUI", + "type": "datagrid", + "hint": { + "label": "How to use", + "value": "\n1. Key and value pair to be updated. For example: if you have the following State:\n | Key | Operation | Default Value |\n |-----------|---------------|-------------------|\n | user | Replace | |\n\n You can update the \"user\" value with the following:\n | Key | Value |\n |-----------|-----------|\n | user | john doe |\n\n2. If you want to use the agent's output as the value to update state, it is available as available as `$flow.output` with the following structure:\n ```json\n {\n \"output\": \"Hello! How can I assist you today?\",\n \"usedTools\": [\n {\n \"tool\": \"tool-name\",\n \"toolInput\": \"{foo: var}\",\n \"toolOutput\": \"This is the tool's output\"\n }\n ],\n \"sourceDocuments\": [\n {\n \"pageContent\": \"This is the page content\",\n \"metadata\": \"{foo: var}\",\n }\n ],\n }\n ```\n\n For example, if the `toolOutput` is the value you want to update the state with, you can do the following:\n | Key | Value |\n |-----------|-------------------------------------------|\n | user | `$flow.output.usedTools[0].toolOutput` |\n\n3. You can get default flow config, including the current \"state\":\n - `$flow.sessionId`\n - `$flow.chatId`\n - `$flow.chatflowId`\n - `$flow.input`\n - `$flow.state`\n\n4. 
You can get custom variables: `$vars.`\n\n" + }, + "description": "This is only applicable when you have a custom State at the START node. After agent execution, you might want to update the State values", + "datagrid": [ + { + "field": "key", + "headerName": "Key", + "type": "asyncSingleSelect", + "loadMethod": "loadStateKeys", + "flex": 0.5, + "editable": true + }, + { + "field": "value", + "headerName": "Value", + "type": "freeSolo", + "valueOptions": [ + { + "label": "Agent Output (string)", + "value": "$flow.output.content" + }, + { + "label": "Used Tools (array)", + "value": "$flow.output.usedTools" + }, + { + "label": "First Tool Output (string)", + "value": "$flow.output.usedTools[0].toolOutput" + }, + { + "label": "Source Documents (array)", + "value": "$flow.output.sourceDocuments" + }, + { + "label": "Global variable (string)", + "value": "$vars." + }, + { + "label": "Input Question (string)", + "value": "$flow.input" + }, + { + "label": "Session Id (string)", + "value": "$flow.sessionId" + }, + { + "label": "Chat Id (string)", + "value": "$flow.chatId" + }, + { + "label": "Chatflow Id (string)", + "value": "$flow.chatflowId" + } + ], + "editable": true, + "flex": 1 + } + ], + "optional": true, + "additionalParams": true + }, + { + "label": "Update State (Code)", + "name": "updateStateMemoryCode", + "type": "code", + "hint": { + "label": "How to use", + "value": "\n1. Return the key value JSON object. For example: if you have the following State:\n ```json\n {\n \"user\": null\n }\n ```\n\n You can update the \"user\" value by returning the following:\n ```js\n return {\n \"user\": \"john doe\"\n }\n ```\n\n2. If you want to use the agent's output as the value to update state, it is available as `$flow.output` with the following structure:\n ```json\n {\n \"content\": \"Hello! 
How can I assist you today?\",\n \"usedTools\": [\n {\n \"tool\": \"tool-name\",\n \"toolInput\": \"{foo: var}\",\n \"toolOutput\": \"This is the tool's output\"\n }\n ],\n \"sourceDocuments\": [\n {\n \"pageContent\": \"This is the page content\",\n \"metadata\": \"{foo: var}\",\n }\n ],\n }\n ```\n\n For example, if the `toolOutput` is the value you want to update the state with, you can return the following:\n ```js\n return {\n \"user\": $flow.output.usedTools[0].toolOutput\n }\n ```\n\n3. You can also get default flow config, including the current \"state\":\n - `$flow.sessionId`\n - `$flow.chatId`\n - `$flow.chatflowId`\n - `$flow.input`\n - `$flow.state`\n\n4. You can get custom variables: `$vars.`\n\n" + }, + "description": "This is only applicable when you have a custom State at the START node. After agent execution, you might want to update the State values. Must return an object representing the state", + "hideCodeExecute": true, + "codeExample": "const result = $flow.output;\n\n/* Suppose we have a custom State schema like this:\n* {\n aggregate: {\n value: (x, y) => x.concat(y),\n default: () => []\n }\n }\n*/\n\nreturn {\n aggregate: [result.content]\n};", + "optional": true, + "additionalParams": true + } + ], + "id": "seqAgent_1-input-updateStateMemory-tabs" + }, + { + "label": "Max Iterations", + "name": "maxIterations", + "type": "number", + "optional": true, + "additionalParams": true, + "id": "seqAgent_1-input-maxIterations-number" + } + ], + "inputAnchors": [ + { + "label": "Tools", + "name": "tools", + "type": "Tool", + "list": true, + "id": "seqAgent_1-input-tools-Tool" + }, + { + "label": "Start | Agent | LLM | Tool Node", + "name": "sequentialNode", + "type": "Start | Agent | LLMNode | ToolNode", + "list": true, + "id": "seqAgent_1-input-sequentialNode-Start | Agent | LLMNode | ToolNode" + }, + { + "label": "Chat Model", + "name": "model", + "type": "BaseChatModel", + "optional": true, + "description": "Overwrite model to be used for this 
agent", + "id": "seqAgent_1-input-model-BaseChatModel" + } + ], + "inputs": { + "agentName": "agent1", + "systemMessagePrompt": "As a Senior Software Engineer at {company}, you are a pivotal part of our innovative development team. Your expertise and leadership drive the creation of robust, scalable software solutions that meet the needs of our diverse clientele. By applying best practices in software development, you ensure that our products are reliable, efficient, and maintainable.\n\nYour goal is to lead the development of high-quality software solutions.\n\nUtilize your deep technical knowledge and experience to architect, design, and implement software systems that address complex problems. Collaborate closely with other engineers, reviewers to ensure that the solutions you develop align with business objectives and user needs.\n\nDesign and implement new feature for the given task, ensuring it integrates seamlessly with existing systems and meets performance requirements. Use your understanding of {technology} to build this feature. Make sure to adhere to our coding standards and follow best practices.\n\nThe output should be a fully functional, well-documented feature that enhances our product's capabilities. Include detailed comments in the code. Pass the code to Quality Assurance Engineer for review if neccessary. 
Once the review is good enough, produce a finalized version of the code.", + "humanMessagePrompt": "", + "tools": "", + "sequentialNode": ["{{seqCondition_0.data.instance}}"], + "model": "", + "promptValues": "{\"company\":\"FlowiseAI Inc\",\"technology\":\"React, Node\"}", + "updateStateMemory": "updateStateMemoryUI", + "maxIterations": "" + }, + "outputAnchors": [ + { + "id": "seqAgent_1-output-seqAgent-Agent", + "name": "seqAgent", + "label": "Agent", + "description": "Agent that can execute tools", + "type": "Agent" + } + ], + "outputs": {}, + "selected": false + }, + "width": 300, + "height": 762, + "selected": false, + "positionAbsolute": { + "x": 1194.601416892626, + "y": -733.5332098804579 + }, + "dragging": false + }, + { + "id": "seqCondition_0", + "position": { + "x": 773.1346576683973, + "y": 25.960079647796476 + }, + "type": "customNode", + "data": { + "id": "seqCondition_0", + "label": "Condition", + "version": 1, + "name": "seqCondition", + "type": "Condition", + "baseClasses": ["Condition"], + "category": "Sequential Agents", + "description": "Conditional function to determine which route to take next", + "inputParams": [ + { + "label": "Condition Name", + "name": "conditionName", + "type": "string", + "optional": true, + "placeholder": "If X, then Y", + "id": "seqCondition_0-input-conditionName-string" + }, + { + "label": "Condition", + "name": "condition", + "type": "conditionFunction", + "tabIdentifier": "selectedConditionFunctionTab", + "tabs": [ + { + "label": "Condition (Table)", + "name": "conditionUI", + "type": "datagrid", + "description": "If a condition is met, the node connected to the respective output will be executed", + "optional": true, + "datagrid": [ + { + "field": "variable", + "headerName": "Variable", + "type": "freeSolo", + "editable": true, + "loadMethod": ["getPreviousMessages", "loadStateKeys"], + "valueOptions": [ + { + "label": "Total Messages (number)", + "value": "$flow.state.messages.length" + }, + { + "label": "First 
Message Content (string)", + "value": "$flow.state.messages[0].content" + }, + { + "label": "Last Message Content (string)", + "value": "$flow.state.messages[-1].content" + }, + { + "label": "Global variable (string)", + "value": "$vars." + } + ], + "flex": 0.5, + "minWidth": 200 + }, + { + "field": "operation", + "headerName": "Operation", + "type": "singleSelect", + "valueOptions": [ + "Contains", + "Not Contains", + "Start With", + "End With", + "Is", + "Is Not", + "Is Empty", + "Is Not Empty", + "Greater Than", + "Less Than", + "Equal To", + "Not Equal To", + "Greater Than or Equal To", + "Less Than or Equal To" + ], + "editable": true, + "flex": 0.4, + "minWidth": 150 + }, + { + "field": "value", + "headerName": "Value", + "flex": 1, + "editable": true + }, + { + "field": "output", + "headerName": "Output Name", + "editable": true, + "flex": 0.3, + "minWidth": 150 + } + ] + }, + { + "label": "Condition (Code)", + "name": "conditionFunction", + "type": "code", + "description": "Function to evaluate the condition", + "hint": { + "label": "How to use", + "value": "\n1. Must return a string value at the end of function. For example:\n ```js\n if (\"X\" === \"X\") {\n return \"Agent\"; // connect to next agent node\n } else {\n return \"End\"; // connect to end node\n }\n ```\n\n2. In most cases, you would probably get the last message to do some comparison. You can get all current messages from the state: `$flow.state.messages`:\n ```json\n [\n {\n \"content\": \"Hello! How can I assist you today?\",\n \"name\": \"\",\n \"additional_kwargs\": {},\n \"response_metadata\": {},\n \"tool_calls\": [],\n \"invalid_tool_calls\": [],\n \"usage_metadata\": {}\n }\n ]\n ```\n\n For example, to get the last message content:\n ```js\n const messages = $flow.state.messages;\n const lastMessage = messages[messages.length - 1];\n\n // Proceed to do something with the last message content\n ```\n\n3. 
You can get default flow config, including the current \"state\":\n - `$flow.sessionId`\n - `$flow.chatId`\n - `$flow.chatflowId`\n - `$flow.input`\n - `$flow.state`\n\n4. You can get custom variables: `$vars.`\n\n" + }, + "hideCodeExecute": true, + "codeExample": "const state = $flow.state;\n \nconst messages = state.messages;\n\nconst lastMessage = messages[messages.length - 1];\n\n/* Check if the last message has content */\nif (lastMessage.content) {\n return \"Agent\";\n}\n\nreturn \"End\";", + "optional": true + } + ], + "id": "seqCondition_0-input-condition-conditionFunction" + } + ], + "inputAnchors": [ + { + "label": "Start | Agent | LLM | Tool Node", + "name": "sequentialNode", + "type": "Start | Agent | LLMNode | ToolNode", + "list": true, + "id": "seqCondition_0-input-sequentialNode-Start | Agent | LLMNode | ToolNode" + } + ], + "inputs": { + "conditionName": "", + "sequentialNode": ["{{seqLLMNode_0.data.instance}}"], + "condition": "", + "conditionUI": "[{\"variable\":\"$flow.state.next\",\"operation\":\"Is\",\"value\":\"agent1\",\"output\":\"Agent 1\",\"actions\":\"\",\"id\":0},{\"variable\":\"$flow.state.next\",\"operation\":\"Is\",\"value\":\"agent2\",\"output\":\"Agent 2\",\"actions\":\"\",\"id\":1}]", + "selectedConditionFunctionTab_seqCondition_0": "conditionUI" + }, + "outputAnchors": [ + { + "name": "output", + "label": "Output", + "type": "options", + "options": [ + { + "id": "seqCondition_0-output-agent1-Agent|LLMNode|ToolNode", + "name": "agent1", + "label": "Agent 1", + "type": "Agent | LLMNode | ToolNode", + "isAnchor": true + }, + { + "id": "seqCondition_0-output-agent2-Agent|LLMNode|ToolNode", + "name": "agent2", + "label": "Agent 2", + "type": "Agent | LLMNode | ToolNode", + "isAnchor": true + }, + { + "id": "seqCondition_0-output-end-Agent|LLMNode|ToolNode", + "name": "end", + "label": "End", + "type": "Agent | LLMNode | ToolNode", + "isAnchor": true + } + ] + } + ], + "outputs": { + "output": "next" + }, + "selected": false + }, + 
"width": 300, + "height": 524, + "selected": false, + "positionAbsolute": { + "x": 773.1346576683973, + "y": 25.960079647796476 + }, + "dragging": false + }, + { + "id": "agentMemory_0", + "position": { + "x": -714.5803491336571, + "y": 70.77006261886419 + }, + "type": "customNode", + "data": { + "id": "agentMemory_0", + "label": "Agent Memory", + "version": 1, + "name": "agentMemory", + "type": "AgentMemory", + "baseClasses": ["AgentMemory", "BaseCheckpointSaver"], + "category": "Memory", + "description": "Memory for agentflow to remember the state of the conversation", + "inputParams": [ + { + "label": "Database", + "name": "databaseType", + "type": "options", + "options": [ + { + "label": "SQLite", + "name": "sqlite" + } + ], + "default": "sqlite", + "id": "agentMemory_0-input-databaseType-options" + }, + { + "label": "Database File Path", + "name": "databaseFilePath", + "type": "string", + "placeholder": "C:\\Users\\User\\.flowise\\database.sqlite", + "description": "If SQLite is selected, provide the path to the SQLite database file. 
Leave empty to use default application database", + "additionalParams": true, + "optional": true, + "id": "agentMemory_0-input-databaseFilePath-string" + }, + { + "label": "Additional Connection Configuration", + "name": "additionalConfig", + "type": "json", + "additionalParams": true, + "optional": true, + "id": "agentMemory_0-input-additionalConfig-json" + } + ], + "inputAnchors": [], + "inputs": { + "databaseType": "sqlite", + "databaseFilePath": "", + "additionalConfig": "" + }, + "outputAnchors": [ + { + "id": "agentMemory_0-output-agentMemory-AgentMemory|BaseCheckpointSaver", + "name": "agentMemory", + "label": "AgentMemory", + "description": "Memory for agentflow to remember the state of the conversation", + "type": "AgentMemory | BaseCheckpointSaver" + } + ], + "outputs": {}, + "selected": false + }, + "width": 300, + "height": 327, + "selected": false, + "positionAbsolute": { + "x": -714.5803491336571, + "y": 70.77006261886419 + }, + "dragging": false + }, + { + "id": "chatOpenAI_0", + "position": { + "x": -348.48591585569204, + "y": -548.745050943517 + }, + "type": "customNode", + "data": { + "id": "chatOpenAI_0", + "label": "ChatOpenAI", + "version": 6, + "name": "chatOpenAI", + "type": "ChatOpenAI", + "baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel", "Runnable"], + "category": "Chat Models", + "description": "Wrapper around OpenAI large language models that use the Chat endpoint", + "inputParams": [ + { + "label": "Connect Credential", + "name": "credential", + "type": "credential", + "credentialNames": ["openAIApi"], + "id": "chatOpenAI_0-input-credential-credential" + }, + { + "label": "Model Name", + "name": "modelName", + "type": "asyncOptions", + "loadMethod": "listModels", + "default": "gpt-3.5-turbo", + "id": "chatOpenAI_0-input-modelName-asyncOptions" + }, + { + "label": "Temperature", + "name": "temperature", + "type": "number", + "step": 0.1, + "default": 0.9, + "optional": true, + "id": 
"chatOpenAI_0-input-temperature-number" + }, + { + "label": "Max Tokens", + "name": "maxTokens", + "type": "number", + "step": 1, + "optional": true, + "additionalParams": true, + "id": "chatOpenAI_0-input-maxTokens-number" + }, + { + "label": "Top Probability", + "name": "topP", + "type": "number", + "step": 0.1, + "optional": true, + "additionalParams": true, + "id": "chatOpenAI_0-input-topP-number" + }, + { + "label": "Frequency Penalty", + "name": "frequencyPenalty", + "type": "number", + "step": 0.1, + "optional": true, + "additionalParams": true, + "id": "chatOpenAI_0-input-frequencyPenalty-number" + }, + { + "label": "Presence Penalty", + "name": "presencePenalty", + "type": "number", + "step": 0.1, + "optional": true, + "additionalParams": true, + "id": "chatOpenAI_0-input-presencePenalty-number" + }, + { + "label": "Timeout", + "name": "timeout", + "type": "number", + "step": 1, + "optional": true, + "additionalParams": true, + "id": "chatOpenAI_0-input-timeout-number" + }, + { + "label": "BasePath", + "name": "basepath", + "type": "string", + "optional": true, + "additionalParams": true, + "id": "chatOpenAI_0-input-basepath-string" + }, + { + "label": "BaseOptions", + "name": "baseOptions", + "type": "json", + "optional": true, + "additionalParams": true, + "id": "chatOpenAI_0-input-baseOptions-json" + }, + { + "label": "Allow Image Uploads", + "name": "allowImageUploads", + "type": "boolean", + "description": "Automatically uses gpt-4-vision-preview when image is being uploaded from chat. 
Only works with LLMChain, Conversation Chain, ReAct Agent, and Conversational Agent", + "default": false, + "optional": true, + "id": "chatOpenAI_0-input-allowImageUploads-boolean" + }, + { + "label": "Image Resolution", + "description": "This parameter controls the resolution in which the model views the image.", + "name": "imageResolution", + "type": "options", + "options": [ + { + "label": "Low", + "name": "low" + }, + { + "label": "High", + "name": "high" + }, + { + "label": "Auto", + "name": "auto" + } + ], + "default": "low", + "optional": false, + "additionalParams": true, + "id": "chatOpenAI_0-input-imageResolution-options" + } + ], + "inputAnchors": [ + { + "label": "Cache", + "name": "cache", + "type": "BaseCache", + "optional": true, + "id": "chatOpenAI_0-input-cache-BaseCache" + } + ], + "inputs": { + "cache": "", + "modelName": "gpt-4o", + "temperature": "0", + "maxTokens": "", + "topP": "", + "frequencyPenalty": "", + "presencePenalty": "", + "timeout": "", + "basepath": "", + "baseOptions": "", + "allowImageUploads": "", + "imageResolution": "low" + }, + "outputAnchors": [ + { + "id": "chatOpenAI_0-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel|Runnable", + "name": "chatOpenAI", + "label": "ChatOpenAI", + "description": "Wrapper around OpenAI large language models that use the Chat endpoint", + "type": "ChatOpenAI | BaseChatModel | BaseLanguageModel | Runnable" + } + ], + "outputs": {}, + "selected": false + }, + "width": 300, + "height": 669, + "selected": false, + "positionAbsolute": { + "x": -348.48591585569204, + "y": -548.745050943517 + }, + "dragging": false + }, + { + "id": "seqState_0", + "position": { + "x": -347.45783543370027, + "y": 191.39595057599934 + }, + "type": "customNode", + "data": { + "id": "seqState_0", + "label": "State", + "version": 1, + "name": "seqState", + "type": "State", + "baseClasses": ["State"], + "category": "Sequential Agents", + "description": "A centralized state object, updated by nodes in the 
graph, passing from one node to another", + "inputParams": [ + { + "label": "State", + "name": "stateMemory", + "type": "tabs", + "tabIdentifier": "selectedStateTab", + "additionalParams": true, + "default": "stateMemoryUI", + "tabs": [ + { + "label": "State (Table)", + "name": "stateMemoryUI", + "type": "datagrid", + "description": "Structure for state. By default, state contains \"messages\" that got updated with each message sent and received.", + "hint": { + "label": "How to use", + "value": "\nSpecify the Key, Operation Type, and Default Value for the state object. The Operation Type can be either \"Replace\" or \"Append\".\n\n**Replace**\n- Replace the existing value with the new value.\n- If the new value is null, the existing value will be retained.\n\n**Append**\n- Append the new value to the existing value.\n- Default value can be empty or an array. Ex: [\"a\", \"b\"]\n- Final value is an array.\n" + }, + "datagrid": [ + { + "field": "key", + "headerName": "Key", + "editable": true + }, + { + "field": "type", + "headerName": "Operation", + "type": "singleSelect", + "valueOptions": ["Replace", "Append"], + "editable": true + }, + { + "field": "defaultValue", + "headerName": "Default Value", + "flex": 1, + "editable": true + } + ], + "optional": true, + "additionalParams": true + }, + { + "label": "State (Code)", + "name": "stateMemoryCode", + "type": "code", + "description": "JSON object representing the state", + "hideCodeExecute": true, + "codeExample": "{\n aggregate: {\n value: (x, y) => x.concat(y), // here we append the new message to the existing messages\n default: () => []\n }\n}", + "optional": true, + "additionalParams": true + } + ], + "id": "seqState_0-input-stateMemory-tabs" + } + ], + "inputAnchors": [], + "inputs": { + "stateMemory": "stateMemoryUI", + "stateMemoryUI": "[{\"key\":\"next\",\"type\":\"Replace\",\"defaultValue\":\"\",\"actions\":\"\",\"id\":1}]" + }, + "outputAnchors": [ + { + "id": "seqState_0-output-seqState-State", + 
"name": "seqState", + "label": "State", + "description": "A centralized state object, updated by nodes in the graph, passing from one node to another", + "type": "State" + } + ], + "outputs": {}, + "selected": false + }, + "width": 300, + "height": 251, + "selected": false, + "positionAbsolute": { + "x": -347.45783543370027, + "y": 191.39595057599934 + }, + "dragging": false + }, + { + "id": "seqLoop_0", + "position": { + "x": 1548.204371709928, + "y": -256.0852771782037 + }, + "type": "customNode", + "data": { + "id": "seqLoop_0", + "label": "Loop", + "version": 1, + "name": "seqLoop", + "type": "Loop", + "baseClasses": ["Loop"], + "category": "Sequential Agents", + "description": "Loop back to the specific sequential node", + "inputParams": [ + { + "label": "Loop To", + "name": "loopToName", + "description": "Name of the agent to loop back to", + "type": "string", + "placeholder": "agent1", + "id": "seqLoop_0-input-loopToName-string" + } + ], + "inputAnchors": [ + { + "label": "Start | Agent | LLM | Tool Node", + "name": "sequentialNode", + "type": "Start | Agent | LLMNode | ToolNode", + "list": true, + "id": "seqLoop_0-input-sequentialNode-Start | Agent | LLMNode | ToolNode" + } + ], + "inputs": { + "sequentialNode": ["{{seqAgent_1.data.instance}}"], + "loopToName": "supervisor" + }, + "outputAnchors": [], + "outputs": {}, + "selected": false + }, + "width": 300, + "height": 241, + "selected": false, + "positionAbsolute": { + "x": 1548.204371709928, + "y": -256.0852771782037 + }, + "dragging": false + }, + { + "id": "seqLoop_1", + "position": { + "x": 1963.5723995283763, + "y": 573.1052015594319 + }, + "type": "customNode", + "data": { + "id": "seqLoop_1", + "label": "Loop", + "version": 1, + "name": "seqLoop", + "type": "Loop", + "baseClasses": ["Loop"], + "category": "Sequential Agents", + "description": "Loop back to the specific sequential node", + "inputParams": [ + { + "label": "Loop To", + "name": "loopToName", + "description": "Name of the agent to loop 
back to", + "type": "string", + "placeholder": "agent1", + "id": "seqLoop_1-input-loopToName-string" + } + ], + "inputAnchors": [ + { + "label": "Start | Agent | LLM | Tool Node", + "name": "sequentialNode", + "type": "Start | Agent | LLMNode | ToolNode", + "list": true, + "id": "seqLoop_1-input-sequentialNode-Start | Agent | LLMNode | ToolNode" + } + ], + "inputs": { + "sequentialNode": ["{{seqAgent_0.data.instance}}"], + "loopToName": "supervisor" + }, + "outputAnchors": [], + "outputs": {}, + "selected": false + }, + "width": 300, + "height": 241, + "selected": false, + "positionAbsolute": { + "x": 1963.5723995283763, + "y": 573.1052015594319 + }, + "dragging": false + }, + { + "id": "seqLLMNode_1", + "position": { + "x": 1190.379211292534, + "y": 487.407396588448 + }, + "type": "customNode", + "data": { + "id": "seqLLMNode_1", + "label": "LLM Node", + "version": 1, + "name": "seqLLMNode", + "type": "LLMNode", + "baseClasses": ["LLMNode"], + "category": "Sequential Agents", + "description": "Run Chat Model and return the output", + "inputParams": [ + { + "label": "Name", + "name": "llmNodeName", + "type": "string", + "placeholder": "LLM", + "id": "seqLLMNode_1-input-llmNodeName-string" + }, + { + "label": "System Prompt", + "name": "systemMessagePrompt", + "type": "string", + "rows": 4, + "optional": true, + "additionalParams": true, + "id": "seqLLMNode_1-input-systemMessagePrompt-string" + }, + { + "label": "Human Prompt", + "name": "humanMessagePrompt", + "type": "string", + "description": "This prompt will be added at the end of the messages as human message", + "rows": 4, + "optional": true, + "additionalParams": true, + "id": "seqLLMNode_1-input-humanMessagePrompt-string" + }, + { + "label": "Format Prompt Values", + "name": "promptValues", + "description": "Assign values to the prompt variables. You can also use $flow.state. 
to get the state value", + "type": "json", + "optional": true, + "acceptVariable": true, + "list": true, + "additionalParams": true, + "id": "seqLLMNode_1-input-promptValues-json" + }, + { + "label": "JSON Structured Output", + "name": "llmStructuredOutput", + "type": "datagrid", + "description": "Instruct the LLM to give output in a JSON structured schema", + "datagrid": [ + { + "field": "key", + "headerName": "Key", + "editable": true + }, + { + "field": "type", + "headerName": "Type", + "type": "singleSelect", + "valueOptions": ["String", "String Array", "Number", "Boolean", "Enum"], + "editable": true + }, + { + "field": "enumValues", + "headerName": "Enum Values", + "editable": true + }, + { + "field": "description", + "headerName": "Description", + "flex": 1, + "editable": true + } + ], + "optional": true, + "additionalParams": true, + "id": "seqLLMNode_1-input-llmStructuredOutput-datagrid" + }, + { + "label": "Update State", + "name": "updateStateMemory", + "type": "tabs", + "tabIdentifier": "selectedUpdateStateMemoryTab", + "default": "updateStateMemoryUI", + "additionalParams": true, + "tabs": [ + { + "label": "Update State (Table)", + "name": "updateStateMemoryUI", + "type": "datagrid", + "hint": { + "label": "How to use", + "value": "\n1. Key and value pair to be updated. For example: if you have the following State:\n | Key | Operation | Default Value |\n |-----------|---------------|-------------------|\n | user | Replace | |\n\n You can update the \"user\" value with the following:\n | Key | Value |\n |-----------|-----------|\n | user | john doe |\n\n2. If you want to use the agent's output as the value to update state, it is available as `$flow.output` with the following structure:\n ```json\n {\n \"content\": 'Hello! 
How can I assist you today?',\n \"name\": \"\",\n \"additional_kwargs\": {},\n \"response_metadata\": {},\n \"tool_calls\": [],\n \"invalid_tool_calls\": [],\n \"usage_metadata\": {}\n }\n ```\n\n For example, if the output `content` is the value you want to update the state with, you can do the following:\n | Key | Value |\n |-----------|---------------------------|\n | user | `$flow.output.content` |\n\n3. You can get default flow config, including the current \"state\":\n - `$flow.sessionId`\n - `$flow.chatId`\n - `$flow.chatflowId`\n - `$flow.input`\n - `$flow.state`\n\n4. You can get custom variables: `$vars.`\n\n" + }, + "description": "This is only applicable when you have a custom State at the START node. After agent execution, you might want to update the State values", + "datagrid": [ + { + "field": "key", + "headerName": "Key", + "type": "asyncSingleSelect", + "loadMethod": "loadStateKeys", + "flex": 0.5, + "editable": true + }, + { + "field": "value", + "headerName": "Value", + "type": "freeSolo", + "valueOptions": [ + { + "label": "LLM Node Output (string)", + "value": "$flow.output.content" + }, + { + "label": "LLM JSON Output Key (string)", + "value": "$flow.output." + }, + { + "label": "Global variable (string)", + "value": "$vars." + }, + { + "label": "Input Question (string)", + "value": "$flow.input" + }, + { + "label": "Session Id (string)", + "value": "$flow.sessionId" + }, + { + "label": "Chat Id (string)", + "value": "$flow.chatId" + }, + { + "label": "Chatflow Id (string)", + "value": "$flow.chatflowId" + } + ], + "editable": true, + "flex": 1 + } + ], + "optional": true, + "additionalParams": true + }, + { + "label": "Update State (Code)", + "name": "updateStateMemoryCode", + "type": "code", + "hint": { + "label": "How to use", + "value": "\n1. Return the key value JSON object. 
For example: if you have the following State:\n ```json\n {\n \"user\": null\n }\n ```\n\n You can update the \"user\" value by returning the following:\n ```js\n return {\n \"user\": \"john doe\"\n }\n ```\n\n2. If you want to use the LLM Node's output as the value to update state, it is available as `$flow.output` with the following structure:\n ```json\n {\n \"content\": 'Hello! How can I assist you today?',\n \"name\": \"\",\n \"additional_kwargs\": {},\n \"response_metadata\": {},\n \"tool_calls\": [],\n \"invalid_tool_calls\": [],\n \"usage_metadata\": {}\n }\n ```\n\n For example, if the output `content` is the value you want to update the state with, you can return the following:\n ```js\n return {\n \"user\": $flow.output.content\n }\n ```\n\n3. You can also get default flow config, including the current \"state\":\n - `$flow.sessionId`\n - `$flow.chatId`\n - `$flow.chatflowId`\n - `$flow.input`\n - `$flow.state`\n\n4. You can get custom variables: `$vars.`\n\n" + }, + "description": "This is only applicable when you have a custom State at the START node. After agent execution, you might want to update the State values. 
Must return an object representing the state", + "hideCodeExecute": true, + "codeExample": "const result = $flow.output;\n\n/* Suppose we have a custom State schema like this:\n* {\n aggregate: {\n value: (x, y) => x.concat(y),\n default: () => []\n }\n }\n*/\n\nreturn {\n aggregate: [result.content]\n};", + "optional": true, + "additionalParams": true + } + ], + "id": "seqLLMNode_1-input-updateStateMemory-tabs" + } + ], + "inputAnchors": [ + { + "label": "Start | Agent | LLM | Tool Node", + "name": "sequentialNode", + "type": "Start | Agent | LLMNode | ToolNode", + "list": true, + "id": "seqLLMNode_1-input-sequentialNode-Start | Agent | LLMNode | ToolNode" + }, + { + "label": "Chat Model", + "name": "model", + "type": "BaseChatModel", + "optional": true, + "description": "Overwrite model to be used for this node", + "id": "seqLLMNode_1-input-model-BaseChatModel" + } + ], + "inputs": { + "llmNodeName": "summarize", + "systemMessagePrompt": "", + "humanMessagePrompt": "Given the above conversations, reasonings and instructions, generate a final summarized answer", + "sequentialNode": ["{{seqCondition_0.data.instance}}"], + "model": "", + "promptValues": "", + "llmStructuredOutput": "", + "updateStateMemory": "updateStateMemoryUI" + }, + "outputAnchors": [ + { + "id": "seqLLMNode_1-output-seqLLMNode-LLMNode", + "name": "seqLLMNode", + "label": "LLMNode", + "description": "Run Chat Model and return the output", + "type": "LLMNode" + } + ], + "outputs": {}, + "selected": false + }, + "width": 300, + "height": 431, + "selected": false, + "positionAbsolute": { + "x": 1190.379211292534, + "y": 487.407396588448 + }, + "dragging": false + }, + { + "id": "seqEnd_0", + "position": { + "x": 1531.4101572553902, + "y": 820.7626362717671 + }, + "type": "customNode", + "data": { + "id": "seqEnd_0", + "label": "End", + "version": 1, + "name": "seqEnd", + "type": "End", + "baseClasses": ["End"], + "category": "Sequential Agents", + "description": "End conversation", + 
"inputParams": [], + "inputAnchors": [ + { + "label": "Start | Agent | LLM | Tool Node", + "name": "sequentialNode", + "type": "Start | Agent | LLMNode | ToolNode", + "id": "seqEnd_0-input-sequentialNode-Start | Agent | LLMNode | ToolNode" + } + ], + "inputs": { + "sequentialNode": "{{seqLLMNode_1.data.instance}}" + }, + "outputAnchors": [], + "outputs": {}, + "selected": false + }, + "width": 300, + "height": 143, + "selected": false, + "positionAbsolute": { + "x": 1531.4101572553902, + "y": 820.7626362717671 + }, + "dragging": false + } + ], + "edges": [ + { + "source": "agentMemory_0", + "sourceHandle": "agentMemory_0-output-agentMemory-AgentMemory|BaseCheckpointSaver", + "target": "seqStart_0", + "targetHandle": "seqStart_0-input-agentMemory-BaseCheckpointSaver", + "type": "buttonedge", + "id": "agentMemory_0-agentMemory_0-output-agentMemory-AgentMemory|BaseCheckpointSaver-seqStart_0-seqStart_0-input-agentMemory-BaseCheckpointSaver" + }, + { + "source": "chatOpenAI_0", + "sourceHandle": "chatOpenAI_0-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel|Runnable", + "target": "seqStart_0", + "targetHandle": "seqStart_0-input-model-BaseChatModel", + "type": "buttonedge", + "id": "chatOpenAI_0-chatOpenAI_0-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel|Runnable-seqStart_0-seqStart_0-input-model-BaseChatModel" + }, + { + "source": "seqStart_0", + "sourceHandle": "seqStart_0-output-seqStart-Start", + "target": "seqLLMNode_0", + "targetHandle": "seqLLMNode_0-input-sequentialNode-Start | Agent | LLMNode | ToolNode", + "type": "buttonedge", + "id": "seqStart_0-seqStart_0-output-seqStart-Start-seqLLMNode_0-seqLLMNode_0-input-sequentialNode-Start | Agent | LLMNode | ToolNode" + }, + { + "source": "seqLLMNode_0", + "sourceHandle": "seqLLMNode_0-output-seqLLMNode-LLMNode", + "target": "seqCondition_0", + "targetHandle": "seqCondition_0-input-sequentialNode-Start | Agent | LLMNode | ToolNode", + "type": "buttonedge", + "id": 
"seqLLMNode_0-seqLLMNode_0-output-seqLLMNode-LLMNode-seqCondition_0-seqCondition_0-input-sequentialNode-Start | Agent | LLMNode | ToolNode" + }, + { + "source": "seqState_0", + "sourceHandle": "seqState_0-output-seqState-State", + "target": "seqStart_0", + "targetHandle": "seqStart_0-input-state-State", + "type": "buttonedge", + "id": "seqState_0-seqState_0-output-seqState-State-seqStart_0-seqStart_0-input-state-State" + }, + { + "source": "seqCondition_0", + "sourceHandle": "seqCondition_0-output-agent1-Agent|LLMNode|ToolNode", + "target": "seqAgent_1", + "targetHandle": "seqAgent_1-input-sequentialNode-Start | Agent | LLMNode | ToolNode", + "type": "buttonedge", + "id": "seqCondition_0-seqCondition_0-output-agent1-Agent|LLMNode|ToolNode-seqAgent_1-seqAgent_1-input-sequentialNode-Start | Agent | LLMNode | ToolNode" + }, + { + "source": "seqCondition_0", + "sourceHandle": "seqCondition_0-output-agent2-Agent|LLMNode|ToolNode", + "target": "seqAgent_0", + "targetHandle": "seqAgent_0-input-sequentialNode-Start | Agent | LLMNode | ToolNode", + "type": "buttonedge", + "id": "seqCondition_0-seqCondition_0-output-agent2-Agent|LLMNode|ToolNode-seqAgent_0-seqAgent_0-input-sequentialNode-Start | Agent | LLMNode | ToolNode" + }, + { + "source": "seqAgent_0", + "sourceHandle": "seqAgent_0-output-seqAgent-Agent", + "target": "seqLoop_1", + "targetHandle": "seqLoop_1-input-sequentialNode-Start | Agent | LLMNode | ToolNode", + "type": "buttonedge", + "id": "seqAgent_0-seqAgent_0-output-seqAgent-Agent-seqLoop_1-seqLoop_1-input-sequentialNode-Start | Agent | LLMNode | ToolNode" + }, + { + "source": "seqAgent_1", + "sourceHandle": "seqAgent_1-output-seqAgent-Agent", + "target": "seqLoop_0", + "targetHandle": "seqLoop_0-input-sequentialNode-Start | Agent | LLMNode | ToolNode", + "type": "buttonedge", + "id": "seqAgent_1-seqAgent_1-output-seqAgent-Agent-seqLoop_0-seqLoop_0-input-sequentialNode-Start | Agent | LLMNode | ToolNode" + }, + { + "source": "seqCondition_0", + "sourceHandle": 
"seqCondition_0-output-end-Agent|LLMNode|ToolNode", + "target": "seqLLMNode_1", + "targetHandle": "seqLLMNode_1-input-sequentialNode-Start | Agent | LLMNode | ToolNode", + "type": "buttonedge", + "id": "seqCondition_0-seqCondition_0-output-end-Agent|LLMNode|ToolNode-seqLLMNode_1-seqLLMNode_1-input-sequentialNode-Start | Agent | LLMNode | ToolNode" + }, + { + "source": "seqLLMNode_1", + "sourceHandle": "seqLLMNode_1-output-seqLLMNode-LLMNode", + "target": "seqEnd_0", + "targetHandle": "seqEnd_0-input-sequentialNode-Start | Agent | LLMNode | ToolNode", + "type": "buttonedge", + "id": "seqLLMNode_1-seqLLMNode_1-output-seqLLMNode-LLMNode-seqEnd_0-seqEnd_0-input-sequentialNode-Start | Agent | LLMNode | ToolNode" + } + ] +}