From 55b06f0581e17496edf13bb5b9af2937eaa6ab21 Mon Sep 17 00:00:00 2001 From: YISH Date: Fri, 1 Mar 2024 13:38:25 +0800 Subject: [PATCH 01/25] Fix the failure to resolve variables for OverrideConfig(Json Object) --- packages/server/src/utils/index.ts | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/packages/server/src/utils/index.ts b/packages/server/src/utils/index.ts index df215270..22518c28 100644 --- a/packages/server/src/utils/index.ts +++ b/packages/server/src/utils/index.ts @@ -493,13 +493,14 @@ export const clearSessionMemory = async ( * @returns {string} */ export const getVariableValue = ( - paramValue: string, + paramValue: string | object, reactFlowNodes: IReactFlowNode[], question: string, chatHistory: IMessage[], isAcceptVariable = false ) => { - let returnVal = paramValue + const isObject = typeof paramValue === 'object' + let returnVal = isObject ? JSON.stringify(paramValue) : paramValue const variableStack = [] const variableDict = {} as IVariableDict let startIdx = 0 @@ -562,7 +563,7 @@ export const getVariableValue = ( }) return returnVal } - return returnVal + return isObject ? 
JSON.parse(returnVal) : returnVal } /** From d706ca389fc744740147ff59e1567b8d45b9ee93 Mon Sep 17 00:00:00 2001 From: Ilango Date: Tue, 5 Mar 2024 17:23:49 +0530 Subject: [PATCH 02/25] Detect host from list of allowed urls even if they have http/https --- packages/server/src/index.ts | 6 +++++- packages/ui/src/ui-component/dialog/AllowedDomainsDialog.js | 5 +---- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/packages/server/src/index.ts b/packages/server/src/index.ts index d18111e3..260bbdb4 100644 --- a/packages/server/src/index.ts +++ b/packages/server/src/index.ts @@ -1332,7 +1332,11 @@ export class App { if (parsedConfig.allowedOrigins && parsedConfig.allowedOrigins.length > 0 && isValidAllowedOrigins) { const originHeader = req.headers.origin as string const origin = new URL(originHeader).host - isDomainAllowed = parsedConfig.allowedOrigins.includes(origin) + isDomainAllowed = + parsedConfig.allowedOrigins.filter((domain: string) => { + const allowedOrigin = new URL(domain).host + return origin === allowedOrigin + }).length > 0 } } diff --git a/packages/ui/src/ui-component/dialog/AllowedDomainsDialog.js b/packages/ui/src/ui-component/dialog/AllowedDomainsDialog.js index 9a8968e2..426ea131 100644 --- a/packages/ui/src/ui-component/dialog/AllowedDomainsDialog.js +++ b/packages/ui/src/ui-component/dialog/AllowedDomainsDialog.js @@ -145,10 +145,7 @@ const AllowedDomainsDialog = ({ show, dialogProps, onCancel, onConfirm }) => { flexDirection: 'column' }} > - - Your chatbot will only work when used from the following domains. When adding domains, exclude the{' '} -
http://
or
https://
part. -
+ Your chatbot will only work when used from the following domains. :not(style)': { m: 1 }, pt: 2 }}> From 2df05b3252771ebfae6392b099eb3c3a224e5591 Mon Sep 17 00:00:00 2001 From: Henry Date: Wed, 6 Mar 2024 01:05:43 +0800 Subject: [PATCH 03/25] add claude 3 and xml agent --- .../nodes/agents/XMLAgent/XMLAgent.ts | 203 ++++++++++++++++++ .../nodes/agents/XMLAgent/xmlagent.svg | 1 + .../chatmodels/AWSBedrock/AWSChatBedrock.ts | 2 + .../chatmodels/ChatAnthropic/ChatAnthropic.ts | 10 + packages/components/src/agents.ts | 42 ++-- .../marketplaces/chatflows/Claude LLM.json | 10 + 6 files changed, 254 insertions(+), 14 deletions(-) create mode 100644 packages/components/nodes/agents/XMLAgent/XMLAgent.ts create mode 100644 packages/components/nodes/agents/XMLAgent/xmlagent.svg diff --git a/packages/components/nodes/agents/XMLAgent/XMLAgent.ts b/packages/components/nodes/agents/XMLAgent/XMLAgent.ts new file mode 100644 index 00000000..49109947 --- /dev/null +++ b/packages/components/nodes/agents/XMLAgent/XMLAgent.ts @@ -0,0 +1,203 @@ +import { flatten } from 'lodash' +import { ChainValues } from '@langchain/core/utils/types' +import { AgentStep } from '@langchain/core/agents' +import { RunnableSequence } from '@langchain/core/runnables' +import { ChatOpenAI } from '@langchain/openai' +import { Tool } from '@langchain/core/tools' +import { ChatPromptTemplate, HumanMessagePromptTemplate, MessagesPlaceholder } from '@langchain/core/prompts' +import { XMLAgentOutputParser } from 'langchain/agents/xml/output_parser' +import { formatLogToMessage } from 'langchain/agents/format_scratchpad/log_to_message' +import { getBaseClasses } from '../../../src/utils' +import { FlowiseMemory, ICommonObject, IMessage, INode, INodeData, INodeParams } from '../../../src/Interface' +import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler' +import { AgentExecutor } from '../../../src/agents' +//import { AgentExecutor } from "langchain/agents"; + +const 
defaultSystemMessage = `You are a helpful assistant. Help the user answer any questions. + +You have access to the following tools: + +{tools} + +In order to use a tool, you can use and tags. You will then get back a response in the form +For example, if you have a tool called 'search' that could run a google search, in order to search for the weather in SF you would respond: + +searchweather in SF +64 degrees + +When you are done, respond with a final answer between . For example: + +The weather in SF is 64 degrees + +Begin! + +Previous Conversation: +{chat_history} + +Question: {input} +{agent_scratchpad}` + +class XMLAgent_Agents implements INode { + label: string + name: string + version: number + description: string + type: string + icon: string + category: string + baseClasses: string[] + inputs: INodeParams[] + sessionId?: string + + constructor(fields?: { sessionId?: string }) { + this.label = 'XML Agent' + this.name = 'xmlAgent' + this.version = 1.0 + this.type = 'XMLAgent' + this.category = 'Agents' + this.icon = 'xmlagent.svg' + this.description = `Agent that is designed for LLMs that are good for reasoning/writing XML (e.g: Anthropic Claude)` + this.baseClasses = [this.type, ...getBaseClasses(AgentExecutor)] + this.inputs = [ + { + label: 'Tools', + name: 'tools', + type: 'Tool', + list: true + }, + { + label: 'Memory', + name: 'memory', + type: 'BaseChatMemory' + }, + { + label: 'Chat Model', + name: 'model', + type: 'BaseChatModel' + }, + { + label: 'System Message', + name: 'systemMessage', + type: 'string', + warning: 'Prompt must include input variables: {tools}, {chat_history}, {input} and {agent_scratchpad}', + rows: 4, + default: defaultSystemMessage, + additionalParams: true + } + ] + this.sessionId = fields?.sessionId + } + + async init(): Promise { + return null + } + + async run(nodeData: INodeData, input: string, options: ICommonObject): Promise { + const memory = nodeData.inputs?.memory as FlowiseMemory + const executor = await 
prepareAgent(nodeData, { sessionId: this.sessionId, chatId: options.chatId, input }, options.chatHistory) + + const loggerHandler = new ConsoleCallbackHandler(options.logger) + const callbacks = await additionalCallbacks(nodeData, options) + + let res: ChainValues = {} + let sourceDocuments: ICommonObject[] = [] + + if (options.socketIO && options.socketIOClientId) { + const handler = new CustomChainHandler(options.socketIO, options.socketIOClientId) + res = await executor.invoke({ input }, { callbacks: [loggerHandler, handler, ...callbacks] }) + if (res.sourceDocuments) { + options.socketIO.to(options.socketIOClientId).emit('sourceDocuments', flatten(res.sourceDocuments)) + sourceDocuments = res.sourceDocuments + } + } else { + res = await executor.invoke({ input }, { callbacks: [loggerHandler, ...callbacks] }) + if (res.sourceDocuments) { + sourceDocuments = res.sourceDocuments + } + } + + await memory.addChatMessages( + [ + { + text: input, + type: 'userMessage' + }, + { + text: res?.output, + type: 'apiMessage' + } + ], + this.sessionId + ) + + return sourceDocuments.length ? { text: res?.output, sourceDocuments: flatten(sourceDocuments) } : res?.output + } +} + +const prepareAgent = async ( + nodeData: INodeData, + flowObj: { sessionId?: string; chatId?: string; input?: string }, + chatHistory: IMessage[] = [] +) => { + const model = nodeData.inputs?.model as ChatOpenAI + const memory = nodeData.inputs?.memory as FlowiseMemory + const systemMessage = nodeData.inputs?.systemMessage as string + let tools = nodeData.inputs?.tools + tools = flatten(tools) + const inputKey = memory.inputKey ? memory.inputKey : 'input' + const memoryKey = memory.memoryKey ? memory.memoryKey : 'chat_history' + + let promptMessage = systemMessage ? 
systemMessage : defaultSystemMessage + if (memory.memoryKey) promptMessage = promptMessage.replaceAll('{chat_history}', `{${memory.memoryKey}}`) + if (memory.inputKey) promptMessage = promptMessage.replaceAll('{input}', `{${memory.inputKey}}`) + + const prompt = ChatPromptTemplate.fromMessages([ + HumanMessagePromptTemplate.fromTemplate(promptMessage), + new MessagesPlaceholder('agent_scratchpad') + ]) + + const missingVariables = ['tools', 'agent_scratchpad'].filter((v) => !prompt.inputVariables.includes(v)) + + if (missingVariables.length > 0) { + throw new Error(`Provided prompt is missing required input variables: ${JSON.stringify(missingVariables)}`) + } + + const llmWithStop = model.bind({ stop: ['', ''] }) + + const messages = (await memory.getChatMessages(flowObj.sessionId, false, chatHistory)) as IMessage[] + let chatHistoryMsgTxt = '' + for (const message of messages) { + if (message.type === 'apiMessage') { + chatHistoryMsgTxt += `\\nAI:${message.message}` + } else if (message.type === 'userMessage') { + chatHistoryMsgTxt += `\\nHuman:${message.message}` + } + } + + const runnableAgent = RunnableSequence.from([ + { + [inputKey]: (i: { input: string; tools: Tool[]; steps: AgentStep[] }) => i.input, + agent_scratchpad: (i: { input: string; tools: Tool[]; steps: AgentStep[] }) => formatLogToMessage(i.steps), + tools: (_: { input: string; tools: Tool[]; steps: AgentStep[] }) => + tools.map((tool: Tool) => `${tool.name}: ${tool.description}`), + [memoryKey]: (_: { input: string; tools: Tool[]; steps: AgentStep[] }) => chatHistoryMsgTxt + }, + prompt, + llmWithStop, + new XMLAgentOutputParser() + ]) + + const executor = AgentExecutor.fromAgentAndTools({ + agent: runnableAgent, + tools, + sessionId: flowObj?.sessionId, + chatId: flowObj?.chatId, + input: flowObj?.input, + isXML: true, + verbose: process.env.DEBUG === 'true' ? 
true : false + }) + + return executor +} + +module.exports = { nodeClass: XMLAgent_Agents } diff --git a/packages/components/nodes/agents/XMLAgent/xmlagent.svg b/packages/components/nodes/agents/XMLAgent/xmlagent.svg new file mode 100644 index 00000000..d1b5f708 --- /dev/null +++ b/packages/components/nodes/agents/XMLAgent/xmlagent.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/packages/components/nodes/chatmodels/AWSBedrock/AWSChatBedrock.ts b/packages/components/nodes/chatmodels/AWSBedrock/AWSChatBedrock.ts index 85586d14..251bd24a 100644 --- a/packages/components/nodes/chatmodels/AWSBedrock/AWSChatBedrock.ts +++ b/packages/components/nodes/chatmodels/AWSBedrock/AWSChatBedrock.ts @@ -95,6 +95,8 @@ class AWSChatBedrock_ChatModels implements INode { name: 'model', type: 'options', options: [ + { label: 'anthropic.claude-3-sonnet', name: 'anthropic.claude-3-sonnet-20240229-v1:0' }, + { label: 'anthropic.claude-instant-v1', name: 'anthropic.claude-instant-v1' }, { label: 'anthropic.claude-instant-v1', name: 'anthropic.claude-instant-v1' }, { label: 'anthropic.claude-v1', name: 'anthropic.claude-v1' }, { label: 'anthropic.claude-v2', name: 'anthropic.claude-v2' }, diff --git a/packages/components/nodes/chatmodels/ChatAnthropic/ChatAnthropic.ts b/packages/components/nodes/chatmodels/ChatAnthropic/ChatAnthropic.ts index 8b4f7c0e..844e7d25 100644 --- a/packages/components/nodes/chatmodels/ChatAnthropic/ChatAnthropic.ts +++ b/packages/components/nodes/chatmodels/ChatAnthropic/ChatAnthropic.ts @@ -43,6 +43,16 @@ class ChatAnthropic_ChatModels implements INode { name: 'modelName', type: 'options', options: [ + { + label: 'claude-3-opus', + name: 'claude-3-opus-20240229', + description: 'Most powerful model for highly complex tasks' + }, + { + label: 'claude-3-sonnet', + name: 'claude-3-sonnet-20240229', + description: 'Ideal balance of intelligence and speed for enterprise workloads' + }, { label: 'claude-2', name: 'claude-2', diff --git 
a/packages/components/src/agents.ts b/packages/components/src/agents.ts index 5e4bd9c8..41bc076d 100644 --- a/packages/components/src/agents.ts +++ b/packages/components/src/agents.ts @@ -257,6 +257,8 @@ export class AgentExecutor extends BaseChain { input?: string + isXML?: boolean + /** * How to handle errors raised by the agent's output parser. Defaults to `False`, which raises the error. @@ -277,7 +279,7 @@ export class AgentExecutor extends BaseChain { return this.agent.returnValues } - constructor(input: AgentExecutorInput & { sessionId?: string; chatId?: string; input?: string }) { + constructor(input: AgentExecutorInput & { sessionId?: string; chatId?: string; input?: string; isXML?: boolean }) { let agent: BaseSingleActionAgent | BaseMultiActionAgent if (Runnable.isRunnable(input.agent)) { agent = new RunnableAgent({ runnable: input.agent }) @@ -305,13 +307,17 @@ export class AgentExecutor extends BaseChain { this.sessionId = input.sessionId this.chatId = input.chatId this.input = input.input + this.isXML = input.isXML } - static fromAgentAndTools(fields: AgentExecutorInput & { sessionId?: string; chatId?: string; input?: string }): AgentExecutor { + static fromAgentAndTools( + fields: AgentExecutorInput & { sessionId?: string; chatId?: string; input?: string; isXML?: boolean } + ): AgentExecutor { const newInstance = new AgentExecutor(fields) if (fields.sessionId) newInstance.sessionId = fields.sessionId if (fields.chatId) newInstance.chatId = fields.chatId if (fields.input) newInstance.input = fields.input + if (fields.isXML) newInstance.isXML = fields.isXML return newInstance } @@ -405,12 +411,16 @@ export class AgentExecutor extends BaseChain { * - flowConfig?: { sessionId?: string, chatId?: string, input?: string } */ observation = tool - ? // @ts-ignore - await tool.call(action.toolInput, runManager?.getChild(), undefined, { - sessionId: this.sessionId, - chatId: this.chatId, - input: this.input - }) + ? 
await (tool as any).call( + this.isXML && typeof action.toolInput === 'string' ? { input: action.toolInput } : action.toolInput, + runManager?.getChild(), + undefined, + { + sessionId: this.sessionId, + chatId: this.chatId, + input: this.input + } + ) : `${action.tool} is not a valid tool, try another one.` } catch (e) { if (e instanceof ToolInputParsingException) { @@ -526,12 +536,16 @@ export class AgentExecutor extends BaseChain { * - tags?: string[] * - flowConfig?: { sessionId?: string, chatId?: string, input?: string } */ - // @ts-ignore - observation = await tool.call(agentAction.toolInput, runManager?.getChild(), undefined, { - sessionId: this.sessionId, - chatId: this.chatId, - input: this.input - }) + observation = await (tool as any).call( + this.isXML && typeof agentAction.toolInput === 'string' ? { input: agentAction.toolInput } : agentAction.toolInput, + runManager?.getChild(), + undefined, + { + sessionId: this.sessionId, + chatId: this.chatId, + input: this.input + } + ) if (observation?.includes(SOURCE_DOCUMENTS_PREFIX)) { const observationArray = observation.split(SOURCE_DOCUMENTS_PREFIX) observation = observationArray[0] diff --git a/packages/server/marketplaces/chatflows/Claude LLM.json b/packages/server/marketplaces/chatflows/Claude LLM.json index 48be286d..fdce533e 100644 --- a/packages/server/marketplaces/chatflows/Claude LLM.json +++ b/packages/server/marketplaces/chatflows/Claude LLM.json @@ -179,6 +179,16 @@ "name": "modelName", "type": "options", "options": [ + { + "label": "claude-3-opus", + "name": "claude-3-opus-20240229", + "description": "Most powerful model for highly complex tasks" + }, + { + "label": "claude-3-sonnet", + "name": "claude-3-sonnet-20240229", + "description": "Ideal balance of intelligence and speed for enterprise workloads" + }, { "label": "claude-2", "name": "claude-2", From b643afae3dd379781e1afd8f09d41eb8071b228e Mon Sep 17 00:00:00 2001 From: Henry Date: Wed, 6 Mar 2024 02:04:11 +0800 Subject: [PATCH 04/25] 
claude 3 llamaindex --- .../ChatAnthropic/ChatAnthropic_LlamaIndex.ts | 10 ++++++++++ .../server/marketplaces/chatflows/Query Engine.json | 10 ++++++++++ 2 files changed, 20 insertions(+) diff --git a/packages/components/nodes/chatmodels/ChatAnthropic/ChatAnthropic_LlamaIndex.ts b/packages/components/nodes/chatmodels/ChatAnthropic/ChatAnthropic_LlamaIndex.ts index 69a15114..d61b30e9 100644 --- a/packages/components/nodes/chatmodels/ChatAnthropic/ChatAnthropic_LlamaIndex.ts +++ b/packages/components/nodes/chatmodels/ChatAnthropic/ChatAnthropic_LlamaIndex.ts @@ -37,6 +37,16 @@ class ChatAnthropic_LlamaIndex_ChatModels implements INode { name: 'modelName', type: 'options', options: [ + { + label: 'claude-3-opus', + name: 'claude-3-opus-20240229', + description: 'Most powerful model for highly complex tasks' + }, + { + label: 'claude-3-sonnet', + name: 'claude-3-sonnet-20240229', + description: 'Ideal balance of intelligence and speed for enterprise workloads' + }, { label: 'claude-2', name: 'claude-2', diff --git a/packages/server/marketplaces/chatflows/Query Engine.json b/packages/server/marketplaces/chatflows/Query Engine.json index 63bbabfe..10809cb3 100644 --- a/packages/server/marketplaces/chatflows/Query Engine.json +++ b/packages/server/marketplaces/chatflows/Query Engine.json @@ -382,6 +382,16 @@ "name": "modelName", "type": "options", "options": [ + { + "label": "claude-3-opus", + "name": "claude-3-opus-20240229", + "description": "Most powerful model for highly complex tasks" + }, + { + "label": "claude-3-sonnet", + "name": "claude-3-sonnet-20240229", + "description": "Ideal balance of intelligence and speed for enterprise workloads" + }, { "label": "claude-2", "name": "claude-2", From da43fb7f44c205d92683623e2f21118043cf013a Mon Sep 17 00:00:00 2001 From: vinodkiran Date: Wed, 6 Mar 2024 12:30:39 +0530 Subject: [PATCH 05/25] Base changes to enable AzureChatOpenAI for image upload --- .../AzureChatOpenAI/AzureChatOpenAI.ts | 56 +++++++++++++++++-- 
packages/server/src/index.ts | 2 +- 2 files changed, 51 insertions(+), 7 deletions(-) diff --git a/packages/components/nodes/chatmodels/AzureChatOpenAI/AzureChatOpenAI.ts b/packages/components/nodes/chatmodels/AzureChatOpenAI/AzureChatOpenAI.ts index ea924fd0..da1b69ef 100644 --- a/packages/components/nodes/chatmodels/AzureChatOpenAI/AzureChatOpenAI.ts +++ b/packages/components/nodes/chatmodels/AzureChatOpenAI/AzureChatOpenAI.ts @@ -1,8 +1,9 @@ -import { AzureOpenAIInput, ChatOpenAI, OpenAIChatInput } from '@langchain/openai' +import { AzureOpenAIInput, ChatOpenAI as LangchainChatOpenAI, OpenAIChatInput } from '@langchain/openai' import { BaseCache } from '@langchain/core/caches' import { BaseLLMParams } from '@langchain/core/language_models/llms' -import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface' +import { ICommonObject, IMultiModalOption, INode, INodeData, INodeParams } from "../../../src/Interface"; import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils' +import { ChatOpenAI } from '../ChatOpenAI/FlowiseChatOpenAI' class AzureChatOpenAI_ChatModels implements INode { label: string @@ -19,12 +20,12 @@ class AzureChatOpenAI_ChatModels implements INode { constructor() { this.label = 'Azure ChatOpenAI' this.name = 'azureChatOpenAI' - this.version = 2.0 + this.version = 3.0 this.type = 'AzureChatOpenAI' this.icon = 'Azure.svg' this.category = 'Chat Models' this.description = 'Wrapper around Azure OpenAI large language models that use the Chat endpoint' - this.baseClasses = [this.type, ...getBaseClasses(ChatOpenAI)] + this.baseClasses = [this.type, ...getBaseClasses(LangchainChatOpenAI)] this.credential = { label: 'Connect Credential', name: 'credential', @@ -102,6 +103,38 @@ class AzureChatOpenAI_ChatModels implements INode { step: 1, optional: true, additionalParams: true + }, + { + label: 'Allow Image Uploads', + name: 'allowImageUploads', + type: 'boolean', + description: + 'Automatically uses 
gpt-4-vision-preview when image is being uploaded from chat. Only works with LLMChain, Conversation Chain, ReAct Agent, and Conversational Agent', + default: false, + optional: true + }, + { + label: 'Image Resolution', + description: 'This parameter controls the resolution in which the model views the image.', + name: 'imageResolution', + type: 'options', + options: [ + { + label: 'Low', + name: 'low' + }, + { + label: 'High', + name: 'high' + }, + { + label: 'Auto', + name: 'auto' + } + ], + default: 'low', + optional: false, + additionalParams: true } ] } @@ -122,7 +155,10 @@ class AzureChatOpenAI_ChatModels implements INode { const azureOpenAIApiDeploymentName = getCredentialParam('azureOpenAIApiDeploymentName', credentialData, nodeData) const azureOpenAIApiVersion = getCredentialParam('azureOpenAIApiVersion', credentialData, nodeData) - const obj: Partial & BaseLLMParams & Partial = { + const allowImageUploads = nodeData.inputs?.allowImageUploads as boolean + const imageResolution = nodeData.inputs?.imageResolution as string + + const obj: Partial & BaseLLMParams & Partial & { multiModalOption?: IMultiModalOption } = { temperature: parseFloat(temperature), modelName, azureOpenAIApiKey, @@ -138,7 +174,15 @@ class AzureChatOpenAI_ChatModels implements INode { if (timeout) obj.timeout = parseInt(timeout, 10) if (cache) obj.cache = cache - const model = new ChatOpenAI(obj) + const multiModalOption: IMultiModalOption = { + image: { + allowImageUploads: allowImageUploads ?? 
false, + imageResolution + } + } + obj.multiModalOption = multiModalOption + + const model = new ChatOpenAI(nodeData.id, obj) return model } } diff --git a/packages/server/src/index.ts b/packages/server/src/index.ts index ab405e35..68e1709d 100644 --- a/packages/server/src/index.ts +++ b/packages/server/src/index.ts @@ -1534,7 +1534,7 @@ export class App { if (!chatflow) return `Chatflow ${chatflowid} not found` const uploadAllowedNodes = ['llmChain', 'conversationChain', 'mrklAgentChat', 'conversationalAgent'] - const uploadProcessingNodes = ['chatOpenAI'] + const uploadProcessingNodes = ['chatOpenAI', 'azureChatOpenAI'] const flowObj = JSON.parse(chatflow.flowData) const imgUploadSizeAndTypes: IUploadFileSizeAndTypes[] = [] From 214e7c8dc78612cab5efe87a9885220c221a8653 Mon Sep 17 00:00:00 2001 From: Marc Klingen Date: Thu, 7 Mar 2024 02:25:30 +0100 Subject: [PATCH 06/25] upgrade langfuse and add sdkIntegration tag --- packages/components/package.json | 4 ++-- packages/components/src/handler.ts | 4 +++- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/packages/components/package.json b/packages/components/package.json index 69ca3b5f..6e2a977a 100644 --- a/packages/components/package.json +++ b/packages/components/package.json @@ -65,8 +65,8 @@ "ioredis": "^5.3.2", "jsonpointer": "^5.0.1", "langchain": "^0.1.20", - "langfuse": "3.1.0", - "langfuse-langchain": "^3.1.0", + "langfuse": "3.3.1", + "langfuse-langchain": "^3.3.1", "langsmith": "0.1.6", "linkifyjs": "^4.1.1", "llamaindex": "^0.0.48", diff --git a/packages/components/src/handler.ts b/packages/components/src/handler.ts index cc6499b1..3b18e0ac 100644 --- a/packages/components/src/handler.ts +++ b/packages/components/src/handler.ts @@ -261,7 +261,8 @@ export const additionalCallbacks = async (nodeData: INodeData, options: ICommonO let langFuseOptions: any = { secretKey: langFuseSecretKey, publicKey: langFusePublicKey, - baseUrl: langFuseEndpoint ?? 
'https://cloud.langfuse.com' + baseUrl: langFuseEndpoint ?? 'https://cloud.langfuse.com', + sdkIntegration: 'Flowise' } if (release) langFuseOptions.release = release if (options.chatId) langFuseOptions.sessionId = options.chatId @@ -340,6 +341,7 @@ export class AnalyticHandler { secretKey: langFuseSecretKey, publicKey: langFusePublicKey, baseUrl: langFuseEndpoint ?? 'https://cloud.langfuse.com', + sdkIntegration: 'Flowise', release }) this.handlers['langFuse'] = { client: langfuse } From 07503f9be8ad57adec8b885f95b274478322c37c Mon Sep 17 00:00:00 2001 From: Henry Date: Thu, 7 Mar 2024 19:08:10 +0800 Subject: [PATCH 07/25] add placeholder --- packages/ui/src/ui-component/dialog/AllowedDomainsDialog.js | 1 + 1 file changed, 1 insertion(+) diff --git a/packages/ui/src/ui-component/dialog/AllowedDomainsDialog.js b/packages/ui/src/ui-component/dialog/AllowedDomainsDialog.js index 426ea131..8fd57fb6 100644 --- a/packages/ui/src/ui-component/dialog/AllowedDomainsDialog.js +++ b/packages/ui/src/ui-component/dialog/AllowedDomainsDialog.js @@ -161,6 +161,7 @@ const AllowedDomainsDialog = ({ show, dialogProps, onCancel, onConfirm }) => { size='small' value={origin} name='origin' + placeholder='https://example.com' endAdornment={ {inputFields.length > 1 && ( From 3b84e718a2a0ba5ddd78fd2d077ea7994cdaa50b Mon Sep 17 00:00:00 2001 From: Henry Date: Thu, 7 Mar 2024 19:22:33 +0800 Subject: [PATCH 08/25] fix isValidAllowedOrigins check and invalid URL --- packages/server/src/index.ts | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/packages/server/src/index.ts b/packages/server/src/index.ts index 260bbdb4..fd0635c5 100644 --- a/packages/server/src/index.ts +++ b/packages/server/src/index.ts @@ -1328,14 +1328,18 @@ export class App { if (chatflow.chatbotConfig) { const parsedConfig = JSON.parse(chatflow.chatbotConfig) // check whether the first one is not empty. if it is empty that means the user set a value and then removed it. 
- const isValidAllowedOrigins = parsedConfig.allowedOrigins[0] !== '' - if (parsedConfig.allowedOrigins && parsedConfig.allowedOrigins.length > 0 && isValidAllowedOrigins) { + const isValidAllowedOrigins = parsedConfig.allowedOrigins?.length && parsedConfig.allowedOrigins[0] !== '' + if (isValidAllowedOrigins) { const originHeader = req.headers.origin as string const origin = new URL(originHeader).host isDomainAllowed = parsedConfig.allowedOrigins.filter((domain: string) => { - const allowedOrigin = new URL(domain).host - return origin === allowedOrigin + try { + const allowedOrigin = new URL(domain).host + return origin === allowedOrigin + } catch (e) { + return false + } }).length > 0 } } From a2caf3e265e2bf6ddabf45374a07e9fac90673dd Mon Sep 17 00:00:00 2001 From: vinodkiran Date: Thu, 7 Mar 2024 18:54:36 +0530 Subject: [PATCH 09/25] Structural Changes to support expansion to other chat models. --- packages/components/src/IVisionChatModal.ts | 12 ++++++++++++ packages/components/src/multiModalUtils.ts | 6 ++++-- 2 files changed, 16 insertions(+), 2 deletions(-) create mode 100644 packages/components/src/IVisionChatModal.ts diff --git a/packages/components/src/IVisionChatModal.ts b/packages/components/src/IVisionChatModal.ts new file mode 100644 index 00000000..482ff70b --- /dev/null +++ b/packages/components/src/IVisionChatModal.ts @@ -0,0 +1,12 @@ +import { IMultiModalOption } from './Interface' + +export interface IVisionChatModal { + id: string + configuredModel: string + configuredMaxToken: number + multiModalOption: IMultiModalOption + + setVisionModel(): void + revertToOriginalModel(): void + setMultiModalOption(multiModalOption: IMultiModalOption): void +} diff --git a/packages/components/src/multiModalUtils.ts b/packages/components/src/multiModalUtils.ts index 00cc5bf3..186e85ca 100644 --- a/packages/components/src/multiModalUtils.ts +++ b/packages/components/src/multiModalUtils.ts @@ -1,8 +1,8 @@ import { ICommonObject, IFileUpload, IMultiModalOption, 
INodeData, MessageContentImageUrl } from './Interface' -import { ChatOpenAI } from '../nodes/chatmodels/ChatOpenAI/FlowiseChatOpenAI' import path from 'path' import { getStoragePath } from './utils' import fs from 'fs' +import { IVisionChatModal } from './IVisionChatModal' export const addImagesToMessages = ( nodeData: INodeData, @@ -12,7 +12,7 @@ export const addImagesToMessages = ( const imageContent: MessageContentImageUrl[] = [] let model = nodeData.inputs?.model - if (model instanceof ChatOpenAI && multiModalOption) { + if (llmSupportsVision(model) && multiModalOption) { // Image Uploaded if (multiModalOption.image && multiModalOption.image.allowImageUploads && options?.uploads && options?.uploads.length > 0) { const imageUploads = getImageUploads(options.uploads) @@ -46,3 +46,5 @@ export const getAudioUploads = (uploads: IFileUpload[]) => { export const getImageUploads = (uploads: IFileUpload[]) => { return uploads.filter((upload: IFileUpload) => upload.mime.startsWith('image/')) } + +export const llmSupportsVision = (value: any): value is IVisionChatModal => !!value?.multiModalOption From 63b8c23072094c871b097e80606efa04db6c1932 Mon Sep 17 00:00:00 2001 From: vinodkiran Date: Thu, 7 Mar 2024 18:55:24 +0530 Subject: [PATCH 10/25] Addition of Claude for Image uploads --- .../ConversationalAgent.ts | 46 +++++++----- .../agents/MRKLAgentChat/MRKLAgentChat.ts | 30 ++++---- .../ConversationChain/ConversationChain.ts | 38 ++++------ .../nodes/chains/LLMChain/LLMChain.ts | 70 ++++++++++++------- .../chatmodels/ChatAnthropic/ChatAnthropic.ts | 29 ++++++-- .../ChatAnthropic/FlowiseChatAntrhopic.ts | 34 +++++++++ .../nodes/chatmodels/ChatOpenAI/ChatOpenAI.ts | 5 +- .../ChatOpenAI/FlowiseChatOpenAI.ts | 37 +++++----- packages/components/package.json | 2 +- packages/server/src/index.ts | 2 +- 10 files changed, 185 insertions(+), 108 deletions(-) create mode 100644 packages/components/nodes/chatmodels/ChatAnthropic/FlowiseChatAntrhopic.ts diff --git 
a/packages/components/nodes/agents/ConversationalAgent/ConversationalAgent.ts b/packages/components/nodes/agents/ConversationalAgent/ConversationalAgent.ts index db6b37c6..62c46878 100644 --- a/packages/components/nodes/agents/ConversationalAgent/ConversationalAgent.ts +++ b/packages/components/nodes/agents/ConversationalAgent/ConversationalAgent.ts @@ -4,7 +4,12 @@ import { BaseChatModel } from '@langchain/core/language_models/chat_models' import { AIMessage, BaseMessage, HumanMessage } from '@langchain/core/messages' import { ChainValues } from '@langchain/core/utils/types' import { AgentStep } from '@langchain/core/agents' -import { renderTemplate, MessagesPlaceholder } from '@langchain/core/prompts' +import { + renderTemplate, + MessagesPlaceholder, + HumanMessagePromptTemplate, + PromptTemplate +} from "@langchain/core/prompts"; import { RunnableSequence } from '@langchain/core/runnables' import { ChatConversationalAgent } from 'langchain/agents' import { getBaseClasses } from '../../../src/utils' @@ -12,7 +17,8 @@ import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from import { FlowiseMemory, ICommonObject, IMessage, INode, INodeData, INodeParams } from '../../../src/Interface' import { AgentExecutor } from '../../../src/agents' import { ChatOpenAI } from '../../chatmodels/ChatOpenAI/FlowiseChatOpenAI' -import { addImagesToMessages } from '../../../src/multiModalUtils' +import { addImagesToMessages, llmSupportsVision } from "../../../src/multiModalUtils"; +import { IVisionChatModal } from "../../../src/IVisionChatModal"; const DEFAULT_PREFIX = `Assistant is a large language model trained by OpenAI. 
@@ -150,33 +156,39 @@ const prepareAgent = async ( outputParser }) - if (model instanceof ChatOpenAI) { - let humanImageMessages: HumanMessage[] = [] + if (llmSupportsVision(model)) { + const visionChatModel = model as IVisionChatModal + // let humanImageMessages: HumanMessage[] = [] const messageContent = addImagesToMessages(nodeData, options, model.multiModalOption) if (messageContent?.length) { - // Change model to gpt-4-vision - model.modelName = 'gpt-4-vision-preview' + visionChatModel.setVisionModel() - // Change default max token to higher when using gpt-4-vision - model.maxTokens = 1024 - - for (const msg of messageContent) { - humanImageMessages.push(new HumanMessage({ content: [msg] })) - } + // for (const msg of messageContent) { + // humanImageMessages.push(new HumanMessage({ content: [msg] })) + // } // Pop the `agent_scratchpad` MessagePlaceHolder let messagePlaceholder = prompt.promptMessages.pop() as MessagesPlaceholder - + if (prompt.promptMessages.at(-1) instanceof HumanMessagePromptTemplate) { + const lastMessage = prompt.promptMessages.pop() as HumanMessagePromptTemplate + const template = (lastMessage.prompt as PromptTemplate).template as string + const msg = HumanMessagePromptTemplate.fromTemplate([ + ...messageContent, + { + text: template + } + ]) + msg.inputVariables = lastMessage.inputVariables + prompt.promptMessages.push(msg) + } // Add the HumanMessage for images - prompt.promptMessages.push(...humanImageMessages) + //prompt.promptMessages.push(...humanImageMessages) // Add the `agent_scratchpad` MessagePlaceHolder back prompt.promptMessages.push(messagePlaceholder) } else { - // revert to previous values if image upload is empty - model.modelName = model.configuredModel - model.maxTokens = model.configuredMaxToken + visionChatModel.revertToOriginalModel() } } diff --git a/packages/components/nodes/agents/MRKLAgentChat/MRKLAgentChat.ts b/packages/components/nodes/agents/MRKLAgentChat/MRKLAgentChat.ts index 5923d77e..e1f16fd8 100644 --- 
a/packages/components/nodes/agents/MRKLAgentChat/MRKLAgentChat.ts +++ b/packages/components/nodes/agents/MRKLAgentChat/MRKLAgentChat.ts @@ -1,6 +1,5 @@ import { flatten } from 'lodash' import { AgentExecutor } from 'langchain/agents' -import { HumanMessage } from '@langchain/core/messages' import { ChatPromptTemplate, HumanMessagePromptTemplate } from '@langchain/core/prompts' import { Tool } from '@langchain/core/tools' import type { PromptTemplate } from '@langchain/core/prompts' @@ -10,8 +9,8 @@ import { additionalCallbacks } from '../../../src/handler' import { FlowiseMemory, ICommonObject, IMessage, INode, INodeData, INodeParams } from '../../../src/Interface' import { getBaseClasses } from '../../../src/utils' import { createReactAgent } from '../../../src/agents' -import { ChatOpenAI } from '../../chatmodels/ChatOpenAI/FlowiseChatOpenAI' -import { addImagesToMessages } from '../../../src/multiModalUtils' +import { addImagesToMessages, llmSupportsVision } from '../../../src/multiModalUtils' +import { IVisionChatModal } from '../../../src/IVisionChatModal' class MRKLAgentChat_Agents implements INode { label: string @@ -68,23 +67,26 @@ class MRKLAgentChat_Agents implements INode { const prompt = await pull('hwchase17/react-chat') let chatPromptTemplate = undefined - if (model instanceof ChatOpenAI) { + if (llmSupportsVision(model)) { + const visionChatModel = model as IVisionChatModal const messageContent = addImagesToMessages(nodeData, options, model.multiModalOption) if (messageContent?.length) { - // Change model to gpt-4-vision - model.modelName = 'gpt-4-vision-preview' - - // Change default max token to higher when using gpt-4-vision - model.maxTokens = 1024 - + // Change model to vision supported + visionChatModel.setVisionModel() const oldTemplate = prompt.template as string - chatPromptTemplate = ChatPromptTemplate.fromMessages([HumanMessagePromptTemplate.fromTemplate(oldTemplate)]) - chatPromptTemplate.promptMessages.push(new HumanMessage({ content: 
messageContent })) + + const msg = HumanMessagePromptTemplate.fromTemplate([ + ...messageContent, + { + text: oldTemplate + } + ]) + msg.inputVariables = prompt.inputVariables + chatPromptTemplate = ChatPromptTemplate.fromMessages([msg]) } else { // revert to previous values if image upload is empty - model.modelName = model.configuredModel - model.maxTokens = model.configuredMaxToken + visionChatModel.revertToOriginalModel() } } diff --git a/packages/components/nodes/chains/ConversationChain/ConversationChain.ts b/packages/components/nodes/chains/ConversationChain/ConversationChain.ts index 25d80bee..0d572eca 100644 --- a/packages/components/nodes/chains/ConversationChain/ConversationChain.ts +++ b/packages/components/nodes/chains/ConversationChain/ConversationChain.ts @@ -2,15 +2,16 @@ import { ConversationChain } from 'langchain/chains' import { ChatPromptTemplate, HumanMessagePromptTemplate, MessagesPlaceholder, SystemMessagePromptTemplate } from '@langchain/core/prompts' import { RunnableSequence } from '@langchain/core/runnables' import { StringOutputParser } from '@langchain/core/output_parsers' -import { HumanMessage } from '@langchain/core/messages' import { ConsoleCallbackHandler as LCConsoleCallbackHandler } from '@langchain/core/tracers/console' import { checkInputs, Moderation, streamResponse } from '../../moderation/Moderation' import { formatResponse } from '../../outputparsers/OutputParserHelpers' -import { addImagesToMessages } from '../../../src/multiModalUtils' +import { addImagesToMessages, llmSupportsVision } from '../../../src/multiModalUtils' import { ChatOpenAI } from '../../chatmodels/ChatOpenAI/FlowiseChatOpenAI' -import { FlowiseMemory, ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface' +import { FlowiseMemory, ICommonObject, INode, INodeData, INodeParams, MessageContentImageUrl } from '../../../src/Interface' import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler' 
import { getBaseClasses, handleEscapeCharacters } from '../../../src/utils' +import { IVisionChatModal } from '../../../src/IVisionChatModal' +import { MessageContent } from 'llamaindex' let systemMessage = `The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.` const inputKey = 'input' @@ -145,7 +146,7 @@ class ConversationChain_Chains implements INode { } } -const prepareChatPrompt = (nodeData: INodeData, humanImageMessages: HumanMessage[]) => { +const prepareChatPrompt = (nodeData: INodeData, humanImageMessages: MessageContentImageUrl[]) => { const memory = nodeData.inputs?.memory as FlowiseMemory const prompt = nodeData.inputs?.systemMessagePrompt as string const chatPromptTemplate = nodeData.inputs?.chatPromptTemplate as ChatPromptTemplate @@ -154,7 +155,6 @@ const prepareChatPrompt = (nodeData: INodeData, humanImageMessages: HumanMessage const sysPrompt = chatPromptTemplate.promptMessages[0] const humanPrompt = chatPromptTemplate.promptMessages[chatPromptTemplate.promptMessages.length - 1] const messages = [sysPrompt, new MessagesPlaceholder(memory.memoryKey ?? 'chat_history'), humanPrompt] - if (humanImageMessages.length) messages.push(...humanImageMessages) const chatPrompt = ChatPromptTemplate.fromMessages(messages) if ((chatPromptTemplate as any).promptValues) { @@ -168,9 +168,8 @@ const prepareChatPrompt = (nodeData: INodeData, humanImageMessages: HumanMessage const messages = [ SystemMessagePromptTemplate.fromTemplate(prompt ? prompt : systemMessage), new MessagesPlaceholder(memory.memoryKey ?? 
'chat_history'), - HumanMessagePromptTemplate.fromTemplate(`{${inputKey}}`) + HumanMessagePromptTemplate.fromTemplate([`{${inputKey}}`, ...humanImageMessages]) ] - if (humanImageMessages.length) messages.push(...(humanImageMessages as any[])) const chatPrompt = ChatPromptTemplate.fromMessages(messages) @@ -183,28 +182,19 @@ const prepareChain = (nodeData: INodeData, options: ICommonObject, sessionId?: s const memory = nodeData.inputs?.memory as FlowiseMemory const memoryKey = memory.memoryKey ?? 'chat_history' - let humanImageMessages: HumanMessage[] = [] - if (model instanceof ChatOpenAI) { - const messageContent = addImagesToMessages(nodeData, options, model.multiModalOption) - + let messageContent: MessageContentImageUrl[] = [] + if (llmSupportsVision(model)) { + messageContent = addImagesToMessages(nodeData, options, model.multiModalOption) + const visionChatModel = model as IVisionChatModal if (messageContent?.length) { - // Change model to gpt-4-vision - model.modelName = 'gpt-4-vision-preview' - - // Change default max token to higher when using gpt-4-vision - model.maxTokens = 1024 - - for (const msg of messageContent) { - humanImageMessages.push(new HumanMessage({ content: [msg] })) - } + visionChatModel.setVisionModel() } else { // revert to previous values if image upload is empty - model.modelName = model.configuredModel - model.maxTokens = model.configuredMaxToken + visionChatModel.revertToOriginalModel() } } - const chatPrompt = prepareChatPrompt(nodeData, humanImageMessages) + const chatPrompt = prepareChatPrompt(nodeData, messageContent) let promptVariables = {} const promptValuesRaw = (chatPrompt as any).promptValues if (promptValuesRaw) { @@ -228,7 +218,7 @@ const prepareChain = (nodeData: INodeData, options: ICommonObject, sessionId?: s }, ...promptVariables }, - prepareChatPrompt(nodeData, humanImageMessages), + prepareChatPrompt(nodeData, messageContent), model, new StringOutputParser() ]) diff --git 
a/packages/components/nodes/chains/LLMChain/LLMChain.ts b/packages/components/nodes/chains/LLMChain/LLMChain.ts index 6adee1e1..a70a308e 100644 --- a/packages/components/nodes/chains/LLMChain/LLMChain.ts +++ b/packages/components/nodes/chains/LLMChain/LLMChain.ts @@ -1,16 +1,22 @@ -import { BaseLanguageModel, BaseLanguageModelCallOptions } from '@langchain/core/language_models/base' -import { BaseLLMOutputParser, BaseOutputParser } from '@langchain/core/output_parsers' -import { HumanMessage } from '@langchain/core/messages' -import { ChatPromptTemplate, FewShotPromptTemplate, PromptTemplate, HumanMessagePromptTemplate } from '@langchain/core/prompts' -import { OutputFixingParser } from 'langchain/output_parsers' -import { LLMChain } from 'langchain/chains' -import { ICommonObject, INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface' -import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler' -import { getBaseClasses, handleEscapeCharacters } from '../../../src/utils' -import { checkInputs, Moderation, streamResponse } from '../../moderation/Moderation' -import { formatResponse, injectOutputParser } from '../../outputparsers/OutputParserHelpers' -import { ChatOpenAI } from '../../chatmodels/ChatOpenAI/FlowiseChatOpenAI' -import { addImagesToMessages } from '../../../src/multiModalUtils' +import { BaseLanguageModel, BaseLanguageModelCallOptions } from "@langchain/core/language_models/base"; +import { BaseLLMOutputParser, BaseOutputParser } from "@langchain/core/output_parsers"; +import { HumanMessage } from "@langchain/core/messages"; +import { + ChatPromptTemplate, + FewShotPromptTemplate, + HumanMessagePromptTemplate, + PromptTemplate +} from "@langchain/core/prompts"; +import { OutputFixingParser } from "langchain/output_parsers"; +import { LLMChain } from "langchain/chains"; +import { ICommonObject, INode, INodeData, INodeOutputsValue, INodeParams } from "../../../src/Interface"; +import { 
additionalCallbacks, ConsoleCallbackHandler, CustomChainHandler } from "../../../src/handler"; +import { getBaseClasses, handleEscapeCharacters } from "../../../src/utils"; +import { checkInputs, Moderation, streamResponse } from "../../moderation/Moderation"; +import { formatResponse, injectOutputParser } from "../../outputparsers/OutputParserHelpers"; +import { ChatOpenAI } from "../../chatmodels/ChatOpenAI/FlowiseChatOpenAI"; +import { addImagesToMessages, llmSupportsVision } from "../../../src/multiModalUtils"; +import { IVisionChatModal } from "../../../src/IVisionChatModal"; class LLMChain_Chains implements INode { label: string @@ -183,24 +189,39 @@ const runPrediction = async ( * TO: { "value": "hello i am ben\n\n\thow are you?" } */ const promptValues = handleEscapeCharacters(promptValuesRaw, true) - const messageContent = addImagesToMessages(nodeData, options, model.multiModalOption) - if (chain.llm instanceof ChatOpenAI) { - const chatOpenAI = chain.llm as ChatOpenAI + if (llmSupportsVision(chain.llm)) { + const messageContent = addImagesToMessages(nodeData, options, model.multiModalOption) + const visionChatModel = chain.llm as IVisionChatModal if (messageContent?.length) { // Change model to gpt-4-vision && max token to higher when using gpt-4-vision - chatOpenAI.modelName = 'gpt-4-vision-preview' - chatOpenAI.maxTokens = 1024 + visionChatModel.setVisionModel() // Add image to the message if (chain.prompt instanceof PromptTemplate) { const existingPromptTemplate = chain.prompt.template as string - let newChatPromptTemplate = ChatPromptTemplate.fromMessages([ - HumanMessagePromptTemplate.fromTemplate(existingPromptTemplate) + const msg = HumanMessagePromptTemplate.fromTemplate([ + ...messageContent, + { + text: existingPromptTemplate + } ]) - newChatPromptTemplate.promptMessages.push(new HumanMessage({ content: messageContent })) - chain.prompt = newChatPromptTemplate + msg.inputVariables = chain.prompt.inputVariables + chain.prompt = 
ChatPromptTemplate.fromMessages([msg]) } else if (chain.prompt instanceof ChatPromptTemplate) { - chain.prompt.promptMessages.push(new HumanMessage({ content: messageContent })) + if (chain.prompt.promptMessages.at(-1) instanceof HumanMessagePromptTemplate) { + const lastMessage = chain.prompt.promptMessages.pop() as HumanMessagePromptTemplate + const template = (lastMessage.prompt as PromptTemplate).template as string + const msg = HumanMessagePromptTemplate.fromTemplate([ + ...messageContent, + { + text: template + } + ]) + msg.inputVariables = lastMessage.inputVariables + chain.prompt.promptMessages.push(msg) + } else { + chain.prompt.promptMessages.push(new HumanMessage({ content: messageContent })) + } } else if (chain.prompt instanceof FewShotPromptTemplate) { let existingFewShotPromptTemplate = chain.prompt.examplePrompt.template as string let newFewShotPromptTemplate = ChatPromptTemplate.fromMessages([ @@ -212,8 +233,7 @@ const runPrediction = async ( } } else { // revert to previous values if image upload is empty - chatOpenAI.modelName = model.configuredModel - chatOpenAI.maxTokens = model.configuredMaxToken + visionChatModel.revertToOriginalModel() } } diff --git a/packages/components/nodes/chatmodels/ChatAnthropic/ChatAnthropic.ts b/packages/components/nodes/chatmodels/ChatAnthropic/ChatAnthropic.ts index 844e7d25..392107a2 100644 --- a/packages/components/nodes/chatmodels/ChatAnthropic/ChatAnthropic.ts +++ b/packages/components/nodes/chatmodels/ChatAnthropic/ChatAnthropic.ts @@ -1,8 +1,9 @@ -import { AnthropicInput, ChatAnthropic } from '@langchain/anthropic' +import { AnthropicInput, ChatAnthropic as LangchainChatAnthropic } from '@langchain/anthropic' import { BaseCache } from '@langchain/core/caches' import { BaseLLMParams } from '@langchain/core/language_models/llms' -import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface' +import { ICommonObject, IMultiModalOption, INode, INodeData, INodeParams } from 
'../../../src/Interface' import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils' +import { ChatAnthropic } from './FlowiseChatAntrhopic' class ChatAnthropic_ChatModels implements INode { label: string @@ -19,12 +20,12 @@ class ChatAnthropic_ChatModels implements INode { constructor() { this.label = 'ChatAnthropic' this.name = 'chatAnthropic' - this.version = 3.0 + this.version = 4.0 this.type = 'ChatAnthropic' this.icon = 'Anthropic.svg' this.category = 'Chat Models' this.description = 'Wrapper around ChatAnthropic large language models that use the Chat endpoint' - this.baseClasses = [this.type, ...getBaseClasses(ChatAnthropic)] + this.baseClasses = [this.type, ...getBaseClasses(LangchainChatAnthropic)] this.credential = { label: 'Connect Credential', name: 'credential', @@ -147,6 +148,15 @@ class ChatAnthropic_ChatModels implements INode { step: 0.1, optional: true, additionalParams: true + }, + { + label: 'Allow Image Uploads', + name: 'allowImageUploads', + type: 'boolean', + description: + 'Automatically uses claude-3-* models when image is being uploaded from chat. Only works with LLMChain, Conversation Chain, ReAct Agent, and Conversational Agent', + default: false, + optional: true } ] } @@ -163,6 +173,8 @@ class ChatAnthropic_ChatModels implements INode { const credentialData = await getCredentialData(nodeData.credential ?? '', options) const anthropicApiKey = getCredentialParam('anthropicApiKey', credentialData, nodeData) + const allowImageUploads = nodeData.inputs?.allowImageUploads as boolean + const obj: Partial & BaseLLMParams & { anthropicApiKey?: string } = { temperature: parseFloat(temperature), modelName, @@ -175,7 +187,14 @@ class ChatAnthropic_ChatModels implements INode { if (topK) obj.topK = parseFloat(topK) if (cache) obj.cache = cache - const model = new ChatAnthropic(obj) + const multiModalOption: IMultiModalOption = { + image: { + allowImageUploads: allowImageUploads ?? 
false + } + } + + const model = new ChatAnthropic(nodeData.id, obj) + model.setMultiModalOption(multiModalOption) return model } } diff --git a/packages/components/nodes/chatmodels/ChatAnthropic/FlowiseChatAntrhopic.ts b/packages/components/nodes/chatmodels/ChatAnthropic/FlowiseChatAntrhopic.ts new file mode 100644 index 00000000..d52e9900 --- /dev/null +++ b/packages/components/nodes/chatmodels/ChatAnthropic/FlowiseChatAntrhopic.ts @@ -0,0 +1,34 @@ +import { AnthropicInput, ChatAnthropic as LangchainChatAnthropic } from '@langchain/anthropic' +import { IMultiModalOption } from '../../../src' +import { IVisionChatModal } from '../../../src/IVisionChatModal' +import { BaseLLMParams } from '@langchain/core/language_models/llms' + +export class ChatAnthropic extends LangchainChatAnthropic implements IVisionChatModal { + configuredModel: string + configuredMaxToken: number + multiModalOption: IMultiModalOption + id: string + + constructor(id: string, fields: Partial & BaseLLMParams & { anthropicApiKey?: string }) { + super(fields) + this.id = id + this.configuredModel = fields?.modelName || 'claude-3-opus-20240229' + this.configuredMaxToken = fields?.maxTokens ?? 
256 + } + + revertToOriginalModel(): void { + super.modelName = this.configuredModel + super.maxTokens = this.configuredMaxToken + } + + setMultiModalOption(multiModalOption: IMultiModalOption): void { + this.multiModalOption = multiModalOption + } + + setVisionModel(): void { + if (!this.modelName.startsWith('claude-3')) { + super.modelName = 'claude-3-opus-20240229' + super.maxTokens = 1024 + } + } +} diff --git a/packages/components/nodes/chatmodels/ChatOpenAI/ChatOpenAI.ts b/packages/components/nodes/chatmodels/ChatOpenAI/ChatOpenAI.ts index cc0b0efa..09b8c5b3 100644 --- a/packages/components/nodes/chatmodels/ChatOpenAI/ChatOpenAI.ts +++ b/packages/components/nodes/chatmodels/ChatOpenAI/ChatOpenAI.ts @@ -228,7 +228,7 @@ class ChatOpenAI_ChatModels implements INode { const obj: Partial & Partial & - BaseChatModelParams & { configuration?: ClientOptions & LegacyOpenAIInput; multiModalOption?: IMultiModalOption } = { + BaseChatModelParams & { configuration?: ClientOptions & LegacyOpenAIInput } = { temperature: parseFloat(temperature), modelName, openAIApiKey, @@ -265,10 +265,9 @@ class ChatOpenAI_ChatModels implements INode { imageResolution } } - obj.multiModalOption = multiModalOption const model = new ChatOpenAI(nodeData.id, obj) - + model.setMultiModalOption(multiModalOption) return model } } diff --git a/packages/components/nodes/chatmodels/ChatOpenAI/FlowiseChatOpenAI.ts b/packages/components/nodes/chatmodels/ChatOpenAI/FlowiseChatOpenAI.ts index 9049bb79..b00811d5 100644 --- a/packages/components/nodes/chatmodels/ChatOpenAI/FlowiseChatOpenAI.ts +++ b/packages/components/nodes/chatmodels/ChatOpenAI/FlowiseChatOpenAI.ts @@ -1,39 +1,40 @@ import type { ClientOptions } from 'openai' -import { - ChatOpenAI as LangchainChatOpenAI, - OpenAIChatInput, - LegacyOpenAIInput, - AzureOpenAIInput, - ChatOpenAICallOptions -} from '@langchain/openai' +import { ChatOpenAI as LangchainChatOpenAI, OpenAIChatInput, LegacyOpenAIInput, AzureOpenAIInput } from '@langchain/openai' 
import { BaseChatModelParams } from '@langchain/core/language_models/chat_models' -import { BaseMessageLike } from '@langchain/core/messages' -import { Callbacks } from '@langchain/core/callbacks/manager' -import { LLMResult } from '@langchain/core/outputs' import { IMultiModalOption } from '../../../src' +import { IVisionChatModal } from '../../../src/IVisionChatModal' -export class ChatOpenAI extends LangchainChatOpenAI { +export class ChatOpenAI extends LangchainChatOpenAI implements IVisionChatModal { configuredModel: string - configuredMaxToken?: number - multiModalOption?: IMultiModalOption + configuredMaxToken: number + multiModalOption: IMultiModalOption id: string constructor( id: string, fields?: Partial & Partial & - BaseChatModelParams & { configuration?: ClientOptions & LegacyOpenAIInput; multiModalOption?: IMultiModalOption }, + BaseChatModelParams & { configuration?: ClientOptions & LegacyOpenAIInput }, /** @deprecated */ configuration?: ClientOptions & LegacyOpenAIInput ) { super(fields, configuration) this.id = id - this.multiModalOption = fields?.multiModalOption this.configuredModel = fields?.modelName ?? 'gpt-3.5-turbo' - this.configuredMaxToken = fields?.maxTokens + this.configuredMaxToken = fields?.maxTokens ?? 
256 } - async generate(messages: BaseMessageLike[][], options?: string[] | ChatOpenAICallOptions, callbacks?: Callbacks): Promise { - return super.generate(messages, options, callbacks) + revertToOriginalModel(): void { + super.modelName = this.configuredModel + super.maxTokens = this.configuredMaxToken + } + + setMultiModalOption(multiModalOption: IMultiModalOption): void { + this.multiModalOption = multiModalOption + } + + setVisionModel(): void { + super.modelName = 'gpt-4-vision-preview' + super.maxTokens = 1024 } } diff --git a/packages/components/package.json b/packages/components/package.json index 69ca3b5f..30fe1c8c 100644 --- a/packages/components/package.json +++ b/packages/components/package.json @@ -29,7 +29,7 @@ "@google-ai/generativelanguage": "^0.2.1", "@google/generative-ai": "^0.1.3", "@huggingface/inference": "^2.6.1", - "@langchain/anthropic": "^0.0.10", + "@langchain/anthropic": "^0.1.4", "@langchain/cohere": "^0.0.5", "@langchain/community": "^0.0.30", "@langchain/google-genai": "^0.0.10", diff --git a/packages/server/src/index.ts b/packages/server/src/index.ts index 68e1709d..8fd5f451 100644 --- a/packages/server/src/index.ts +++ b/packages/server/src/index.ts @@ -1534,7 +1534,7 @@ export class App { if (!chatflow) return `Chatflow ${chatflowid} not found` const uploadAllowedNodes = ['llmChain', 'conversationChain', 'mrklAgentChat', 'conversationalAgent'] - const uploadProcessingNodes = ['chatOpenAI', 'azureChatOpenAI'] + const uploadProcessingNodes = ['chatOpenAI', 'chatAnthropic'] const flowObj = JSON.parse(chatflow.flowData) const imgUploadSizeAndTypes: IUploadFileSizeAndTypes[] = [] From 7ab96a4c394e1d972092534bed9f56b9e8c46a3e Mon Sep 17 00:00:00 2001 From: vinodkiran Date: Thu, 7 Mar 2024 18:55:47 +0530 Subject: [PATCH 11/25] start of changes to AzureOpenAI to support Image uploads --- .../AzureChatOpenAI/AzureChatOpenAI.ts | 77 +++++++++---------- 1 file changed, 38 insertions(+), 39 deletions(-) diff --git 
a/packages/components/nodes/chatmodels/AzureChatOpenAI/AzureChatOpenAI.ts b/packages/components/nodes/chatmodels/AzureChatOpenAI/AzureChatOpenAI.ts index da1b69ef..155469ef 100644 --- a/packages/components/nodes/chatmodels/AzureChatOpenAI/AzureChatOpenAI.ts +++ b/packages/components/nodes/chatmodels/AzureChatOpenAI/AzureChatOpenAI.ts @@ -1,7 +1,7 @@ import { AzureOpenAIInput, ChatOpenAI as LangchainChatOpenAI, OpenAIChatInput } from '@langchain/openai' import { BaseCache } from '@langchain/core/caches' import { BaseLLMParams } from '@langchain/core/language_models/llms' -import { ICommonObject, IMultiModalOption, INode, INodeData, INodeParams } from "../../../src/Interface"; +import { ICommonObject, IMultiModalOption, INode, INodeData, INodeParams } from '../../../src/Interface' import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils' import { ChatOpenAI } from '../ChatOpenAI/FlowiseChatOpenAI' @@ -103,38 +103,38 @@ class AzureChatOpenAI_ChatModels implements INode { step: 1, optional: true, additionalParams: true - }, - { - label: 'Allow Image Uploads', - name: 'allowImageUploads', - type: 'boolean', - description: - 'Automatically uses gpt-4-vision-preview when image is being uploaded from chat. Only works with LLMChain, Conversation Chain, ReAct Agent, and Conversational Agent', - default: false, - optional: true - }, - { - label: 'Image Resolution', - description: 'This parameter controls the resolution in which the model views the image.', - name: 'imageResolution', - type: 'options', - options: [ - { - label: 'Low', - name: 'low' - }, - { - label: 'High', - name: 'high' - }, - { - label: 'Auto', - name: 'auto' - } - ], - default: 'low', - optional: false, - additionalParams: true + // }, + // { + // label: 'Allow Image Uploads', + // name: 'allowImageUploads', + // type: 'boolean', + // description: + // 'Automatically uses gpt-4-vision-preview when image is being uploaded from chat. 
Only works with LLMChain, Conversation Chain, ReAct Agent, and Conversational Agent', + // default: false, + // optional: true + // }, + // { + // label: 'Image Resolution', + // description: 'This parameter controls the resolution in which the model views the image.', + // name: 'imageResolution', + // type: 'options', + // options: [ + // { + // label: 'Low', + // name: 'low' + // }, + // { + // label: 'High', + // name: 'high' + // }, + // { + // label: 'Auto', + // name: 'auto' + // } + // ], + // default: 'low', + // optional: false, + // additionalParams: true } ] } @@ -155,10 +155,10 @@ class AzureChatOpenAI_ChatModels implements INode { const azureOpenAIApiDeploymentName = getCredentialParam('azureOpenAIApiDeploymentName', credentialData, nodeData) const azureOpenAIApiVersion = getCredentialParam('azureOpenAIApiVersion', credentialData, nodeData) - const allowImageUploads = nodeData.inputs?.allowImageUploads as boolean - const imageResolution = nodeData.inputs?.imageResolution as string + // const allowImageUploads = nodeData.inputs?.allowImageUploads as boolean + // const imageResolution = nodeData.inputs?.imageResolution as string - const obj: Partial & BaseLLMParams & Partial & { multiModalOption?: IMultiModalOption } = { + const obj: Partial & BaseLLMParams & Partial = { temperature: parseFloat(temperature), modelName, azureOpenAIApiKey, @@ -176,13 +176,12 @@ class AzureChatOpenAI_ChatModels implements INode { const multiModalOption: IMultiModalOption = { image: { - allowImageUploads: allowImageUploads ?? 
false, - imageResolution + allowImageUploads: false, } } - obj.multiModalOption = multiModalOption const model = new ChatOpenAI(nodeData.id, obj) + model.setMultiModalOption(multiModalOption) return model } } From 8c1a68a37ebcb31a63fe0c8e5e2f9a9ffc896f45 Mon Sep 17 00:00:00 2001 From: automaton82 Date: Thu, 7 Mar 2024 09:15:13 -0500 Subject: [PATCH 12/25] Fix for 1881 sorting for Flowise QnA --- packages/server/src/index.ts | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/packages/server/src/index.ts b/packages/server/src/index.ts index ab405e35..e31d8b08 100644 --- a/packages/server/src/index.ts +++ b/packages/server/src/index.ts @@ -1371,13 +1371,12 @@ export class App { } templates.push(template) }) - const FlowiseDocsQnA = templates.find((tmp) => tmp.name === 'Flowise Docs QnA') - const FlowiseDocsQnAIndex = templates.findIndex((tmp) => tmp.name === 'Flowise Docs QnA') - if (FlowiseDocsQnA && FlowiseDocsQnAIndex > 0) { - templates.splice(FlowiseDocsQnAIndex, 1) - templates.unshift(FlowiseDocsQnA) + const sortedTemplates = templates.sort((a, b) => a.templateName.localeCompare(b.templateName)) + const FlowiseDocsQnAIndex = sortedTemplates.findIndex((tmp) => tmp.templateName === 'Flowise Docs QnA'); + if (FlowiseDocsQnAIndex > 0) { + sortedTemplates.unshift(sortedTemplates.splice(FlowiseDocsQnAIndex, 1)[0]); } - return res.json(templates.sort((a, b) => a.templateName.localeCompare(b.templateName))) + return res.json(sortedTemplates) }) // ---------------------------------------- From 2b0ca6068676fe4c2657844d7ef28a670509f89d Mon Sep 17 00:00:00 2001 From: vinodkiran Date: Thu, 7 Mar 2024 20:09:05 +0530 Subject: [PATCH 13/25] lint fixes --- .../ConversationalAgent.ts | 12 ++---- .../ConversationChain/ConversationChain.ts | 1 - .../nodes/chains/LLMChain/LLMChain.ts | 33 +++++++---------- .../AzureChatOpenAI/AzureChatOpenAI.ts | 37 +------------------ 4 files changed, 18 insertions(+), 65 deletions(-) diff --git 
a/packages/components/nodes/agents/ConversationalAgent/ConversationalAgent.ts b/packages/components/nodes/agents/ConversationalAgent/ConversationalAgent.ts index 62c46878..6bb042b6 100644 --- a/packages/components/nodes/agents/ConversationalAgent/ConversationalAgent.ts +++ b/packages/components/nodes/agents/ConversationalAgent/ConversationalAgent.ts @@ -4,21 +4,15 @@ import { BaseChatModel } from '@langchain/core/language_models/chat_models' import { AIMessage, BaseMessage, HumanMessage } from '@langchain/core/messages' import { ChainValues } from '@langchain/core/utils/types' import { AgentStep } from '@langchain/core/agents' -import { - renderTemplate, - MessagesPlaceholder, - HumanMessagePromptTemplate, - PromptTemplate -} from "@langchain/core/prompts"; +import { renderTemplate, MessagesPlaceholder, HumanMessagePromptTemplate, PromptTemplate } from '@langchain/core/prompts' import { RunnableSequence } from '@langchain/core/runnables' import { ChatConversationalAgent } from 'langchain/agents' import { getBaseClasses } from '../../../src/utils' import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler' import { FlowiseMemory, ICommonObject, IMessage, INode, INodeData, INodeParams } from '../../../src/Interface' import { AgentExecutor } from '../../../src/agents' -import { ChatOpenAI } from '../../chatmodels/ChatOpenAI/FlowiseChatOpenAI' -import { addImagesToMessages, llmSupportsVision } from "../../../src/multiModalUtils"; -import { IVisionChatModal } from "../../../src/IVisionChatModal"; +import { addImagesToMessages, llmSupportsVision } from '../../../src/multiModalUtils' +import { IVisionChatModal } from '../../../src/IVisionChatModal' const DEFAULT_PREFIX = `Assistant is a large language model trained by OpenAI. 
diff --git a/packages/components/nodes/chains/ConversationChain/ConversationChain.ts b/packages/components/nodes/chains/ConversationChain/ConversationChain.ts index 0d572eca..814fc1cc 100644 --- a/packages/components/nodes/chains/ConversationChain/ConversationChain.ts +++ b/packages/components/nodes/chains/ConversationChain/ConversationChain.ts @@ -11,7 +11,6 @@ import { FlowiseMemory, ICommonObject, INode, INodeData, INodeParams, MessageCon import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler' import { getBaseClasses, handleEscapeCharacters } from '../../../src/utils' import { IVisionChatModal } from '../../../src/IVisionChatModal' -import { MessageContent } from 'llamaindex' let systemMessage = `The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.` const inputKey = 'input' diff --git a/packages/components/nodes/chains/LLMChain/LLMChain.ts b/packages/components/nodes/chains/LLMChain/LLMChain.ts index a70a308e..738a8c4b 100644 --- a/packages/components/nodes/chains/LLMChain/LLMChain.ts +++ b/packages/components/nodes/chains/LLMChain/LLMChain.ts @@ -1,22 +1,17 @@ -import { BaseLanguageModel, BaseLanguageModelCallOptions } from "@langchain/core/language_models/base"; -import { BaseLLMOutputParser, BaseOutputParser } from "@langchain/core/output_parsers"; -import { HumanMessage } from "@langchain/core/messages"; -import { - ChatPromptTemplate, - FewShotPromptTemplate, - HumanMessagePromptTemplate, - PromptTemplate -} from "@langchain/core/prompts"; -import { OutputFixingParser } from "langchain/output_parsers"; -import { LLMChain } from "langchain/chains"; -import { ICommonObject, INode, INodeData, INodeOutputsValue, INodeParams } from "../../../src/Interface"; -import { additionalCallbacks, ConsoleCallbackHandler, CustomChainHandler } from 
"../../../src/handler"; -import { getBaseClasses, handleEscapeCharacters } from "../../../src/utils"; -import { checkInputs, Moderation, streamResponse } from "../../moderation/Moderation"; -import { formatResponse, injectOutputParser } from "../../outputparsers/OutputParserHelpers"; -import { ChatOpenAI } from "../../chatmodels/ChatOpenAI/FlowiseChatOpenAI"; -import { addImagesToMessages, llmSupportsVision } from "../../../src/multiModalUtils"; -import { IVisionChatModal } from "../../../src/IVisionChatModal"; +import { BaseLanguageModel, BaseLanguageModelCallOptions } from '@langchain/core/language_models/base' +import { BaseLLMOutputParser, BaseOutputParser } from '@langchain/core/output_parsers' +import { HumanMessage } from '@langchain/core/messages' +import { ChatPromptTemplate, FewShotPromptTemplate, HumanMessagePromptTemplate, PromptTemplate } from '@langchain/core/prompts' +import { OutputFixingParser } from 'langchain/output_parsers' +import { LLMChain } from 'langchain/chains' +import { ICommonObject, INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface' +import { additionalCallbacks, ConsoleCallbackHandler, CustomChainHandler } from '../../../src/handler' +import { getBaseClasses, handleEscapeCharacters } from '../../../src/utils' +import { checkInputs, Moderation, streamResponse } from '../../moderation/Moderation' +import { formatResponse, injectOutputParser } from '../../outputparsers/OutputParserHelpers' +import { ChatOpenAI } from '../../chatmodels/ChatOpenAI/FlowiseChatOpenAI' +import { addImagesToMessages, llmSupportsVision } from '../../../src/multiModalUtils' +import { IVisionChatModal } from '../../../src/IVisionChatModal' class LLMChain_Chains implements INode { label: string diff --git a/packages/components/nodes/chatmodels/AzureChatOpenAI/AzureChatOpenAI.ts b/packages/components/nodes/chatmodels/AzureChatOpenAI/AzureChatOpenAI.ts index 155469ef..785bd3c5 100644 --- 
a/packages/components/nodes/chatmodels/AzureChatOpenAI/AzureChatOpenAI.ts +++ b/packages/components/nodes/chatmodels/AzureChatOpenAI/AzureChatOpenAI.ts @@ -103,38 +103,6 @@ class AzureChatOpenAI_ChatModels implements INode { step: 1, optional: true, additionalParams: true - // }, - // { - // label: 'Allow Image Uploads', - // name: 'allowImageUploads', - // type: 'boolean', - // description: - // 'Automatically uses gpt-4-vision-preview when image is being uploaded from chat. Only works with LLMChain, Conversation Chain, ReAct Agent, and Conversational Agent', - // default: false, - // optional: true - // }, - // { - // label: 'Image Resolution', - // description: 'This parameter controls the resolution in which the model views the image.', - // name: 'imageResolution', - // type: 'options', - // options: [ - // { - // label: 'Low', - // name: 'low' - // }, - // { - // label: 'High', - // name: 'high' - // }, - // { - // label: 'Auto', - // name: 'auto' - // } - // ], - // default: 'low', - // optional: false, - // additionalParams: true } ] } @@ -155,9 +123,6 @@ class AzureChatOpenAI_ChatModels implements INode { const azureOpenAIApiDeploymentName = getCredentialParam('azureOpenAIApiDeploymentName', credentialData, nodeData) const azureOpenAIApiVersion = getCredentialParam('azureOpenAIApiVersion', credentialData, nodeData) - // const allowImageUploads = nodeData.inputs?.allowImageUploads as boolean - // const imageResolution = nodeData.inputs?.imageResolution as string - const obj: Partial & BaseLLMParams & Partial = { temperature: parseFloat(temperature), modelName, @@ -176,7 +141,7 @@ class AzureChatOpenAI_ChatModels implements INode { const multiModalOption: IMultiModalOption = { image: { - allowImageUploads: false, + allowImageUploads: false } } From 3fda7973bbfc795609b3cfc94d938b236e314e1c Mon Sep 17 00:00:00 2001 From: automaton82 Date: Thu, 7 Mar 2024 09:45:28 -0500 Subject: [PATCH 14/25] Removing semicolons --- packages/server/src/index.ts | 4 ++-- 1 file 
changed, 2 insertions(+), 2 deletions(-) diff --git a/packages/server/src/index.ts b/packages/server/src/index.ts index e31d8b08..094e3812 100644 --- a/packages/server/src/index.ts +++ b/packages/server/src/index.ts @@ -1372,9 +1372,9 @@ export class App { templates.push(template) }) const sortedTemplates = templates.sort((a, b) => a.templateName.localeCompare(b.templateName)) - const FlowiseDocsQnAIndex = sortedTemplates.findIndex((tmp) => tmp.templateName === 'Flowise Docs QnA'); + const FlowiseDocsQnAIndex = sortedTemplates.findIndex((tmp) => tmp.templateName === 'Flowise Docs QnA') if (FlowiseDocsQnAIndex > 0) { - sortedTemplates.unshift(sortedTemplates.splice(FlowiseDocsQnAIndex, 1)[0]); + sortedTemplates.unshift(sortedTemplates.splice(FlowiseDocsQnAIndex, 1)[0]) } return res.json(sortedTemplates) }) From bce7ff9ada6db56bfda729fdbc446d335d6d8537 Mon Sep 17 00:00:00 2001 From: vinodkiran Date: Fri, 8 Mar 2024 17:59:54 +0530 Subject: [PATCH 15/25] refactoring of interface into the common interface.ts file and misc changes --- .../ConversationalAgent/ConversationalAgent.ts | 9 +-------- .../nodes/agents/MRKLAgentChat/MRKLAgentChat.ts | 3 +-- .../chains/ConversationChain/ConversationChain.ts | 11 +++++++++-- .../components/nodes/chains/LLMChain/LLMChain.ts | 3 +-- .../chatmodels/ChatAnthropic/FlowiseChatAntrhopic.ts | 3 +-- .../nodes/chatmodels/ChatOpenAI/FlowiseChatOpenAI.ts | 3 +-- packages/components/src/IVisionChatModal.ts | 12 ------------ packages/components/src/Interface.ts | 11 +++++++++++ packages/components/src/multiModalUtils.ts | 3 +-- 9 files changed, 26 insertions(+), 32 deletions(-) delete mode 100644 packages/components/src/IVisionChatModal.ts diff --git a/packages/components/nodes/agents/ConversationalAgent/ConversationalAgent.ts b/packages/components/nodes/agents/ConversationalAgent/ConversationalAgent.ts index 6bb042b6..14361e43 100644 --- a/packages/components/nodes/agents/ConversationalAgent/ConversationalAgent.ts +++ 
b/packages/components/nodes/agents/ConversationalAgent/ConversationalAgent.ts @@ -9,10 +9,9 @@ import { RunnableSequence } from '@langchain/core/runnables' import { ChatConversationalAgent } from 'langchain/agents' import { getBaseClasses } from '../../../src/utils' import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler' -import { FlowiseMemory, ICommonObject, IMessage, INode, INodeData, INodeParams } from '../../../src/Interface' +import { IVisionChatModal, FlowiseMemory, ICommonObject, IMessage, INode, INodeData, INodeParams } from '../../../src/Interface' import { AgentExecutor } from '../../../src/agents' import { addImagesToMessages, llmSupportsVision } from '../../../src/multiModalUtils' -import { IVisionChatModal } from '../../../src/IVisionChatModal' const DEFAULT_PREFIX = `Assistant is a large language model trained by OpenAI. @@ -158,10 +157,6 @@ const prepareAgent = async ( if (messageContent?.length) { visionChatModel.setVisionModel() - // for (const msg of messageContent) { - // humanImageMessages.push(new HumanMessage({ content: [msg] })) - // } - // Pop the `agent_scratchpad` MessagePlaceHolder let messagePlaceholder = prompt.promptMessages.pop() as MessagesPlaceholder if (prompt.promptMessages.at(-1) instanceof HumanMessagePromptTemplate) { @@ -176,8 +171,6 @@ const prepareAgent = async ( msg.inputVariables = lastMessage.inputVariables prompt.promptMessages.push(msg) } - // Add the HumanMessage for images - //prompt.promptMessages.push(...humanImageMessages) // Add the `agent_scratchpad` MessagePlaceHolder back prompt.promptMessages.push(messagePlaceholder) diff --git a/packages/components/nodes/agents/MRKLAgentChat/MRKLAgentChat.ts b/packages/components/nodes/agents/MRKLAgentChat/MRKLAgentChat.ts index e1f16fd8..f0466e08 100644 --- a/packages/components/nodes/agents/MRKLAgentChat/MRKLAgentChat.ts +++ b/packages/components/nodes/agents/MRKLAgentChat/MRKLAgentChat.ts @@ -6,11 +6,10 @@ import type { 
PromptTemplate } from '@langchain/core/prompts' import { BaseChatModel } from '@langchain/core/language_models/chat_models' import { pull } from 'langchain/hub' import { additionalCallbacks } from '../../../src/handler' -import { FlowiseMemory, ICommonObject, IMessage, INode, INodeData, INodeParams } from '../../../src/Interface' +import { IVisionChatModal, FlowiseMemory, ICommonObject, IMessage, INode, INodeData, INodeParams } from '../../../src/Interface' import { getBaseClasses } from '../../../src/utils' import { createReactAgent } from '../../../src/agents' import { addImagesToMessages, llmSupportsVision } from '../../../src/multiModalUtils' -import { IVisionChatModal } from '../../../src/IVisionChatModal' class MRKLAgentChat_Agents implements INode { label: string diff --git a/packages/components/nodes/chains/ConversationChain/ConversationChain.ts b/packages/components/nodes/chains/ConversationChain/ConversationChain.ts index 814fc1cc..ffea16f5 100644 --- a/packages/components/nodes/chains/ConversationChain/ConversationChain.ts +++ b/packages/components/nodes/chains/ConversationChain/ConversationChain.ts @@ -7,10 +7,17 @@ import { checkInputs, Moderation, streamResponse } from '../../moderation/Modera import { formatResponse } from '../../outputparsers/OutputParserHelpers' import { addImagesToMessages, llmSupportsVision } from '../../../src/multiModalUtils' import { ChatOpenAI } from '../../chatmodels/ChatOpenAI/FlowiseChatOpenAI' -import { FlowiseMemory, ICommonObject, INode, INodeData, INodeParams, MessageContentImageUrl } from '../../../src/Interface' +import { + IVisionChatModal, + FlowiseMemory, + ICommonObject, + INode, + INodeData, + INodeParams, + MessageContentImageUrl +} from '../../../src/Interface' import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler' import { getBaseClasses, handleEscapeCharacters } from '../../../src/utils' -import { IVisionChatModal } from '../../../src/IVisionChatModal' let 
systemMessage = `The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.` const inputKey = 'input' diff --git a/packages/components/nodes/chains/LLMChain/LLMChain.ts b/packages/components/nodes/chains/LLMChain/LLMChain.ts index 738a8c4b..dc59760c 100644 --- a/packages/components/nodes/chains/LLMChain/LLMChain.ts +++ b/packages/components/nodes/chains/LLMChain/LLMChain.ts @@ -4,14 +4,13 @@ import { HumanMessage } from '@langchain/core/messages' import { ChatPromptTemplate, FewShotPromptTemplate, HumanMessagePromptTemplate, PromptTemplate } from '@langchain/core/prompts' import { OutputFixingParser } from 'langchain/output_parsers' import { LLMChain } from 'langchain/chains' -import { ICommonObject, INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface' +import { IVisionChatModal, ICommonObject, INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface' import { additionalCallbacks, ConsoleCallbackHandler, CustomChainHandler } from '../../../src/handler' import { getBaseClasses, handleEscapeCharacters } from '../../../src/utils' import { checkInputs, Moderation, streamResponse } from '../../moderation/Moderation' import { formatResponse, injectOutputParser } from '../../outputparsers/OutputParserHelpers' import { ChatOpenAI } from '../../chatmodels/ChatOpenAI/FlowiseChatOpenAI' import { addImagesToMessages, llmSupportsVision } from '../../../src/multiModalUtils' -import { IVisionChatModal } from '../../../src/IVisionChatModal' class LLMChain_Chains implements INode { label: string diff --git a/packages/components/nodes/chatmodels/ChatAnthropic/FlowiseChatAntrhopic.ts b/packages/components/nodes/chatmodels/ChatAnthropic/FlowiseChatAntrhopic.ts index d52e9900..05665d1f 100644 --- 
a/packages/components/nodes/chatmodels/ChatAnthropic/FlowiseChatAntrhopic.ts +++ b/packages/components/nodes/chatmodels/ChatAnthropic/FlowiseChatAntrhopic.ts @@ -1,6 +1,5 @@ import { AnthropicInput, ChatAnthropic as LangchainChatAnthropic } from '@langchain/anthropic' -import { IMultiModalOption } from '../../../src' -import { IVisionChatModal } from '../../../src/IVisionChatModal' +import { IVisionChatModal, IMultiModalOption } from '../../../src' import { BaseLLMParams } from '@langchain/core/language_models/llms' export class ChatAnthropic extends LangchainChatAnthropic implements IVisionChatModal { diff --git a/packages/components/nodes/chatmodels/ChatOpenAI/FlowiseChatOpenAI.ts b/packages/components/nodes/chatmodels/ChatOpenAI/FlowiseChatOpenAI.ts index b00811d5..0227362c 100644 --- a/packages/components/nodes/chatmodels/ChatOpenAI/FlowiseChatOpenAI.ts +++ b/packages/components/nodes/chatmodels/ChatOpenAI/FlowiseChatOpenAI.ts @@ -1,8 +1,7 @@ import type { ClientOptions } from 'openai' import { ChatOpenAI as LangchainChatOpenAI, OpenAIChatInput, LegacyOpenAIInput, AzureOpenAIInput } from '@langchain/openai' import { BaseChatModelParams } from '@langchain/core/language_models/chat_models' -import { IMultiModalOption } from '../../../src' -import { IVisionChatModal } from '../../../src/IVisionChatModal' +import { IMultiModalOption, IVisionChatModal } from '../../../src' export class ChatOpenAI extends LangchainChatOpenAI implements IVisionChatModal { configuredModel: string diff --git a/packages/components/src/IVisionChatModal.ts b/packages/components/src/IVisionChatModal.ts deleted file mode 100644 index 482ff70b..00000000 --- a/packages/components/src/IVisionChatModal.ts +++ /dev/null @@ -1,12 +0,0 @@ -import { IMultiModalOption } from './Interface' - -export interface IVisionChatModal { - id: string - configuredModel: string - configuredMaxToken: number - multiModalOption: IMultiModalOption - - setVisionModel(): void - revertToOriginalModel(): void - 
setMultiModalOption(multiModalOption: IMultiModalOption): void -} diff --git a/packages/components/src/Interface.ts b/packages/components/src/Interface.ts index 0e280dea..798e91a2 100644 --- a/packages/components/src/Interface.ts +++ b/packages/components/src/Interface.ts @@ -270,3 +270,14 @@ export abstract class FlowiseSummaryMemory extends ConversationSummaryMemory imp abstract addChatMessages(msgArray: { text: string; type: MessageType }[], overrideSessionId?: string): Promise abstract clearChatMessages(overrideSessionId?: string): Promise } + +export interface IVisionChatModal { + id: string + configuredModel: string + configuredMaxToken: number + multiModalOption: IMultiModalOption + + setVisionModel(): void + revertToOriginalModel(): void + setMultiModalOption(multiModalOption: IMultiModalOption): void +} diff --git a/packages/components/src/multiModalUtils.ts b/packages/components/src/multiModalUtils.ts index 186e85ca..b3a51210 100644 --- a/packages/components/src/multiModalUtils.ts +++ b/packages/components/src/multiModalUtils.ts @@ -1,8 +1,7 @@ -import { ICommonObject, IFileUpload, IMultiModalOption, INodeData, MessageContentImageUrl } from './Interface' +import { IVisionChatModal, ICommonObject, IFileUpload, IMultiModalOption, INodeData, MessageContentImageUrl } from './Interface' import path from 'path' import { getStoragePath } from './utils' import fs from 'fs' -import { IVisionChatModal } from './IVisionChatModal' export const addImagesToMessages = ( nodeData: INodeData, From c35eb0b7e55dfd66fcc012a31708a98a2bb9d0fd Mon Sep 17 00:00:00 2001 From: Henry Date: Sat, 9 Mar 2024 17:01:22 +0800 Subject: [PATCH 16/25] add separate logic for conversation chain for openai vision --- .../ConversationalAgent.ts | 1 - .../ConversationChain/ConversationChain.ts | 43 ++++++++++++++++--- 2 files changed, 38 insertions(+), 6 deletions(-) diff --git a/packages/components/nodes/agents/ConversationalAgent/ConversationalAgent.ts 
b/packages/components/nodes/agents/ConversationalAgent/ConversationalAgent.ts index 14361e43..404f4db4 100644 --- a/packages/components/nodes/agents/ConversationalAgent/ConversationalAgent.ts +++ b/packages/components/nodes/agents/ConversationalAgent/ConversationalAgent.ts @@ -151,7 +151,6 @@ const prepareAgent = async ( if (llmSupportsVision(model)) { const visionChatModel = model as IVisionChatModal - // let humanImageMessages: HumanMessage[] = [] const messageContent = addImagesToMessages(nodeData, options, model.multiModalOption) if (messageContent?.length) { diff --git a/packages/components/nodes/chains/ConversationChain/ConversationChain.ts b/packages/components/nodes/chains/ConversationChain/ConversationChain.ts index ffea16f5..16493adc 100644 --- a/packages/components/nodes/chains/ConversationChain/ConversationChain.ts +++ b/packages/components/nodes/chains/ConversationChain/ConversationChain.ts @@ -1,7 +1,16 @@ import { ConversationChain } from 'langchain/chains' -import { ChatPromptTemplate, HumanMessagePromptTemplate, MessagesPlaceholder, SystemMessagePromptTemplate } from '@langchain/core/prompts' +import { + ChatPromptTemplate, + HumanMessagePromptTemplate, + MessagesPlaceholder, + SystemMessagePromptTemplate, + BaseMessagePromptTemplateLike, + PromptTemplate +} from '@langchain/core/prompts' import { RunnableSequence } from '@langchain/core/runnables' import { StringOutputParser } from '@langchain/core/output_parsers' +import { BaseChatModel } from '@langchain/core/language_models/chat_models' +import { HumanMessage } from '@langchain/core/messages' import { ConsoleCallbackHandler as LCConsoleCallbackHandler } from '@langchain/core/tracers/console' import { checkInputs, Moderation, streamResponse } from '../../moderation/Moderation' import { formatResponse } from '../../outputparsers/OutputParserHelpers' @@ -156,12 +165,29 @@ const prepareChatPrompt = (nodeData: INodeData, humanImageMessages: MessageConte const memory = nodeData.inputs?.memory as 
FlowiseMemory const prompt = nodeData.inputs?.systemMessagePrompt as string const chatPromptTemplate = nodeData.inputs?.chatPromptTemplate as ChatPromptTemplate + let model = nodeData.inputs?.model as BaseChatModel if (chatPromptTemplate && chatPromptTemplate.promptMessages.length) { const sysPrompt = chatPromptTemplate.promptMessages[0] const humanPrompt = chatPromptTemplate.promptMessages[chatPromptTemplate.promptMessages.length - 1] const messages = [sysPrompt, new MessagesPlaceholder(memory.memoryKey ?? 'chat_history'), humanPrompt] + // OpenAI works better when separate images into standalone human messages + if (model instanceof ChatOpenAI && humanImageMessages.length) { + messages.push(new HumanMessage({ content: [...humanImageMessages] })) + } else if (humanImageMessages.length) { + const lastMessage = messages.pop() as HumanMessagePromptTemplate + const template = (lastMessage.prompt as PromptTemplate).template as string + const msg = HumanMessagePromptTemplate.fromTemplate([ + ...humanImageMessages, + { + text: template + } + ]) + msg.inputVariables = lastMessage.inputVariables + messages.push(msg) + } + const chatPrompt = ChatPromptTemplate.fromMessages(messages) if ((chatPromptTemplate as any).promptValues) { // @ts-ignore @@ -171,12 +197,19 @@ const prepareChatPrompt = (nodeData: INodeData, humanImageMessages: MessageConte return chatPrompt } - const messages = [ + const messages: BaseMessagePromptTemplateLike[] = [ SystemMessagePromptTemplate.fromTemplate(prompt ? prompt : systemMessage), - new MessagesPlaceholder(memory.memoryKey ?? 'chat_history'), - HumanMessagePromptTemplate.fromTemplate([`{${inputKey}}`, ...humanImageMessages]) + new MessagesPlaceholder(memory.memoryKey ?? 
'chat_history') ] + // OpenAI works better when separate images into standalone human messages + if (model instanceof ChatOpenAI && humanImageMessages.length) { + messages.push(HumanMessagePromptTemplate.fromTemplate(`{${inputKey}}`)) + messages.push(new HumanMessage({ content: [...humanImageMessages] })) + } else if (humanImageMessages.length) { + messages.push(HumanMessagePromptTemplate.fromTemplate([`{${inputKey}}`, ...humanImageMessages])) + } + const chatPrompt = ChatPromptTemplate.fromMessages(messages) return chatPrompt @@ -184,7 +217,7 @@ const prepareChatPrompt = (nodeData: INodeData, humanImageMessages: MessageConte const prepareChain = (nodeData: INodeData, options: ICommonObject, sessionId?: string) => { const chatHistory = options.chatHistory - let model = nodeData.inputs?.model as ChatOpenAI + let model = nodeData.inputs?.model as BaseChatModel const memory = nodeData.inputs?.memory as FlowiseMemory const memoryKey = memory.memoryKey ?? 'chat_history' From 66a83f886a441931a1cc25d6a5a3970d7dbb17cc Mon Sep 17 00:00:00 2001 From: Henry Date: Sat, 9 Mar 2024 17:25:56 +0800 Subject: [PATCH 17/25] update model typecast --- packages/components/nodes/agents/XMLAgent/XMLAgent.ts | 4 ++-- packages/components/nodes/chains/LLMChain/LLMChain.ts | 4 +--- 2 files changed, 3 insertions(+), 5 deletions(-) diff --git a/packages/components/nodes/agents/XMLAgent/XMLAgent.ts b/packages/components/nodes/agents/XMLAgent/XMLAgent.ts index 49109947..476a40da 100644 --- a/packages/components/nodes/agents/XMLAgent/XMLAgent.ts +++ b/packages/components/nodes/agents/XMLAgent/XMLAgent.ts @@ -1,8 +1,8 @@ import { flatten } from 'lodash' import { ChainValues } from '@langchain/core/utils/types' import { AgentStep } from '@langchain/core/agents' +import { BaseChatModel } from '@langchain/core/language_models/chat_models' import { RunnableSequence } from '@langchain/core/runnables' -import { ChatOpenAI } from '@langchain/openai' import { Tool } from '@langchain/core/tools' import { 
ChatPromptTemplate, HumanMessagePromptTemplate, MessagesPlaceholder } from '@langchain/core/prompts' import { XMLAgentOutputParser } from 'langchain/agents/xml/output_parser' @@ -139,7 +139,7 @@ const prepareAgent = async ( flowObj: { sessionId?: string; chatId?: string; input?: string }, chatHistory: IMessage[] = [] ) => { - const model = nodeData.inputs?.model as ChatOpenAI + const model = nodeData.inputs?.model as BaseChatModel const memory = nodeData.inputs?.memory as FlowiseMemory const systemMessage = nodeData.inputs?.systemMessage as string let tools = nodeData.inputs?.tools diff --git a/packages/components/nodes/chains/LLMChain/LLMChain.ts b/packages/components/nodes/chains/LLMChain/LLMChain.ts index dc59760c..fa0fd61c 100644 --- a/packages/components/nodes/chains/LLMChain/LLMChain.ts +++ b/packages/components/nodes/chains/LLMChain/LLMChain.ts @@ -9,7 +9,6 @@ import { additionalCallbacks, ConsoleCallbackHandler, CustomChainHandler } from import { getBaseClasses, handleEscapeCharacters } from '../../../src/utils' import { checkInputs, Moderation, streamResponse } from '../../moderation/Moderation' import { formatResponse, injectOutputParser } from '../../outputparsers/OutputParserHelpers' -import { ChatOpenAI } from '../../chatmodels/ChatOpenAI/FlowiseChatOpenAI' import { addImagesToMessages, llmSupportsVision } from '../../../src/multiModalUtils' class LLMChain_Chains implements INode { @@ -164,7 +163,6 @@ const runPrediction = async ( const socketIO = isStreaming ? options.socketIO : undefined const socketIOClientId = isStreaming ? 
options.socketIOClientId : '' const moderations = nodeData.inputs?.inputModeration as Moderation[] - let model = nodeData.inputs?.model as ChatOpenAI if (moderations && moderations.length > 0) { try { @@ -185,8 +183,8 @@ const runPrediction = async ( const promptValues = handleEscapeCharacters(promptValuesRaw, true) if (llmSupportsVision(chain.llm)) { - const messageContent = addImagesToMessages(nodeData, options, model.multiModalOption) const visionChatModel = chain.llm as IVisionChatModal + const messageContent = addImagesToMessages(nodeData, options, visionChatModel.multiModalOption) if (messageContent?.length) { // Change model to gpt-4-vision && max token to higher when using gpt-4-vision visionChatModel.setVisionModel() From 4b9a7c9b9b7d03868a08a673f955201822149e62 Mon Sep 17 00:00:00 2001 From: Henry Date: Sat, 9 Mar 2024 17:28:31 +0800 Subject: [PATCH 18/25] revert azure chat openai --- .../AzureChatOpenAI/AzureChatOpenAI.ts | 18 +++++------------- 1 file changed, 5 insertions(+), 13 deletions(-) diff --git a/packages/components/nodes/chatmodels/AzureChatOpenAI/AzureChatOpenAI.ts b/packages/components/nodes/chatmodels/AzureChatOpenAI/AzureChatOpenAI.ts index 785bd3c5..ea924fd0 100644 --- a/packages/components/nodes/chatmodels/AzureChatOpenAI/AzureChatOpenAI.ts +++ b/packages/components/nodes/chatmodels/AzureChatOpenAI/AzureChatOpenAI.ts @@ -1,9 +1,8 @@ -import { AzureOpenAIInput, ChatOpenAI as LangchainChatOpenAI, OpenAIChatInput } from '@langchain/openai' +import { AzureOpenAIInput, ChatOpenAI, OpenAIChatInput } from '@langchain/openai' import { BaseCache } from '@langchain/core/caches' import { BaseLLMParams } from '@langchain/core/language_models/llms' -import { ICommonObject, IMultiModalOption, INode, INodeData, INodeParams } from '../../../src/Interface' +import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface' import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils' -import { ChatOpenAI } 
from '../ChatOpenAI/FlowiseChatOpenAI' class AzureChatOpenAI_ChatModels implements INode { label: string @@ -20,12 +19,12 @@ class AzureChatOpenAI_ChatModels implements INode { constructor() { this.label = 'Azure ChatOpenAI' this.name = 'azureChatOpenAI' - this.version = 3.0 + this.version = 2.0 this.type = 'AzureChatOpenAI' this.icon = 'Azure.svg' this.category = 'Chat Models' this.description = 'Wrapper around Azure OpenAI large language models that use the Chat endpoint' - this.baseClasses = [this.type, ...getBaseClasses(LangchainChatOpenAI)] + this.baseClasses = [this.type, ...getBaseClasses(ChatOpenAI)] this.credential = { label: 'Connect Credential', name: 'credential', @@ -139,14 +138,7 @@ class AzureChatOpenAI_ChatModels implements INode { if (timeout) obj.timeout = parseInt(timeout, 10) if (cache) obj.cache = cache - const multiModalOption: IMultiModalOption = { - image: { - allowImageUploads: false - } - } - - const model = new ChatOpenAI(nodeData.id, obj) - model.setMultiModalOption(multiModalOption) + const model = new ChatOpenAI(obj) return model } } From d98ac8236abf973e5babf588a6eaf53930344162 Mon Sep 17 00:00:00 2001 From: Joe <1712833832@qq.com> Date: Sat, 9 Mar 2024 18:39:56 +0800 Subject: [PATCH 19/25] Fixed the issue of inconsistent date creation, if the running server and the database are in different regions, the generated date will be inconsistent, and the generated date will be inconsistent --- packages/server/src/index.ts | 2 ++ 1 file changed, 2 insertions(+) diff --git a/packages/server/src/index.ts b/packages/server/src/index.ts index 094e3812..519a605b 100644 --- a/packages/server/src/index.ts +++ b/packages/server/src/index.ts @@ -1652,6 +1652,8 @@ export class App { const newChatMessage = new ChatMessage() Object.assign(newChatMessage, chatMessage) + if (!newChatMessage.createdDate) newChatMessage.createdDate = new Date() + const chatmessage = this.AppDataSource.getRepository(ChatMessage).create(newChatMessage) return await 
this.AppDataSource.getRepository(ChatMessage).save(chatmessage) } From e04cfba7a53675e04845e0358e014daeac107b46 Mon Sep 17 00:00:00 2001 From: Henry Date: Sat, 9 Mar 2024 19:45:29 +0800 Subject: [PATCH 20/25] update marketplace template --- .../server/marketplaces/chatflows/Claude LLM.json | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/packages/server/marketplaces/chatflows/Claude LLM.json b/packages/server/marketplaces/chatflows/Claude LLM.json index fdce533e..dc5f3254 100644 --- a/packages/server/marketplaces/chatflows/Claude LLM.json +++ b/packages/server/marketplaces/chatflows/Claude LLM.json @@ -160,7 +160,7 @@ "data": { "id": "chatAnthropic_0", "label": "ChatAnthropic", - "version": 3, + "version": 4, "name": "chatAnthropic", "type": "ChatAnthropic", "baseClasses": ["ChatAnthropic", "BaseChatModel", "BaseLanguageModel", "Runnable"], @@ -288,6 +288,15 @@ "optional": true, "additionalParams": true, "id": "chatAnthropic_0-input-topK-number" + }, + { + "label": "Allow Image Uploads", + "name": "allowImageUploads", + "type": "boolean", + "description": "Automatically uses claude-3-* models when image is being uploaded from chat. 
Only works with LLMChain, Conversation Chain, ReAct Agent, and Conversational Agent", + "default": false, + "optional": true, + "id": "chatAnthropic_0-input-allowImageUploads-boolean" } ], "inputAnchors": [ @@ -305,7 +314,8 @@ "temperature": 0.9, "maxTokensToSample": "", "topP": "", - "topK": "" + "topK": "", + "allowImageUploads": true }, "outputAnchors": [ { From 69e082e29faf1a6bb987c564ebc1f7451f55e28f Mon Sep 17 00:00:00 2001 From: Octavian FlowiseAI <154992625+ocflowiseai@users.noreply.github.com> Date: Sat, 9 Mar 2024 23:19:39 +0100 Subject: [PATCH 21/25] Add input moderation for all chains and agents --- .../agents/AirtableAgent/AirtableAgent.ts | 26 ++++++++++++-- .../nodes/agents/AutoGPT/AutoGPT.ts | 26 ++++++++++++-- .../nodes/agents/BabyAGI/BabyAGI.ts | 27 ++++++++++++-- .../nodes/agents/CSVAgent/CSVAgent.ts | 26 ++++++++++++-- .../ConversationalAgent.ts | 25 +++++++++++-- .../ConversationalRetrievalAgent.ts | 27 ++++++++++++-- .../agents/MRKLAgentChat/MRKLAgentChat.ts | 26 ++++++++++++-- .../nodes/agents/MRKLAgentLLM/MRKLAgentLLM.ts | 27 ++++++++++++-- .../OpenAIFunctionAgent.ts | 25 ++++++++++++- .../nodes/agents/XMLAgent/XMLAgent.ts | 25 +++++++++++-- .../nodes/chains/ApiChain/OpenAPIChain.ts | 26 ++++++++++++-- .../ConversationalRetrievalQAChain.ts | 23 +++++++++++- .../MultiPromptChain/MultiPromptChain.ts | 25 +++++++++++-- .../MultiRetrievalQAChain.ts | 24 +++++++++++-- .../RetrievalQAChain/RetrievalQAChain.ts | 25 +++++++++++-- .../SqlDatabaseChain/SqlDatabaseChain.ts | 25 +++++++++++-- .../nodes/chains/VectaraChain/VectaraChain.ts | 26 ++++++++++++-- .../chains/VectorDBQAChain/VectorDBQAChain.ts | 26 ++++++++++++-- .../chatflows/API Agent OpenAI.json | 24 +++++++++++-- .../marketplaces/chatflows/API Agent.json | 12 ++++++- .../marketplaces/chatflows/AutoGPT.json | 12 ++++++- .../marketplaces/chatflows/BabyAGI.json | 12 ++++++- .../marketplaces/chatflows/CSV Agent.json | 12 ++++++- .../chatflows/Chat with a Podcast.json | 12 ++++++- 
.../marketplaces/chatflows/ChatGPTPlugin.json | 12 ++++++- .../chatflows/Conversational Agent.json | 12 ++++++- .../Conversational Retrieval Agent.json | 12 ++++++- .../Conversational Retrieval QA Chain.json | 12 ++++++- .../chatflows/Flowise Docs QnA.json | 12 ++++++- .../marketplaces/chatflows/Local QnA.json | 12 ++++++- .../chatflows/Long Term Memory.json | 12 ++++++- .../chatflows/Metadata Filter.json | 12 ++++++- .../chatflows/Multi Prompt Chain.json | 12 ++++++- .../chatflows/Multi Retrieval QA Chain.json | 12 ++++++- .../chatflows/Multiple VectorDB.json | 36 +++++++++++++++++-- .../marketplaces/chatflows/OpenAI Agent.json | 12 ++++++- .../marketplaces/chatflows/ReAct Agent.json | 12 ++++++- .../marketplaces/chatflows/SQL DB Chain.json | 12 ++++++- .../chatflows/Vectara RAG Chain.json | 12 ++++++- .../marketplaces/chatflows/WebBrowser.json | 12 ++++++- .../marketplaces/chatflows/WebPage QnA.json | 12 ++++++- 41 files changed, 711 insertions(+), 61 deletions(-) diff --git a/packages/components/nodes/agents/AirtableAgent/AirtableAgent.ts b/packages/components/nodes/agents/AirtableAgent/AirtableAgent.ts index 3113cdfe..34352f6c 100644 --- a/packages/components/nodes/agents/AirtableAgent/AirtableAgent.ts +++ b/packages/components/nodes/agents/AirtableAgent/AirtableAgent.ts @@ -6,6 +6,8 @@ import { ICommonObject, INode, INodeData, INodeParams, PromptTemplate } from '.. 
import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils' import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler' import { LoadPyodide, finalSystemPrompt, systemPrompt } from './core' +import { checkInputs, Moderation } from '../../moderation/Moderation' +import { formatResponse } from '../../outputparsers/OutputParserHelpers' class Airtable_Agents implements INode { label: string @@ -22,7 +24,7 @@ class Airtable_Agents implements INode { constructor() { this.label = 'Airtable Agent' this.name = 'airtableAgent' - this.version = 1.0 + this.version = 2.0 this.type = 'AgentExecutor' this.category = 'Agents' this.icon = 'airtable.svg' @@ -71,6 +73,14 @@ class Airtable_Agents implements INode { default: 100, additionalParams: true, description: 'Number of results to return' + }, + { + label: 'Input Moderation', + description: 'Detect text that could generate harmful output and prevent it from being sent to the language model', + name: 'inputModeration', + type: 'Moderation', + optional: true, + list: true } ] } @@ -80,12 +90,24 @@ class Airtable_Agents implements INode { return undefined } - async run(nodeData: INodeData, input: string, options: ICommonObject): Promise { + async run(nodeData: INodeData, input: string, options: ICommonObject): Promise { const model = nodeData.inputs?.model as BaseLanguageModel const baseId = nodeData.inputs?.baseId as string const tableId = nodeData.inputs?.tableId as string const returnAll = nodeData.inputs?.returnAll as boolean const limit = nodeData.inputs?.limit as string + const moderations = nodeData.inputs?.inputModeration as Moderation[] + + if (moderations && moderations.length > 0) { + try { + // Use the output of the moderation chain as input for the Airtable agent + input = await checkInputs(moderations, input) + } catch (e) { + await new Promise((resolve) => setTimeout(resolve, 500)) + //streamResponse(options.socketIO && options.socketIOClientId,
e.message, options.socketIO, options.socketIOClientId) + return formatResponse(e.message) + } + } const credentialData = await getCredentialData(nodeData.credential ?? '', options) const accessToken = getCredentialParam('accessToken', credentialData, nodeData) diff --git a/packages/components/nodes/agents/AutoGPT/AutoGPT.ts b/packages/components/nodes/agents/AutoGPT/AutoGPT.ts index 3689a7ea..4c1d962c 100644 --- a/packages/components/nodes/agents/AutoGPT/AutoGPT.ts +++ b/packages/components/nodes/agents/AutoGPT/AutoGPT.ts @@ -7,6 +7,8 @@ import { PromptTemplate } from '@langchain/core/prompts' import { AutoGPT } from 'langchain/experimental/autogpt' import { LLMChain } from 'langchain/chains' import { INode, INodeData, INodeParams } from '../../../src/Interface' +import { checkInputs, Moderation } from '../../moderation/Moderation' +import { formatResponse } from '../../outputparsers/OutputParserHelpers' type ObjectTool = StructuredTool const FINISH_NAME = 'finish' @@ -25,7 +27,7 @@ class AutoGPT_Agents implements INode { constructor() { this.label = 'AutoGPT' this.name = 'autoGPT' - this.version = 1.0 + this.version = 2.0 this.type = 'AutoGPT' this.category = 'Agents' this.icon = 'autogpt.svg' @@ -68,6 +70,14 @@ class AutoGPT_Agents implements INode { type: 'number', default: 5, optional: true + }, + { + label: 'Input Moderation', + description: 'Detect text that could generate harmful output and prevent it from being sent to the language model', + name: 'inputModeration', + type: 'Moderation', + optional: true, + list: true } ] } @@ -92,9 +102,21 @@ class AutoGPT_Agents implements INode { return autogpt } - async run(nodeData: INodeData, input: string): Promise { + async run(nodeData: INodeData, input: string): Promise { const executor = nodeData.instance as AutoGPT const model = nodeData.inputs?.model as BaseChatModel + const moderations = nodeData.inputs?.inputModeration as Moderation[] + + if (moderations && moderations.length > 0) { + try { + // Use the 
output of the moderation chain as input for the AutoGPT agent + input = await checkInputs(moderations, input) + } catch (e) { + await new Promise((resolve) => setTimeout(resolve, 500)) + //streamResponse(options.socketIO && options.socketIOClientId, e.message, options.socketIO, options.socketIOClientId) + return formatResponse(e.message) + } + } try { let totalAssistantReply = '' diff --git a/packages/components/nodes/agents/BabyAGI/BabyAGI.ts b/packages/components/nodes/agents/BabyAGI/BabyAGI.ts index c70cd800..bfc910b7 100644 --- a/packages/components/nodes/agents/BabyAGI/BabyAGI.ts +++ b/packages/components/nodes/agents/BabyAGI/BabyAGI.ts @@ -2,6 +2,8 @@ import { BaseChatModel } from '@langchain/core/language_models/chat_models' import { VectorStore } from '@langchain/core/vectorstores' import { INode, INodeData, INodeParams } from '../../../src/Interface' import { BabyAGI } from './core' +import { checkInputs, Moderation } from '../../moderation/Moderation' +import { formatResponse } from '../../outputparsers/OutputParserHelpers' class BabyAGI_Agents implements INode { label: string @@ -17,7 +19,7 @@ class BabyAGI_Agents implements INode { constructor() { this.label = 'BabyAGI' this.name = 'babyAGI' - this.version = 1.0 + this.version = 2.0 this.type = 'BabyAGI' this.category = 'Agents' this.icon = 'babyagi.svg' @@ -39,6 +41,14 @@ class BabyAGI_Agents implements INode { name: 'taskLoop', type: 'number', default: 3 + }, + { + label: 'Input Moderation', + description: 'Detect text that could generate harmful output and prevent it from being sent to the language model', + name: 'inputModeration', + type: 'Moderation', + optional: true, + list: true } ] } @@ -53,8 +63,21 @@ class BabyAGI_Agents implements INode { return babyAgi } - async run(nodeData: INodeData, input: string): Promise { + async run(nodeData: INodeData, input: string): Promise { const executor = nodeData.instance as BabyAGI + const moderations = nodeData.inputs?.inputModeration as Moderation[] + + 
if (moderations && moderations.length > 0) { + try { + // Use the output of the moderation chain as input for the BabyAGI agent + input = await checkInputs(moderations, input) + } catch (e) { + await new Promise((resolve) => setTimeout(resolve, 500)) + //streamResponse(options.socketIO && options.socketIOClientId, e.message, options.socketIO, options.socketIOClientId) + return formatResponse(e.message) + } + } + const objective = input const res = await executor.call({ objective }) diff --git a/packages/components/nodes/agents/CSVAgent/CSVAgent.ts b/packages/components/nodes/agents/CSVAgent/CSVAgent.ts index f55981ab..428b02e2 100644 --- a/packages/components/nodes/agents/CSVAgent/CSVAgent.ts +++ b/packages/components/nodes/agents/CSVAgent/CSVAgent.ts @@ -5,6 +5,8 @@ import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from import { ICommonObject, INode, INodeData, INodeParams, PromptTemplate } from '../../../src/Interface' import { getBaseClasses } from '../../../src/utils' import { LoadPyodide, finalSystemPrompt, systemPrompt } from './core' +import { checkInputs, Moderation } from '../../moderation/Moderation' +import { formatResponse } from '../../outputparsers/OutputParserHelpers' class CSV_Agents implements INode { label: string @@ -20,7 +22,7 @@ class CSV_Agents implements INode { constructor() { this.label = 'CSV Agent' this.name = 'csvAgent' - this.version = 1.0 + this.version = 2.0 this.type = 'AgentExecutor' this.category = 'Agents' this.icon = 'CSVagent.svg' @@ -47,6 +49,14 @@ class CSV_Agents implements INode { optional: true, placeholder: 'I want you to act as a document that I am having a conversation with. Your name is "AI Assistant". You will provide me with answers from the given info. If the answer is not included, say exactly "Hmm, I am not sure." and stop after that. Refuse to answer any question not about the info. Never break character.' 
+ }, + { + label: 'Input Moderation', + description: 'Detect text that could generate harmful output and prevent it from being sent to the language model', + name: 'inputModeration', + type: 'Moderation', + optional: true, + list: true } ] } @@ -56,10 +66,22 @@ class CSV_Agents implements INode { return undefined } - async run(nodeData: INodeData, input: string, options: ICommonObject): Promise { + async run(nodeData: INodeData, input: string, options: ICommonObject): Promise { const csvFileBase64 = nodeData.inputs?.csvFile as string const model = nodeData.inputs?.model as BaseLanguageModel const systemMessagePrompt = nodeData.inputs?.systemMessagePrompt as string + const moderations = nodeData.inputs?.inputModeration as Moderation[] + + if (moderations && moderations.length > 0) { + try { + // Use the output of the moderation chain as input for the CSV agent + input = await checkInputs(moderations, input) + } catch (e) { + await new Promise((resolve) => setTimeout(resolve, 500)) + //streamResponse(options.socketIO && options.socketIOClientId, e.message, options.socketIO, options.socketIOClientId) + return formatResponse(e.message) + } + } const loggerHandler = new ConsoleCallbackHandler(options.logger) const handler = new CustomChainHandler(options.socketIO, options.socketIOClientId) diff --git a/packages/components/nodes/agents/ConversationalAgent/ConversationalAgent.ts b/packages/components/nodes/agents/ConversationalAgent/ConversationalAgent.ts index db6b37c6..802ee6f8 100644 --- a/packages/components/nodes/agents/ConversationalAgent/ConversationalAgent.ts +++ b/packages/components/nodes/agents/ConversationalAgent/ConversationalAgent.ts @@ -13,6 +13,8 @@ import { FlowiseMemory, ICommonObject, IMessage, INode, INodeData, INodeParams } import { AgentExecutor } from '../../../src/agents' import { ChatOpenAI } from '../../chatmodels/ChatOpenAI/FlowiseChatOpenAI' import { addImagesToMessages } from '../../../src/multiModalUtils' +import { checkInputs, Moderation } 
from '../../moderation/Moderation' +import { formatResponse } from '../../outputparsers/OutputParserHelpers' const DEFAULT_PREFIX = `Assistant is a large language model trained by OpenAI. @@ -46,7 +48,7 @@ class ConversationalAgent_Agents implements INode { constructor(fields?: { sessionId?: string }) { this.label = 'Conversational Agent' this.name = 'conversationalAgent' - this.version = 2.0 + this.version = 3.0 this.type = 'AgentExecutor' this.category = 'Agents' this.icon = 'agent.svg' @@ -77,6 +79,14 @@ class ConversationalAgent_Agents implements INode { default: DEFAULT_PREFIX, optional: true, additionalParams: true + }, + { + label: 'Input Moderation', + description: 'Detect text that could generate harmful output and prevent it from being sent to the language model', + name: 'inputModeration', + type: 'Moderation', + optional: true, + list: true } ] this.sessionId = fields?.sessionId @@ -86,9 +96,20 @@ class ConversationalAgent_Agents implements INode { return prepareAgent(nodeData, options, { sessionId: this.sessionId, chatId: options.chatId, input }, options.chatHistory) } - async run(nodeData: INodeData, input: string, options: ICommonObject): Promise { + async run(nodeData: INodeData, input: string, options: ICommonObject): Promise { const memory = nodeData.inputs?.memory as FlowiseMemory + const moderations = nodeData.inputs?.inputModeration as Moderation[] + if (moderations && moderations.length > 0) { + try { + // Use the output of the moderation chain as input for the Conversational Agent + input = await checkInputs(moderations, input) + } catch (e) { + await new Promise((resolve) => setTimeout(resolve, 500)) + //streamResponse(options.socketIO && options.socketIOClientId, e.message, options.socketIO, options.socketIOClientId) + return formatResponse(e.message) + } + } const executor = await prepareAgent( nodeData, options, diff --git a/packages/components/nodes/agents/ConversationalRetrievalAgent/ConversationalRetrievalAgent.ts
b/packages/components/nodes/agents/ConversationalRetrievalAgent/ConversationalRetrievalAgent.ts index 36bc6807..c61c2544 100644 --- a/packages/components/nodes/agents/ConversationalRetrievalAgent/ConversationalRetrievalAgent.ts +++ b/packages/components/nodes/agents/ConversationalRetrievalAgent/ConversationalRetrievalAgent.ts @@ -10,6 +10,8 @@ import { FlowiseMemory, ICommonObject, IMessage, INode, INodeData, INodeParams } import { getBaseClasses } from '../../../src/utils' import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler' import { AgentExecutor, formatAgentSteps } from '../../../src/agents' +import { checkInputs, Moderation } from '../../moderation/Moderation' +import { formatResponse } from '../../outputparsers/OutputParserHelpers' const defaultMessage = `Do your best to answer the questions. Feel free to use any tools available to look up relevant information, only if necessary.` @@ -28,7 +30,7 @@ class ConversationalRetrievalAgent_Agents implements INode { constructor(fields?: { sessionId?: string }) { this.label = 'Conversational Retrieval Agent' this.name = 'conversationalRetrievalAgent' - this.version = 3.0 + this.version = 4.0 this.type = 'AgentExecutor' this.category = 'Agents' this.icon = 'agent.svg' @@ -59,6 +61,14 @@ class ConversationalRetrievalAgent_Agents implements INode { rows: 4, optional: true, additionalParams: true + }, + { + label: 'Input Moderation', + description: 'Detect text that could generate harmful output and prevent it from being sent to the language model', + name: 'inputModeration', + type: 'Moderation', + optional: true, + list: true } ] this.sessionId = fields?.sessionId @@ -68,8 +78,21 @@ class ConversationalRetrievalAgent_Agents implements INode { return prepareAgent(nodeData, { sessionId: this.sessionId, chatId: options.chatId, input }, options.chatHistory) } - async run(nodeData: INodeData, input: string, options: ICommonObject): Promise { + async run(nodeData: INodeData, 
input: string, options: ICommonObject): Promise { const memory = nodeData.inputs?.memory as FlowiseMemory + const moderations = nodeData.inputs?.inputModeration as Moderation[] + + if (moderations && moderations.length > 0) { + try { + // Use the output of the moderation chain as input for the Conversational Retrieval Agent + input = await checkInputs(moderations, input) + } catch (e) { + await new Promise((resolve) => setTimeout(resolve, 500)) + //streamResponse(options.socketIO && options.socketIOClientId, e.message, options.socketIO, options.socketIOClientId) + return formatResponse(e.message) + } + } + const executor = prepareAgent(nodeData, { sessionId: this.sessionId, chatId: options.chatId, input }, options.chatHistory) const loggerHandler = new ConsoleCallbackHandler(options.logger) diff --git a/packages/components/nodes/agents/MRKLAgentChat/MRKLAgentChat.ts b/packages/components/nodes/agents/MRKLAgentChat/MRKLAgentChat.ts index 5923d77e..a2d0f259 100644 --- a/packages/components/nodes/agents/MRKLAgentChat/MRKLAgentChat.ts +++ b/packages/components/nodes/agents/MRKLAgentChat/MRKLAgentChat.ts @@ -12,6 +12,8 @@ import { getBaseClasses } from '../../../src/utils' import { createReactAgent } from '../../../src/agents' import { ChatOpenAI } from '../../chatmodels/ChatOpenAI/FlowiseChatOpenAI' import { addImagesToMessages } from '../../../src/multiModalUtils' +import { checkInputs, Moderation } from '../../moderation/Moderation' +import { formatResponse } from '../../outputparsers/OutputParserHelpers' class MRKLAgentChat_Agents implements INode { label: string @@ -28,7 +30,7 @@ class MRKLAgentChat_Agents implements INode { constructor(fields?: { sessionId?: string }) { this.label = 'ReAct Agent for Chat Models' this.name = 'mrklAgentChat' - this.version = 3.0 + this.version = 4.0 this.type = 'AgentExecutor' this.category = 'Agents' this.icon = 'agent.svg' @@ -50,6 +52,14 @@ class MRKLAgentChat_Agents implements INode { label: 'Memory', name: 'memory', type: 'BaseChatMemory' + },
+ { + label: 'Input Moderation', + description: 'Detect text that could generate harmful output and prevent it from being sent to the language model', + name: 'inputModeration', + type: 'Moderation', + optional: true, + list: true } ] this.sessionId = fields?.sessionId @@ -59,10 +69,22 @@ class MRKLAgentChat_Agents implements INode { return null } - async run(nodeData: INodeData, input: string, options: ICommonObject): Promise { + async run(nodeData: INodeData, input: string, options: ICommonObject): Promise { const memory = nodeData.inputs?.memory as FlowiseMemory const model = nodeData.inputs?.model as BaseChatModel let tools = nodeData.inputs?.tools as Tool[] + const moderations = nodeData.inputs?.inputModeration as Moderation[] + + if (moderations && moderations.length > 0) { + try { + // Use the output of the moderation chain as input for the ReAct Agent for Chat Models + input = await checkInputs(moderations, input) + } catch (e) { + await new Promise((resolve) => setTimeout(resolve, 500)) + //streamResponse(options.socketIO && options.socketIOClientId, e.message, options.socketIO, options.socketIOClientId) + return formatResponse(e.message) + } + } tools = flatten(tools) const prompt = await pull('hwchase17/react-chat') diff --git a/packages/components/nodes/agents/MRKLAgentLLM/MRKLAgentLLM.ts b/packages/components/nodes/agents/MRKLAgentLLM/MRKLAgentLLM.ts index 452cf437..179885e3 100644 --- a/packages/components/nodes/agents/MRKLAgentLLM/MRKLAgentLLM.ts +++ b/packages/components/nodes/agents/MRKLAgentLLM/MRKLAgentLLM.ts @@ -8,6 +8,8 @@ import { additionalCallbacks } from '../../../src/handler' import { getBaseClasses } from '../../../src/utils' import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface' import { createReactAgent } from '../../../src/agents' +import { checkInputs, Moderation } from '../../moderation/Moderation' +import { formatResponse } from '../../outputparsers/OutputParserHelpers' class MRKLAgentLLM_Agents 
implements INode { label: string @@ -23,7 +25,7 @@ class MRKLAgentLLM_Agents implements INode { constructor() { this.label = 'ReAct Agent for LLMs' this.name = 'mrklAgentLLM' - this.version = 1.0 + this.version = 2.0 this.type = 'AgentExecutor' this.category = 'Agents' this.icon = 'agent.svg' @@ -40,6 +42,14 @@ class MRKLAgentLLM_Agents implements INode { label: 'Language Model', name: 'model', type: 'BaseLanguageModel' + }, + { + label: 'Input Moderation', + description: 'Detect text that could generate harmful output and prevent it from being sent to the language model', + name: 'inputModeration', + type: 'Moderation', + optional: true, + list: true } ] } @@ -48,9 +58,22 @@ class MRKLAgentLLM_Agents implements INode { return null } - async run(nodeData: INodeData, input: string, options: ICommonObject): Promise { + async run(nodeData: INodeData, input: string, options: ICommonObject): Promise { const model = nodeData.inputs?.model as BaseLanguageModel let tools = nodeData.inputs?.tools as Tool[] + const moderations = nodeData.inputs?.inputModeration as Moderation[] + + if (moderations && moderations.length > 0) { + try { + // Use the output of the moderation chain as input for the ReAct Agent for LLMs + input = await checkInputs(moderations, input) + } catch (e) { + await new Promise((resolve) => setTimeout(resolve, 500)) + //streamResponse(options.socketIO && options.socketIOClientId, e.message, options.socketIO, options.socketIOClientId) + return formatResponse(e.message) + } + } + tools = flatten(tools) const prompt = await pull('hwchase17/react') diff --git a/packages/components/nodes/agents/OpenAIFunctionAgent/OpenAIFunctionAgent.ts b/packages/components/nodes/agents/OpenAIFunctionAgent/OpenAIFunctionAgent.ts index 0acadca1..2fc1a229 100644 --- a/packages/components/nodes/agents/OpenAIFunctionAgent/OpenAIFunctionAgent.ts +++ b/packages/components/nodes/agents/OpenAIFunctionAgent/OpenAIFunctionAgent.ts @@ -10,6 +10,8 @@ import { getBaseClasses } from 
'../../../src/utils' import { FlowiseMemory, ICommonObject, IMessage, INode, INodeData, INodeParams } from '../../../src/Interface' import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler' import { AgentExecutor, formatAgentSteps } from '../../../src/agents' +import { Moderation, checkInputs } from '../../moderation/Moderation' +import { formatResponse } from '../../outputparsers/OutputParserHelpers' class OpenAIFunctionAgent_Agents implements INode { label: string @@ -26,7 +28,7 @@ class OpenAIFunctionAgent_Agents implements INode { constructor(fields?: { sessionId?: string }) { this.label = 'OpenAI Function Agent' this.name = 'openAIFunctionAgent' - this.version = 3.0 + this.version = 4.0 this.type = 'AgentExecutor' this.category = 'Agents' this.icon = 'function.svg' @@ -56,6 +58,14 @@ class OpenAIFunctionAgent_Agents implements INode { rows: 4, optional: true, additionalParams: true + }, + { + label: 'Input Moderation', + description: 'Detect text that could generate harmful output and prevent it from being sent to the language model', + name: 'inputModeration', + type: 'Moderation', + optional: true, + list: true } ] this.sessionId = fields?.sessionId @@ -67,6 +77,19 @@ class OpenAIFunctionAgent_Agents implements INode { async run(nodeData: INodeData, input: string, options: ICommonObject): Promise { const memory = nodeData.inputs?.memory as FlowiseMemory + const moderations = nodeData.inputs?.inputModeration as Moderation[] + + if (moderations && moderations.length > 0) { + try { + // Use the output of the moderation chain as input for the OpenAI Function Agent + input = await checkInputs(moderations, input) + } catch (e) { + await new Promise((resolve) => setTimeout(resolve, 500)) + //streamResponse(options.socketIO && options.socketIOClientId, e.message, options.socketIO, options.socketIOClientId) + return formatResponse(e.message) + } + } + const executor = prepareAgent(nodeData, { sessionId: this.sessionId, 
chatId: options.chatId, input }, options.chatHistory) const loggerHandler = new ConsoleCallbackHandler(options.logger) diff --git a/packages/components/nodes/agents/XMLAgent/XMLAgent.ts b/packages/components/nodes/agents/XMLAgent/XMLAgent.ts index 49109947..167214f8 100644 --- a/packages/components/nodes/agents/XMLAgent/XMLAgent.ts +++ b/packages/components/nodes/agents/XMLAgent/XMLAgent.ts @@ -11,7 +11,8 @@ import { getBaseClasses } from '../../../src/utils' import { FlowiseMemory, ICommonObject, IMessage, INode, INodeData, INodeParams } from '../../../src/Interface' import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler' import { AgentExecutor } from '../../../src/agents' -//import { AgentExecutor } from "langchain/agents"; +import { Moderation, checkInputs } from '../../moderation/Moderation' +import { formatResponse } from '../../outputparsers/OutputParserHelpers' const defaultSystemMessage = `You are a helpful assistant. Help the user answer any questions. 
@@ -52,7 +53,7 @@ class XMLAgent_Agents implements INode { constructor(fields?: { sessionId?: string }) { this.label = 'XML Agent' this.name = 'xmlAgent' - this.version = 1.0 + this.version = 2.0 this.type = 'XMLAgent' this.category = 'Agents' this.icon = 'xmlagent.svg' @@ -83,6 +84,14 @@ class XMLAgent_Agents implements INode { rows: 4, default: defaultSystemMessage, additionalParams: true + }, + { + label: 'Input Moderation', + description: 'Detect text that could generate harmful output and prevent it from being sent to the language model', + name: 'inputModeration', + type: 'Moderation', + optional: true, + list: true } ] this.sessionId = fields?.sessionId @@ -94,6 +103,18 @@ async run(nodeData: INodeData, input: string, options: ICommonObject): Promise { const memory = nodeData.inputs?.memory as FlowiseMemory + const moderations = nodeData.inputs?.inputModeration as Moderation[] + + if (moderations && moderations.length > 0) { + try { + // Use the output of the moderation chain as input for the XML Agent + input = await checkInputs(moderations, input) + } catch (e) { + await new Promise((resolve) => setTimeout(resolve, 500)) + //streamResponse(options.socketIO && options.socketIOClientId, e.message, options.socketIO, options.socketIOClientId) + return formatResponse(e.message) + } + } const executor = await prepareAgent(nodeData, { sessionId: this.sessionId, chatId: options.chatId, input }, options.chatHistory) const loggerHandler = new ConsoleCallbackHandler(options.logger) diff --git a/packages/components/nodes/chains/ApiChain/OpenAPIChain.ts b/packages/components/nodes/chains/ApiChain/OpenAPIChain.ts index d922a186..e5c11eb3 100644 --- a/packages/components/nodes/chains/ApiChain/OpenAPIChain.ts +++ b/packages/components/nodes/chains/ApiChain/OpenAPIChain.ts @@ -3,6 +3,8 @@ import { APIChain, createOpenAPIChain } from 'langchain/chains' import { ICommonObject, INode, INodeData, INodeParams } from
'../../../src/Interface' import { getBaseClasses } from '../../../src/utils' import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler' +import { checkInputs, Moderation, streamResponse } from '../../moderation/Moderation' +import { formatResponse } from '../../outputparsers/OutputParserHelpers' class OpenApiChain_Chains implements INode { label: string @@ -18,7 +20,7 @@ class OpenApiChain_Chains implements INode { constructor() { this.label = 'OpenAPI Chain' this.name = 'openApiChain' - this.version = 1.0 + this.version = 2.0 this.type = 'OpenAPIChain' this.icon = 'openapi.svg' this.category = 'Chains' @@ -50,6 +52,14 @@ class OpenApiChain_Chains implements INode { type: 'json', additionalParams: true, optional: true + }, + { + label: 'Input Moderation', + description: 'Detect text that could generate harmful output and prevent it from being sent to the language model', + name: 'inputModeration', + type: 'Moderation', + optional: true, + list: true } ] } @@ -58,11 +68,21 @@ class OpenApiChain_Chains implements INode { return await initChain(nodeData) } - async run(nodeData: INodeData, input: string, options: ICommonObject): Promise { + async run(nodeData: INodeData, input: string, options: ICommonObject): Promise { const chain = await initChain(nodeData) const loggerHandler = new ConsoleCallbackHandler(options.logger) const callbacks = await additionalCallbacks(nodeData, options) - + const moderations = nodeData.inputs?.inputModeration as Moderation[] + if (moderations && moderations.length > 0) { + try { + // Use the output of the moderation chain as input for the OpenAPI chain + input = await checkInputs(moderations, input) + } catch (e) { + await new Promise((resolve) => setTimeout(resolve, 500)) + streamResponse(options.socketIO && options.socketIOClientId, e.message, options.socketIO, options.socketIOClientId) + return formatResponse(e.message) + } + } if (options.socketIO && options.socketIOClientId) { const 
handler = new CustomChainHandler(options.socketIO, options.socketIOClientId) const res = await chain.run(input, [loggerHandler, handler, ...callbacks]) diff --git a/packages/components/nodes/chains/ConversationalRetrievalQAChain/ConversationalRetrievalQAChain.ts b/packages/components/nodes/chains/ConversationalRetrievalQAChain/ConversationalRetrievalQAChain.ts index 46d739d1..ec45b684 100644 --- a/packages/components/nodes/chains/ConversationalRetrievalQAChain/ConversationalRetrievalQAChain.ts +++ b/packages/components/nodes/chains/ConversationalRetrievalQAChain/ConversationalRetrievalQAChain.ts @@ -5,6 +5,8 @@ import { PromptTemplate, ChatPromptTemplate, MessagesPlaceholder } from '@langch import { Runnable, RunnableSequence, RunnableMap, RunnableBranch, RunnableLambda } from '@langchain/core/runnables' import { BaseMessage, HumanMessage, AIMessage } from '@langchain/core/messages' import { ConsoleCallbackHandler as LCConsoleCallbackHandler } from '@langchain/core/tracers/console' +import { checkInputs, Moderation, streamResponse } from '../../moderation/Moderation' +import { formatResponse } from '../../outputparsers/OutputParserHelpers' import { StringOutputParser } from '@langchain/core/output_parsers' import type { Document } from '@langchain/core/documents' import { BufferMemoryInput } from 'langchain/memory' @@ -36,7 +38,7 @@ class ConversationalRetrievalQAChain_Chains implements INode { constructor(fields?: { sessionId?: string }) { this.label = 'Conversational Retrieval QA Chain' this.name = 'conversationalRetrievalQAChain' - this.version = 2.0 + this.version = 3.0 this.type = 'ConversationalRetrievalQAChain' this.icon = 'qa.svg' this.category = 'Chains' @@ -87,6 +89,14 @@ class ConversationalRetrievalQAChain_Chains implements INode { additionalParams: true, optional: true, default: RESPONSE_TEMPLATE + }, + { + label: 'Input Moderation', + description: 'Detect text that could generate harmful output and prevent it from being sent to the language model', + 
name: 'inputModeration', + type: 'Moderation', + optional: true, + list: true } /** Deprecated { @@ -163,6 +173,7 @@ class ConversationalRetrievalQAChain_Chains implements INode { } let memory: FlowiseMemory | undefined = externalMemory + const moderations = nodeData.inputs?.inputModeration as Moderation[] if (!memory) { memory = new BufferMemory({ returnMessages: true, @@ -171,6 +182,16 @@ class ConversationalRetrievalQAChain_Chains implements INode { }) } + if (moderations && moderations.length > 0) { + try { + // Use the output of the moderation chain as input for the Conversational Retrieval QA Chain + input = await checkInputs(moderations, input) + } catch (e) { + await new Promise((resolve) => setTimeout(resolve, 500)) + streamResponse(options.socketIO && options.socketIOClientId, e.message, options.socketIO, options.socketIOClientId) + return formatResponse(e.message) + } + } const answerChain = createChain(model, vectorStoreRetriever, rephrasePrompt, customResponsePrompt) const history = ((await memory.getChatMessages(this.sessionId, false, options.chatHistory)) as IMessage[]) ?? 
[] diff --git a/packages/components/nodes/chains/MultiPromptChain/MultiPromptChain.ts b/packages/components/nodes/chains/MultiPromptChain/MultiPromptChain.ts index c4c1d372..fa91bb20 100644 --- a/packages/components/nodes/chains/MultiPromptChain/MultiPromptChain.ts +++ b/packages/components/nodes/chains/MultiPromptChain/MultiPromptChain.ts @@ -3,6 +3,8 @@ import { MultiPromptChain } from 'langchain/chains' import { ICommonObject, INode, INodeData, INodeParams, PromptRetriever } from '../../../src/Interface' import { getBaseClasses } from '../../../src/utils' import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler' +import { checkInputs, Moderation, streamResponse } from '../../moderation/Moderation' +import { formatResponse } from '../../outputparsers/OutputParserHelpers' class MultiPromptChain_Chains implements INode { label: string @@ -18,7 +20,7 @@ class MultiPromptChain_Chains implements INode { constructor() { this.label = 'Multi Prompt Chain' this.name = 'multiPromptChain' - this.version = 1.0 + this.version = 2.0 this.type = 'MultiPromptChain' this.icon = 'prompt.svg' this.category = 'Chains' @@ -35,6 +37,14 @@ class MultiPromptChain_Chains implements INode { name: 'promptRetriever', type: 'PromptRetriever', list: true + }, + { + label: 'Input Moderation', + description: 'Detect text that could generate harmful output and prevent it from being sent to the language model', + name: 'inputModeration', + type: 'Moderation', + optional: true, + list: true } ] } @@ -62,8 +72,19 @@ class MultiPromptChain_Chains implements INode { return chain } - async run(nodeData: INodeData, input: string, options: ICommonObject): Promise { + async run(nodeData: INodeData, input: string, options: ICommonObject): Promise { const chain = nodeData.instance as MultiPromptChain + const moderations = nodeData.inputs?.inputModeration as Moderation[] + if (moderations && moderations.length > 0) { + try { + // Use the output of the moderation 
chain as input for the Multi Prompt Chain + input = await checkInputs(moderations, input) + } catch (e) { + await new Promise((resolve) => setTimeout(resolve, 500)) + streamResponse(options.socketIO && options.socketIOClientId, e.message, options.socketIO, options.socketIOClientId) + return formatResponse(e.message) + } + } const obj = { input } const loggerHandler = new ConsoleCallbackHandler(options.logger) diff --git a/packages/components/nodes/chains/MultiRetrievalQAChain/MultiRetrievalQAChain.ts b/packages/components/nodes/chains/MultiRetrievalQAChain/MultiRetrievalQAChain.ts index 3cb78ce8..71302d63 100644 --- a/packages/components/nodes/chains/MultiRetrievalQAChain/MultiRetrievalQAChain.ts +++ b/packages/components/nodes/chains/MultiRetrievalQAChain/MultiRetrievalQAChain.ts @@ -3,6 +3,8 @@ import { MultiRetrievalQAChain } from 'langchain/chains' import { ICommonObject, INode, INodeData, INodeParams, VectorStoreRetriever } from '../../../src/Interface' import { getBaseClasses } from '../../../src/utils' import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler' +import { checkInputs, Moderation, streamResponse } from '../../moderation/Moderation' +import { formatResponse } from '../../outputparsers/OutputParserHelpers' class MultiRetrievalQAChain_Chains implements INode { label: string @@ -18,7 +20,7 @@ class MultiRetrievalQAChain_Chains implements INode { constructor() { this.label = 'Multi Retrieval QA Chain' this.name = 'multiRetrievalQAChain' - this.version = 1.0 + this.version = 2.0 this.type = 'MultiRetrievalQAChain' this.icon = 'qa.svg' this.category = 'Chains' @@ -41,6 +43,14 @@ class MultiRetrievalQAChain_Chains implements INode { name: 'returnSourceDocuments', type: 'boolean', optional: true + }, + { + label: 'Input Moderation', + description: 'Detect text that could generate harmful output and prevent it from being sent to the language model', + name: 'inputModeration', + type: 'Moderation', + optional: 
true, + list: true } ] } @@ -72,7 +82,17 @@ class MultiRetrievalQAChain_Chains implements INode { async run(nodeData: INodeData, input: string, options: ICommonObject): Promise { const chain = nodeData.instance as MultiRetrievalQAChain const returnSourceDocuments = nodeData.inputs?.returnSourceDocuments as boolean - + const moderations = nodeData.inputs?.inputModeration as Moderation[] + if (moderations && moderations.length > 0) { + try { + // Use the output of the moderation chain as input for the Multi Retrieval QA Chain + input = await checkInputs(moderations, input) + } catch (e) { + await new Promise((resolve) => setTimeout(resolve, 500)) + streamResponse(options.socketIO && options.socketIOClientId, e.message, options.socketIO, options.socketIOClientId) + return formatResponse(e.message) + } + } const obj = { input } const loggerHandler = new ConsoleCallbackHandler(options.logger) const callbacks = await additionalCallbacks(nodeData, options) diff --git a/packages/components/nodes/chains/RetrievalQAChain/RetrievalQAChain.ts b/packages/components/nodes/chains/RetrievalQAChain/RetrievalQAChain.ts index 3968d3c0..9125f38f 100644 --- a/packages/components/nodes/chains/RetrievalQAChain/RetrievalQAChain.ts +++ b/packages/components/nodes/chains/RetrievalQAChain/RetrievalQAChain.ts @@ -4,6 +4,8 @@ import { RetrievalQAChain } from 'langchain/chains' import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler' import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface' import { getBaseClasses } from '../../../src/utils' +import { checkInputs, Moderation, streamResponse } from '../../moderation/Moderation' +import { formatResponse } from '../../outputparsers/OutputParserHelpers' class RetrievalQAChain_Chains implements INode { label: string @@ -19,7 +21,7 @@ class RetrievalQAChain_Chains implements INode { constructor() { this.label = 'Retrieval QA Chain' this.name = 'retrievalQAChain' - this.version 
= 1.0 + this.version = 2.0 this.type = 'RetrievalQAChain' this.icon = 'qa.svg' this.category = 'Chains' @@ -35,6 +37,14 @@ class RetrievalQAChain_Chains implements INode { label: 'Vector Store Retriever', name: 'vectorStoreRetriever', type: 'BaseRetriever' + }, + { + label: 'Input Moderation', + description: 'Detect text that could generate harmful output and prevent it from being sent to the language model', + name: 'inputModeration', + type: 'Moderation', + optional: true, + list: true } ] } @@ -47,8 +57,19 @@ class RetrievalQAChain_Chains implements INode { return chain } - async run(nodeData: INodeData, input: string, options: ICommonObject): Promise { + async run(nodeData: INodeData, input: string, options: ICommonObject): Promise { const chain = nodeData.instance as RetrievalQAChain + const moderations = nodeData.inputs?.inputModeration as Moderation[] + if (moderations && moderations.length > 0) { + try { + // Use the output of the moderation chain as input for the Retrieval QA Chain + input = await checkInputs(moderations, input) + } catch (e) { + await new Promise((resolve) => setTimeout(resolve, 500)) + streamResponse(options.socketIO && options.socketIOClientId, e.message, options.socketIO, options.socketIOClientId) + return formatResponse(e.message) + } + } const obj = { query: input } diff --git a/packages/components/nodes/chains/SqlDatabaseChain/SqlDatabaseChain.ts b/packages/components/nodes/chains/SqlDatabaseChain/SqlDatabaseChain.ts index 2c9f3813..018e1f06 100644 --- a/packages/components/nodes/chains/SqlDatabaseChain/SqlDatabaseChain.ts +++ b/packages/components/nodes/chains/SqlDatabaseChain/SqlDatabaseChain.ts @@ -7,6 +7,8 @@ import { SqlDatabase } from 'langchain/sql_db' import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface' import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler' import { getBaseClasses, getInputVariables } from '../../../src/utils' +import { 
checkInputs, Moderation, streamResponse } from '../../moderation/Moderation' +import { formatResponse } from '../../outputparsers/OutputParserHelpers' type DatabaseType = 'sqlite' | 'postgres' | 'mssql' | 'mysql' @@ -24,7 +26,7 @@ class SqlDatabaseChain_Chains implements INode { constructor() { this.label = 'Sql Database Chain' this.name = 'sqlDatabaseChain' - this.version = 4.0 + this.version = 5.0 this.type = 'SqlDatabaseChain' this.icon = 'sqlchain.svg' this.category = 'Chains' @@ -115,6 +117,14 @@ class SqlDatabaseChain_Chains implements INode { placeholder: DEFAULT_SQL_DATABASE_PROMPT.template + DEFAULT_SQL_DATABASE_PROMPT.templateFormat, additionalParams: true, optional: true + }, + { + label: 'Input Moderation', + description: 'Detect text that could generate harmful output and prevent it from being sent to the language model', + name: 'inputModeration', + type: 'Moderation', + optional: true, + list: true } ] } @@ -144,7 +154,7 @@ class SqlDatabaseChain_Chains implements INode { return chain } - async run(nodeData: INodeData, input: string, options: ICommonObject): Promise { + async run(nodeData: INodeData, input: string, options: ICommonObject): Promise { const databaseType = nodeData.inputs?.database as DatabaseType const model = nodeData.inputs?.model as BaseLanguageModel const url = nodeData.inputs?.url as string @@ -155,6 +165,17 @@ class SqlDatabaseChain_Chains implements INode { const sampleRowsInTableInfo = nodeData.inputs?.sampleRowsInTableInfo as number const topK = nodeData.inputs?.topK as number const customPrompt = nodeData.inputs?.customPrompt as string + const moderations = nodeData.inputs?.inputModeration as Moderation[] + if (moderations && moderations.length > 0) { + try { + // Use the output of the moderation chain as input for the Sql Database Chain + input = await checkInputs(moderations, input) + } catch (e) { + await new Promise((resolve) => setTimeout(resolve, 500)) + streamResponse(options.socketIO && options.socketIOClientId, 
e.message, options.socketIO, options.socketIOClientId) + return formatResponse(e.message) + } + } const chain = await getSQLDBChain( databaseType, diff --git a/packages/components/nodes/chains/VectaraChain/VectaraChain.ts b/packages/components/nodes/chains/VectaraChain/VectaraChain.ts index 97bbaa67..e5427ca0 100644 --- a/packages/components/nodes/chains/VectaraChain/VectaraChain.ts +++ b/packages/components/nodes/chains/VectaraChain/VectaraChain.ts @@ -4,6 +4,8 @@ import { VectaraStore } from '@langchain/community/vectorstores/vectara' import { VectorDBQAChain } from 'langchain/chains' import { INode, INodeData, INodeParams } from '../../../src/Interface' import { getBaseClasses } from '../../../src/utils' +import { checkInputs, Moderation } from '../../moderation/Moderation' +import { formatResponse } from '../../outputparsers/OutputParserHelpers' // functionality based on https://github.com/vectara/vectara-answer const reorderCitations = (unorderedSummary: string) => { @@ -48,7 +50,7 @@ class VectaraChain_Chains implements INode { constructor() { this.label = 'Vectara QA Chain' this.name = 'vectaraQAChain' - this.version = 1.0 + this.version = 2.0 this.type = 'VectaraQAChain' this.icon = 'vectara.png' this.category = 'Chains' @@ -219,6 +221,14 @@ class VectaraChain_Chains implements INode { description: 'Maximum results used to build the summarized response', type: 'number', default: 7 + }, + { + label: 'Input Moderation', + description: 'Detect text that could generate harmful output and prevent it from being sent to the language model', + name: 'inputModeration', + type: 'Moderation', + optional: true, + list: true } ] } @@ -227,7 +237,7 @@ class VectaraChain_Chains implements INode { return null } - async run(nodeData: INodeData, input: string): Promise { + async run(nodeData: INodeData, input: string): Promise { const vectorStore = nodeData.inputs?.vectaraStore as VectaraStore const responseLang = (nodeData.inputs?.responseLang as string) ?? 
'eng' const summarizerPromptName = nodeData.inputs?.summarizerPromptName as string @@ -252,6 +262,18 @@ class VectaraChain_Chains implements INode { const mmrRerankerId = 272725718 const mmrEnabled = vectaraFilter?.mmrConfig?.enabled + const moderations = nodeData.inputs?.inputModeration as Moderation[] + if (moderations && moderations.length > 0) { + try { + // Use the output of the moderation chain as input for the Vectara chain + input = await checkInputs(moderations, input) + } catch (e) { + await new Promise((resolve) => setTimeout(resolve, 500)) + //streamResponse(options.socketIO && options.socketIOClientId, e.message, options.socketIO, options.socketIOClientId) + return formatResponse(e.message) + } + } + const data = { query: [ { diff --git a/packages/components/nodes/chains/VectorDBQAChain/VectorDBQAChain.ts b/packages/components/nodes/chains/VectorDBQAChain/VectorDBQAChain.ts index ef0df01a..129eb46a 100644 --- a/packages/components/nodes/chains/VectorDBQAChain/VectorDBQAChain.ts +++ b/packages/components/nodes/chains/VectorDBQAChain/VectorDBQAChain.ts @@ -4,6 +4,8 @@ import { VectorDBQAChain } from 'langchain/chains' import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler' import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface' import { getBaseClasses } from '../../../src/utils' +import { checkInputs, Moderation } from '../../moderation/Moderation' +import { formatResponse } from '../../outputparsers/OutputParserHelpers' class VectorDBQAChain_Chains implements INode { label: string @@ -19,7 +21,7 @@ class VectorDBQAChain_Chains implements INode { constructor() { this.label = 'VectorDB QA Chain' this.name = 'vectorDBQAChain' - this.version = 1.0 + this.version = 2.0 this.type = 'VectorDBQAChain' this.icon = 'vectordb.svg' this.category = 'Chains' @@ -35,6 +37,14 @@ class VectorDBQAChain_Chains implements INode { label: 'Vector Store', name: 'vectorStore', type: 'VectorStore' + }, 
+ { + label: 'Input Moderation', + description: 'Detect text that could generate harmful output and prevent it from being sent to the language model', + name: 'inputModeration', + type: 'Moderation', + optional: true, + list: true } ] } @@ -50,8 +60,20 @@ class VectorDBQAChain_Chains implements INode { return chain } - async run(nodeData: INodeData, input: string, options: ICommonObject): Promise { + async run(nodeData: INodeData, input: string, options: ICommonObject): Promise { const chain = nodeData.instance as VectorDBQAChain + const moderations = nodeData.inputs?.inputModeration as Moderation[] + + if (moderations && moderations.length > 0) { + try { + // Use the output of the moderation chain as input for the VectorDB QA Chain + input = await checkInputs(moderations, input) + } catch (e) { + await new Promise((resolve) => setTimeout(resolve, 500)) + //streamResponse(options.socketIO && options.socketIOClientId, e.message, options.socketIO, options.socketIOClientId) + return formatResponse(e.message) + } + } const obj = { query: input } diff --git a/packages/server/marketplaces/chatflows/API Agent OpenAI.json b/packages/server/marketplaces/chatflows/API Agent OpenAI.json index 691852d6..a0c380b4 100644 --- a/packages/server/marketplaces/chatflows/API Agent OpenAI.json +++ b/packages/server/marketplaces/chatflows/API Agent OpenAI.json @@ -15,7 +15,7 @@ "data": { "id": "openApiChain_1", "label": "OpenAPI Chain", - "version": 1, + "version": 2, "name": "openApiChain", "type": "OpenAPIChain", "baseClasses": ["OpenAPIChain", "BaseChain"], @@ -53,9 +53,19 @@ "name": "model", "type": "ChatOpenAI", "id": "openApiChain_1-input-model-ChatOpenAI" + }, + { + "label": "Input Moderation", + "description": "Detect text that could generate harmful output and prevent it from being sent to the language model", + "name": "inputModeration", + "type": "Moderation", + "optional": true, + "list": true, + "id": "openApiChain_1-input-inputModeration-Moderation" } ], "inputs": { + 
"inputModeration": "", "model": "{{chatOpenAI_1.data.instance}}", "yamlLink": "https://gist.githubusercontent.com/roaldnefs/053e505b2b7a807290908fe9aa3e1f00/raw/0a212622ebfef501163f91e23803552411ed00e4/openapi.yaml", "headers": "" @@ -399,7 +409,7 @@ "id": "openAIFunctionAgent_0", "label": "OpenAI Function Agent", "name": "openAIFunctionAgent", - "version": 3, + "version": 4, "type": "AgentExecutor", "baseClasses": ["AgentExecutor", "BaseChain"], "category": "Agents", @@ -434,9 +444,19 @@ "name": "model", "type": "BaseChatModel", "id": "openAIFunctionAgent_0-input-model-BaseChatModel" + }, + { + "label": "Input Moderation", + "description": "Detect text that could generate harmful output and prevent it from being sent to the language model", + "name": "inputModeration", + "type": "Moderation", + "optional": true, + "list": true, + "id": "openAIFunctionAgent_0-input-inputModeration-Moderation" } ], "inputs": { + "inputModeration": "", "tools": ["{{chainTool_0.data.instance}}"], "memory": "{{bufferMemory_0.data.instance}}", "model": "{{chatOpenAI_2.data.instance}}", diff --git a/packages/server/marketplaces/chatflows/API Agent.json b/packages/server/marketplaces/chatflows/API Agent.json index facdcb6b..7f3f90b4 100644 --- a/packages/server/marketplaces/chatflows/API Agent.json +++ b/packages/server/marketplaces/chatflows/API Agent.json @@ -1100,7 +1100,7 @@ "data": { "id": "conversationalAgent_0", "label": "Conversational Agent", - "version": 2, + "version": 3, "name": "conversationalAgent", "type": "AgentExecutor", "baseClasses": ["AgentExecutor", "BaseChain", "Runnable"], @@ -1137,9 +1137,19 @@ "name": "memory", "type": "BaseChatMemory", "id": "conversationalAgent_0-input-memory-BaseChatMemory" + }, + { + "label": "Input Moderation", + "description": "Detect text that could generate harmful output and prevent it from being sent to the language model", + "name": "inputModeration", + "type": "Moderation", + "optional": true, + "list": true, + "id": 
"conversationalAgent_0-input-inputModeration-Moderation" } ], "inputs": { + "inputModeration": "", "tools": ["{{chainTool_0.data.instance}}", "{{chainTool_1.data.instance}}"], "model": "{{chatOpenAI_3.data.instance}}", "memory": "{{bufferMemory_0.data.instance}}", diff --git a/packages/server/marketplaces/chatflows/AutoGPT.json b/packages/server/marketplaces/chatflows/AutoGPT.json index bb7c7bdc..fd6ffc90 100644 --- a/packages/server/marketplaces/chatflows/AutoGPT.json +++ b/packages/server/marketplaces/chatflows/AutoGPT.json @@ -15,7 +15,7 @@ "data": { "id": "autoGPT_0", "label": "AutoGPT", - "version": 1, + "version": 2, "name": "autoGPT", "type": "AutoGPT", "baseClasses": ["AutoGPT"], @@ -66,9 +66,19 @@ "name": "vectorStoreRetriever", "type": "BaseRetriever", "id": "autoGPT_0-input-vectorStoreRetriever-BaseRetriever" + }, + { + "label": "Input Moderation", + "description": "Detect text that could generate harmful output and prevent it from being sent to the language model", + "name": "inputModeration", + "type": "Moderation", + "optional": true, + "list": true, + "id": "autoGPT_0-input-inputModeration-Moderation" } ], "inputs": { + "inputModeration": "", "tools": ["{{readFile_0.data.instance}}", "{{writeFile_1.data.instance}}", "{{serpAPI_0.data.instance}}"], "model": "{{chatOpenAI_0.data.instance}}", "vectorStoreRetriever": "{{pinecone_0.data.instance}}", diff --git a/packages/server/marketplaces/chatflows/BabyAGI.json b/packages/server/marketplaces/chatflows/BabyAGI.json index 8a800046..a93e7193 100644 --- a/packages/server/marketplaces/chatflows/BabyAGI.json +++ b/packages/server/marketplaces/chatflows/BabyAGI.json @@ -15,7 +15,7 @@ "data": { "id": "babyAGI_1", "label": "BabyAGI", - "version": 1, + "version": 2, "name": "babyAGI", "type": "BabyAGI", "baseClasses": ["BabyAGI"], @@ -42,9 +42,19 @@ "name": "vectorStore", "type": "VectorStore", "id": "babyAGI_1-input-vectorStore-VectorStore" + }, + { + "label": "Input Moderation", + "description": "Detect text 
that could generate harmful output and prevent it from being sent to the language model", + "name": "inputModeration", + "type": "Moderation", + "optional": true, + "list": true, + "id": "babyAGI_1-input-inputModeration-Moderation" } ], "inputs": { + "inputModeration": "", "model": "{{chatOpenAI_0.data.instance}}", "vectorStore": "{{pinecone_0.data.instance}}", "taskLoop": 3 diff --git a/packages/server/marketplaces/chatflows/CSV Agent.json b/packages/server/marketplaces/chatflows/CSV Agent.json index 0a0bdce9..92af9735 100644 --- a/packages/server/marketplaces/chatflows/CSV Agent.json +++ b/packages/server/marketplaces/chatflows/CSV Agent.json @@ -16,7 +16,7 @@ "id": "csvAgent_0", "label": "CSV Agent", "name": "csvAgent", - "version": 1, + "version": 2, "type": "AgentExecutor", "baseClasses": ["AgentExecutor", "BaseChain"], "category": "Agents", @@ -36,9 +36,19 @@ "name": "model", "type": "BaseLanguageModel", "id": "csvAgent_0-input-model-BaseLanguageModel" + }, + { + "label": "Input Moderation", + "description": "Detect text that could generate harmful output and prevent it from being sent to the language model", + "name": "inputModeration", + "type": "Moderation", + "optional": true, + "list": true, + "id": "csvAgent_0-input-inputModeration-Moderation" } ], "inputs": { + "inputModeration": "", "model": "{{chatOpenAI_0.data.instance}}" }, "outputAnchors": [ diff --git a/packages/server/marketplaces/chatflows/Chat with a Podcast.json b/packages/server/marketplaces/chatflows/Chat with a Podcast.json index 6d0344a3..526a85f6 100644 --- a/packages/server/marketplaces/chatflows/Chat with a Podcast.json +++ b/packages/server/marketplaces/chatflows/Chat with a Podcast.json @@ -15,7 +15,7 @@ "data": { "id": "conversationalRetrievalQAChain_0", "label": "Conversational Retrieval QA Chain", - "version": 2, + "version": 3, "name": "conversationalRetrievalQAChain", "type": "ConversationalRetrievalQAChain", "baseClasses": ["ConversationalRetrievalQAChain", "BaseChain", 
"Runnable"], @@ -74,9 +74,19 @@ "optional": true, "description": "If left empty, a default BufferMemory will be used", "id": "conversationalRetrievalQAChain_0-input-memory-BaseMemory" + }, + { + "label": "Input Moderation", + "description": "Detect text that could generate harmful output and prevent it from being sent to the language model", + "name": "inputModeration", + "type": "Moderation", + "optional": true, + "list": true, + "id": "conversationalRetrievalQAChain_0-input-inputModeration-Moderation" } ], "inputs": { + "inputModeration": "", "model": "{{chatOpenAI_0.data.instance}}", "vectorStoreRetriever": "{{memoryVectorStore_0.data.instance}}", "memory": "", diff --git a/packages/server/marketplaces/chatflows/ChatGPTPlugin.json b/packages/server/marketplaces/chatflows/ChatGPTPlugin.json index cbdc4634..e7f27080 100644 --- a/packages/server/marketplaces/chatflows/ChatGPTPlugin.json +++ b/packages/server/marketplaces/chatflows/ChatGPTPlugin.json @@ -451,7 +451,7 @@ "id": "mrklAgentChat_0", "label": "MRKL Agent for Chat Models", "name": "mrklAgentChat", - "version": 1, + "version": 2, "type": "AgentExecutor", "baseClasses": ["AgentExecutor", "BaseChain"], "category": "Agents", @@ -470,9 +470,19 @@ "name": "model", "type": "BaseLanguageModel", "id": "mrklAgentChat_0-input-model-BaseLanguageModel" + }, + { + "label": "Input Moderation", + "description": "Detect text that could generate harmful output and prevent it from being sent to the language model", + "name": "inputModeration", + "type": "Moderation", + "optional": true, + "list": true, + "id": "mrklAgentChat_0-input-inputModeration-Moderation" } ], "inputs": { + "inputModeration": "", "tools": ["{{requestsGet_0.data.instance}}", "{{requestsPost_0.data.instance}}", "{{aiPlugin_0.data.instance}}"], "model": "{{chatOpenAI_0.data.instance}}" }, diff --git a/packages/server/marketplaces/chatflows/Conversational Agent.json b/packages/server/marketplaces/chatflows/Conversational Agent.json index d07047d6..3d9340de 
100644 --- a/packages/server/marketplaces/chatflows/Conversational Agent.json +++ b/packages/server/marketplaces/chatflows/Conversational Agent.json @@ -392,7 +392,7 @@ "data": { "id": "conversationalAgent_0", "label": "Conversational Agent", - "version": 2, + "version": 3, "name": "conversationalAgent", "type": "AgentExecutor", "baseClasses": ["AgentExecutor", "BaseChain", "Runnable"], @@ -429,9 +429,19 @@ "name": "memory", "type": "BaseChatMemory", "id": "conversationalAgent_0-input-memory-BaseChatMemory" + }, + { + "label": "Input Moderation", + "description": "Detect text that could generate harmful output and prevent it from being sent to the language model", + "name": "inputModeration", + "type": "Moderation", + "optional": true, + "list": true, + "id": "conversationalAgent_0-input-inputModeration-Moderation" } ], "inputs": { + "inputModeration": "", "tools": ["{{calculator_1.data.instance}}", "{{serpAPI_0.data.instance}}"], "model": "{{chatOpenAI_0.data.instance}}", "memory": "{{bufferMemory_1.data.instance}}", diff --git a/packages/server/marketplaces/chatflows/Conversational Retrieval Agent.json b/packages/server/marketplaces/chatflows/Conversational Retrieval Agent.json index 72ac467e..dbcca236 100644 --- a/packages/server/marketplaces/chatflows/Conversational Retrieval Agent.json +++ b/packages/server/marketplaces/chatflows/Conversational Retrieval Agent.json @@ -123,7 +123,7 @@ "data": { "id": "conversationalRetrievalAgent_0", "label": "Conversational Retrieval Agent", - "version": 3, + "version": 4, "name": "conversationalRetrievalAgent", "type": "AgentExecutor", "baseClasses": ["AgentExecutor", "BaseChain", "Runnable"], @@ -159,9 +159,19 @@ "name": "model", "type": "BaseChatModel", "id": "conversationalRetrievalAgent_0-input-model-BaseChatModel" + }, + { + "label": "Input Moderation", + "description": "Detect text that could generate harmful output and prevent it from being sent to the language model", + "name": "inputModeration", + "type": 
"Moderation", + "optional": true, + "list": true, + "id": "conversationalRetrievalAgent_0-input-inputModeration-Moderation" } ], "inputs": { + "inputModeration": "", "tools": ["{{retrieverTool_0.data.instance}}"], "memory": "{{bufferMemory_0.data.instance}}", "model": "{{chatOpenAI_0.data.instance}}", diff --git a/packages/server/marketplaces/chatflows/Conversational Retrieval QA Chain.json b/packages/server/marketplaces/chatflows/Conversational Retrieval QA Chain.json index df3d1389..0b6559ff 100644 --- a/packages/server/marketplaces/chatflows/Conversational Retrieval QA Chain.json +++ b/packages/server/marketplaces/chatflows/Conversational Retrieval QA Chain.json @@ -274,7 +274,7 @@ "data": { "id": "conversationalRetrievalQAChain_0", "label": "Conversational Retrieval QA Chain", - "version": 2, + "version": 3, "name": "conversationalRetrievalQAChain", "type": "ConversationalRetrievalQAChain", "baseClasses": ["ConversationalRetrievalQAChain", "BaseChain", "Runnable"], @@ -333,9 +333,19 @@ "optional": true, "description": "If left empty, a default BufferMemory will be used", "id": "conversationalRetrievalQAChain_0-input-memory-BaseMemory" + }, + { + "label": "Input Moderation", + "description": "Detect text that could generate harmful output and prevent it from being sent to the language model", + "name": "inputModeration", + "type": "Moderation", + "optional": true, + "list": true, + "id": "conversationalRetrievalQAChain_0-input-inputModeration-Moderation" } ], "inputs": { + "inputModeration": "", "model": "{{chatOpenAI_0.data.instance}}", "vectorStoreRetriever": "{{pinecone_0.data.instance}}", "memory": "", diff --git a/packages/server/marketplaces/chatflows/Flowise Docs QnA.json b/packages/server/marketplaces/chatflows/Flowise Docs QnA.json index 62c72595..c3db081b 100644 --- a/packages/server/marketplaces/chatflows/Flowise Docs QnA.json +++ b/packages/server/marketplaces/chatflows/Flowise Docs QnA.json @@ -158,7 +158,7 @@ "id": 
"conversationalRetrievalQAChain_0", "label": "Conversational Retrieval QA Chain", "name": "conversationalRetrievalQAChain", - "version": 2, + "version": 3, "type": "ConversationalRetrievalQAChain", "baseClasses": ["ConversationalRetrievalQAChain", "BaseChain", "Runnable"], "category": "Chains", @@ -216,9 +216,19 @@ "optional": true, "description": "If left empty, a default BufferMemory will be used", "id": "conversationalRetrievalQAChain_0-input-memory-BaseMemory" + }, + { + "label": "Input Moderation", + "description": "Detect text that could generate harmful output and prevent it from being sent to the language model", + "name": "inputModeration", + "type": "Moderation", + "optional": true, + "list": true, + "id": "conversationalRetrievalQAChain_0-input-inputModeration-Moderation" } ], "inputs": { + "inputModeration": "", "model": "{{chatOpenAI_0.data.instance}}", "vectorStoreRetriever": "{{memoryVectorStore_0.data.instance}}", "memory": "", diff --git a/packages/server/marketplaces/chatflows/Local QnA.json b/packages/server/marketplaces/chatflows/Local QnA.json index 3e8b93f6..6f4ed8ce 100644 --- a/packages/server/marketplaces/chatflows/Local QnA.json +++ b/packages/server/marketplaces/chatflows/Local QnA.json @@ -85,7 +85,7 @@ "data": { "id": "conversationalRetrievalQAChain_0", "label": "Conversational Retrieval QA Chain", - "version": 2, + "version": 3, "name": "conversationalRetrievalQAChain", "type": "ConversationalRetrievalQAChain", "baseClasses": ["ConversationalRetrievalQAChain", "BaseChain", "Runnable"], @@ -144,9 +144,19 @@ "optional": true, "description": "If left empty, a default BufferMemory will be used", "id": "conversationalRetrievalQAChain_0-input-memory-BaseMemory" + }, + { + "label": "Input Moderation", + "description": "Detect text that could generate harmful output and prevent it from being sent to the language model", + "name": "inputModeration", + "type": "Moderation", + "optional": true, + "list": true, + "id": 
"conversationalRetrievalQAChain_0-input-inputModeration-Moderation" } ], "inputs": { + "inputModeration": "", "model": "{{chatOllama_0.data.instance}}", "vectorStoreRetriever": "{{faiss_0.data.instance}}", "memory": "", diff --git a/packages/server/marketplaces/chatflows/Long Term Memory.json b/packages/server/marketplaces/chatflows/Long Term Memory.json index bc3b8a76..f6a642cb 100644 --- a/packages/server/marketplaces/chatflows/Long Term Memory.json +++ b/packages/server/marketplaces/chatflows/Long Term Memory.json @@ -15,7 +15,7 @@ "data": { "id": "conversationalRetrievalQAChain_0", "label": "Conversational Retrieval QA Chain", - "version": 2, + "version": 3, "name": "conversationalRetrievalQAChain", "type": "ConversationalRetrievalQAChain", "baseClasses": ["ConversationalRetrievalQAChain", "BaseChain", "Runnable"], @@ -74,9 +74,19 @@ "optional": true, "description": "If left empty, a default BufferMemory will be used", "id": "conversationalRetrievalQAChain_0-input-memory-BaseMemory" + }, + { + "label": "Input Moderation", + "description": "Detect text that could generate harmful output and prevent it from being sent to the language model", + "name": "inputModeration", + "type": "Moderation", + "optional": true, + "list": true, + "id": "conversationalRetrievalQAChain_0-input-inputModeration-Moderation" } ], "inputs": { + "inputModeration": "", "model": "{{chatOpenAI_0.data.instance}}", "vectorStoreRetriever": "{{qdrant_0.data.instance}}", "memory": "{{ZepMemory_0.data.instance}}", diff --git a/packages/server/marketplaces/chatflows/Metadata Filter.json b/packages/server/marketplaces/chatflows/Metadata Filter.json index 147a8cf6..2caefe5d 100644 --- a/packages/server/marketplaces/chatflows/Metadata Filter.json +++ b/packages/server/marketplaces/chatflows/Metadata Filter.json @@ -251,7 +251,7 @@ "data": { "id": "conversationalRetrievalQAChain_0", "label": "Conversational Retrieval QA Chain", - "version": 2, + "version": 3, "name": "conversationalRetrievalQAChain", 
"type": "ConversationalRetrievalQAChain", "baseClasses": ["ConversationalRetrievalQAChain", "BaseChain", "Runnable"], @@ -310,9 +310,19 @@ "optional": true, "description": "If left empty, a default BufferMemory will be used", "id": "conversationalRetrievalQAChain_0-input-memory-BaseMemory" + }, + { + "label": "Input Moderation", + "description": "Detect text that could generate harmful output and prevent it from being sent to the language model", + "name": "inputModeration", + "type": "Moderation", + "optional": true, + "list": true, + "id": "conversationalRetrievalQAChain_0-input-inputModeration-Moderation" } ], "inputs": { + "inputModeration": "", "model": "{{chatOpenAI_0.data.instance}}", "vectorStoreRetriever": "{{pinecone_0.data.instance}}", "rephrasePrompt": "Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question.\n\nChat History:\n{chat_history}\nFollow Up Input: {question}\nStandalone Question:", diff --git a/packages/server/marketplaces/chatflows/Multi Prompt Chain.json b/packages/server/marketplaces/chatflows/Multi Prompt Chain.json index 41cd9b17..171cce0b 100644 --- a/packages/server/marketplaces/chatflows/Multi Prompt Chain.json +++ b/packages/server/marketplaces/chatflows/Multi Prompt Chain.json @@ -84,7 +84,7 @@ "id": "multiPromptChain_0", "label": "Multi Prompt Chain", "name": "multiPromptChain", - "version": 1, + "version": 2, "type": "MultiPromptChain", "baseClasses": ["MultiPromptChain", "MultiRouteChain", "BaseChain", "BaseLangChain"], "category": "Chains", @@ -103,9 +103,19 @@ "type": "PromptRetriever", "list": true, "id": "multiPromptChain_0-input-promptRetriever-PromptRetriever" + }, + { + "label": "Input Moderation", + "description": "Detect text that could generate harmful output and prevent it from being sent to the language model", + "name": "inputModeration", + "type": "Moderation", + "optional": true, + "list": true, + "id": 
"multiPromptChain_0-input-inputModeration-Moderation" } ], "inputs": { + "inputModeration": "", "model": "{{chatOpenAI_0.data.instance}}", "promptRetriever": [ "{{promptRetriever_0.data.instance}}", diff --git a/packages/server/marketplaces/chatflows/Multi Retrieval QA Chain.json b/packages/server/marketplaces/chatflows/Multi Retrieval QA Chain.json index 8f762ca9..70ecbcf8 100644 --- a/packages/server/marketplaces/chatflows/Multi Retrieval QA Chain.json +++ b/packages/server/marketplaces/chatflows/Multi Retrieval QA Chain.json @@ -82,7 +82,7 @@ "data": { "id": "multiRetrievalQAChain_0", "label": "Multi Retrieval QA Chain", - "version": 1, + "version": 2, "name": "multiRetrievalQAChain", "type": "MultiRetrievalQAChain", "baseClasses": ["MultiRetrievalQAChain", "MultiRouteChain", "BaseChain", "BaseLangChain"], @@ -109,9 +109,19 @@ "type": "VectorStoreRetriever", "list": true, "id": "multiRetrievalQAChain_0-input-vectorStoreRetriever-VectorStoreRetriever" + }, + { + "label": "Input Moderation", + "description": "Detect text that could generate harmful output and prevent it from being sent to the language model", + "name": "inputModeration", + "type": "Moderation", + "optional": true, + "list": true, + "id": "multiRetrievalQAChain_0-input-inputModeration-Moderation" } ], "inputs": { + "inputModeration": "", "model": "{{chatOpenAI_0.data.instance}}", "vectorStoreRetriever": [ "{{vectorStoreRetriever_0.data.instance}}", diff --git a/packages/server/marketplaces/chatflows/Multiple VectorDB.json b/packages/server/marketplaces/chatflows/Multiple VectorDB.json index db17df54..3de2a08c 100644 --- a/packages/server/marketplaces/chatflows/Multiple VectorDB.json +++ b/packages/server/marketplaces/chatflows/Multiple VectorDB.json @@ -163,7 +163,7 @@ "data": { "id": "retrievalQAChain_0", "label": "Retrieval QA Chain", - "version": 1, + "version": 2, "name": "retrievalQAChain", "type": "RetrievalQAChain", "baseClasses": ["RetrievalQAChain", "BaseChain", "BaseLangChain"], @@ -182,9 
+182,19 @@ "name": "vectorStoreRetriever", "type": "BaseRetriever", "id": "retrievalQAChain_0-input-vectorStoreRetriever-BaseRetriever" + }, + { + "label": "Input Moderation", + "description": "Detect text that could generate harmful output and prevent it from being sent to the language model", + "name": "inputModeration", + "type": "Moderation", + "optional": true, + "list": true, + "id": "retrievalQAChain_0-input-inputModeration-Moderation" } ], "inputs": { + "inputModeration": "", "model": "{{chatOpenAI_0.data.instance}}", "vectorStoreRetriever": "{{redis_0.data.instance}}" }, @@ -218,7 +228,7 @@ "data": { "id": "retrievalQAChain_1", "label": "Retrieval QA Chain", - "version": 1, + "version": 2, "name": "retrievalQAChain", "type": "RetrievalQAChain", "baseClasses": ["RetrievalQAChain", "BaseChain", "BaseLangChain"], @@ -237,9 +247,19 @@ "name": "vectorStoreRetriever", "type": "BaseRetriever", "id": "retrievalQAChain_1-input-vectorStoreRetriever-BaseRetriever" + }, + { + "label": "Input Moderation", + "description": "Detect text that could generate harmful output and prevent it from being sent to the language model", + "name": "inputModeration", + "type": "Moderation", + "optional": true, + "list": true, + "id": "retrievalQAChain_1-input-inputModeration-Moderation" } ], "inputs": { + "inputModeration": "", "model": "{{chatOpenAI_1.data.instance}}", "vectorStoreRetriever": "{{faiss_0.data.instance}}" }, @@ -1741,7 +1761,7 @@ "data": { "id": "conversationalAgent_0", "label": "Conversational Agent", - "version": 2, + "version": 3, "name": "conversationalAgent", "type": "AgentExecutor", "baseClasses": ["AgentExecutor", "BaseChain", "Runnable"], @@ -1778,9 +1798,19 @@ "name": "memory", "type": "BaseChatMemory", "id": "conversationalAgent_0-input-memory-BaseChatMemory" + }, + { + "label": "Input Moderation", + "description": "Detect text that could generate harmful output and prevent it from being sent to the language model", + "name": "inputModeration", + "type": 
"Moderation", + "optional": true, + "list": true, + "id": "conversationalAgent_0-input-inputModeration-Moderation" } ], "inputs": { + "inputModeration": "", "tools": ["{{chainTool_2.data.instance}}", "{{chainTool_3.data.instance}}"], "model": "{{chatOpenAI_2.data.instance}}", "memory": "{{bufferMemory_0.data.instance}}", diff --git a/packages/server/marketplaces/chatflows/OpenAI Agent.json b/packages/server/marketplaces/chatflows/OpenAI Agent.json index f405640c..065b0b4d 100644 --- a/packages/server/marketplaces/chatflows/OpenAI Agent.json +++ b/packages/server/marketplaces/chatflows/OpenAI Agent.json @@ -208,7 +208,7 @@ "id": "openAIFunctionAgent_0", "label": "OpenAI Function Agent", "name": "openAIFunctionAgent", - "version": 3, + "version": 4, "type": "AgentExecutor", "baseClasses": ["AgentExecutor", "BaseChain"], "category": "Agents", @@ -243,9 +243,19 @@ "name": "model", "type": "BaseChatModel", "id": "openAIFunctionAgent_0-input-model-BaseChatModel" + }, + { + "label": "Input Moderation", + "description": "Detect text that could generate harmful output and prevent it from being sent to the language model", + "name": "inputModeration", + "type": "Moderation", + "optional": true, + "list": true, + "id": "openAIFunctionAgent_0-input-inputModeration-Moderation" } ], "inputs": { + "inputModeration": "", "tools": ["{{calculator_0.data.instance}}", "{{serper_0.data.instance}}", "{{customTool_0.data.instance}}"], "memory": "{{bufferMemory_0.data.instance}}", "model": "{{chatOpenAI_0.data.instance}}", diff --git a/packages/server/marketplaces/chatflows/ReAct Agent.json b/packages/server/marketplaces/chatflows/ReAct Agent.json index a4989c47..7bc2c33f 100644 --- a/packages/server/marketplaces/chatflows/ReAct Agent.json +++ b/packages/server/marketplaces/chatflows/ReAct Agent.json @@ -52,7 +52,7 @@ "data": { "id": "mrklAgentChat_0", "label": "ReAct Agent for Chat Models", - "version": 3, + "version": 4, "name": "mrklAgentChat", "type": "AgentExecutor", "baseClasses": 
["AgentExecutor", "BaseChain", "Runnable"], @@ -78,9 +78,19 @@ "name": "memory", "type": "BaseChatMemory", "id": "mrklAgentChat_0-input-memory-BaseChatMemory" + }, + { + "label": "Input Moderation", + "description": "Detect text that could generate harmful output and prevent it from being sent to the language model", + "name": "inputModeration", + "type": "Moderation", + "optional": true, + "list": true, + "id": "mrklAgentChat_0-input-inputModeration-Moderation" } ], "inputs": { + "inputModeration": "", "tools": ["{{calculator_1.data.instance}}", "{{serper_0.data.instance}}"], "model": "{{chatOpenAI_0.data.instance}}", "memory": "{{RedisBackedChatMemory_0.data.instance}}" diff --git a/packages/server/marketplaces/chatflows/SQL DB Chain.json b/packages/server/marketplaces/chatflows/SQL DB Chain.json index debe4edc..855f9b1f 100644 --- a/packages/server/marketplaces/chatflows/SQL DB Chain.json +++ b/packages/server/marketplaces/chatflows/SQL DB Chain.json @@ -249,7 +249,7 @@ "data": { "id": "sqlDatabaseChain_0", "label": "Sql Database Chain", - "version": 4, + "version": 5, "name": "sqlDatabaseChain", "type": "SqlDatabaseChain", "baseClasses": ["SqlDatabaseChain", "BaseChain", "Runnable"], @@ -347,9 +347,19 @@ "name": "model", "type": "BaseLanguageModel", "id": "sqlDatabaseChain_0-input-model-BaseLanguageModel" + }, + { + "label": "Input Moderation", + "description": "Detect text that could generate harmful output and prevent it from being sent to the language model", + "name": "inputModeration", + "type": "Moderation", + "optional": true, + "list": true, + "id": "sqlDatabaseChain_0-input-inputModeration-Moderation" } ], "inputs": { + "inputModeration": "", "model": "{{chatOpenAI_0.data.instance}}", "database": "sqlite", "url": "", diff --git a/packages/server/marketplaces/chatflows/Vectara RAG Chain.json b/packages/server/marketplaces/chatflows/Vectara RAG Chain.json index 2ef1474a..c5684ae4 100644 --- a/packages/server/marketplaces/chatflows/Vectara RAG Chain.json 
+++ b/packages/server/marketplaces/chatflows/Vectara RAG Chain.json @@ -15,7 +15,7 @@ "data": { "id": "vectaraQAChain_0", "label": "Vectara QA Chain", - "version": 1, + "version": 2, "name": "vectaraQAChain", "type": "VectaraQAChain", "baseClasses": ["VectaraQAChain", "BaseChain", "Runnable"], @@ -189,9 +189,19 @@ "name": "vectaraStore", "type": "VectorStore", "id": "vectaraQAChain_0-input-vectaraStore-VectorStore" + }, + { + "label": "Input Moderation", + "description": "Detect text that could generate harmful output and prevent it from being sent to the language model", + "name": "inputModeration", + "type": "Moderation", + "optional": true, + "list": true, + "id": "vectaraQAChain_0-input-inputModeration-Moderation" } ], "inputs": { + "inputModeration": "", "vectaraStore": "{{vectara_1.data.instance}}", "summarizerPromptName": "vectara-experimental-summary-ext-2023-10-23-small", "responseLang": "eng", diff --git a/packages/server/marketplaces/chatflows/WebBrowser.json b/packages/server/marketplaces/chatflows/WebBrowser.json index d8b7d9f6..d27298d2 100644 --- a/packages/server/marketplaces/chatflows/WebBrowser.json +++ b/packages/server/marketplaces/chatflows/WebBrowser.json @@ -702,7 +702,7 @@ "data": { "id": "conversationalAgent_0", "label": "Conversational Agent", - "version": 2, + "version": 3, "name": "conversationalAgent", "type": "AgentExecutor", "baseClasses": ["AgentExecutor", "BaseChain", "Runnable"], @@ -739,9 +739,19 @@ "name": "memory", "type": "BaseChatMemory", "id": "conversationalAgent_0-input-memory-BaseChatMemory" + }, + { + "label": "Input Moderation", + "description": "Detect text that could generate harmful output and prevent it from being sent to the language model", + "name": "inputModeration", + "type": "Moderation", + "optional": true, + "list": true, + "id": "conversationalAgent_0-input-inputModeration-Moderation" } ], "inputs": { + "inputModeration": "", "tools": ["{{webBrowser_0.data.instance}}"], "model": 
"{{chatOpenAI_1.data.instance}}", "memory": "{{bufferMemory_0.data.instance}}", diff --git a/packages/server/marketplaces/chatflows/WebPage QnA.json b/packages/server/marketplaces/chatflows/WebPage QnA.json index 5ca29ee9..a913e5f9 100644 --- a/packages/server/marketplaces/chatflows/WebPage QnA.json +++ b/packages/server/marketplaces/chatflows/WebPage QnA.json @@ -187,7 +187,7 @@ "data": { "id": "conversationalRetrievalQAChain_0", "label": "Conversational Retrieval QA Chain", - "version": 2, + "version": 3, "name": "conversationalRetrievalQAChain", "type": "ConversationalRetrievalQAChain", "baseClasses": ["ConversationalRetrievalQAChain", "BaseChain", "Runnable"], @@ -246,9 +246,19 @@ "optional": true, "description": "If left empty, a default BufferMemory will be used", "id": "conversationalRetrievalQAChain_0-input-memory-BaseMemory" + }, + { + "label": "Input Moderation", + "description": "Detect text that could generate harmful output and prevent it from being sent to the language model", + "name": "inputModeration", + "type": "Moderation", + "optional": true, + "list": true, + "id": "conversationalRetrievalQAChain_0-input-inputModeration-Moderation" } ], "inputs": { + "inputModeration": "", "model": "{{chatOpenAI_0.data.instance}}", "vectorStoreRetriever": "{{pinecone_0.data.instance}}", "memory": "{{RedisBackedChatMemory_0.data.instance}}", From 20929dbff1e6e191a2d5b2eb82d666a3ccc078a4 Mon Sep 17 00:00:00 2001 From: vinodkiran Date: Sun, 10 Mar 2024 21:31:39 +0530 Subject: [PATCH 22/25] bugfix - fix for inccorect bedrock imports --- .../components/nodes/chatmodels/AWSBedrock/AWSChatBedrock.ts | 2 +- packages/components/nodes/llms/AWSBedrock/AWSBedrock.ts | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/packages/components/nodes/chatmodels/AWSBedrock/AWSChatBedrock.ts b/packages/components/nodes/chatmodels/AWSBedrock/AWSChatBedrock.ts index 251bd24a..31d78270 100644 --- a/packages/components/nodes/chatmodels/AWSBedrock/AWSChatBedrock.ts +++ 
b/packages/components/nodes/chatmodels/AWSBedrock/AWSChatBedrock.ts @@ -1,7 +1,7 @@ import { BedrockChat } from '@langchain/community/chat_models/bedrock' import { BaseCache } from '@langchain/core/caches' import { BaseChatModelParams } from '@langchain/core/language_models/chat_models' -import { BaseBedrockInput } from 'langchain/dist/util/bedrock' +import { BaseBedrockInput } from '@langchain/community/dist/utils/bedrock' import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface' import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils' diff --git a/packages/components/nodes/llms/AWSBedrock/AWSBedrock.ts b/packages/components/nodes/llms/AWSBedrock/AWSBedrock.ts index a22265e3..7b095fb9 100644 --- a/packages/components/nodes/llms/AWSBedrock/AWSBedrock.ts +++ b/packages/components/nodes/llms/AWSBedrock/AWSBedrock.ts @@ -1,9 +1,9 @@ import { Bedrock } from '@langchain/community/llms/bedrock' import { BaseCache } from '@langchain/core/caches' import { BaseLLMParams } from '@langchain/core/language_models/llms' -import { BaseBedrockInput } from 'langchain/dist/util/bedrock' import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface' import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils' +import { BaseBedrockInput } from '@langchain/community/dist/utils/bedrock' /** * I had to run the following to build the component From 8a0af7b44676fc3ebd6ab15be7ec6a1d36e802f0 Mon Sep 17 00:00:00 2001 From: Christoph Simon | dotSource SE Date: Sun, 10 Mar 2024 18:35:50 +0100 Subject: [PATCH 23/25] fix(utilities): Handle escape characters in ifelse-function node's return value, resolves #1887 --- .../nodes/utilities/IfElseFunction/IfElseFunction.ts | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/packages/components/nodes/utilities/IfElseFunction/IfElseFunction.ts b/packages/components/nodes/utilities/IfElseFunction/IfElseFunction.ts index 
1e61616c..05c9f45b 100644 --- a/packages/components/nodes/utilities/IfElseFunction/IfElseFunction.ts +++ b/packages/components/nodes/utilities/IfElseFunction/IfElseFunction.ts @@ -143,10 +143,11 @@ class IfElseFunction_Utilities implements INode { const vm = new NodeVM(nodeVMOptions) try { const responseTrue = await vm.run(`module.exports = async function() {${ifFunction}}()`, __dirname) - if (responseTrue) return { output: responseTrue, type: true } + if (responseTrue) + return { output: typeof responseTrue === 'string' ? handleEscapeCharacters(responseTrue, false) : responseTrue, type: true } const responseFalse = await vm.run(`module.exports = async function() {${elseFunction}}()`, __dirname) - return { output: responseFalse, type: false } + return { output: typeof responseFalse === 'string' ? handleEscapeCharacters(responseFalse, false) : responseFalse, type: false } } catch (e) { throw new Error(e) } From 07b40772846b3777f1a5f5488b7c6c55f529ceb5 Mon Sep 17 00:00:00 2001 From: Henry Date: Mon, 11 Mar 2024 16:24:59 +0800 Subject: [PATCH 24/25] fix json dialog title --- packages/ui/src/ui-component/dialog/FormatPromptValuesDialog.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/ui/src/ui-component/dialog/FormatPromptValuesDialog.js b/packages/ui/src/ui-component/dialog/FormatPromptValuesDialog.js index 233f0762..6afb84cc 100644 --- a/packages/ui/src/ui-component/dialog/FormatPromptValuesDialog.js +++ b/packages/ui/src/ui-component/dialog/FormatPromptValuesDialog.js @@ -28,7 +28,7 @@ const FormatPromptValuesDialog = ({ show, dialogProps, onChange, onCancel }) => aria-describedby='alert-dialog-description' > - Format Prompt Values + {dialogProps.inputParam.label ?? 'Format Prompt Values'} Date: Mon, 11 Mar 2024 14:29:11 +0530 Subject: [PATCH 25/25] Bugfix: hidden flag on additionalParams is not taken into account. 
--- packages/ui/src/views/canvas/CanvasNode.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/ui/src/views/canvas/CanvasNode.js b/packages/ui/src/views/canvas/CanvasNode.js index a1b55a74..d2ca6ead 100644 --- a/packages/ui/src/views/canvas/CanvasNode.js +++ b/packages/ui/src/views/canvas/CanvasNode.js @@ -50,7 +50,7 @@ const CanvasNode = ({ data }) => { const onDialogClicked = () => { const dialogProps = { data, - inputParams: data.inputParams.filter((param) => param.additionalParams), + inputParams: data.inputParams.filter((inputParam) => !inputParam.hidden).filter((param) => param.additionalParams), confirmButtonName: 'Save', cancelButtonName: 'Cancel' }