From 40f8371de955f3401f2263382de0c1c4505a9473 Mon Sep 17 00:00:00 2001 From: Henry Date: Mon, 4 Dec 2023 20:04:09 +0000 Subject: [PATCH 01/45] add llamaindex --- .../ConversationalAgent.ts | 19 +- .../ConversationalRetrievalAgent.ts | 24 +- .../agents/OpenAIAssistant/OpenAIAssistant.ts | 100 +- .../OpenAIFunctionAgent.ts | 24 +- .../ConversationChain/ConversationChain.ts | 15 +- .../ConversationalRetrievalQAChain.ts | 42 +- .../AzureChatOpenAI_LlamaIndex.ts | 135 +++ .../chatmodels/ChatAnthropic/ChatAnthropic.ts | 63 +- .../ChatAnthropic/ChatAnthropic_LlamaIndex.ts | 94 ++ .../nodes/chatmodels/ChatAnthropic/utils.ts | 61 ++ .../ChatOpenAI/ChatOpenAI_LlamaIndex.ts | 148 +++ .../AzureOpenAIEmbedding_LlamaIndex.ts | 77 ++ .../OpenAIEmbedding_LlamaIndex.ts | 68 ++ .../engine/ChatEngine/ContextChatEngine.ts | 178 ++++ .../engine/ChatEngine/SimpleChatEngine.ts | 171 ++++ .../nodes/engine/ChatEngine/chat-engine.png | Bin 0 -> 10012 bytes .../engine/ChatEngine/context-chat-engine.png | Bin 0 -> 9768 bytes .../nodes/engine/QueryEngine/QueryEngine.ts | 126 +++ .../nodes/engine/QueryEngine/query-engine.png | Bin 0 -> 12383 bytes .../nodes/memory/BufferMemory/BufferMemory.ts | 47 +- .../BufferWindowMemory/BufferWindowMemory.ts | 45 +- .../ConversationSummaryMemory.ts | 49 +- .../nodes/memory/DynamoDb/DynamoDb.ts | 219 ++++- .../memory/MongoDBMemory/MongoDBMemory.ts | 115 ++- .../memory/MotorheadMemory/MotorheadMemory.ts | 86 +- .../RedisBackedChatMemory.ts | 98 +- .../UpstashRedisBackedChatMemory.ts | 102 ++- .../nodes/memory/ZepMemory/ZepMemory.ts | 148 ++- .../CompactRefine/CompactRefine.ts | 75 ++ .../CompactRefine/compactrefine.svg | 1 + .../responsesynthesizer/Refine/Refine.ts | 75 ++ .../responsesynthesizer/Refine/refine.svg | 1 + .../SimpleResponseBuilder.ts | 35 + .../SimpleResponseBuilder/simplerb.svg | 1 + .../TreeSummarize/TreeSummarize.ts | 56 ++ .../TreeSummarize/treesummarize.svg | 1 + .../nodes/responsesynthesizer/base.ts | 11 + 
.../Pinecone/Pinecone_LlamaIndex.ts | 366 ++++++++ .../vectorstores/SimpleStore/SimpleStore.ts | 124 +++ .../vectorstores/SimpleStore/simplevs.svg | 6 + packages/components/package.json | 3 +- packages/components/src/Interface.ts | 5 +- packages/components/src/utils.ts | 53 +- .../chatflows/Context Chat Engine.json | 855 ++++++++++++++++++ .../chatflows/Long Term Memory.json | 20 +- .../marketplaces/chatflows/Query Engine.json | 509 +++++++++++ .../chatflows/Simple Chat Engine.json | 270 ++++++ .../marketplaces/chatflows/WebPage QnA.json | 2 +- packages/server/src/index.ts | 68 +- packages/server/src/utils/index.ts | 98 +- packages/ui/src/assets/images/llamaindex.png | Bin 0 -> 28343 bytes .../src/ui-component/dialog/NodeInfoDialog.js | 29 + packages/ui/src/utils/genericHelper.js | 1 + packages/ui/src/views/canvas/AddNodes.js | 89 +- packages/ui/src/views/canvas/CanvasNode.js | 19 +- .../marketplaces/MarketplaceCanvasNode.js | 18 + 56 files changed, 4509 insertions(+), 536 deletions(-) create mode 100644 packages/components/nodes/chatmodels/AzureChatOpenAI/AzureChatOpenAI_LlamaIndex.ts create mode 100644 packages/components/nodes/chatmodels/ChatAnthropic/ChatAnthropic_LlamaIndex.ts create mode 100644 packages/components/nodes/chatmodels/ChatAnthropic/utils.ts create mode 100644 packages/components/nodes/chatmodels/ChatOpenAI/ChatOpenAI_LlamaIndex.ts create mode 100644 packages/components/nodes/embeddings/AzureOpenAIEmbedding/AzureOpenAIEmbedding_LlamaIndex.ts create mode 100644 packages/components/nodes/embeddings/OpenAIEmbedding/OpenAIEmbedding_LlamaIndex.ts create mode 100644 packages/components/nodes/engine/ChatEngine/ContextChatEngine.ts create mode 100644 packages/components/nodes/engine/ChatEngine/SimpleChatEngine.ts create mode 100644 packages/components/nodes/engine/ChatEngine/chat-engine.png create mode 100644 packages/components/nodes/engine/ChatEngine/context-chat-engine.png create mode 100644 packages/components/nodes/engine/QueryEngine/QueryEngine.ts 
create mode 100644 packages/components/nodes/engine/QueryEngine/query-engine.png create mode 100644 packages/components/nodes/responsesynthesizer/CompactRefine/CompactRefine.ts create mode 100644 packages/components/nodes/responsesynthesizer/CompactRefine/compactrefine.svg create mode 100644 packages/components/nodes/responsesynthesizer/Refine/Refine.ts create mode 100644 packages/components/nodes/responsesynthesizer/Refine/refine.svg create mode 100644 packages/components/nodes/responsesynthesizer/SimpleResponseBuilder/SimpleResponseBuilder.ts create mode 100644 packages/components/nodes/responsesynthesizer/SimpleResponseBuilder/simplerb.svg create mode 100644 packages/components/nodes/responsesynthesizer/TreeSummarize/TreeSummarize.ts create mode 100644 packages/components/nodes/responsesynthesizer/TreeSummarize/treesummarize.svg create mode 100644 packages/components/nodes/responsesynthesizer/base.ts create mode 100644 packages/components/nodes/vectorstores/Pinecone/Pinecone_LlamaIndex.ts create mode 100644 packages/components/nodes/vectorstores/SimpleStore/SimpleStore.ts create mode 100644 packages/components/nodes/vectorstores/SimpleStore/simplevs.svg create mode 100644 packages/server/marketplaces/chatflows/Context Chat Engine.json create mode 100644 packages/server/marketplaces/chatflows/Query Engine.json create mode 100644 packages/server/marketplaces/chatflows/Simple Chat Engine.json create mode 100644 packages/ui/src/assets/images/llamaindex.png diff --git a/packages/components/nodes/agents/ConversationalAgent/ConversationalAgent.ts b/packages/components/nodes/agents/ConversationalAgent/ConversationalAgent.ts index 8a2329b5..cb5d3189 100644 --- a/packages/components/nodes/agents/ConversationalAgent/ConversationalAgent.ts +++ b/packages/components/nodes/agents/ConversationalAgent/ConversationalAgent.ts @@ -2,7 +2,7 @@ import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Inter import { initializeAgentExecutorWithOptions, AgentExecutor, 
InitializeAgentExecutorOptions } from 'langchain/agents' import { Tool } from 'langchain/tools' import { BaseChatMemory } from 'langchain/memory' -import { getBaseClasses, mapChatHistory } from '../../../src/utils' +import { getBaseClasses } from '../../../src/utils' import { BaseChatModel } from 'langchain/chat_models/base' import { flatten } from 'lodash' import { additionalCallbacks } from '../../../src/handler' @@ -90,18 +90,17 @@ class ConversationalAgent_Agents implements INode { async run(nodeData: INodeData, input: string, options: ICommonObject): Promise { const executor = nodeData.instance as AgentExecutor - const memory = nodeData.inputs?.memory as BaseChatMemory + const memory = nodeData.inputs?.memory + memory.returnMessages = true // Return true for BaseChatModel - if (options && options.chatHistory) { - const chatHistoryClassName = memory.chatHistory.constructor.name - // Only replace when its In-Memory - if (chatHistoryClassName && chatHistoryClassName === 'ChatMessageHistory') { - memory.chatHistory = mapChatHistory(options) - executor.memory = memory - } + /* When incomingInput.history is provided, only force replace chatHistory if its ShortTermMemory + * LongTermMemory will automatically retrieved chatHistory from sessionId + */ + if (options && options.chatHistory && memory.isShortTermMemory) { + await memory.resumeMessages(options.chatHistory) } - ;(executor.memory as any).returnMessages = true // Return true for BaseChatModel + executor.memory = memory const callbacks = await additionalCallbacks(nodeData, options) diff --git a/packages/components/nodes/agents/ConversationalRetrievalAgent/ConversationalRetrievalAgent.ts b/packages/components/nodes/agents/ConversationalRetrievalAgent/ConversationalRetrievalAgent.ts index 643c6a65..ce5b5e18 100644 --- a/packages/components/nodes/agents/ConversationalRetrievalAgent/ConversationalRetrievalAgent.ts +++ b/packages/components/nodes/agents/ConversationalRetrievalAgent/ConversationalRetrievalAgent.ts @@ 
-1,6 +1,6 @@ import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface' import { initializeAgentExecutorWithOptions, AgentExecutor } from 'langchain/agents' -import { getBaseClasses, mapChatHistory } from '../../../src/utils' +import { getBaseClasses } from '../../../src/utils' import { flatten } from 'lodash' import { BaseChatMemory } from 'langchain/memory' import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler' @@ -58,8 +58,8 @@ class ConversationalRetrievalAgent_Agents implements INode { async init(nodeData: INodeData): Promise { const model = nodeData.inputs?.model - const memory = nodeData.inputs?.memory as BaseChatMemory const systemMessage = nodeData.inputs?.systemMessage as string + const memory = nodeData.inputs?.memory as BaseChatMemory let tools = nodeData.inputs?.tools tools = flatten(tools) @@ -78,19 +78,21 @@ class ConversationalRetrievalAgent_Agents implements INode { async run(nodeData: INodeData, input: string, options: ICommonObject): Promise { const executor = nodeData.instance as AgentExecutor + const memory = nodeData.inputs?.memory - if (executor.memory) { - ;(executor.memory as any).memoryKey = 'chat_history' - ;(executor.memory as any).outputKey = 'output' - ;(executor.memory as any).returnMessages = true + memory.memoryKey = 'chat_history' + memory.outputKey = 'output' + memory.returnMessages = true - const chatHistoryClassName = (executor.memory as any).chatHistory.constructor.name - // Only replace when its In-Memory - if (chatHistoryClassName && chatHistoryClassName === 'ChatMessageHistory') { - ;(executor.memory as any).chatHistory = mapChatHistory(options) - } + /* When incomingInput.history is provided, only force replace chatHistory if its ShortTermMemory + * LongTermMemory will automatically retrieved chatHistory from sessionId + */ + if (options && options.chatHistory && memory.isShortTermMemory) { + await memory.resumeMessages(options.chatHistory) } + 
executor.memory = memory + const loggerHandler = new ConsoleCallbackHandler(options.logger) const callbacks = await additionalCallbacks(nodeData, options) diff --git a/packages/components/nodes/agents/OpenAIAssistant/OpenAIAssistant.ts b/packages/components/nodes/agents/OpenAIAssistant/OpenAIAssistant.ts index 7f2377bd..1a48be50 100644 --- a/packages/components/nodes/agents/OpenAIAssistant/OpenAIAssistant.ts +++ b/packages/components/nodes/agents/OpenAIAssistant/OpenAIAssistant.ts @@ -81,50 +81,8 @@ class OpenAIAssistant_Agents implements INode { } } - async init(): Promise { - return null - } - - //@ts-ignore - memoryMethods = { - async clearSessionMemory(nodeData: INodeData, options: ICommonObject): Promise { - const selectedAssistantId = nodeData.inputs?.selectedAssistant as string - const appDataSource = options.appDataSource as DataSource - const databaseEntities = options.databaseEntities as IDatabaseEntity - let sessionId = nodeData.inputs?.sessionId as string - - const assistant = await appDataSource.getRepository(databaseEntities['Assistant']).findOneBy({ - id: selectedAssistantId - }) - - if (!assistant) { - options.logger.error(`Assistant ${selectedAssistantId} not found`) - return - } - - if (!sessionId && options.chatId) { - const chatmsg = await appDataSource.getRepository(databaseEntities['ChatMessage']).findOneBy({ - chatId: options.chatId - }) - if (!chatmsg) { - options.logger.error(`Chat Message with Chat Id: ${options.chatId} not found`) - return - } - sessionId = chatmsg.sessionId - } - - const credentialData = await getCredentialData(assistant.credential ?? 
'', options) - const openAIApiKey = getCredentialParam('openAIApiKey', credentialData, nodeData) - if (!openAIApiKey) { - options.logger.error(`OpenAI ApiKey not found`) - return - } - - const openai = new OpenAI({ apiKey: openAIApiKey }) - options.logger.info(`Clearing OpenAI Thread ${sessionId}`) - if (sessionId) await openai.beta.threads.del(sessionId) - options.logger.info(`Successfully cleared OpenAI Thread ${sessionId}`) - } + async init(nodeData: INodeData, _: string, options: ICommonObject): Promise { + return new OpenAIAssistant({ nodeData, options }) } async run(nodeData: INodeData, input: string, options: ICommonObject): Promise { @@ -459,4 +417,58 @@ const formatToOpenAIAssistantTool = (tool: any): OpenAI.Beta.AssistantCreatePara } } +interface OpenAIAssistantInput { + nodeData: INodeData + options: ICommonObject +} + +class OpenAIAssistant { + nodeData: INodeData + options: ICommonObject = {} + + constructor(fields: OpenAIAssistantInput) { + this.nodeData = fields.nodeData + this.options = fields.options + } + + async clearChatMessages(): Promise { + const selectedAssistantId = this.nodeData.inputs?.selectedAssistant as string + const appDataSource = this.options.appDataSource as DataSource + const databaseEntities = this.options.databaseEntities as IDatabaseEntity + let sessionId = this.nodeData.inputs?.sessionId as string + + const assistant = await appDataSource.getRepository(databaseEntities['Assistant']).findOneBy({ + id: selectedAssistantId + }) + + if (!assistant) { + this.options.logger.error(`Assistant ${selectedAssistantId} not found`) + return + } + + if (!sessionId && this.options.chatId) { + const chatmsg = await appDataSource.getRepository(databaseEntities['ChatMessage']).findOneBy({ + chatId: this.options.chatId + }) + if (!chatmsg) { + this.options.logger.error(`Chat Message with Chat Id: ${this.options.chatId} not found`) + return + } + sessionId = chatmsg.sessionId + } + + const credentialData = await 
getCredentialData(assistant.credential ?? '', this.options) + const openAIApiKey = getCredentialParam('openAIApiKey', credentialData, this.nodeData) + if (!openAIApiKey) { + this.options.logger.error(`OpenAI ApiKey not found`) + return + } + + const openai = new OpenAI({ apiKey: openAIApiKey }) + this.options.logger.info(`Clearing OpenAI Thread ${sessionId}`) + if (sessionId) await openai.beta.threads.del(sessionId) + this.options.logger.info(`Successfully cleared OpenAI Thread ${sessionId}`) + } +} + module.exports = { nodeClass: OpenAIAssistant_Agents } diff --git a/packages/components/nodes/agents/OpenAIFunctionAgent/OpenAIFunctionAgent.ts b/packages/components/nodes/agents/OpenAIFunctionAgent/OpenAIFunctionAgent.ts index 96ba7ea3..781292d7 100644 --- a/packages/components/nodes/agents/OpenAIFunctionAgent/OpenAIFunctionAgent.ts +++ b/packages/components/nodes/agents/OpenAIFunctionAgent/OpenAIFunctionAgent.ts @@ -1,6 +1,6 @@ import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface' import { initializeAgentExecutorWithOptions, AgentExecutor } from 'langchain/agents' -import { getBaseClasses, mapChatHistory } from '../../../src/utils' +import { getBaseClasses } from '../../../src/utils' import { BaseLanguageModel } from 'langchain/base_language' import { flatten } from 'lodash' import { BaseChatMemory } from 'langchain/memory' @@ -56,8 +56,8 @@ class OpenAIFunctionAgent_Agents implements INode { async init(nodeData: INodeData): Promise { const model = nodeData.inputs?.model as BaseLanguageModel - const memory = nodeData.inputs?.memory as BaseChatMemory const systemMessage = nodeData.inputs?.systemMessage as string + const memory = nodeData.inputs?.memory as BaseChatMemory let tools = nodeData.inputs?.tools tools = flatten(tools) @@ -69,25 +69,23 @@ class OpenAIFunctionAgent_Agents implements INode { prefix: systemMessage ?? 
`You are a helpful AI assistant.` } }) - if (memory) executor.memory = memory - + executor.memory = memory return executor } async run(nodeData: INodeData, input: string, options: ICommonObject): Promise { const executor = nodeData.instance as AgentExecutor - const memory = nodeData.inputs?.memory as BaseChatMemory + const memory = nodeData.inputs?.memory + memory.returnMessages = true // Return true for BaseChatModel - if (options && options.chatHistory) { - const chatHistoryClassName = memory.chatHistory.constructor.name - // Only replace when its In-Memory - if (chatHistoryClassName && chatHistoryClassName === 'ChatMessageHistory') { - memory.chatHistory = mapChatHistory(options) - executor.memory = memory - } + /* When incomingInput.history is provided, only force replace chatHistory if its ShortTermMemory + * LongTermMemory will automatically retrieved chatHistory from sessionId + */ + if (options && options.chatHistory && memory.isShortTermMemory) { + await memory.resumeMessages(options.chatHistory) } - ;(executor.memory as any).returnMessages = true // Return true for BaseChatModel + executor.memory = memory const loggerHandler = new ConsoleCallbackHandler(options.logger) const callbacks = await additionalCallbacks(nodeData, options) diff --git a/packages/components/nodes/chains/ConversationChain/ConversationChain.ts b/packages/components/nodes/chains/ConversationChain/ConversationChain.ts index 7887ce97..aa9b1a8a 100644 --- a/packages/components/nodes/chains/ConversationChain/ConversationChain.ts +++ b/packages/components/nodes/chains/ConversationChain/ConversationChain.ts @@ -1,6 +1,6 @@ import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface' import { ConversationChain } from 'langchain/chains' -import { getBaseClasses, mapChatHistory } from '../../../src/utils' +import { getBaseClasses } from '../../../src/utils' import { ChatPromptTemplate, HumanMessagePromptTemplate, MessagesPlaceholder, SystemMessagePromptTemplate } from 
'langchain/prompts' import { BufferMemory } from 'langchain/memory' import { BaseChatModel } from 'langchain/chat_models/base' @@ -105,15 +105,14 @@ class ConversationChain_Chains implements INode { async run(nodeData: INodeData, input: string, options: ICommonObject): Promise { const chain = nodeData.instance as ConversationChain - const memory = nodeData.inputs?.memory as BufferMemory + const memory = nodeData.inputs?.memory memory.returnMessages = true // Return true for BaseChatModel - if (options && options.chatHistory) { - const chatHistoryClassName = memory.chatHistory.constructor.name - // Only replace when its In-Memory - if (chatHistoryClassName && chatHistoryClassName === 'ChatMessageHistory') { - memory.chatHistory = mapChatHistory(options) - } + /* When incomingInput.history is provided, only force replace chatHistory if its ShortTermMemory + * LongTermMemory will automatically retrieved chatHistory from sessionId + */ + if (options && options.chatHistory && memory.isShortTermMemory) { + await memory.resumeMessages(options.chatHistory) } chain.memory = memory diff --git a/packages/components/nodes/chains/ConversationalRetrievalQAChain/ConversationalRetrievalQAChain.ts b/packages/components/nodes/chains/ConversationalRetrievalQAChain/ConversationalRetrievalQAChain.ts index 9a8c1b18..d8fb4225 100644 --- a/packages/components/nodes/chains/ConversationalRetrievalQAChain/ConversationalRetrievalQAChain.ts +++ b/packages/components/nodes/chains/ConversationalRetrievalQAChain/ConversationalRetrievalQAChain.ts @@ -1,9 +1,9 @@ import { BaseLanguageModel } from 'langchain/base_language' -import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface' -import { getBaseClasses, mapChatHistory } from '../../../src/utils' +import { ICommonObject, IMessage, INode, INodeData, INodeParams } from '../../../src/Interface' +import { getBaseClasses } from '../../../src/utils' import { ConversationalRetrievalQAChain, QAChainParams } from 
'langchain/chains' import { BaseRetriever } from 'langchain/schema/retriever' -import { BufferMemory, BufferMemoryInput } from 'langchain/memory' +import { BufferMemoryInput, BufferMemory } from 'langchain/memory' import { PromptTemplate } from 'langchain/prompts' import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler' import { @@ -158,7 +158,7 @@ class ConversationalRetrievalQAChain_Chains implements INode { returnMessages: true } if (chainOption === 'refine') fields.outputKey = 'output_text' - obj.memory = new BufferMemory(fields) + obj.memory = new BufferMemoryExtended(fields) } const chain = ConversationalRetrievalQAChain.fromLLM(model, vectorStoreRetriever, obj) @@ -178,12 +178,11 @@ class ConversationalRetrievalQAChain_Chains implements INode { const obj = { question: input } - if (options && options.chatHistory && chain.memory) { - const chatHistoryClassName = (chain.memory as any).chatHistory.constructor.name - // Only replace when its In-Memory - if (chatHistoryClassName && chatHistoryClassName === 'ChatMessageHistory') { - ;(chain.memory as any).chatHistory = mapChatHistory(options) - } + /* When incomingInput.history is provided, only force replace chatHistory if its ShortTermMemory + * LongTermMemory will automatically retrieved chatHistory from sessionId + */ + if (options && options.chatHistory && chain.memory && (chain.memory as any).isShortTermMemory) { + await (chain.memory as any).resumeMessages(options.chatHistory) } const loggerHandler = new ConsoleCallbackHandler(options.logger) @@ -216,4 +215,27 @@ class ConversationalRetrievalQAChain_Chains implements INode { } } +class BufferMemoryExtended extends BufferMemory { + isShortTermMemory = true + + constructor(fields: BufferMemoryInput) { + super(fields) + } + + async clearChatMessages(): Promise { + await this.clear() + } + + async resumeMessages(messages: IMessage[]): Promise { + // Clear existing chatHistory to avoid duplication + if 
(messages.length) await this.clear() + + // Insert into chatHistory + for (const msg of messages) { + if (msg.type === 'userMessage') await this.chatHistory.addUserMessage(msg.message) + else if (msg.type === 'apiMessage') await this.chatHistory.addAIChatMessage(msg.message) + } + } +} + module.exports = { nodeClass: ConversationalRetrievalQAChain_Chains } diff --git a/packages/components/nodes/chatmodels/AzureChatOpenAI/AzureChatOpenAI_LlamaIndex.ts b/packages/components/nodes/chatmodels/AzureChatOpenAI/AzureChatOpenAI_LlamaIndex.ts new file mode 100644 index 00000000..850c2bc5 --- /dev/null +++ b/packages/components/nodes/chatmodels/AzureChatOpenAI/AzureChatOpenAI_LlamaIndex.ts @@ -0,0 +1,135 @@ +import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface' +import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils' +import { OpenAI, ALL_AVAILABLE_OPENAI_MODELS } from 'llamaindex' + +interface AzureOpenAIConfig { + apiKey?: string + endpoint?: string + apiVersion?: string + deploymentName?: string +} + +class AzureChatOpenAI_LlamaIndex_ChatModels implements INode { + label: string + name: string + version: number + type: string + icon: string + category: string + description: string + baseClasses: string[] + tags: string[] + credential: INodeParams + inputs: INodeParams[] + + constructor() { + this.label = 'AzureChatOpenAI' + this.name = 'azureChatOpenAI_LlamaIndex' + this.version = 1.0 + this.type = 'AzureChatOpenAI' + this.icon = 'Azure.svg' + this.category = 'Chat Models' + this.description = 'Wrapper around Azure OpenAI Chat LLM with LlamaIndex implementation' + this.baseClasses = [this.type, 'BaseChatModel_LlamaIndex', ...getBaseClasses(OpenAI)] + this.tags = ['LlamaIndex'] + this.credential = { + label: 'Connect Credential', + name: 'credential', + type: 'credential', + credentialNames: ['azureOpenAIApi'] + } + this.inputs = [ + { + label: 'Model Name', + name: 'modelName', + type: 'options', + 
options: [ + { + label: 'gpt-4', + name: 'gpt-4' + }, + { + label: 'gpt-4-32k', + name: 'gpt-4-32k' + }, + { + label: 'gpt-3.5-turbo', + name: 'gpt-3.5-turbo' + }, + { + label: 'gpt-3.5-turbo-16k', + name: 'gpt-3.5-turbo-16k' + } + ], + default: 'gpt-3.5-turbo-16k', + optional: true + }, + { + label: 'Temperature', + name: 'temperature', + type: 'number', + step: 0.1, + default: 0.9, + optional: true + }, + { + label: 'Max Tokens', + name: 'maxTokens', + type: 'number', + step: 1, + optional: true, + additionalParams: true + }, + { + label: 'Top Probability', + name: 'topP', + type: 'number', + step: 0.1, + optional: true, + additionalParams: true + }, + { + label: 'Timeout', + name: 'timeout', + type: 'number', + step: 1, + optional: true, + additionalParams: true + } + ] + } + + async init(nodeData: INodeData, _: string, options: ICommonObject): Promise { + const modelName = nodeData.inputs?.modelName as keyof typeof ALL_AVAILABLE_OPENAI_MODELS + const temperature = nodeData.inputs?.temperature as string + const maxTokens = nodeData.inputs?.maxTokens as string + const topP = nodeData.inputs?.topP as string + const timeout = nodeData.inputs?.timeout as string + + const credentialData = await getCredentialData(nodeData.credential ?? 
'', options) + const azureOpenAIApiKey = getCredentialParam('azureOpenAIApiKey', credentialData, nodeData) + const azureOpenAIApiInstanceName = getCredentialParam('azureOpenAIApiInstanceName', credentialData, nodeData) + const azureOpenAIApiDeploymentName = getCredentialParam('azureOpenAIApiDeploymentName', credentialData, nodeData) + const azureOpenAIApiVersion = getCredentialParam('azureOpenAIApiVersion', credentialData, nodeData) + + const obj: Partial & { azure?: AzureOpenAIConfig } = { + temperature: parseFloat(temperature), + model: modelName, + azure: { + apiKey: azureOpenAIApiKey, + endpoint: `https://${azureOpenAIApiInstanceName}.openai.azure.com`, + apiVersion: azureOpenAIApiVersion, + deploymentName: azureOpenAIApiDeploymentName + } + } + + if (maxTokens) obj.maxTokens = parseInt(maxTokens, 10) + if (topP) obj.topP = parseFloat(topP) + if (timeout) obj.timeout = parseInt(timeout, 10) + + const model = new OpenAI(obj) + return model + } +} + +module.exports = { nodeClass: AzureChatOpenAI_LlamaIndex_ChatModels } diff --git a/packages/components/nodes/chatmodels/ChatAnthropic/ChatAnthropic.ts b/packages/components/nodes/chatmodels/ChatAnthropic/ChatAnthropic.ts index 358a15d1..8209c04a 100644 --- a/packages/components/nodes/chatmodels/ChatAnthropic/ChatAnthropic.ts +++ b/packages/components/nodes/chatmodels/ChatAnthropic/ChatAnthropic.ts @@ -3,6 +3,7 @@ import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../ import { AnthropicInput, ChatAnthropic } from 'langchain/chat_models/anthropic' import { BaseCache } from 'langchain/schema' import { BaseLLMParams } from 'langchain/llms/base' +import { availableModels } from './utils' class ChatAnthropic_ChatModels implements INode { label: string @@ -42,67 +43,7 @@ class ChatAnthropic_ChatModels implements INode { label: 'Model Name', name: 'modelName', type: 'options', - options: [ - { - label: 'claude-2', - name: 'claude-2', - description: 'Claude 2 latest major version, automatically get 
updates to the model as they are released' - }, - { - label: 'claude-2.1', - name: 'claude-2.1', - description: 'Claude 2 latest full version' - }, - { - label: 'claude-instant-1', - name: 'claude-instant-1', - description: 'Claude Instant latest major version, automatically get updates to the model as they are released' - }, - { - label: 'claude-v1', - name: 'claude-v1' - }, - { - label: 'claude-v1-100k', - name: 'claude-v1-100k' - }, - { - label: 'claude-v1.0', - name: 'claude-v1.0' - }, - { - label: 'claude-v1.2', - name: 'claude-v1.2' - }, - { - label: 'claude-v1.3', - name: 'claude-v1.3' - }, - { - label: 'claude-v1.3-100k', - name: 'claude-v1.3-100k' - }, - { - label: 'claude-instant-v1', - name: 'claude-instant-v1' - }, - { - label: 'claude-instant-v1-100k', - name: 'claude-instant-v1-100k' - }, - { - label: 'claude-instant-v1.0', - name: 'claude-instant-v1.0' - }, - { - label: 'claude-instant-v1.1', - name: 'claude-instant-v1.1' - }, - { - label: 'claude-instant-v1.1-100k', - name: 'claude-instant-v1.1-100k' - } - ], + options: [...availableModels], default: 'claude-2', optional: true }, diff --git a/packages/components/nodes/chatmodels/ChatAnthropic/ChatAnthropic_LlamaIndex.ts b/packages/components/nodes/chatmodels/ChatAnthropic/ChatAnthropic_LlamaIndex.ts new file mode 100644 index 00000000..b989ef76 --- /dev/null +++ b/packages/components/nodes/chatmodels/ChatAnthropic/ChatAnthropic_LlamaIndex.ts @@ -0,0 +1,94 @@ +import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface' +import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils' +import { Anthropic } from 'llamaindex' +import { availableModels } from './utils' + +class ChatAnthropic_LlamaIndex_ChatModels implements INode { + label: string + name: string + version: number + type: string + icon: string + category: string + description: string + tags: string[] + baseClasses: string[] + credential: INodeParams + inputs: INodeParams[] + + 
constructor() { + this.label = 'ChatAnthropic' + this.name = 'chatAnthropic_LlamaIndex' + this.version = 1.0 + this.type = 'ChatAnthropic' + this.icon = 'chatAnthropic.png' + this.category = 'Chat Models' + this.description = 'Wrapper around ChatAnthropic LLM with LlamaIndex implementation' + this.baseClasses = [this.type, 'BaseChatModel_LlamaIndex', ...getBaseClasses(Anthropic)] + this.tags = ['LlamaIndex'] + this.credential = { + label: 'Connect Credential', + name: 'credential', + type: 'credential', + credentialNames: ['anthropicApi'] + } + this.inputs = [ + { + label: 'Model Name', + name: 'modelName', + type: 'options', + options: [...availableModels], + default: 'claude-2', + optional: true + }, + { + label: 'Temperature', + name: 'temperature', + type: 'number', + step: 0.1, + default: 0.9, + optional: true + }, + { + label: 'Max Tokens', + name: 'maxTokensToSample', + type: 'number', + step: 1, + optional: true, + additionalParams: true + }, + { + label: 'Top P', + name: 'topP', + type: 'number', + step: 0.1, + optional: true, + additionalParams: true + } + ] + } + + async init(nodeData: INodeData, _: string, options: ICommonObject): Promise { + const temperature = nodeData.inputs?.temperature as string + const modelName = nodeData.inputs?.modelName as string + const maxTokensToSample = nodeData.inputs?.maxTokensToSample as string + const topP = nodeData.inputs?.topP as string + + const credentialData = await getCredentialData(nodeData.credential ?? 
'', options) + const anthropicApiKey = getCredentialParam('anthropicApiKey', credentialData, nodeData) + + const obj: Partial = { + temperature: parseFloat(temperature), + model: modelName, + apiKey: anthropicApiKey + } + + if (maxTokensToSample) obj.maxTokens = parseInt(maxTokensToSample, 10) + if (topP) obj.topP = parseFloat(topP) + + const model = new Anthropic(obj) + return model + } +} + +module.exports = { nodeClass: ChatAnthropic_LlamaIndex_ChatModels } diff --git a/packages/components/nodes/chatmodels/ChatAnthropic/utils.ts b/packages/components/nodes/chatmodels/ChatAnthropic/utils.ts new file mode 100644 index 00000000..209996a6 --- /dev/null +++ b/packages/components/nodes/chatmodels/ChatAnthropic/utils.ts @@ -0,0 +1,61 @@ +export const availableModels = [ + { + label: 'claude-2', + name: 'claude-2', + description: 'Claude 2 latest major version, automatically get updates to the model as they are released' + }, + { + label: 'claude-2.1', + name: 'claude-2.1', + description: 'Claude 2 latest full version' + }, + { + label: 'claude-instant-1', + name: 'claude-instant-1', + description: 'Claude Instant latest major version, automatically get updates to the model as they are released' + }, + { + label: 'claude-v1', + name: 'claude-v1' + }, + { + label: 'claude-v1-100k', + name: 'claude-v1-100k' + }, + { + label: 'claude-v1.0', + name: 'claude-v1.0' + }, + { + label: 'claude-v1.2', + name: 'claude-v1.2' + }, + { + label: 'claude-v1.3', + name: 'claude-v1.3' + }, + { + label: 'claude-v1.3-100k', + name: 'claude-v1.3-100k' + }, + { + label: 'claude-instant-v1', + name: 'claude-instant-v1' + }, + { + label: 'claude-instant-v1-100k', + name: 'claude-instant-v1-100k' + }, + { + label: 'claude-instant-v1.0', + name: 'claude-instant-v1.0' + }, + { + label: 'claude-instant-v1.1', + name: 'claude-instant-v1.1' + }, + { + label: 'claude-instant-v1.1-100k', + name: 'claude-instant-v1.1-100k' + } +] diff --git 
a/packages/components/nodes/chatmodels/ChatOpenAI/ChatOpenAI_LlamaIndex.ts b/packages/components/nodes/chatmodels/ChatOpenAI/ChatOpenAI_LlamaIndex.ts new file mode 100644 index 00000000..147bfe3f --- /dev/null +++ b/packages/components/nodes/chatmodels/ChatOpenAI/ChatOpenAI_LlamaIndex.ts @@ -0,0 +1,148 @@ +import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface' +import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils' +import { OpenAI, ALL_AVAILABLE_OPENAI_MODELS } from 'llamaindex' + +class ChatOpenAI_LlamaIndex_LLMs implements INode { + label: string + name: string + version: number + type: string + icon: string + category: string + description: string + baseClasses: string[] + tags: string[] + credential: INodeParams + inputs: INodeParams[] + + constructor() { + this.label = 'ChatOpenAI' + this.name = 'chatOpenAI_LlamaIndex' + this.version = 1.0 + this.type = 'ChatOpenAI' + this.icon = 'openai.png' + this.category = 'Chat Models' + this.description = 'Wrapper around OpenAI Chat LLM with LlamaIndex implementation' + this.baseClasses = [this.type, 'BaseChatModel_LlamaIndex', ...getBaseClasses(OpenAI)] + this.tags = ['LlamaIndex'] + this.credential = { + label: 'Connect Credential', + name: 'credential', + type: 'credential', + credentialNames: ['openAIApi'] + } + this.inputs = [ + { + label: 'Model Name', + name: 'modelName', + type: 'options', + options: [ + { + label: 'gpt-4', + name: 'gpt-4' + }, + { + label: 'gpt-4-1106-preview', + name: 'gpt-4-1106-preview' + }, + { + label: 'gpt-4-vision-preview', + name: 'gpt-4-vision-preview' + }, + { + label: 'gpt-4-0613', + name: 'gpt-4-0613' + }, + { + label: 'gpt-4-32k', + name: 'gpt-4-32k' + }, + { + label: 'gpt-4-32k-0613', + name: 'gpt-4-32k-0613' + }, + { + label: 'gpt-3.5-turbo', + name: 'gpt-3.5-turbo' + }, + { + label: 'gpt-3.5-turbo-1106', + name: 'gpt-3.5-turbo-1106' + }, + { + label: 'gpt-3.5-turbo-0613', + name: 'gpt-3.5-turbo-0613' + }, + { + 
label: 'gpt-3.5-turbo-16k', + name: 'gpt-3.5-turbo-16k' + }, + { + label: 'gpt-3.5-turbo-16k-0613', + name: 'gpt-3.5-turbo-16k-0613' + } + ], + default: 'gpt-3.5-turbo', + optional: true + }, + { + label: 'Temperature', + name: 'temperature', + type: 'number', + step: 0.1, + default: 0.9, + optional: true + }, + { + label: 'Max Tokens', + name: 'maxTokens', + type: 'number', + step: 1, + optional: true, + additionalParams: true + }, + { + label: 'Top Probability', + name: 'topP', + type: 'number', + step: 0.1, + optional: true, + additionalParams: true + }, + { + label: 'Timeout', + name: 'timeout', + type: 'number', + step: 1, + optional: true, + additionalParams: true + } + ] + } + + async init(nodeData: INodeData, _: string, options: ICommonObject): Promise { + const temperature = nodeData.inputs?.temperature as string + const modelName = nodeData.inputs?.modelName as keyof typeof ALL_AVAILABLE_OPENAI_MODELS + const maxTokens = nodeData.inputs?.maxTokens as string + const topP = nodeData.inputs?.topP as string + const timeout = nodeData.inputs?.timeout as string + + const credentialData = await getCredentialData(nodeData.credential ?? 
'', options) + const openAIApiKey = getCredentialParam('openAIApiKey', credentialData, nodeData) + + const obj: Partial = { + temperature: parseFloat(temperature), + model: modelName, + apiKey: openAIApiKey + } + + if (maxTokens) obj.maxTokens = parseInt(maxTokens, 10) + if (topP) obj.topP = parseFloat(topP) + if (timeout) obj.timeout = parseInt(timeout, 10) + + const model = new OpenAI(obj) + return model + } +} + +module.exports = { nodeClass: ChatOpenAI_LlamaIndex_LLMs } diff --git a/packages/components/nodes/embeddings/AzureOpenAIEmbedding/AzureOpenAIEmbedding_LlamaIndex.ts b/packages/components/nodes/embeddings/AzureOpenAIEmbedding/AzureOpenAIEmbedding_LlamaIndex.ts new file mode 100644 index 00000000..38e45402 --- /dev/null +++ b/packages/components/nodes/embeddings/AzureOpenAIEmbedding/AzureOpenAIEmbedding_LlamaIndex.ts @@ -0,0 +1,77 @@ +import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface' +import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils' +import { OpenAIEmbedding } from 'llamaindex' + +interface AzureOpenAIConfig { + apiKey?: string + endpoint?: string + apiVersion?: string + deploymentName?: string +} + +class AzureOpenAIEmbedding_LlamaIndex_Embeddings implements INode { + label: string + name: string + version: number + type: string + icon: string + category: string + description: string + baseClasses: string[] + credential: INodeParams + tags: string[] + inputs: INodeParams[] + + constructor() { + this.label = 'Azure OpenAI Embeddings' + this.name = 'azureOpenAIEmbeddingsLlamaIndex' + this.version = 1.0 + this.type = 'AzureOpenAIEmbeddings' + this.icon = 'Azure.svg' + this.category = 'Embeddings' + this.description = 'Azure OpenAI API embeddings with LlamaIndex implementation' + this.baseClasses = [this.type, 'BaseEmbedding_LlamaIndex', ...getBaseClasses(OpenAIEmbedding)] + this.tags = ['LlamaIndex'] + this.credential = { + label: 'Connect Credential', + name: 'credential', + 
type: 'credential', + credentialNames: ['azureOpenAIApi'] + } + this.inputs = [ + { + label: 'Timeout', + name: 'timeout', + type: 'number', + optional: true, + additionalParams: true + } + ] + } + + async init(nodeData: INodeData, _: string, options: ICommonObject): Promise { + const timeout = nodeData.inputs?.timeout as string + + const credentialData = await getCredentialData(nodeData.credential ?? '', options) + const azureOpenAIApiKey = getCredentialParam('azureOpenAIApiKey', credentialData, nodeData) + const azureOpenAIApiInstanceName = getCredentialParam('azureOpenAIApiInstanceName', credentialData, nodeData) + const azureOpenAIApiDeploymentName = getCredentialParam('azureOpenAIApiDeploymentName', credentialData, nodeData) + const azureOpenAIApiVersion = getCredentialParam('azureOpenAIApiVersion', credentialData, nodeData) + + const obj: Partial & { azure?: AzureOpenAIConfig } = { + azure: { + apiKey: azureOpenAIApiKey, + endpoint: `https://${azureOpenAIApiInstanceName}.openai.azure.com`, + apiVersion: azureOpenAIApiVersion, + deploymentName: azureOpenAIApiDeploymentName + } + } + + if (timeout) obj.timeout = parseInt(timeout, 10) + + const model = new OpenAIEmbedding(obj) + return model + } +} + +module.exports = { nodeClass: AzureOpenAIEmbedding_LlamaIndex_Embeddings } diff --git a/packages/components/nodes/embeddings/OpenAIEmbedding/OpenAIEmbedding_LlamaIndex.ts b/packages/components/nodes/embeddings/OpenAIEmbedding/OpenAIEmbedding_LlamaIndex.ts new file mode 100644 index 00000000..4ff780e3 --- /dev/null +++ b/packages/components/nodes/embeddings/OpenAIEmbedding/OpenAIEmbedding_LlamaIndex.ts @@ -0,0 +1,68 @@ +import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface' +import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils' +import { OpenAIEmbedding } from 'llamaindex' + +class OpenAIEmbedding_LlamaIndex_Embeddings implements INode { + label: string + name: string + version: number + type: 
string + icon: string + category: string + description: string + baseClasses: string[] + tags: string[] + credential: INodeParams + inputs: INodeParams[] + + constructor() { + this.label = 'OpenAI Embedding' + this.name = 'openAIEmbedding_LlamaIndex' + this.version = 1.0 + this.type = 'OpenAIEmbedding' + this.icon = 'openai.png' + this.category = 'Embeddings' + this.description = 'OpenAI Embedding with LlamaIndex implementation' + this.baseClasses = [this.type, 'BaseEmbedding_LlamaIndex', ...getBaseClasses(OpenAIEmbedding)] + this.tags = ['LlamaIndex'] + this.credential = { + label: 'Connect Credential', + name: 'credential', + type: 'credential', + credentialNames: ['openAIApi'] + } + this.inputs = [ + { + label: 'Timeout', + name: 'timeout', + type: 'number', + optional: true, + additionalParams: true + }, + { + label: 'BasePath', + name: 'basepath', + type: 'string', + optional: true, + additionalParams: true + } + ] + } + + async init(nodeData: INodeData, _: string, options: ICommonObject): Promise { + const timeout = nodeData.inputs?.timeout as string + + const credentialData = await getCredentialData(nodeData.credential ?? 
'', options) + const openAIApiKey = getCredentialParam('openAIApiKey', credentialData, nodeData) + + const obj: Partial = { + apiKey: openAIApiKey + } + if (timeout) obj.timeout = parseInt(timeout, 10) + + const model = new OpenAIEmbedding(obj) + return model + } +} + +module.exports = { nodeClass: OpenAIEmbedding_LlamaIndex_Embeddings } diff --git a/packages/components/nodes/engine/ChatEngine/ContextChatEngine.ts b/packages/components/nodes/engine/ChatEngine/ContextChatEngine.ts new file mode 100644 index 00000000..dd77601a --- /dev/null +++ b/packages/components/nodes/engine/ChatEngine/ContextChatEngine.ts @@ -0,0 +1,178 @@ +import { ICommonObject, IMessage, INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface' +import { ContextChatEngine, ChatMessage } from 'llamaindex' + +class ContextChatEngine_LlamaIndex implements INode { + label: string + name: string + version: number + description: string + type: string + icon: string + category: string + baseClasses: string[] + tags: string[] + inputs: INodeParams[] + outputs: INodeOutputsValue[] + + constructor() { + this.label = 'Context Chat Engine' + this.name = 'contextChatEngine' + this.version = 1.0 + this.type = 'ContextChatEngine' + this.icon = 'context-chat-engine.png' + this.category = 'Engine' + this.description = 'Answer question based on retrieved documents (context) with built-in memory to remember conversation' + this.baseClasses = [this.type] + this.tags = ['LlamaIndex'] + this.inputs = [ + { + label: 'Chat Model', + name: 'model', + type: 'BaseChatModel_LlamaIndex' + }, + { + label: 'Vector Store Retriever', + name: 'vectorStoreRetriever', + type: 'VectorIndexRetriever' + }, + { + label: 'Memory', + name: 'memory', + type: 'BaseChatMemory' + }, + { + label: 'System Message', + name: 'systemMessagePrompt', + type: 'string', + rows: 4, + optional: true, + placeholder: + 'I want you to act as a document that I am having a conversation with. Your name is "AI Assistant". 
You will provide me with answers from the given info. If the answer is not included, say exactly "Hmm, I am not sure." and stop after that. Refuse to answer any question not about the info. Never break character.' + } + ] + } + + async init(nodeData: INodeData): Promise { + const model = nodeData.inputs?.model + const vectorStoreRetriever = nodeData.inputs?.vectorStoreRetriever + const memory = nodeData.inputs?.memory + + const chatEngine = new ContextChatEngine({ chatModel: model, retriever: vectorStoreRetriever }) + ;(chatEngine as any).memory = memory + return chatEngine + } + + async run(nodeData: INodeData, input: string, options: ICommonObject): Promise { + const chatEngine = nodeData.instance as ContextChatEngine + const systemMessagePrompt = nodeData.inputs?.systemMessagePrompt as string + const memory = nodeData.inputs?.memory + + const chatHistory = [] as ChatMessage[] + + let sessionId = '' + if (memory) { + if (memory.isSessionIdUsingChatMessageId) sessionId = options.chatId + else sessionId = nodeData.inputs?.sessionId + } + + if (systemMessagePrompt) { + chatHistory.push({ + content: systemMessagePrompt, + role: 'user' + }) + } + + /* When incomingInput.history is provided, only force replace chatHistory if its ShortTermMemory + * LongTermMemory will automatically retrieved chatHistory from sessionId + */ + if (options && options.chatHistory && memory.isShortTermMemory) { + await memory.resumeMessages(options.chatHistory) + } + + const msgs: IMessage[] = await memory.getChatMessages(sessionId) + for (const message of msgs) { + if (message.type === 'apiMessage') { + chatHistory.push({ + content: message.message, + role: 'assistant' + }) + } else if (message.type === 'userMessage') { + chatHistory.push({ + content: message.message, + role: 'user' + }) + } + } + + if (options.socketIO && options.socketIOClientId) { + let response = '' + const stream = await chatEngine.chat(input, chatHistory, true) + let isStart = true + const onNextPromise = () => { + 
return new Promise((resolve, reject) => { + const onNext = async () => { + try { + const { value, done } = await stream.next() + if (!done) { + if (isStart) { + options.socketIO.to(options.socketIOClientId).emit('start') + isStart = false + } + options.socketIO.to(options.socketIOClientId).emit('token', value) + response += value + onNext() + } else { + resolve(response) + } + } catch (error) { + reject(error) + } + } + onNext() + }) + } + + try { + const result = await onNextPromise() + if (memory) { + await memory.addChatMessages( + [ + { + text: input, + type: 'userMessage' + }, + { + text: result, + type: 'apiMessage' + } + ], + sessionId + ) + } + return result as string + } catch (error) { + throw new Error(error) + } + } else { + const response = await chatEngine.chat(input, chatHistory) + if (memory) { + await memory.addChatMessages( + [ + { + text: input, + type: 'userMessage' + }, + { + text: response?.response, + type: 'apiMessage' + } + ], + sessionId + ) + } + return response?.response + } + } +} + +module.exports = { nodeClass: ContextChatEngine_LlamaIndex } diff --git a/packages/components/nodes/engine/ChatEngine/SimpleChatEngine.ts b/packages/components/nodes/engine/ChatEngine/SimpleChatEngine.ts new file mode 100644 index 00000000..9ae9c2f1 --- /dev/null +++ b/packages/components/nodes/engine/ChatEngine/SimpleChatEngine.ts @@ -0,0 +1,171 @@ +import { ICommonObject, IMessage, INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface' +import { ChatMessage, SimpleChatEngine } from 'llamaindex' + +class SimpleChatEngine_LlamaIndex implements INode { + label: string + name: string + version: number + description: string + type: string + icon: string + category: string + baseClasses: string[] + tags: string[] + inputs: INodeParams[] + outputs: INodeOutputsValue[] + + constructor() { + this.label = 'Simple Chat Engine' + this.name = 'simpleChatEngine' + this.version = 1.0 + this.type = 'SimpleChatEngine' + this.icon = 
'chat-engine.png' + this.category = 'Engine' + this.description = 'Simple engine to handle back and forth conversations' + this.baseClasses = [this.type] + this.tags = ['LlamaIndex'] + this.inputs = [ + { + label: 'Chat Model', + name: 'model', + type: 'BaseChatModel_LlamaIndex' + }, + { + label: 'Memory', + name: 'memory', + type: 'BaseChatMemory' + }, + { + label: 'System Message', + name: 'systemMessagePrompt', + type: 'string', + rows: 4, + optional: true, + placeholder: 'You are a helpful assistant' + } + ] + } + + async init(nodeData: INodeData): Promise { + const model = nodeData.inputs?.model + const memory = nodeData.inputs?.memory + + const chatEngine = new SimpleChatEngine({ llm: model }) + ;(chatEngine as any).memory = memory + return chatEngine + } + + async run(nodeData: INodeData, input: string, options: ICommonObject): Promise { + const chatEngine = nodeData.instance as SimpleChatEngine + const systemMessagePrompt = nodeData.inputs?.systemMessagePrompt as string + const memory = nodeData.inputs?.memory + + const chatHistory = [] as ChatMessage[] + + let sessionId = '' + if (memory) { + if (memory.isSessionIdUsingChatMessageId) sessionId = options.chatId + else sessionId = nodeData.inputs?.sessionId + } + + if (systemMessagePrompt) { + chatHistory.push({ + content: systemMessagePrompt, + role: 'user' + }) + } + + /* When incomingInput.history is provided, only force replace chatHistory if its ShortTermMemory + * LongTermMemory will automatically retrieved chatHistory from sessionId + */ + if (options && options.chatHistory && memory.isShortTermMemory) { + await memory.resumeMessages(options.chatHistory) + } + + const msgs: IMessage[] = await memory.getChatMessages(sessionId) + for (const message of msgs) { + if (message.type === 'apiMessage') { + chatHistory.push({ + content: message.message, + role: 'assistant' + }) + } else if (message.type === 'userMessage') { + chatHistory.push({ + content: message.message, + role: 'user' + }) + } + } + + if 
(options.socketIO && options.socketIOClientId) { + let response = '' + const stream = await chatEngine.chat(input, chatHistory, true) + let isStart = true + const onNextPromise = () => { + return new Promise((resolve, reject) => { + const onNext = async () => { + try { + const { value, done } = await stream.next() + if (!done) { + if (isStart) { + options.socketIO.to(options.socketIOClientId).emit('start') + isStart = false + } + options.socketIO.to(options.socketIOClientId).emit('token', value) + response += value + onNext() + } else { + resolve(response) + } + } catch (error) { + reject(error) + } + } + onNext() + }) + } + + try { + const result = await onNextPromise() + if (memory) { + await memory.addChatMessages( + [ + { + text: input, + type: 'userMessage' + }, + { + text: result, + type: 'apiMessage' + } + ], + sessionId + ) + } + return result as string + } catch (error) { + throw new Error(error) + } + } else { + const response = await chatEngine.chat(input, chatHistory) + if (memory) { + await memory.addChatMessages( + [ + { + text: input, + type: 'userMessage' + }, + { + text: response?.response, + type: 'apiMessage' + } + ], + sessionId + ) + } + return response?.response + } + } +} + +module.exports = { nodeClass: SimpleChatEngine_LlamaIndex } diff --git a/packages/components/nodes/engine/ChatEngine/chat-engine.png b/packages/components/nodes/engine/ChatEngine/chat-engine.png new file mode 100644 index 0000000000000000000000000000000000000000..d614b8887ee270317dd5e98041da40cca59d9043 GIT binary patch literal 10012 zcmeHtXH=6*_wSPgLXje((whc6pcJKqBIO_@0RcstQWX^e=^z~vk*3l@QRxSw2uM?! 
zp~cEk4j=)fLj;v35TrvO{|WE^-gVz~@0a(({c_h`3)alDpFMl;nb|YXZ+?@+3+Be$ zoWh(C1aV_A29^*62UR%4js(SONZ&RnID9de10aa!9rGX7?(_UQ5DEnv+67+nxfvLI z)&B+*92~6R?(G@ide!%af{(vj&f-~N2$F=b27g+GXy*a)RlJA+GiEf_8uxo@bv<&-K<3FA%!dzHsGWm9^wjb+{ zm&Pw1W5simrHRUHmNou7kST5eDoH!a_)xQe5l(H#x*_+(8wEGbZfjX?_;W$V@FiAr zSQhKXG55_ywpJFpysx9LqNSTZC!~nbVQI!UZxgJN%ke4j`C^f@;jDN1S`D-U!LbBX zCrNnou&;aIaZCGS2yzd3h+Ko$#!&vyMeYeUs*(6g-IZawE0@V7Scy{t(O3Gj|e)z&4D$SeTi{TH1e2 zk9sn6cr3xnSO7+4p&p}Zh|y1B=iWvV57H`TjEL@aTSv%Femy4vD_t}%hM9!RyK zyfXQU(7HB~CNuC=2#3Harh6}^6Qy=wvu|z$8>5ez6(vEwf6%lDDm_4~Xkm|h@u>8o1&`(r`8ShI!LDgBSSrxm&bY^>gq#8e*a^LrGGszL6iRYVa~D#GysQ|0$E@qW?i&JZ1;ut%_Jk0QqguE^bS zC;B1CP1ucBCWJW+{Hokl**L$7gLaA33Hc>AAzyC@W1DXIl$$;>h)- zq3xT2$K`DMg>b553)-GF)?=85s!q+GF|vAA<_UFu_~1+!RywEoPA*pP2;_7?bj12N z!K(04ssd+*CRz@PAih#Sl{$3Qah{$~h1y}db1(U@a)ZmnhH{gw!orhB{m+l199{fa+|~P4PDPG1x1F+qg3xi^}wZ`5FcC%XKWeK}XY_ENjjd z3Si}icF88xG!RbQST~e=??D^N(9S5#vAQp@<<8DduRiJ4I1i<_PA#?4tq~u1)Ac0n zmWwlC?y^DHy+s(&ZMF*b6=tlw*U{;JN7EMz%y+jEFIi}XI|M_~NYO#sAm;Ok93^vA zB(WfVuJ0YlAj^_2$vtX8y&8g%7|0z%^p}U}K)0o$oU-f3$2Om1j^%`O%ATs=z+m6wb(7LTM_Sf&u7<{uHaaAx5bICSF`q}MEhoZk& zSdQTM?S(k7=K>D7cO~RE$dtYVkM6u(s6CuSgE+$6+f5L~-IusJe;((C?tc<}Y!9~) zBUNhV#-|&dWX~9+2x2V!w3gZE>T4hBk$RSV3NlgRA|>8l?mJIUTsES=(J}XuavVB| z?Y&}7@=_1eDK8Tvzq%rf;J{dPg6jRCKR;Sr}uiKkCNP6x;C#d*a=Ie3xy zHk)Q-P~2}>$$33y55HBhtQ+KE(J!V#aM|#_IccwBj*TBA%s)DgKLSJ|j zocr8b`8#9o(|i$A_PPM31OW+o95euH3Wklz*cm14jK0XvfgLqQU;I`%gG6y#E(UZE zm{CQtL-zZ|cnyIusL)1UQv1#sw*yRLc3shOU(W|32&mFVPhHf0LkIL++4~_`Pnxi;-Ze z9{c&ukF~lnX2COOU-b!4^;vQ5TUM>zmhz?CbDg+9gP$2;D>V7;UbCV(*jlf_3^zS7 zf1*qdoxko=A7wzYK68E*T3tXWeJ*{UG9X*wb0d5|$WMkTw$?vT@l&1&Yl6N!v=__4 zh5<$BRtpK-#Vo`IpRSYr%(VQK{SgZ+YA-MIPwQA#{XWb^H+FgRc!$W>BQo}gS1)rV zQKI~{X2TCJubBNH^JZ$w${fB2MsT;Z9uQR;_bycob1g69wMiQ|r&Nz%Yao5-^k>f8 zGXZTI>&%$Z~j#Zs36hJJccwd7vofiyej4WHi0?$Z^5cB@z>^L`QPAluI zbE?1On}$aZKP{KHFJH<@mRd0V5USy;N*jH~a6#xrdKQZaUUdvaxbCM^-?QA5DuSp< zSjdIq6n-_RFW4#&aeVa~TIpY@`gvdTaleh&vU!u(*jY6FMt@)t^?ufkA ztT)44&|1YqZXc`CXvrK1;yD;0Y4`^nSj1 
zTl|2NYef(DL)wM^)tll|Hl$-$MmL8Nh}9Ov z`h9dHH#hvB*9{sP8;=8ggj05uqT>mb9FI5O*duY<-pqhRibMqWXRJi+M?}OdM-`-_ zIU9W)bf*M3NDc4_@LxPVJ^u6isB2e?``N{JdzY<~{wu+oc$b+XwkazUx0K0V-?v&h7KXy8{SWKtUJI|lMMHR7Vqw+1j1UbjxRVal|BolQrltF2La zW$wWZawJ$0H(d-nMuw5hsrAw9nX>$V!hP|W%3s_O5{JUyM())Mlp3dLZb2>jpoiAUCLPNa72 zCx5zH_uD?et=x)^==!UmE!28HwvYVUZEHb$^K+Xovr9WRnnk8W%F9}{V((Puw>;W& z^i}2F?`bXAN|sif?b{#q-(G;6YD>ff$Ly@R-}qA$lPO8=^J4)b%J(h_6Hq}{wP=?qLEH?%(T>`$ z3)|=(_xCKPWKCA-Kj{NLZt#>t(Yfa>gMX1=J)N7d)WgzYQ2~4NZRt3#_Yx=5`@T_a z!IJFvlcmLzU3-$wyNC%wC~Voh2st_#>{;{V5lA|-bIZbvBp6~308=Bkofnj9b!J*u z)m_8)-7n{WdVaf~OdG1M2WT|_9aTdic$gl6_|pLz6&ial$fFEl2z|M)<5_0ue{DM1Xq_CxCpJTSXl;ZU!m$3wT78h6aQ0 z@72V8)7po}^kSuTIPEMyMsVo3iC#Wph$_F;l^l5_tbSDeY^hOF!1C)41C=!kA&(xkb}Zw2C>tIOfCNcd;fx* z{s3ii7O?jsk!ep%3=Cpt5BIIXn}0JTwf1pltpW+p`I_PX3?R+LWbdN4J>;@|Q2U0Q z_CL`p)L?+VconHGPvXGF_zQ38vPo~ezO0Q#j0%JK*3sMB_+>H|FHO{t4sN^et@ln1 zF)9dN*udNj&{FOP$Js;=f=6%Wt9mHps4wFeqYIpF_{fpq=r>ofi<`&08KW()o^bcx z3kp15cEc?YQTz|Y3l{;KN%=r;*YNWk5U&{Cz3 zbl97v0i1W_AH#k2lbN+Ll$blcy&{#B5uc7TPAq8qSg}#3%r$4X>StxGx9zP>O2c}H z%p&Bm*5fSnxXn;cg650Nvb6bqTc~(= z@0fvvsVf`q!I@h(vq_HwZ&aZ=ha@&RmsfgK*0zp*Ja@V%Iw~s}M zmtErRO&K|?nm;q;etlt&R!{}=@eW*Z{RKsF-i9h!y(ME~D#*w2k=Qb~)79wN*}`~) zaoYG?Lma(eK{vSy@_FE`pEM*iM*gFT4cCy$qxT^+*m0zFu18kG8Ic7bT_-bcR6 zw(;w$@WSKRlDy0YdE!J|+u<=x$2c~O?Vj6HmFrsAfTiwYk&DG)K}d+INws%G7bVKG zwPLnALlP#!72#cMdoW;W0`0eK+x|10mDYhPC*3xvbKq&tTTS>bWoHsb?q#w*n!hcn z#yw`w{Vo^49xFmn1aF9;apd#XCAd!^V?s)SRY7`eSUS@M;j16W?_u~A=F%7WDthR$ zfXCZ%oCP>%^;%Lz8aK;NF%R`~ToCb+WqVLhg76B@cIb)r(aAO@TXasBB{{`gzv9z2 zm!36bhoH0A4gRU<@o^V?AT|zf#Fp$@7q#Py@dbwHqOaG*pfd5iQ2AN8fVK8B)b8S( z$2g!+767_ZSf&g?-B2Z)P5tPD>=kxj!L2si(FwYBSvc-Ejtz`Rc~~SEkpW^!RjKY# zCZ;U9+qPAHJ^KEGw!29INSDQlCb*CFY`PY%|N9nPN^$ejr%K4l^kZLD8KEZN4LLL%OZHY0PTTk zyK#*GK{PQczn4mp4cI+g1Of!;uQ$uBB-p+hqoq2Zd_RfPIDk#5j0k<#@;sCwu-P&r zLqkW;no#?R;SB5K7j5vzG7xl@tx;IL_Kj0tb0@j=jW!tDK!$$UP=o`5Av0=q`I zlNuAo-7X8v=yLj|-FiWi%?ls3)y^VeDd*y5*? 
zcvH+0&jp?A(ROQkr}NVh&gRU(j@pbhL!6l+k0MK}VVFnZ?=RL6NZZ*(m{Us0?%KT~ zuqjB98?ZjbmzovexCd<}!3SoGW^#%|F50~0fy^8b)lK#oUXJak1JU)rr-Jw7#xltg z)XA!R_v4UWm=2kPTribh75C2$-TGx7xddaexuO%jW-d??KLB@zZ0^urty+04xPtCq z2i;FzUzjnlr?%(Ya)$+@O@0jS`OCt~~W-aWY<9%?kq7!n|CxjVW$IO?Eo1{=fO z5Q-eX{$ep(aGjVdIkbCl)0H|6Cg#ygo(GNvu)+QyqV08Yiz{{+iXbo*+w|6#XT=4g zOM?`psHc79@B4I47ag!_C0kr>h59OwaePp4H6ZKij)yIlH|adrU*&o6YF|aWXT>>h?N3*r z(!1yasAdn_RZ;bY?ZVTBOJw&u`;8RC#B*;(eqkvJrV_OV0R>6o(-tr%>D!;a8!7kQbe;3B9d1|-HU9rqBnr=^1&3FVe1XH5-rV{RC5hnW(^+RTU z?Wzn3=5E4dSz};&k>7z^RUxPk4Fr7<0=NAh*c847U2tv$&SJQrIRvvik70VR0Ub>| zDhOpLGX(&=z4m7cgrJP$Ou-W%@B;!KpurCWa|uA;%M<_&rj0pZqsM_REsKNcr7=sO zaFJQ2!u0r}vo4k}1fA z>6tM##0O83VB9994;cDXK^OUIq&nP=1iEsx1>s0Sh;j%!UXG;n6F!Aq6$*4 zvaI$)w1%(5@>DCU4`0T%T-e_8e>HecIU0@G*fLJpamX9_U2o!HygGW(x=c2Vt=>^u zVXyqwPbpoq{yS8vO@p1KNYQk1+K&#Lib-kry6%JKO0qLovo3&)l=Rq=#;mgU6LhH- zsTH2d_UMgaOxSEHaW5esZItB()t-2|_mkK6RA_7Av>qh~DP78kgcd5le~_S@j0~t# z0EFnftI(H96K6Us`M$Rc3jqHfV44FQ=;`kO`SC5ZhZ`fzgTRqLMB4&LEGRWSj#>i> z3VRk=D|x&-1^^PP11@7%hJ+V(pclwZ+b?9L4yI!H)>vGqe7YUa_Kiyzf3N8*s92gk_w9O%X!kq*uL|B~8s z0}tvXJV)cEZO87z`?Qu|?H9A%f=Ppf}|p5u3VS>lV|{6ASA^(fbMF!Tr|@2!q=rt5Hp;vKO$pS@0v z^iAHG#x+4&)}6RNDUZ$J;?hw%f(_#XT}tc}krC3_BdgX9^qfEzUcQ~Q11)Vm| zDWc7L-ZH-K%%Say7F@tBMmCONzPFLHeo?}EYN$?}waeoM3e`CsLwx1iQ^3H^t%Hk+ z@W5lfyq!z$lgOXMyp7!zj_t*0nmL{)9lC58zFwj$KwofTUT>(p<)rw`*_XDh)WaD3 zKHqMQgeNcx&FweRyO5jJlBJg|5PK{kxXtdZ+Muwh;-Z>gymZ#ttIFw%wf1=4^tbnh z8}6PhSX9T3VKr~^c!b<0Qbf5TSOyK~(}MtHI{9~| zu2SoVwmHDnA0ueu+HxzhS%Z{_{3Y2^>j9VtUxcjaZB}uPU#6csuTd)c%5m@T#TcJ8 z`*%u}b=#b?p{ig975*VlFz$OP0Vwdxk-tly(mz5)_sW4 zlGi^mY_1uXbtyR#zcR~ERXJDw%6LHqYY~K6mT)L%)CsA_ehU|vHMH)T^3r~N2T~NS znLu?;<-Cu^Rb1_x28c-{+mOuqKBcN|nsj^5@q~NEBR&SKKJLsWNL^6dxV1(ezMPf1D@F zQEyTLRx(S%F)0pFzI|=qg&_AcZAhKNWA~7yif-_(w;yzaqfI{SNsHWuAY>l%w*dQg zp4C8&aCLBp%1y~6Cz9)DjMiPGX62vijOaSsLRHd^Eb0W9UUnA%@dN7*ybWS9JlWNg z1JMtE2cl_{Lv1#{PeSgRIk;Y0W|c?#mERH9sKbC5s1hTh6_e{W!I!&iN{f7=DCdhy z1#7FJlLgmVITEaMe(L;YpKcpGb?!1I>~Tj2=!eUX)gZDWe%cuQS4ksbwU*L;Vk?CU 
zvW)D8RU&zkfa^?C_Cffm1B-uJtSgdD@!=@0n_C(0>~q zDZOQJ*f`R`cAEDzc(W}|Gv|VNSUBekKEHE&AHkO#RfJ~@FPl&gQJN4$HG^iY!r!z0 z9{o1d3Zi|gCt#fxLZxQkRv?N%e}cr|T#(atm;CAV8aM`%Tf>p0?@Fd<*rW&U*4T#w)9^;YY-ov~9)TA%6fXs_`i{OYRw%^)Opa9J*A2bATPXu3YdlWpYb3F| zx@240?1mIgj;c?cNGRxg5oI%`4C~q5cq%TF_a^biXj3k1-UZ4bED=lrZ2;*>p7O4KR$eEcp&=*N(Bw#k}I3vWs0#3X8`k9#vfXrCo z1TV4()(uYlB?O^LHR7PPIAACH2Kxm?z_XTy#z8l_L7)ptEPr?cvmPl@dP9J}v~1#n zeFkftFzm*D;SOLz`?$~HUS5MziHm~p3~oSk26Y@PJjFB-9tXvOF}CD7htmf>WRs=z zu7U=K!TW>2;(cIX8a!vPonvyN_M8y{gYISy;1(uZiVQrDGoqalQ$8Cy)hHO-dGX445(tkNN(6b-D$ydYJdSm z$(|g+p_xAedh%thhkG>%3~OzZP8?1G@TXYHg;Ub8xS8IaslIFgK?uZSYu~bQ&fkQ* zums^p;q05f;W-Le%OU?=P3l!D?r4KOfjIdiM24DP;a!8m!hdtZHsIa2g9D_olw8qP zoAbX zFK)eUMOVabutyC`Af9eH(wP2JfYF3UGtUGlcl~Y_9RxjY`vAj+T800S=Z!~Uvxi&B z(X8HXzhfCj)G2aQUa(^-D}cQOQ)pok?z#E{p+=?ZrjeX|gfAo_t d0xFTg8_o4jX?{4y4E(YU!Wx#dcnGqY#*p0;Q2J-?Y-4tAE@9AX>*0B{ot zW+VVW!bv3X7Y6<;U+&w0KkOj{=P&@^dA;{XybCTl4--Wx=1!E8!9JA8vzNSq$jC@F z-yr`mud^ZEYQdK-G&|~eId6XC$~}ow`MWv$hX{N1~f%!u<8HI3n$T>(?N{KVo{f9hDAdVt4h8>m-Oe ztA{lFm#XYBYqB@|j{*0%0yu5ZM-WFOwl-J5(9l4lU;PEJ2{fR7r5b==L051GYNcEC zX#%&=b(qxyGz>pC>S$~Axl4(W3Yv@13$nP>TBvY=zz}*}q&k?)OVf7gMvftjv4#S} z$=b@Nq^?FH?10sh%Wg#F*I|%7m`=b8Sf!je1q_L-2eWhpQxu7(a(J(D>)u%|dFUA3 zk+JnV8)Zyvr26>1_ac%qyg>=79AOPJEb#TtbBcEXVn}>lzdGoa7l9<<{?`HY$HX8r zF*{Foldwb6`n%?rm$Ea8w+R;z9>ODu+If%E5#`)&rcniBO`G!L0kTU%WI@*cJa-43 zlm8rzvS(RB$sAkvX?>VB^p5Bxk3))pAi#s{dL_Dr^VMAgswB3`1S%7wfl+K;3O0@? 
z_WmX$NEdZ@9MXaTcP*n@+ivHw%QFH!xNPKXlN4^ujPHlop<>RN=*m(R>2V`BP{kI@ZFBIU zF!4jLEuC*EfFk$78b9{EIF)BdCA%*fH-3?u9K*T*i&f9ZfX2*(u-yI8X>_+h_F>*( zk&(n-z0*IUA4uDhWr)h}2EmG=TnA(Q1}tlw?tAE6)-Wt!;WXKR>_L(9YkTaD6yeOn zmOCF+r)4ztgAH_1)MJwB{C=ocH}Em;)U+-+eoCP)R?Z#igsPWtdiux%M|}Slnthb( z(3C=9XMLsnpPa&s zu9F}@Xcj#|$~=wgQizqRzZg^9r+Rwzh~mz1W5>uJT-RymT6iztS>OH!EWM+>ICbCP z{yOiU9_+{`ciIeDisk%$Eg40ZkAF-Ww%WpO*+U<^gG+GXXL*@Oqu$MTKR%2)jQesI zJ{aO%L)Fg|(xalZes2D4Z8c`)J+1mCwt5oPl+l_=(~k0#jWE%}I#n)LI7YOZ&tW&; z{KB80MIA0!w|-ou7Ne4x-*ju!bd39u>yCmIsXIb$l>`wV2S0rF4EgldYR_I}jxa>% zW(TLrkg@mq={ZAWm5cmCpKFHkuBbdTGnk ze935mL^+nQSVb4p1O$qCheUPxbb%q=y8QO2I~1s1c%46Fo^0e0D&>zbKpFt*r~&MN zyu@;U%O=PP{sud!=!YYOb6gsHx)H8(s64I!(Gf!O2rAEBKXFUQ$!V#35#@oHv*!ps z$+`1qBf)@b4@~ZVv8(h-2D*!CZ;@O-4b@vsAZoy3>V0~R+ZyjqOpyo>rG?hz98zga z`)Gr;!?qsZ@cEov3r2DcL>cWI3ZGPFbwbB`Q*M(S0S6;n?1B@$QvOD*%Y%8bze=Bc zrBQP9XI5&M&R`f=k@d?%_5Blwm(Z5i`!y}B6Nqs65Pj-Q0^jb2^Gf9fkdw-b`08-{ zgt|?7AW?stfBV}r(n`9WV&tW(Ou3XwB& z=1tt?^0e8Yf;g8eDy%HAW$zTXwqob7bXd0R%vqKf-LhS!pYB>Je!{V@QM zmNRceueEPHw^!+p9dhS!t_k_ybUQEx9GP9_6#;NWzXT#=Pra4D)xF2^#fy_^cGTK? 
zubTTP`_^yfj^bX6^S|ZEzX-eW2vZt;)meOf`TwLNHh}7tklo(B{b!IO@6I8zYEs%- zfULY5^yqv#vAA@Au1BQ+_YgMN0``T+0j`KLbG(diUj!-wt>(F9A_C0J`;e=q6qKJ~ zLim<6$@wggo4+-doLK>^hb)iMOKM9urxb82|LAf7rB4&6Rmd^49p=uhz)7^7V#P}% zmBxMEi;^tbl)~X72q#G=-g{O#SBn=`kr6;_+f$ zJOQ-i%VFJ&4*me-;+00K>bWdPMu ze=K75R=;$B_kfh{P7YN1Z*I+B$ThF{+(~ZO#J*jRU=8h2ujSbe8!G;rGDN%aYuD|m zglRT9#o+Cp66BenQ<|FhXu6SXDgLk?DfNt9YuDLp>Y~q{g(uRG=0+IZm;cRV|3^#z z|JnYZ9OZwUzCHgc*82>?^x9?wNG@?{Xf>v(sq&TsHi<#>^Xaf5bmT#qwgmDH^N*OB zW58lik=h2l;2P+J?iCi)p^qTzfVu-)o0-&1WHgO_OayQg_99>hGl>_BR&NJlK^0g8 z{n=$+*g}aOhE*&>X3tpt_awz<&o28+nBk>s^AUww_He?xeN2yW9yZ0pydUod9sBV2 zNG~@b@B%RP@hYnnb`v(k*DnBcrYN8vxyP)q<({rg2BwC)sXK5f*OnpdIIuAL2sTe} zU1pM@J@?h10ZTeYmKm66x1Sr9P?*4m1BZ+I~b1?$qKvvKJuI zf`)pbZ_x3VEd<-&e=P;E9LNzN>ZB|#pk>pzntdshS#*td6ECsOOO|7a)2;nz$lEDF zh4K?XG1ou`hynw_a4-|O>37YW+E4Xv)Ajxd%EFv663^@*PZS7832!-lbt50v#;%cwKwon_mdXLhn`^6hNLHG5O zXXwE0pe&ZT4Rm8xtI5>~f2c+Tk`hd;nTABEKOUflg>+kSne9MfqXGWYQ+b9XY;4{C zk;^phQexD?^_mpH$!qPgxV^wKA8r6|I(*$=bW3rs(C^v~Y()2#w2y6{_1iAY1m?C1 zF;fF5Vc!wcn79pU$}{6x0Q&QnlQcc{dpT=>1~9mZHQ)nmGqzFq<86bt33g{ zz$fJciwdj>4w&)*rU-bJhm?1p(#^u%fF&xa$*Oj`)`^2sr!?;1ZX5|8n_jOC`jEYI2h1*9jCV zY{)Eeuqu(U1M$!TG>zUmMD*iAfjKk0Py{q7Iue5j19veVn5{af701womqhu)2k0r( zPxt@xI%Yx$IfzmO%FMM(PwQR37c+UcY)JQ%@;|zUbu`=b4=cE(`!8Mg4M{qo?jdg@ zBvFzI`8t;kODu_{f++CIgf9yZt0-yY%KzPu4~>&z3L3?K#KZF`Yx9>_q(@J^-+ki` z`7YtO^nZ!veA+>M#;&UTqQGh1f8+j@gA9b}jVY85I>kt5Z+(vS8Gi%{k1Zp**-~0g z_0-cGub0H!`?roHx7cR?(H~Z%ssXkm++qfP4=S>zQ43sRo8xrnuQ-OUhq&&l1l6m| zd?RsF!xrG-5p$(Q{TwI^#UAt#v|$I`6J7bHzHrpp(~_QR`krMZ#!R3D10IwLU*fuB zhytonzN!&Kza+0)x#s=8Y+BW)AgcL2>d&`pH1SmHSBYYY1}_ zUY<7v9>?G_dIvFi$!tNcN-0J~P+l7VhJ}aTIJgt$rgexe!xqq3;+Vv7ZX&L#C+L7B05TGN&4sJTLe??t6%XFu#^NT2vv0?y82qvfK>oK-p1(yTQ?~D zkG?1w0=H#sIS*_fl`5UC_WNAv0v{xaJ(l)?BAbWpXl`Cnxvf1{G zZJ#Uvo~+*zgm|?Ir;vM_T-gM zdh|`99pp!`t;xfk^0qD%^`&Um1nr%Fx-H>BML*G>VV@LeDeB@|eFQP^P}Ea;0cUH{ ztkJkH)9=<;d(l?Tdf(UUjS1TL7lV^o0f9_>d30i2)uFOmD-jv-CEbJj=Y@tG_-FN) z2fmf@u3hu-5oFG4X0NZcKg)4h`}XEyL*mA5Zhsf@N!tob>Y0?~M?PfB8O<`fw@%CG 
zjZ6DhM8yY$-l#=??#sb8htY-W-rm8*wo*F$j&B$E3>OuiyMDFHVgnpO=DDyi6Xy(< zY+0#O7RsxdELZw`wszDuw29qSzHvXXILy+uh8j1sAsAes2~8%(wWyYI4|=a zO*t}5?d29YZ_Rv#liYgzeJhiq)EVEV+&>@o#DwjRN>N%?bRY$V9=frt>fJeIK^M9r zHk2TMq+5lDv6D{}BQ1qL6FNE-N3`)KjcF(JfpyJk4?g1CdzLO~Aubd-jx(FqK=K9< zVv&~ed{~#cwQft%4dH>Cl3htJ;QPg@(trUp6x4>?JxNv%#OpOuPps=s>%|G<(voUY zwYzse!)RD=%SWB<10fnqDc7QWn|eKhZfRiF4=su2^uKV7{0-t!V<|yqGvQBf-GG{i zV*>p=UerZE>Q<1M-~D2+g^=Ax5AovM&?^SCsOsOQzboY0e9^1@#y~Ud7O<;ws?QYq zkEM0k#tIIJ*1v!rFlNe+<*?qxw>g$`f`LC-RXIvgEz`4vPp>^3m%2|i44^BK9{XUA zEz1e=QwLG@Y-_SDro9M&6-tg{y@vAk$F?TbqNiDHz7#o9@-Vu2zb&)azK$06-Cum`Weg#3$$**B8jXyE`_%vC zGIRM^WSW}bKO}L%pWg*pB21n4Q-iOc<#?Q-Ekx3y1Xo0PvGHi7-qsZfi6x3WS3{6bxB^YQ1C@}~(|G{}Wxz((=+@InXoz%j7vggq>YM=GnX%Z)9-6rduz@Ho+73no_G3LPdfQgZrfv2)9D4GP2P4y z<D`A#3#+iKAk?iM{EZ|xH@xe zrbFGLoM<;iV9FDCDs}ZPMq9Y9rAba^;N)dCe>}|$$oWvsH(2|zY&b< z;>Npx9vgn@1LUR!o_WKcxNR&6QyGop;HkV!s%LL`{O%1pBj0Gu2&qkt;hIL^;A zKmQ3Dj3J~?m`U|DG2|>7Drgx+sWG+L@Znv9H3`wDEE*|iDJU8}O%#}!y~yGsyHVs| zpglR_v(8oh#;&8;rA+m*0&X{%mrLHN9sORw6fY!&yAK#uI2$bAa=vu(%jTgb; z{LREKo?XiVVMgU#TeOG+TZn-q81550H(waa4d^VLW6s4kv8|oJo!LyMcbUzra|17r zpei3a*KCHLYnsB~7&4DmvoWCQS5!7?)(lU&cE%DfP2_$W-BYi{1!%5fo*+&>5?6Y^ z0a?SbHa#brJ@f*&r~x=GdV;@wN9STkqWs~mIfft#_zd%pDWekU56}L`mHS6=e{00ms;m zKnw9TdB%E8yE>o5fHSk9k{^gU)H0ituV5c)A773$o5J zmlxB5Fp?+~SPDb96R)}9V*ov=!rbMz_ivUl(3Kn{P0I$(%~nBWSsQ9FW)r|WY`@hS z2~zA&4`h3B13z`7*0nWal@>E`I{i+tZ8H46)^L2KiS(i#!3C@^zJf6pu}wHe=qeB- z8!Odxxuxl2N)Qrdx;+!c6l3SNygY_ubY-D=FqOF22aWU`>&AVW{tXz=1a+aW@NU|S zC`%EJ;RT;5aQ&9FB0c{}KV^THtqWOziy$%!$>9RaX zq`+&jWPJ_(d;G)6UoOb_7H{i@a89rNW5qO578?~I8!(?uEq!HEL^wt%1KX+zr;ne7%vMwF$`l0)k_v-GkLyFYmTXCM{3;qa=74)LtlS}M?Cwbl4 z@Il4VxUBx`QGB|IS1T-Hg8gdr%RYH7Rr-|ABMPO~r$`?0g4DMD!ZRIyR_=tH&clki z`J^hX;Vy5+TKzK{YK5XY&3>O z@l3|lu5dv$bc1b!e#@L({l?%9KbH}!rs5h&PB*Kq*?^VPgHJPx1z;eP#~xWFlmBGS z(PTQ{WzAy>u6A&OzI0W8x@*1eiowzheC?9{2Z|=#ZRF_ydaiz=oYm012$dT`GR0E<~WEOrviw!}X?k$apYFE$ivwPiCT3p4{K{2M*u4)%{7!(Y{*q@^WJIR! 
z2u{BT@7PHZ#d;BR^7e@w%}7Y0m;0h4h7h^+U~FDW8_<3YOjmaCl6GGH!9oejB9!ML3~YJkXO2nDD$b zA;3YZ?a|QZ?GGBJIWv?q%=;X~w_om3Ru{*JTAa$kYpDxpgPZ$<{_cY9{mH%^BzLKu(JkRpOElP8hpfy6|wP2yINgUdMn^|LD4R(6_)GZ$QmZ=juxjt%w zUn;mkb>J@FW|ZscuYQug8-wEq2Yhvg>fHP=zPnRq4uY9N;eV$;^B8`2i|>AWzV1GmN!VPbUstN9f_<$FO*$uej;MeVXiLM8B%NP zyFa6rDu~*o<@hKp`La}Q?_Bx$iNN`>#2}22{$j)~M{2z5Wm{Vs<^7AgOWZ(eL5s=r zYoV2>)yJd!TYbIf$!O;O$0{r(t(BmdJL?ho=u&z_C?+S6t3> zhcB;&BDCS1nQ(ATphun_pG}W)x)W#UDG`%YUk;xc**yBj{rSV$p&A~Jr&`kjwxUuq zQfYRf)};?R33uJhrOG>CpZI!e%ccrp`uTJ6_u~-bAH8?i`fxC6PP8t!*sEYoWk@tj zVgPSnOO{8}t~3mYoCsZ{Mb&UDaWiX&r{cq%b+Wa?*9c))5g?-?6Rb+lYLw_qSjX!! z;``34D~^~6$5gA;EGQTywBlen!ThW;$m`H{{N{lWwds8(#7=zHbAd&h@4N%v1NNYf z@Ye{Gsn4ZYt`Oh$wAOtAmK^9ZTF^{1@-n!5dF&K)@&vb)a;G(6?N_r*W7>Wr)B$__ zC3W0_CbX8;N+Hr5#W!hBhEsOhJ$y#~nu9?>95Jfk7CqD8t6})cF(y(61KwUQ$-$)r z90@rztuQvmwnoY_X1(pTEpdTi@tKeLFoGvX=y0=6@c~llUEHZpK92~jANKv?&Pao= zIr~}^sb1h21RlF4o00@W>=uj4WLFl7-hiUF!&p8kSSWRZYlv$-76$mVKr|EBR z@Bj;rQ_e{-Jd3NT>(~3%kAG zE?na7l+X~|dj|pE8Zg9;hhgV9hT$D~9xh9AXAd}@V?aI~qW%HKVrZp!nDYsw_p;^S zNq96#S`xbVV9GX}*V@ZRBC7SeBnml1VM&rVCwPV=*WCkDU>FoeM8@qQHMmoIk4IGZ zhQ8rUSpb81c4{0C!oBVvC?T)fw93qma9_H`}3n#)IoYi4mxgiHQ!23~V$JE4a-s#JLH7%oo z8`eHEsX5`QNpKv-Gl?{|X7+n_ThzFc%7c+%@eENFC#k zT#DmxVk}o=;E3uy*k<0#v>6n{J|0|vgWr^h(e>Ci7!#~GaTt#zk^<-0hlvf?-3!$D z(l63PTHk$ds{P5XVFV|kOvH*t&-uc)?wMq;-p#gJMd8E|nI#e&9}8c^@2=2wIB-N# z{2|_}Ny}3ncl4Lc}KW$5{`v=(U1@Y}Jf=>C% sgt1T1YpA^E&i~e%Ciw9blXFW-ahrm(QhJVsUwHvUb33!jW8`c90~=RKX8-^I literal 0 HcmV?d00001 diff --git a/packages/components/nodes/engine/QueryEngine/QueryEngine.ts b/packages/components/nodes/engine/QueryEngine/QueryEngine.ts new file mode 100644 index 00000000..7059c260 --- /dev/null +++ b/packages/components/nodes/engine/QueryEngine/QueryEngine.ts @@ -0,0 +1,126 @@ +import { INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface' +import { + RetrieverQueryEngine, + BaseNode, + Metadata, + ResponseSynthesizer, + CompactAndRefine, + TreeSummarize, + Refine, + 
SimpleResponseBuilder +} from 'llamaindex' + +class QueryEngine_LlamaIndex implements INode { + label: string + name: string + version: number + description: string + type: string + icon: string + category: string + baseClasses: string[] + tags: string[] + inputs: INodeParams[] + outputs: INodeOutputsValue[] + + constructor() { + this.label = 'Query Engine' + this.name = 'queryEngine' + this.version = 1.0 + this.type = 'QueryEngine' + this.icon = 'query-engine.png' + this.category = 'Engine' + this.description = 'Simple query engine built to answer question over your data, without memory' + this.baseClasses = [this.type] + this.tags = ['LlamaIndex'] + this.inputs = [ + { + label: 'Vector Store Retriever', + name: 'vectorStoreRetriever', + type: 'VectorIndexRetriever' + }, + { + label: 'Response Synthesizer', + name: 'responseSynthesizer', + type: 'ResponseSynthesizer', + description: + 'ResponseSynthesizer is responsible for sending the query, nodes, and prompt templates to the LLM to generate a response. 
See more', + optional: true + }, + { + label: 'Return Source Documents', + name: 'returnSourceDocuments', + type: 'boolean', + optional: true + } + ] + } + + async init(nodeData: INodeData): Promise { + const vectorStoreRetriever = nodeData.inputs?.vectorStoreRetriever + const responseSynthesizerObj = nodeData.inputs?.responseSynthesizer + + if (responseSynthesizerObj) { + if (responseSynthesizerObj.type === 'TreeSummarize') { + const responseSynthesizer = new ResponseSynthesizer({ + responseBuilder: new TreeSummarize(vectorStoreRetriever.serviceContext, responseSynthesizerObj.textQAPromptTemplate), + serviceContext: vectorStoreRetriever.serviceContext + }) + return new RetrieverQueryEngine(vectorStoreRetriever, responseSynthesizer) + } else if (responseSynthesizerObj.type === 'CompactAndRefine') { + const responseSynthesizer = new ResponseSynthesizer({ + responseBuilder: new CompactAndRefine( + vectorStoreRetriever.serviceContext, + responseSynthesizerObj.textQAPromptTemplate, + responseSynthesizerObj.refinePromptTemplate + ), + serviceContext: vectorStoreRetriever.serviceContext + }) + return new RetrieverQueryEngine(vectorStoreRetriever, responseSynthesizer) + } else if (responseSynthesizerObj.type === 'Refine') { + const responseSynthesizer = new ResponseSynthesizer({ + responseBuilder: new Refine( + vectorStoreRetriever.serviceContext, + responseSynthesizerObj.textQAPromptTemplate, + responseSynthesizerObj.refinePromptTemplate + ), + serviceContext: vectorStoreRetriever.serviceContext + }) + return new RetrieverQueryEngine(vectorStoreRetriever, responseSynthesizer) + } else if (responseSynthesizerObj.type === 'SimpleResponseBuilder') { + const responseSynthesizer = new ResponseSynthesizer({ + responseBuilder: new SimpleResponseBuilder(vectorStoreRetriever.serviceContext), + serviceContext: vectorStoreRetriever.serviceContext + }) + return new RetrieverQueryEngine(vectorStoreRetriever, responseSynthesizer) + } + } + + const queryEngine = new 
RetrieverQueryEngine(vectorStoreRetriever) + return queryEngine + } + + async run(nodeData: INodeData, input: string): Promise { + const queryEngine = nodeData.instance as RetrieverQueryEngine + const returnSourceDocuments = nodeData.inputs?.returnSourceDocuments as boolean + + const response = await queryEngine.query(input) + if (returnSourceDocuments && response.sourceNodes?.length) + return { text: response?.response, sourceDocuments: reformatSourceDocuments(response.sourceNodes) } + + return response?.response + } +} + +const reformatSourceDocuments = (sourceNodes: BaseNode[]) => { + const sourceDocuments = [] + for (const node of sourceNodes) { + sourceDocuments.push({ + pageContent: (node as any).text, + metadata: node.metadata + }) + } + return sourceDocuments +} + +module.exports = { nodeClass: QueryEngine_LlamaIndex } diff --git a/packages/components/nodes/engine/QueryEngine/query-engine.png b/packages/components/nodes/engine/QueryEngine/query-engine.png new file mode 100644 index 0000000000000000000000000000000000000000..68efdbe0090cb34c90444842d256b0a52f17b4b9 GIT binary patch literal 12383 zcmd6Oc{tSX_xF2dFhxeCl4VeqB1>f-Ygxu5l#)Uy*|QX5XJ%3%J7r%!F(qUv*<(hD z$`)BdW~O8(V;_u}=dI7@^E}_*-_P^CuIIYAX58;{pZlD1pZ8f__bvK@8Jd?{h#LR^ zUW~DU1pq+7uTWqY7x=Lf*tZFO?DjFf<_iFb_dCCkHt!d=z(GMj!>fLmyq*05ZrpVO z0s;aQ+`K$|9dGzJDR|#?$@;A&1OVay#^B85!0h?akT;^1VeG|#-@AAJKwR9nOQrdd z9xLC6H48cF3#N@x!U5GWXb>HPw&T8 zLwKT)yYxwcZ}7lHGSz-p2k;!GxreEtA^Vr)0JMZqh_{5{NMg0HT4b&7U%Bzz-){K> z#9Ju3EG>$F+Iv_m)7U~`9jYtz!h|sX>P}`e0)MU=Nf#z>;q774t*uFY@L56$z8uCj zYqo;V0WXxt@M^#@z?<{=ht{M6Q-ZX!G!HTkl67-zB@GEYxv6zhjB$cSqsb*_P9CC} zkkyQ>cB8rx_j~0EBoW1j80YQr)+HeTptL*y>KALl&$5u5L;3)@Qcuc_9d*=2d4J=%=xS6W-ZJNk6(kF&DG{PEVhB&VOcW%q z;~fhkRzBMR?gar*0h;&~%%W-{3PO!XoW9}@^r@ChfoZjyZc`;4lu)XhTX%TW8O5qAsLwy{m zy=0j(&++}C;#yLlP6AYsEB6uNv&FOMNBIBSSK(GlC>gNIpiRzuvw@Q=2;fUZN3ia>o;{p()n%+3%Z|p4p{(L z8mOo~Nw%qw$k#W)yBJ}aqZ&yo*`9b^@#`03DY)B3LhMvMaJQ4Yhy`jLPgmvWX~d1U 
zQdQl1XkUq*8o{{JZw3&zQaIi|V8q>Z)#iFzJ)n}>r&yjJWCe>a!^WXM6#{)6k%sWY zKM&EwX`iRWlV4vo&WvHFPKoP_h$8je9~ri+e^i6C;{^uZmL=buJ2Tu`u&$JapLMsh zDmnN&S;c?EfyT9Sb07*L|W@~M5<>R~FLjQ7A+-1jF$6GcFO0EvN(~@a>ewd34K`y+R(r8fRvuh$A z=DC+9*0@j!`^!fuB8_xBa3lHV_XBT3H33<>mF@Kp&FyIi#wfJ$5y*wbe6@Rp5owAt zt&eEq=!LDr0!Y0NBOq-J;dF;&<1v;`Z^+pC(MGhWgGR-DKTTJN&ndV2fhp-E(cd_t zUQf!M7b6qU$z-Fjglf~5TwRa}j$fnM8?)mqH1{vA7pU_vGx$JH$jDWmY_9Y3zWyh1 zq@@OS60{B?Tcbhl{N*9RL%24va4URW*Fs!PZ`@Cuetcu((>6JLV!`&`!PAk!2%U(k z$HCpiPAZ$Zv#=Nb^9@Ql0z3pyzj0aJO0c z>$*)mdKOuo@ao-HfBVr)1$Ek(jh~*7T>Sybs-Yk9_SG9Bb#rDFF!7>J#4g)4mY~ji zW@CHOKLm4Rr-hs!N*&Y&k&PH<_ztH%wrl?Xo2V(~YEM4;T|Ms`n~{-(jS?AcoRl3T zQL;!6ABVBM} z;d(sNuyV&lhu{iZl>b3=`y%bN_vimf#G$bD`d^|qcX}X~-y6T_an}c%0yacB|nE{~avc z#&3P1nx#S&RdAF44%XxlPJA3RDm#)hnn?=z*SR8P$e)Pl55b%b)nOYZ}{mbje zNLykuiNb%)DD$Ytyh27@fw;gbVM|_J%vIT7_L!XOqeG4RYIz^u;5sR$z9$`xs=cjC zUr0@{bN7|-(eT;bnRTm`xq2KV?K1CqL;<88>`c2%PL@3Q5rLP)aK46#V=M@BfN}C= zac~pl^lk<`Ho=&{09@)n|^>ygd2H|FwFmr z^P8LTJoL8t4BbnO&d%6#UW~Ej?l4cLS^>g@ak3+sUw`J5!zVp?uG}KexW^;s9dvxu zeSivqB2&H1(lj6j&q@A3{@irzJ8IcPc36&POfvwB!vum8`T1YLhp(z9j*r2H{7XFN z`(x_Jbwo3YD{io#tU=Dkzs*v@HRi7&Bc$AToT1);85fHu@G&C}`(||)U7w~zzDTYP zA_>L4Q>OVjARF279)z#>qOdN+#DW!TD(rz|@3V~ns0&mLG8;`*_w5g+8POK-0f{KJ z;%*U|^U`)W55_95gWAaK+2Sg?8Uho8DR2(#C7oI;8GsEYmF*ST6I;U2p3#6?bob|P z5w<(1bs#CU=(r-B#dY-@vvu&XH8ga$I&r<+lBm>;@;)g4jH2YWysnE|saC{Nv}l)U zc0bi>pATx2?O|K%E2e24e{u?`TZMdJIY1h>vZjFTttP_>B~jFm!`6dIUP*H&TNj!P z3cr%5cO!A|5J_=1;nbiu-WPkal#P_~=6_0H;NGs(ag>_NI| zZ{ArXfy2+W3FVhs9d8N^C19*zzoQYo5!HFk8KlhKWvibD@k>lDx0C5`9B;6c8wFnj zmb`ts{=nx`1i!lVETTZ5vA!ofG)wt#pAFOk@|N?$!yNoFK>}3ZAS-Jn{|P$6H`ZuG z@kizkznXv_GcghP1t7!#ez8OjLP5xJcb!VEULbecn@)oUIHXt82&om}V+l1h;@J?2 zNVdL(=^WrU2*=0J!%4wvu-d&0AFzDh7=ldY-XS)JvklZH!>w?pK$m2v=oIVh%-%M@ zuU)cNm`On*4RThz$=^9SMolmy5`{1FIPkzPEDVx zmLf}#$H@F(OnF3L`}5Lc2qbUVc|pdOKUsm?Mcm-`hdHMhedy*|<9a?&w_pAaC;gCv zlTZA=N`pKtt9j|L&sq#c_#vno0Jd%^6rY->6XRy$jm9o)0&17~W*{e^p?R$?Kl$URGie=b82vnLD_3(@6ycmODPm)%Y9lb^4pt-cL6*SzD)1UC+ 
z(RTLBS-Pi5+&+7!gJ!2%t*gR~f1L>T^hUO%Si<6_c2z0n8|gF_8268 zu>H?=#BZGVDLj64CiHXAqG`&Uaj=-d%7Sj{=4D>0{t7B%C-U?kiA7Qsods5u#ZNZ;vP)4bM*zA-tF=dd>UjB4HtY+N31(? z_geO1W$Vm;mcN}oyWD!yo;M^a1D@Bw31WN@5@H7eIL?$Q;?H=We?ISPOxEephZ`Yj z61bi@pe2hVTYp>Wx#jA?-sSJ4C`A5^QZ5RF8RnO`?>q;~jn-X!=oTZ^?t1D!O~`5EbOEPQp{ro~lA{|ee4*LS808V;b^m?85pArh zv~gz5ML9(ElYOGgy}x{+uk&(_)NG6`qrG9W-#A;;$UodA=sz~ETn}49x70ig3QofOu zq+7pTqBlm!+fQXYEFr!0?mtNULefc{jG5lPD$^ty1DPbsn78&N=>YeD_$$e6_5g5DDz0(M%EME`q7e;rpU)wq#Nocc0`qI9_8(AQ_SaN z-3C_mrzh$k`uUG2PAu57rc}%CoiE7nt=2Wh1?+va3Z>onIWjtd)t$Dxi@1bO!F0#0 zXS(pZk*^`wJJ2^-EKrtZ*^kjJYPFpn^7KrUm35ro@y^f~8d|A*QjR9q>Mw3>7~B== zKOxt65-g%dLSBH)4)<%`X`M-hW)0Q#8@*h`OUzT9!Yg&LZ^pV8iwB=N1Iw6&*Xe2% zvGZ`&=-uAhY+F# z(cWqLv+34@SI^~-)N%V9T;%n14J)GbTk?cuF#I*%l)VVRN{Kh+l#C&cG$rwtHWMmW8 z%&?nq$gx?NDTXzfg=!*S7Ge?-FxY6Xv6%|KeN^(p-{*7Y%_W9-ubv4FLB6Dx1Yj*| zr`Adl4e<;v)vL!qY-T3jd>_>gKHH{JUh?_SXjA{iU3(IB z-+I=D++Co*M_m=vFz%K}XHebeEf6xoQWy48D?AVn(>8AcP1xECq5tUnm?e$C^_j5ENLjC0J>NYq%Q~r zZ~>g5dHS4|DE(cF#RD$$DvdG2!JnP4CRB3+}hM~lM{h`fpkl((fE91M46VPRq{RBp309kjh&z{ z+)i-ugl*6axnEN^pYHbbB!R*Dew`+9FDR7jSvYNG3XnD8AGR*@>`G$GrXulL`7XRR zi0_B8&y3d!2BOsU%T555N@4MRC~(HNV}Tny7^N!Ab_Jnt;^_GBgtPOeND+1p__Z(W z(4Fh_CCl6?LC89{T~lAMKTw=y#Qgq44X$VpI*M<%geld=a;0>SOe_*Vcaa{G+A9pQh~kM2N;{8#EyITUrY+)Dx_;;;n6hdcDho$xjfpWK3@z30ytoh?WiNkJ zJ~Mg+&iO?fFX#PxshQz5QFuHQ#9qtX1TEA-IY1P(D8CR|wmdT;L@NiiW#f?$?1dGR z+w~6GCy)4S@ZXVj_>ivW&c3LcIE>Tn5Rc>A0c(Q!n%7$LfES~n--4>{7~n}Cmdkik-!^uY8Ar>V&G6o+5!Hf8Zy3<@?8+Z&m>u8ryf16zHxffxBmOIuMt6BIs%vfFUs zzGc}|wZJy&W>Nnfa9JRXQ#|-HEF)aZ)3p}8Ax`VSD zKyUtvuWJ^(oEsLE$ISp?=D+Dbv^4GGbzp-MX!Uq{@mDCTNBo&!gJ#CNlKvk4edA~0 zk5uc<0Jq6IY83&rB|e00^=H2e^d*6WMN;1G4mbji0M?+@VvK-xf)x3v0|-Y~oJN0) z?NC(dqoycejQY7ncD-O_=(+nQd9uD=bk5PFQ_Do(yOi%>)3MVwMUfk`2ZtW2t=`z~ zgNsQu9WkV)SZW|7n?N*imtY~JFvY89wLZ3r>_+Z8h12iPAn9%tjJ!h8R9e)C)bcR- zXTKy`YSst6rKlpbiP&~)V_e~PRUCT>UE_!)?{R>=;+@W_Du4DkG?Iaw^ie#DN>tXc z`aM2sj^onFB$56w`>A?Uxc6@FFFA`G>7-%nTQ)e?^xM8KCTU7nO^Vjf4>PyB#fIY5 
zoa_WRQ3;IJmCWHmi)Wg#41ZRH{IAS&LWXwhugjn2?oR9wOVO>DkGw025A}*d0B=vB z@Tl_Lp0|G-Vhza<+uCQ=WNDXFLd!PLg=I_&sdDKiwV;rBkD}gD(@;%nC_u97>jP6pNg)y%)80qEE zQ`*Y?sWIZj*t5K@fM(@uT$Ll`fDmB|O@t3piAghkW@idH1nZ7LK<9YA*AMB&=)@G) z^Qr7Ks^u*wfDLVk?Lj0oP{+sCZ&wv1#I3cNZO2*w&)&dyYo_P^2mpw+m1xb323iQa zL#h5UPT&hG8Uce8%){R*?bHs}$>m_O2T{b&9hL7lZS{li7{0N=API3jXZmFM@iuq? zG#v7qV@+^^Y8ddwgar3wxr6EIqCHsC(4>e%kY}#~#X8j)Fk8?_Ge^sTJ{L{e4F_w# zy}dwjT4L^as?llQTHbF;sE_zGsIJ%qwQ74$*B?w^{|+%HJ+J2o*Bmbs%GOcm`M_#RT~u8bbzf-KHB%+1Z-W@nX;Msx0A_3W zeRMky@7d+1M`P7G_A6%X(Y&Vx0DQEay8vf`=e)ajPUiM}Klu*FA9l!cQ(Sr3YD}CQ z9|BcZM78VtH7!Tvb52G|_abFlM^>YPJl zBavsHwn8QoDI!CX_Rk@jXiV{pUavZSDtE$XwFm5tcms|u&U-Hh?@C8K#71@5JlW_h zkI#f_nT1M>SeL@=Yz77XYDLAajG${sowE{-wh2>jEy4bD=C9{@8+@gaZWJen{>cYq zFwit~&gwXPK0GLe^8I7NHlWW@)JJAs@Rk}6Lv;F@4|b%Q96-2U5OKR%hVf1oBR}x4 zo%O!|1G$B1A1!nfI?ZX>Gk@Ep>Tvq^_b9{3vOgR3X8JYT2kX4%ZD_if&{9+WW2RBN z?2_N_;tua{tX@_(Q>{Ej38(gGo%82NzfV;nrO+U*%Wg-F)Y;xi7?@_F&RxttKBY9-f9y4dCph6r?4k?5P%bUX^LBX zRxZh+5|=7Xi9-e3@Y%v&Zst9+H~wG^c~AT%NI}~``nBy7mrb1$rTrdx9h2xsSsX(} zPGL{|0TB|;X)ROVmci{Vtj1jSZ-A+RZHmz)`;WDhp4pp!a|xv5%0ycQcBy>2q3r9$ zmhYdBo%h5^MB9n|0ZBIh`n6+QRPS-$51Zl@sf@Lgnbsy%F02~K;G7O`)<h6~H4 zoDKNTY>Zfz`*SHsvev~+!tx(3+%TnWYI*#DR;BwKayJsnQX%Md2GV2Bj!#RExuBC; ze=j)qXpo#wj_4`eEZA@hJPk$C=?D&m_mv|(-*{_xF;1VG6(zZUBBjr*G zQ2^rq8mWP|ridmn54}=)(Qld8y>7XB>rlnx&4Gn8Ghwek?4&f|JEd@MNcFSY?} zsn0Topoq9Os1PVPy?;8a1xLd6lfJ}m>|O&wE%qjePq?$X#uF%BeJ-B53_jE|NZm}C zCMJhq50~x{O)>VK(mYt3#aMXt8_cu8?;Krv``*_LEC}@um2Nbo=?0Hn6vK|wbf>(9 z6U*(Z?ydZMngGDRyo7ae^m15;L7Ge(@DI+Ozj+$pbn+}oS=_~-R2)nvoV4p?6MVGIM%mqrU2{Bdg&7{S$j4Z+m^F_v z{~?v3|AaO4D4iK3yPlB?FQmU$&PGPKQopmzp=hXDES(Ah*ixpVm+DoBr#NCT{L^yM zjYah6BdZ78_Ku@^AVWtoVzY!W#LfEhgl8S6VCKtnXRQxr=Kfqynn zJq|k4107nu3w7IMl8u@B3AN;b+5MR{b`$03l{2-E9evVO%@(!}_7ytt4ihi5}zk z^UeiI1XmT6J?PIae>f36n)|yHcd-5POf&9F5${#85}VbaxAsL|8+%`k^qGPzgU&U~ zQ6H??xjo=Ayl`4fJosr_$o&y$ewO~!n zvfl^g&j|B{i!w{42aI|f1nQYaM$_Pt%YbT6sX*?7<6&=b!h=+IXYDhYMW!>at-i)u 
z-SHM@9Wp4XTI?89!j(i<0pE~CBWvQOyrPYbp3t(ApzE5Yad2He)|IdBNV;J5cl2lt z+=pjzx7Ys=GxgtV>)Q zE{`2L(2FNYe_s}}_E==G)3;^kH5oym7DeIQBKST}CF8G5)gKp0^8E=jHmhF4$l3&k zn3vpmGu8cEDF^Go!fHu~bP8o=oxAsOKb7OkK2oP=so9J1tf@1?cE4XCeK_Y)zSj^U zhN!V(chRD@S}=}TTvLUx5!t)sN*ULbFbTUw&4v%HL#yG^mQ33s$r8e35^7wiS-p3w ztsvyKqs+pFjp>WLWG-Dx6#@0MH)D_Nj?1AJQqg zm*QE?MmD({HWyd-()TTiFdzsHQBQL4wJU=&Ij)z@Bw1}jo=?K*IB0F`%F&D|IjR0q z^46ySTBP5^LNoRC(qQ+Zluf~FVYdTeD}1V%vfHoOmxwkhiCi%`XO_j9-9oN&GNm`f zrn4TVZOfOG1@j3#@r`D`GP;zzQP}<=Yh*V=ayr+~ueGd?nePp+uV-GYGP>d}dc40D1!gv# zWoqU;o-a|sUh|5>EEZ1JaG;(FZ?k+eUYzArRoZ1x@ESR7;j+S4UA08hU5j8(tO2Yj zWuGsVU6{U6p z;K>A-Vi8mRim%8bHPQ)NstS1P6;y-dp0!Ifd-l*Y2W^~;A@fZS)Dp17q977fM8DGR z!%19@^KZvz;XUvrm;$`j18E^j)i2>Mvb)PT?97FLZ?60xAQz}9%FH@G zL*uJR^`pNH{-~Q}7VLgglzx+D2&+1_jXN^k2UO?U0 zWpdWo{P@em3>Wi6(eBWj#@gK9(WogZx;~BS?_q3ZBWYO)4MbKPS4Wq`!bFd-ay!2) zX|R*>maWv@+@SEby9aB--g3namG5Vd@yheDM42W_hZjA&UJu*O5^8$Rn$|9eF3v6c zq{7fiMz^h05>{RenhENZ*r%p=h1d!p4Z(iT#aKjdjI7@^5nyepZN>cUuAe;1#z3yx1*1PJY)SFsgx#S_(HE$nN7`1()gi^Bp znp96y{yFl@Nu_ik#cq1mX4`#f&nsP@C)0`AjFJ=J)t_kZywv{3^`e2C#g;42mPbQ6 z=9}%WEAiCkG5p=HC9V3>w3QuWZ71cAthMPhMRtfX`z^O_93#tCdV#HKD-0*Lo&8NL zY@g#+Gm>6dmSxBst4r-qu;$H1XL6LvD7KyW?9`%m&pht5hfOz=PiTA-6Y(54mLkd@ z>-9eMN##JYv%5Cr+o1=jEC$yr2!p3OT#D976oeI2#yDxI)O!2L= ze)uC?d|qfY+)Z6^%On})B1}8=v%4b8I(Ne)`L%2BK>&{>8}v$eZaRTg1OT3^B}o<` zE*UG{jnWzMvIv?`#K^w0iPD`?1#M=M{AlU=Prjy}@q+MV;?Fs5Ed%D6Z%Q~!^5z_t z9Q>276};ez$?1Jl2&mR@r0;pFs*cvx7`9KAbP%Dd{Ota$y__al)GZCs6=wmUHzX5v zMpBixJy!_z@xdZ!P(J0C;7#IaU5Ck+Qsfw=aCEXHw)YlIxJ9hiw@;q@JBTETNv`+M z0)r!L5x!_Qt4o-_i#wGp`Gj^7g!%a<9E)5C2}!wLiIN!Y;U{b7g+rBm9JK8tBr#4X zXzde51T%F;h9=p|;KU?99y&lvAv?B+t-ux6*|2HZ2a=d#a4e8!b`!xghq=&pftsB? 
z&k=t_TebF4FyH1ZK@38Icg(eY#v~JyOyLyH(1W!(TJY^i0uNpna9;Jc1X(=)h3;{N zD8eV8iqMI~-~HzB>SLX#TJEdb^htiQHu*YvlpIG!hMCL%7w^LVebGK_i4DOnq))z+ U)lvliIRL;Ini-Uxb-4Gx0Is0pTL1t6 literal 0 HcmV?d00001 diff --git a/packages/components/nodes/memory/BufferMemory/BufferMemory.ts b/packages/components/nodes/memory/BufferMemory/BufferMemory.ts index 7793d96d..5310f88d 100644 --- a/packages/components/nodes/memory/BufferMemory/BufferMemory.ts +++ b/packages/components/nodes/memory/BufferMemory/BufferMemory.ts @@ -1,6 +1,6 @@ -import { INode, INodeData, INodeParams } from '../../../src/Interface' -import { getBaseClasses } from '../../../src/utils' -import { BufferMemory } from 'langchain/memory' +import { IMessage, INode, INodeData, INodeParams, MessageType } from '../../../src/Interface' +import { convertBaseMessagetoIMessage, getBaseClasses } from '../../../src/utils' +import { BufferMemory, BufferMemoryInput } from 'langchain/memory' class BufferMemory_Memory implements INode { label: string @@ -41,7 +41,7 @@ class BufferMemory_Memory implements INode { async init(nodeData: INodeData): Promise { const memoryKey = nodeData.inputs?.memoryKey as string const inputKey = nodeData.inputs?.inputKey as string - return new BufferMemory({ + return new BufferMemoryExtended({ returnMessages: true, memoryKey, inputKey @@ -49,4 +49,43 @@ class BufferMemory_Memory implements INode { } } +class BufferMemoryExtended extends BufferMemory { + isShortTermMemory = true + + constructor(fields: BufferMemoryInput) { + super(fields) + } + + async getChatMessages(): Promise { + const memoryResult = await this.loadMemoryVariables({}) + const baseMessages = memoryResult[this.memoryKey ?? 
'chat_history'] + return convertBaseMessagetoIMessage(baseMessages) + } + + async addChatMessages(msgArray: { text: string; type: MessageType }[]): Promise { + const input = msgArray.find((msg) => msg.type === 'userMessage') + const output = msgArray.find((msg) => msg.type === 'apiMessage') + + const inputValues = { [this.inputKey ?? 'input']: input?.text } + const outputValues = { output: output?.text } + + await this.saveContext(inputValues, outputValues) + } + + async clearChatMessages(): Promise { + await this.clear() + } + + async resumeMessages(messages: IMessage[]): Promise { + // Clear existing chatHistory to avoid duplication + if (messages.length) await this.clear() + + // Insert into chatHistory + for (const msg of messages) { + if (msg.type === 'userMessage') await this.chatHistory.addUserMessage(msg.message) + else if (msg.type === 'apiMessage') await this.chatHistory.addAIChatMessage(msg.message) + } + } +} + module.exports = { nodeClass: BufferMemory_Memory } diff --git a/packages/components/nodes/memory/BufferWindowMemory/BufferWindowMemory.ts b/packages/components/nodes/memory/BufferWindowMemory/BufferWindowMemory.ts index 84e607e5..9915d48d 100644 --- a/packages/components/nodes/memory/BufferWindowMemory/BufferWindowMemory.ts +++ b/packages/components/nodes/memory/BufferWindowMemory/BufferWindowMemory.ts @@ -1,5 +1,5 @@ -import { INode, INodeData, INodeParams } from '../../../src/Interface' -import { getBaseClasses } from '../../../src/utils' +import { IMessage, INode, INodeData, INodeParams, MessageType } from '../../../src/Interface' +import { convertBaseMessagetoIMessage, getBaseClasses } from '../../../src/utils' import { BufferWindowMemory, BufferWindowMemoryInput } from 'langchain/memory' class BufferWindowMemory_Memory implements INode { @@ -57,7 +57,46 @@ class BufferWindowMemory_Memory implements INode { k: parseInt(k, 10) } - return new BufferWindowMemory(obj) + return new BufferWindowMemoryExtended(obj) + } +} + +class 
BufferWindowMemoryExtended extends BufferWindowMemory { + isShortTermMemory = true + + constructor(fields: BufferWindowMemoryInput) { + super(fields) + } + + async getChatMessages(): Promise { + const memoryResult = await this.loadMemoryVariables({}) + const baseMessages = memoryResult[this.memoryKey ?? 'chat_history'] + return convertBaseMessagetoIMessage(baseMessages) + } + + async addChatMessages(msgArray: { text: string; type: MessageType }[]): Promise { + const input = msgArray.find((msg) => msg.type === 'userMessage') + const output = msgArray.find((msg) => msg.type === 'apiMessage') + + const inputValues = { [this.inputKey ?? 'input']: input?.text } + const outputValues = { output: output?.text } + + await this.saveContext(inputValues, outputValues) + } + + async clearChatMessages(): Promise { + await this.clear() + } + + async resumeMessages(messages: IMessage[]): Promise { + // Clear existing chatHistory to avoid duplication + if (messages.length) await this.clear() + + // Insert into chatHistory + for (const msg of messages) { + if (msg.type === 'userMessage') await this.chatHistory.addUserMessage(msg.message) + else if (msg.type === 'apiMessage') await this.chatHistory.addAIChatMessage(msg.message) + } } } diff --git a/packages/components/nodes/memory/ConversationSummaryMemory/ConversationSummaryMemory.ts b/packages/components/nodes/memory/ConversationSummaryMemory/ConversationSummaryMemory.ts index 332d73aa..e88beb13 100644 --- a/packages/components/nodes/memory/ConversationSummaryMemory/ConversationSummaryMemory.ts +++ b/packages/components/nodes/memory/ConversationSummaryMemory/ConversationSummaryMemory.ts @@ -1,5 +1,5 @@ -import { INode, INodeData, INodeParams } from '../../../src/Interface' -import { getBaseClasses } from '../../../src/utils' +import { IMessage, INode, INodeData, INodeParams, MessageType } from '../../../src/Interface' +import { convertBaseMessagetoIMessage, getBaseClasses } from '../../../src/utils' import { 
ConversationSummaryMemory, ConversationSummaryMemoryInput } from 'langchain/memory' import { BaseLanguageModel } from 'langchain/base_language' @@ -56,7 +56,50 @@ class ConversationSummaryMemory_Memory implements INode { inputKey } - return new ConversationSummaryMemory(obj) + return new ConversationSummaryMemoryExtended(obj) + } +} + +class ConversationSummaryMemoryExtended extends ConversationSummaryMemory { + isShortTermMemory = true + + constructor(fields: ConversationSummaryMemoryInput) { + super(fields) + } + + async getChatMessages(): Promise { + const memoryResult = await this.loadMemoryVariables({}) + const baseMessages = memoryResult[this.memoryKey ?? 'chat_history'] + return convertBaseMessagetoIMessage(baseMessages) + } + + async addChatMessages(msgArray: { text: string; type: MessageType }[]): Promise { + const input = msgArray.find((msg) => msg.type === 'userMessage') + const output = msgArray.find((msg) => msg.type === 'apiMessage') + + const inputValues = { [this.inputKey ?? 
'input']: input?.text } + const outputValues = { output: output?.text } + + await this.saveContext(inputValues, outputValues) + } + + async clearChatMessages(): Promise { + await this.clear() + } + + async resumeMessages(messages: IMessage[]): Promise { + // Clear existing chatHistory to avoid duplication + if (messages.length) await this.clear() + + // Insert into chatHistory + for (const msg of messages) { + if (msg.type === 'userMessage') await this.chatHistory.addUserMessage(msg.message) + else if (msg.type === 'apiMessage') await this.chatHistory.addAIChatMessage(msg.message) + } + + // Replace buffer + const chatMessages = await this.chatHistory.getMessages() + this.buffer = await this.predictNewSummary(chatMessages.slice(-2), this.buffer) } } diff --git a/packages/components/nodes/memory/DynamoDb/DynamoDb.ts b/packages/components/nodes/memory/DynamoDb/DynamoDb.ts index 8ca6cf9e..a1c44554 100644 --- a/packages/components/nodes/memory/DynamoDb/DynamoDb.ts +++ b/packages/components/nodes/memory/DynamoDb/DynamoDb.ts @@ -1,15 +1,19 @@ import { - ICommonObject, - INode, - INodeData, - INodeParams, - getBaseClasses, - getCredentialData, - getCredentialParam, - serializeChatHistory -} from '../../../src' + DynamoDBClient, + DynamoDBClientConfig, + GetItemCommand, + GetItemCommandInput, + UpdateItemCommand, + UpdateItemCommandInput, + DeleteItemCommand, + DeleteItemCommandInput, + AttributeValue +} from '@aws-sdk/client-dynamodb' import { DynamoDBChatMessageHistory } from 'langchain/stores/message/dynamodb' import { BufferMemory, BufferMemoryInput } from 'langchain/memory' +import { mapStoredMessageToChatMessage, AIMessage, HumanMessage, StoredMessage } from 'langchain/schema' +import { convertBaseMessagetoIMessage, getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils' +import { ICommonObject, IMessage, INode, INodeData, INodeParams, MessageType } from '../../../src/Interface' class DynamoDb_Memory implements INode { label: string @@ -60,7 
+64,8 @@ class DynamoDb_Memory implements INode { label: 'Session ID', name: 'sessionId', type: 'string', - description: 'If not specified, the first CHAT_MESSAGE_ID will be used as sessionId', + description: + 'If not specified, a random id will be used. Learn more', default: '', additionalParams: true, optional: true @@ -78,73 +83,205 @@ class DynamoDb_Memory implements INode { async init(nodeData: INodeData, _: string, options: ICommonObject): Promise { return initalizeDynamoDB(nodeData, options) } - - //@ts-ignore - memoryMethods = { - async clearSessionMemory(nodeData: INodeData, options: ICommonObject): Promise { - const dynamodbMemory = await initalizeDynamoDB(nodeData, options) - const sessionId = nodeData.inputs?.sessionId as string - const chatId = options?.chatId as string - options.logger.info(`Clearing DynamoDb memory session ${sessionId ? sessionId : chatId}`) - await dynamodbMemory.clear() - options.logger.info(`Successfully cleared DynamoDb memory session ${sessionId ? sessionId : chatId}`) - }, - async getChatMessages(nodeData: INodeData, options: ICommonObject): Promise { - const memoryKey = nodeData.inputs?.memoryKey as string - const dynamodbMemory = await initalizeDynamoDB(nodeData, options) - const key = memoryKey ?? 
'chat_history' - const memoryResult = await dynamodbMemory.loadMemoryVariables({}) - return serializeChatHistory(memoryResult[key]) - } - } } const initalizeDynamoDB = async (nodeData: INodeData, options: ICommonObject): Promise => { const tableName = nodeData.inputs?.tableName as string const partitionKey = nodeData.inputs?.partitionKey as string - const sessionId = nodeData.inputs?.sessionId as string const region = nodeData.inputs?.region as string const memoryKey = nodeData.inputs?.memoryKey as string const chatId = options.chatId let isSessionIdUsingChatMessageId = false - if (!sessionId && chatId) isSessionIdUsingChatMessageId = true + let sessionId = '' + + if (!nodeData.inputs?.sessionId && chatId) { + isSessionIdUsingChatMessageId = true + sessionId = chatId + } else { + sessionId = nodeData.inputs?.sessionId + } const credentialData = await getCredentialData(nodeData.credential ?? '', options) const accessKeyId = getCredentialParam('accessKey', credentialData, nodeData) const secretAccessKey = getCredentialParam('secretAccessKey', credentialData, nodeData) + const config: DynamoDBClientConfig = { + region, + credentials: { + accessKeyId, + secretAccessKey + } + } + + const client = new DynamoDBClient(config ?? {}) + const dynamoDb = new DynamoDBChatMessageHistory({ tableName, partitionKey, sessionId: sessionId ? sessionId : chatId, - config: { - region, - credentials: { - accessKeyId, - secretAccessKey - } - } + config }) const memory = new BufferMemoryExtended({ memoryKey: memoryKey ?? 
'chat_history', chatHistory: dynamoDb, - isSessionIdUsingChatMessageId + isSessionIdUsingChatMessageId, + sessionId, + dynamodbClient: client }) return memory } interface BufferMemoryExtendedInput { isSessionIdUsingChatMessageId: boolean + dynamodbClient: DynamoDBClient + sessionId: string +} + +interface DynamoDBSerializedChatMessage { + M: { + type: { + S: string + } + text: { + S: string + } + role?: { + S: string + } + } } class BufferMemoryExtended extends BufferMemory { - isSessionIdUsingChatMessageId? = false + isSessionIdUsingChatMessageId = false + sessionId = '' + dynamodbClient: DynamoDBClient - constructor(fields: BufferMemoryInput & Partial) { + constructor(fields: BufferMemoryInput & BufferMemoryExtendedInput) { super(fields) this.isSessionIdUsingChatMessageId = fields.isSessionIdUsingChatMessageId + this.sessionId = fields.sessionId + this.dynamodbClient = fields.dynamodbClient + } + + overrideDynamoKey(overrideSessionId = '') { + const existingDynamoKey = (this as any).dynamoKey + const partitionKey = (this as any).partitionKey + + let newDynamoKey: Record = {} + + if (Object.keys(existingDynamoKey).includes(partitionKey)) { + newDynamoKey[partitionKey] = { S: overrideSessionId } + } + + return Object.keys(newDynamoKey).length ? 
newDynamoKey : existingDynamoKey + } + + async addNewMessage( + messages: StoredMessage[], + client: DynamoDBClient, + tableName = '', + dynamoKey: Record = {}, + messageAttributeName = 'messages' + ) { + const params: UpdateItemCommandInput = { + TableName: tableName, + Key: dynamoKey, + ExpressionAttributeNames: { + '#m': messageAttributeName + }, + ExpressionAttributeValues: { + ':empty_list': { + L: [] + }, + ':m': { + L: messages.map((message) => { + const dynamoSerializedMessage: DynamoDBSerializedChatMessage = { + M: { + type: { + S: message.type + }, + text: { + S: message.data.content + } + } + } + if (message.data.role) { + dynamoSerializedMessage.M.role = { S: message.data.role } + } + return dynamoSerializedMessage + }) + } + }, + UpdateExpression: 'SET #m = list_append(if_not_exists(#m, :empty_list), :m)' + } + + await client.send(new UpdateItemCommand(params)) + } + + async getChatMessages(overrideSessionId = ''): Promise { + if (!this.dynamodbClient) return [] + + const dynamoKey = overrideSessionId ? this.overrideDynamoKey(overrideSessionId) : (this as any).dynamoKey + const tableName = (this as any).tableName + const messageAttributeName = (this as any).messageAttributeName + + const params: GetItemCommandInput = { + TableName: tableName, + Key: dynamoKey + } + + const response = await this.dynamodbClient.send(new GetItemCommand(params)) + const items = response.Item ? response.Item[messageAttributeName]?.L ?? [] : [] + const messages = items + .map((item) => ({ + type: item.M?.type.S, + data: { + role: item.M?.role?.S, + content: item.M?.text.S + } + })) + .filter((x): x is StoredMessage => x.type !== undefined && x.data.content !== undefined) + const baseMessages = messages.map(mapStoredMessageToChatMessage) + return convertBaseMessagetoIMessage(baseMessages) + } + + async addChatMessages(msgArray: { text: string; type: MessageType }[], overrideSessionId = ''): Promise { + if (!this.dynamodbClient) return + + const dynamoKey = overrideSessionId ? 
this.overrideDynamoKey(overrideSessionId) : (this as any).dynamoKey + const tableName = (this as any).tableName + const messageAttributeName = (this as any).messageAttributeName + + const input = msgArray.find((msg) => msg.type === 'userMessage') + const output = msgArray.find((msg) => msg.type === 'apiMessage') + + if (input) { + const newInputMessage = new HumanMessage(input.text) + const messageToAdd = [newInputMessage].map((msg) => msg.toDict()) + await this.addNewMessage(messageToAdd, this.dynamodbClient, tableName, dynamoKey, messageAttributeName) + } + + if (output) { + const newOutputMessage = new AIMessage(output.text) + const messageToAdd = [newOutputMessage].map((msg) => msg.toDict()) + await this.addNewMessage(messageToAdd, this.dynamodbClient, tableName, dynamoKey, messageAttributeName) + } + } + + async clearChatMessages(overrideSessionId = ''): Promise { + if (!this.dynamodbClient) return + + const dynamoKey = overrideSessionId ? this.overrideDynamoKey(overrideSessionId) : (this as any).dynamoKey + const tableName = (this as any).tableName + + const params: DeleteItemCommandInput = { + TableName: tableName, + Key: dynamoKey + } + await this.dynamodbClient.send(new DeleteItemCommand(params)) + await this.clear() } } diff --git a/packages/components/nodes/memory/MongoDBMemory/MongoDBMemory.ts b/packages/components/nodes/memory/MongoDBMemory/MongoDBMemory.ts index 76cb7e31..8bebcfad 100644 --- a/packages/components/nodes/memory/MongoDBMemory/MongoDBMemory.ts +++ b/packages/components/nodes/memory/MongoDBMemory/MongoDBMemory.ts @@ -1,17 +1,9 @@ -import { - getBaseClasses, - getCredentialData, - getCredentialParam, - ICommonObject, - INode, - INodeData, - INodeParams, - serializeChatHistory -} from '../../../src' +import { MongoClient, Collection, Document } from 'mongodb' import { MongoDBChatMessageHistory } from 'langchain/stores/message/mongodb' import { BufferMemory, BufferMemoryInput } from 'langchain/memory' -import { BaseMessage, 
mapStoredMessageToChatMessage } from 'langchain/schema' -import { MongoClient } from 'mongodb' +import { BaseMessage, mapStoredMessageToChatMessage, AIMessage, HumanMessage } from 'langchain/schema' +import { convertBaseMessagetoIMessage, getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils' +import { ICommonObject, IMessage, INode, INodeData, INodeParams, MessageType } from '../../../src/Interface' class MongoDB_Memory implements INode { label: string @@ -57,7 +49,8 @@ class MongoDB_Memory implements INode { label: 'Session Id', name: 'sessionId', type: 'string', - description: 'If not specified, the first CHAT_MESSAGE_ID will be used as sessionId', + description: + 'If not specified, a random id will be used. Learn more', default: '', additionalParams: true, optional: true @@ -75,44 +68,33 @@ class MongoDB_Memory implements INode { async init(nodeData: INodeData, _: string, options: ICommonObject): Promise { return initializeMongoDB(nodeData, options) } - - //@ts-ignore - memoryMethods = { - async clearSessionMemory(nodeData: INodeData, options: ICommonObject): Promise { - const mongodbMemory = await initializeMongoDB(nodeData, options) - const sessionId = nodeData.inputs?.sessionId as string - const chatId = options?.chatId as string - options.logger.info(`Clearing MongoDB memory session ${sessionId ? sessionId : chatId}`) - await mongodbMemory.clear() - options.logger.info(`Successfully cleared MongoDB memory session ${sessionId ? sessionId : chatId}`) - }, - async getChatMessages(nodeData: INodeData, options: ICommonObject): Promise { - const memoryKey = nodeData.inputs?.memoryKey as string - const mongodbMemory = await initializeMongoDB(nodeData, options) - const key = memoryKey ?? 
'chat_history' - const memoryResult = await mongodbMemory.loadMemoryVariables({}) - return serializeChatHistory(memoryResult[key]) - } - } } const initializeMongoDB = async (nodeData: INodeData, options: ICommonObject): Promise => { const databaseName = nodeData.inputs?.databaseName as string const collectionName = nodeData.inputs?.collectionName as string - const sessionId = nodeData.inputs?.sessionId as string const memoryKey = nodeData.inputs?.memoryKey as string const chatId = options?.chatId as string let isSessionIdUsingChatMessageId = false - if (!sessionId && chatId) isSessionIdUsingChatMessageId = true + let sessionId = '' + + if (!nodeData.inputs?.sessionId && chatId) { + isSessionIdUsingChatMessageId = true + sessionId = chatId + } else { + sessionId = nodeData.inputs?.sessionId + } const credentialData = await getCredentialData(nodeData.credential ?? '', options) let mongoDBConnectUrl = getCredentialParam('mongoDBConnectUrl', credentialData, nodeData) const client = new MongoClient(mongoDBConnectUrl) await client.connect() + const collection = client.db(databaseName).collection(collectionName) + /**** Methods below are needed to override the original implementations ****/ const mongoDBChatMessageHistory = new MongoDBChatMessageHistory({ collection, sessionId: sessionId ? sessionId : chatId @@ -140,24 +122,83 @@ const initializeMongoDB = async (nodeData: INodeData, options: ICommonObject): P mongoDBChatMessageHistory.clear = async (): Promise => { await collection.deleteOne({ sessionId: (mongoDBChatMessageHistory as any).sessionId }) } + /**** End of override functions ****/ return new BufferMemoryExtended({ memoryKey: memoryKey ?? 
'chat_history', chatHistory: mongoDBChatMessageHistory, - isSessionIdUsingChatMessageId + isSessionIdUsingChatMessageId, + sessionId, + collection }) } interface BufferMemoryExtendedInput { isSessionIdUsingChatMessageId: boolean + collection: Collection + sessionId: string } class BufferMemoryExtended extends BufferMemory { - isSessionIdUsingChatMessageId? = false + isSessionIdUsingChatMessageId = false + sessionId = '' + collection: Collection - constructor(fields: BufferMemoryInput & Partial) { + constructor(fields: BufferMemoryInput & BufferMemoryExtendedInput) { super(fields) this.isSessionIdUsingChatMessageId = fields.isSessionIdUsingChatMessageId + this.sessionId = fields.sessionId + this.collection = fields.collection + } + + async getChatMessages(overrideSessionId = ''): Promise { + if (!this.collection) return [] + + const id = overrideSessionId ?? this.sessionId + const document = await this.collection.findOne({ sessionId: id }) + const messages = document?.messages || [] + const baseMessages = messages.map(mapStoredMessageToChatMessage) + return convertBaseMessagetoIMessage(baseMessages) + } + + async addChatMessages(msgArray: { text: string; type: MessageType }[], overrideSessionId = ''): Promise { + if (!this.collection) return + + const id = overrideSessionId ?? 
this.sessionId + const input = msgArray.find((msg) => msg.type === 'userMessage') + const output = msgArray.find((msg) => msg.type === 'apiMessage') + + if (input) { + const newInputMessage = new HumanMessage(input.text) + const messageToAdd = [newInputMessage].map((msg) => msg.toDict()) + await this.collection.updateOne( + { sessionId: id }, + { + $push: { messages: { $each: messageToAdd } } + }, + { upsert: true } + ) + } + + if (output) { + const newOutputMessage = new AIMessage(output.text) + const messageToAdd = [newOutputMessage].map((msg) => msg.toDict()) + await this.collection.updateOne( + { sessionId: id }, + { + $push: { messages: { $each: messageToAdd } } + }, + { upsert: true } + ) + } + } + + async clearChatMessages(overrideSessionId = ''): Promise { + if (!this.collection) return + + const id = overrideSessionId ?? this.sessionId + await this.collection.deleteOne({ sessionId: id }) + await this.clear() } } diff --git a/packages/components/nodes/memory/MotorheadMemory/MotorheadMemory.ts b/packages/components/nodes/memory/MotorheadMemory/MotorheadMemory.ts index 9cdbcd5c..6dd94944 100644 --- a/packages/components/nodes/memory/MotorheadMemory/MotorheadMemory.ts +++ b/packages/components/nodes/memory/MotorheadMemory/MotorheadMemory.ts @@ -1,9 +1,9 @@ -import { INode, INodeData, INodeParams } from '../../../src/Interface' -import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils' +import { IMessage, INode, INodeData, INodeParams, MessageType } from '../../../src/Interface' +import { convertBaseMessagetoIMessage, getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils' import { ICommonObject } from '../../../src' import { MotorheadMemory, MotorheadMemoryInput } from 'langchain/memory' import fetch from 'node-fetch' -import { getBufferString } from 'langchain/memory' +import { InputValues, MemoryVariables, OutputValues } from 'langchain/memory' class MotorMemory_Memory implements INode { label: 
string @@ -46,7 +46,8 @@ class MotorMemory_Memory implements INode { label: 'Session Id', name: 'sessionId', type: 'string', - description: 'If not specified, the first CHAT_MESSAGE_ID will be used as sessionId', + description: + 'If not specified, a random id will be used. Learn more', default: '', additionalParams: true, optional: true @@ -64,35 +65,22 @@ class MotorMemory_Memory implements INode { async init(nodeData: INodeData, _: string, options: ICommonObject): Promise { return initalizeMotorhead(nodeData, options) } - - //@ts-ignore - memoryMethods = { - async clearSessionMemory(nodeData: INodeData, options: ICommonObject): Promise { - const motorhead = await initalizeMotorhead(nodeData, options) - const sessionId = nodeData.inputs?.sessionId as string - const chatId = options?.chatId as string - options.logger.info(`Clearing Motorhead memory session ${sessionId ? sessionId : chatId}`) - await motorhead.clear() - options.logger.info(`Successfully cleared Motorhead memory session ${sessionId ? sessionId : chatId}`) - }, - async getChatMessages(nodeData: INodeData, options: ICommonObject): Promise { - const memoryKey = nodeData.inputs?.memoryKey as string - const motorhead = await initalizeMotorhead(nodeData, options) - const key = memoryKey ?? 
'chat_history' - const memoryResult = await motorhead.loadMemoryVariables({}) - return getBufferString(memoryResult[key]) - } - } } const initalizeMotorhead = async (nodeData: INodeData, options: ICommonObject): Promise => { const memoryKey = nodeData.inputs?.memoryKey as string const baseURL = nodeData.inputs?.baseURL as string - const sessionId = nodeData.inputs?.sessionId as string const chatId = options?.chatId as string let isSessionIdUsingChatMessageId = false - if (!sessionId && chatId) isSessionIdUsingChatMessageId = true + let sessionId = '' + + if (!nodeData.inputs?.sessionId && chatId) { + isSessionIdUsingChatMessageId = true + sessionId = chatId + } else { + sessionId = nodeData.inputs?.sessionId + } const credentialData = await getCredentialData(nodeData.credential ?? '', options) const apiKey = getCredentialParam('apiKey', credentialData, nodeData) @@ -100,8 +88,9 @@ const initalizeMotorhead = async (nodeData: INodeData, options: ICommonObject): let obj: MotorheadMemoryInput & Partial = { returnMessages: true, - sessionId: sessionId ? 
sessionId : chatId, - memoryKey + sessionId, + memoryKey, + isSessionIdUsingChatMessageId } if (baseURL) { @@ -117,8 +106,6 @@ const initalizeMotorhead = async (nodeData: INodeData, options: ICommonObject): } } - if (isSessionIdUsingChatMessageId) obj.isSessionIdUsingChatMessageId = true - const motorheadMemory = new MotorheadMemoryExtended(obj) // Get messages from sessionId @@ -139,7 +126,24 @@ class MotorheadMemoryExtended extends MotorheadMemory { this.isSessionIdUsingChatMessageId = fields.isSessionIdUsingChatMessageId } - async clear(): Promise { + async loadMemoryVariables(values: InputValues, overrideSessionId = ''): Promise { + if (overrideSessionId) { + super.sessionId = overrideSessionId + } + return super.loadMemoryVariables({ values }) + } + + async saveContext(inputValues: InputValues, outputValues: OutputValues, overrideSessionId = ''): Promise { + if (overrideSessionId) { + super.sessionId = overrideSessionId + } + return super.saveContext(inputValues, outputValues) + } + + async clear(overrideSessionId = ''): Promise { + if (overrideSessionId) { + super.sessionId = overrideSessionId + } try { await this.caller.call(fetch, `${this.url}/sessions/${this.sessionId}/memory`, { //@ts-ignore @@ -155,6 +159,28 @@ class MotorheadMemoryExtended extends MotorheadMemory { await this.chatHistory.clear() await super.clear() } + + async getChatMessages(overrideSessionId = ''): Promise { + const id = overrideSessionId ?? this.sessionId + const memoryVariables = await this.loadMemoryVariables({}, id) + const baseMessages = memoryVariables[this.memoryKey] + return convertBaseMessagetoIMessage(baseMessages) + } + + async addChatMessages(msgArray: { text: string; type: MessageType }[], overrideSessionId = ''): Promise { + const id = overrideSessionId ?? this.sessionId + const input = msgArray.find((msg) => msg.type === 'userMessage') + const output = msgArray.find((msg) => msg.type === 'apiMessage') + const inputValues = { [this.inputKey ?? 
'input']: input?.text } + const outputValues = { output: output?.text } + + await this.saveContext(inputValues, outputValues, id) + } + + async clearChatMessages(overrideSessionId = ''): Promise { + const id = overrideSessionId ?? this.sessionId + await this.clear(id) + } } module.exports = { nodeClass: MotorMemory_Memory } diff --git a/packages/components/nodes/memory/RedisBackedChatMemory/RedisBackedChatMemory.ts b/packages/components/nodes/memory/RedisBackedChatMemory/RedisBackedChatMemory.ts index 7fe447ad..4692088b 100644 --- a/packages/components/nodes/memory/RedisBackedChatMemory/RedisBackedChatMemory.ts +++ b/packages/components/nodes/memory/RedisBackedChatMemory/RedisBackedChatMemory.ts @@ -1,9 +1,9 @@ -import { INode, INodeData, INodeParams, ICommonObject } from '../../../src/Interface' -import { getBaseClasses, getCredentialData, getCredentialParam, serializeChatHistory } from '../../../src/utils' +import { Redis } from 'ioredis' import { BufferMemory, BufferMemoryInput } from 'langchain/memory' import { RedisChatMessageHistory, RedisChatMessageHistoryInput } from 'langchain/stores/message/ioredis' -import { mapStoredMessageToChatMessage, BaseMessage } from 'langchain/schema' -import { Redis } from 'ioredis' +import { mapStoredMessageToChatMessage, BaseMessage, AIMessage, HumanMessage } from 'langchain/schema' +import { INode, INodeData, INodeParams, ICommonObject, MessageType, IMessage } from '../../../src/Interface' +import { convertBaseMessagetoIMessage, getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils' class RedisBackedChatMemory_Memory implements INode { label: string @@ -38,7 +38,8 @@ class RedisBackedChatMemory_Memory implements INode { label: 'Session Id', name: 'sessionId', type: 'string', - description: 'If not specified, the first CHAT_MESSAGE_ID will be used as sessionId', + description: + 'If not specified, a random id will be used. 
Learn more', default: '', additionalParams: true, optional: true @@ -64,40 +65,28 @@ class RedisBackedChatMemory_Memory implements INode { async init(nodeData: INodeData, _: string, options: ICommonObject): Promise { return await initalizeRedis(nodeData, options) } - - //@ts-ignore - memoryMethods = { - async clearSessionMemory(nodeData: INodeData, options: ICommonObject): Promise { - const redis = await initalizeRedis(nodeData, options) - const sessionId = nodeData.inputs?.sessionId as string - const chatId = options?.chatId as string - options.logger.info(`Clearing Redis memory session ${sessionId ? sessionId : chatId}`) - await redis.clear() - options.logger.info(`Successfully cleared Redis memory session ${sessionId ? sessionId : chatId}`) - }, - async getChatMessages(nodeData: INodeData, options: ICommonObject): Promise { - const memoryKey = nodeData.inputs?.memoryKey as string - const redis = await initalizeRedis(nodeData, options) - const key = memoryKey ?? 'chat_history' - const memoryResult = await redis.loadMemoryVariables({}) - return serializeChatHistory(memoryResult[key]) - } - } } const initalizeRedis = async (nodeData: INodeData, options: ICommonObject): Promise => { - const sessionId = nodeData.inputs?.sessionId as string const sessionTTL = nodeData.inputs?.sessionTTL as number const memoryKey = nodeData.inputs?.memoryKey as string const chatId = options?.chatId as string let isSessionIdUsingChatMessageId = false - if (!sessionId && chatId) isSessionIdUsingChatMessageId = true + let sessionId = '' + + if (!nodeData.inputs?.sessionId && chatId) { + isSessionIdUsingChatMessageId = true + sessionId = chatId + } else { + sessionId = nodeData.inputs?.sessionId + } const credentialData = await getCredentialData(nodeData.credential ?? 
'', options) const redisUrl = getCredentialParam('redisUrl', credentialData, nodeData) let client: Redis + if (!redisUrl || redisUrl === '') { const username = getCredentialParam('redisCacheUser', credentialData, nodeData) const password = getCredentialParam('redisCachePwd', credentialData, nodeData) @@ -115,7 +104,7 @@ const initalizeRedis = async (nodeData: INodeData, options: ICommonObject): Prom } let obj: RedisChatMessageHistoryInput = { - sessionId: sessionId ? sessionId : chatId, + sessionId, client } @@ -128,6 +117,7 @@ const initalizeRedis = async (nodeData: INodeData, options: ICommonObject): Prom const redisChatMessageHistory = new RedisChatMessageHistory(obj) + /**** Methods below are needed to override the original implementations ****/ redisChatMessageHistory.getMessages = async (): Promise => { const rawStoredMessages = await client.lrange((redisChatMessageHistory as any).sessionId, 0, -1) const orderedMessages = rawStoredMessages.reverse().map((message) => JSON.parse(message)) @@ -145,25 +135,73 @@ const initalizeRedis = async (nodeData: INodeData, options: ICommonObject): Prom redisChatMessageHistory.clear = async (): Promise => { await client.del((redisChatMessageHistory as any).sessionId) } + /**** End of override functions ****/ const memory = new BufferMemoryExtended({ memoryKey: memoryKey ?? 'chat_history', chatHistory: redisChatMessageHistory, - isSessionIdUsingChatMessageId + isSessionIdUsingChatMessageId, + sessionId, + redisClient: client }) + return memory } interface BufferMemoryExtendedInput { isSessionIdUsingChatMessageId: boolean + redisClient: Redis + sessionId: string } class BufferMemoryExtended extends BufferMemory { - isSessionIdUsingChatMessageId? 
= false + isSessionIdUsingChatMessageId = false + sessionId = '' + redisClient: Redis - constructor(fields: BufferMemoryInput & Partial) { + constructor(fields: BufferMemoryInput & BufferMemoryExtendedInput) { super(fields) this.isSessionIdUsingChatMessageId = fields.isSessionIdUsingChatMessageId + this.sessionId = fields.sessionId + this.redisClient = fields.redisClient + } + + async getChatMessages(overrideSessionId = ''): Promise { + if (!this.redisClient) return [] + + const id = overrideSessionId ?? this.sessionId + const rawStoredMessages = await this.redisClient.lrange(id, 0, -1) + const orderedMessages = rawStoredMessages.reverse().map((message) => JSON.parse(message)) + const baseMessages = orderedMessages.map(mapStoredMessageToChatMessage) + return convertBaseMessagetoIMessage(baseMessages) + } + + async addChatMessages(msgArray: { text: string; type: MessageType }[], overrideSessionId = ''): Promise { + if (!this.redisClient) return + + const id = overrideSessionId ?? this.sessionId + const input = msgArray.find((msg) => msg.type === 'userMessage') + const output = msgArray.find((msg) => msg.type === 'apiMessage') + + if (input) { + const newInputMessage = new HumanMessage(input.text) + const messageToAdd = [newInputMessage].map((msg) => msg.toDict()) + await this.redisClient.lpush(id, JSON.stringify(messageToAdd[0])) + } + + if (output) { + const newOutputMessage = new AIMessage(output.text) + const messageToAdd = [newOutputMessage].map((msg) => msg.toDict()) + await this.redisClient.lpush(id, JSON.stringify(messageToAdd[0])) + } + } + + async clearChatMessages(overrideSessionId = ''): Promise { + if (!this.redisClient) return + + const id = overrideSessionId ?? 
this.sessionId + await this.redisClient.del(id) + await this.clear() } } diff --git a/packages/components/nodes/memory/UpstashRedisBackedChatMemory/UpstashRedisBackedChatMemory.ts b/packages/components/nodes/memory/UpstashRedisBackedChatMemory/UpstashRedisBackedChatMemory.ts index 8bca0440..327f0f32 100644 --- a/packages/components/nodes/memory/UpstashRedisBackedChatMemory/UpstashRedisBackedChatMemory.ts +++ b/packages/components/nodes/memory/UpstashRedisBackedChatMemory/UpstashRedisBackedChatMemory.ts @@ -1,8 +1,10 @@ -import { INode, INodeData, INodeParams } from '../../../src/Interface' -import { getBaseClasses, getCredentialData, getCredentialParam, serializeChatHistory } from '../../../src/utils' -import { ICommonObject } from '../../../src' +import { Redis } from '@upstash/redis' import { BufferMemory, BufferMemoryInput } from 'langchain/memory' import { UpstashRedisChatMessageHistory } from 'langchain/stores/message/upstash_redis' +import { mapStoredMessageToChatMessage, AIMessage, HumanMessage, StoredMessage } from 'langchain/schema' +import { IMessage, INode, INodeData, INodeParams, MessageType } from '../../../src/Interface' +import { convertBaseMessagetoIMessage, getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils' +import { ICommonObject } from '../../../src/Interface' class UpstashRedisBackedChatMemory_Memory implements INode { label: string @@ -43,7 +45,8 @@ class UpstashRedisBackedChatMemory_Memory implements INode { label: 'Session Id', name: 'sessionId', type: 'string', - description: 'If not specified, the first CHAT_MESSAGE_ID will be used as sessionId', + description: + 'If not specified, a random id will be used. 
Learn more', default: '', additionalParams: true, optional: true @@ -62,51 +65,43 @@ class UpstashRedisBackedChatMemory_Memory implements INode { async init(nodeData: INodeData, _: string, options: ICommonObject): Promise { return initalizeUpstashRedis(nodeData, options) } - - //@ts-ignore - memoryMethods = { - async clearSessionMemory(nodeData: INodeData, options: ICommonObject): Promise { - const redis = await initalizeUpstashRedis(nodeData, options) - const sessionId = nodeData.inputs?.sessionId as string - const chatId = options?.chatId as string - options.logger.info(`Clearing Upstash Redis memory session ${sessionId ? sessionId : chatId}`) - await redis.clear() - options.logger.info(`Successfully cleared Upstash Redis memory session ${sessionId ? sessionId : chatId}`) - }, - async getChatMessages(nodeData: INodeData, options: ICommonObject): Promise { - const redis = await initalizeUpstashRedis(nodeData, options) - const key = 'chat_history' - const memoryResult = await redis.loadMemoryVariables({}) - return serializeChatHistory(memoryResult[key]) - } - } } const initalizeUpstashRedis = async (nodeData: INodeData, options: ICommonObject): Promise => { const baseURL = nodeData.inputs?.baseURL as string - const sessionId = nodeData.inputs?.sessionId as string const sessionTTL = nodeData.inputs?.sessionTTL as string const chatId = options?.chatId as string let isSessionIdUsingChatMessageId = false - if (!sessionId && chatId) isSessionIdUsingChatMessageId = true + let sessionId = '' + + if (!nodeData.inputs?.sessionId && chatId) { + isSessionIdUsingChatMessageId = true + sessionId = chatId + } else { + sessionId = nodeData.inputs?.sessionId + } const credentialData = await getCredentialData(nodeData.credential ?? 
'', options) const upstashRestToken = getCredentialParam('upstashRestToken', credentialData, nodeData) + const client = new Redis({ + url: baseURL, + token: upstashRestToken + }) + const redisChatMessageHistory = new UpstashRedisChatMessageHistory({ sessionId: sessionId ? sessionId : chatId, sessionTTL: sessionTTL ? parseInt(sessionTTL, 10) : undefined, - config: { - url: baseURL, - token: upstashRestToken - } + client }) const memory = new BufferMemoryExtended({ memoryKey: 'chat_history', chatHistory: redisChatMessageHistory, - isSessionIdUsingChatMessageId + isSessionIdUsingChatMessageId, + sessionId, + redisClient: client }) return memory @@ -114,14 +109,59 @@ const initalizeUpstashRedis = async (nodeData: INodeData, options: ICommonObject interface BufferMemoryExtendedInput { isSessionIdUsingChatMessageId: boolean + redisClient: Redis + sessionId: string } class BufferMemoryExtended extends BufferMemory { - isSessionIdUsingChatMessageId? = false + isSessionIdUsingChatMessageId = false + sessionId = '' + redisClient: Redis - constructor(fields: BufferMemoryInput & Partial) { + constructor(fields: BufferMemoryInput & BufferMemoryExtendedInput) { super(fields) this.isSessionIdUsingChatMessageId = fields.isSessionIdUsingChatMessageId + this.sessionId = fields.sessionId + this.redisClient = fields.redisClient + } + + async getChatMessages(overrideSessionId = ''): Promise { + if (!this.redisClient) return [] + + const id = overrideSessionId ?? 
this.sessionId + const rawStoredMessages: StoredMessage[] = await this.redisClient.lrange(id, 0, -1) + const orderedMessages = rawStoredMessages.reverse() + const previousMessages = orderedMessages.filter((x): x is StoredMessage => x.type !== undefined && x.data.content !== undefined) + const baseMessages = previousMessages.map(mapStoredMessageToChatMessage) + return convertBaseMessagetoIMessage(baseMessages) + } + + async addChatMessages(msgArray: { text: string; type: MessageType }[], overrideSessionId = ''): Promise { + if (!this.redisClient) return + + const id = overrideSessionId ?? this.sessionId + const input = msgArray.find((msg) => msg.type === 'userMessage') + const output = msgArray.find((msg) => msg.type === 'apiMessage') + + if (input) { + const newInputMessage = new HumanMessage(input.text) + const messageToAdd = [newInputMessage].map((msg) => msg.toDict()) + await this.redisClient.lpush(id, JSON.stringify(messageToAdd[0])) + } + + if (output) { + const newOutputMessage = new AIMessage(output.text) + const messageToAdd = [newOutputMessage].map((msg) => msg.toDict()) + await this.redisClient.lpush(id, JSON.stringify(messageToAdd[0])) + } + } + + async clearChatMessages(overrideSessionId = ''): Promise { + if (!this.redisClient) return + + const id = overrideSessionId ?? 
this.sessionId + await this.redisClient.del(id) + await this.clear() } } diff --git a/packages/components/nodes/memory/ZepMemory/ZepMemory.ts b/packages/components/nodes/memory/ZepMemory/ZepMemory.ts index ced871a1..ac0ac896 100644 --- a/packages/components/nodes/memory/ZepMemory/ZepMemory.ts +++ b/packages/components/nodes/memory/ZepMemory/ZepMemory.ts @@ -1,9 +1,8 @@ -import { SystemMessage } from 'langchain/schema' -import { INode, INodeData, INodeParams } from '../../../src/Interface' -import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils' +import { IMessage, INode, INodeData, INodeParams, MessageType } from '../../../src/Interface' +import { convertBaseMessagetoIMessage, getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils' import { ZepMemory, ZepMemoryInput } from 'langchain/memory/zep' import { ICommonObject } from '../../../src' -import { getBufferString } from 'langchain/memory' +import { InputValues, MemoryVariables, OutputValues } from 'langchain/memory' class ZepMemory_Memory implements INode { label: string @@ -20,7 +19,7 @@ class ZepMemory_Memory implements INode { constructor() { this.label = 'Zep Memory' this.name = 'ZepMemory' - this.version = 1.0 + this.version = 2.0 this.type = 'ZepMemory' this.icon = 'zep.png' this.category = 'Memory' @@ -41,17 +40,12 @@ class ZepMemory_Memory implements INode { type: 'string', default: 'http://127.0.0.1:8000' }, - { - label: 'Auto Summary', - name: 'autoSummary', - type: 'boolean', - default: true - }, { label: 'Session Id', name: 'sessionId', type: 'string', - description: 'If not specified, the first CHAT_MESSAGE_ID will be used as sessionId', + description: + 'If not specified, a random id will be used. 
Learn more', default: '', additionalParams: true, optional: true @@ -61,13 +55,7 @@ class ZepMemory_Memory implements INode { name: 'k', type: 'number', default: '10', - description: 'Window of size k to surface the last k back-and-forth to use as memory.' - }, - { - label: 'Auto Summary Template', - name: 'autoSummaryTemplate', - type: 'string', - default: 'This is the summary of the following conversation:\n{summary}', + description: 'Window of size k to surface the last k back-and-forth to use as memory.', additionalParams: true }, { @@ -109,57 +97,7 @@ class ZepMemory_Memory implements INode { } async init(nodeData: INodeData, _: string, options: ICommonObject): Promise { - const autoSummaryTemplate = nodeData.inputs?.autoSummaryTemplate as string - const autoSummary = nodeData.inputs?.autoSummary as boolean - - const k = nodeData.inputs?.k as string - - let zep = await initalizeZep(nodeData, options) - - // hack to support summary - let tmpFunc = zep.loadMemoryVariables - zep.loadMemoryVariables = async (values) => { - let data = await tmpFunc.bind(zep, values)() - if (autoSummary && zep.returnMessages && data[zep.memoryKey] && data[zep.memoryKey].length) { - const zepClient = await zep.zepClientPromise - const memory = await zepClient.memory.getMemory(zep.sessionId, parseInt(k, 10) ?? 
10) - if (memory?.summary) { - let summary = autoSummaryTemplate.replace(/{summary}/g, memory.summary.content) - // eslint-disable-next-line no-console - console.log('[ZepMemory] auto summary:', summary) - data[zep.memoryKey].unshift(new SystemMessage(summary)) - } - } - // for langchain zep memory compatibility, or we will get "Missing value for input variable chat_history" - if (data instanceof Array) { - data = { - [zep.memoryKey]: data - } - } - return data - } - return zep - } - - //@ts-ignore - memoryMethods = { - async clearSessionMemory(nodeData: INodeData, options: ICommonObject): Promise { - const zep = await initalizeZep(nodeData, options) - const sessionId = nodeData.inputs?.sessionId as string - const chatId = options?.chatId as string - options.logger.info(`Clearing Zep memory session ${sessionId ? sessionId : chatId}`) - await zep.clear() - options.logger.info(`Successfully cleared Zep memory session ${sessionId ? sessionId : chatId}`) - }, - async getChatMessages(nodeData: INodeData, options: ICommonObject): Promise { - const memoryKey = nodeData.inputs?.memoryKey as string - const aiPrefix = nodeData.inputs?.aiPrefix as string - const humanPrefix = nodeData.inputs?.humanPrefix as string - const zep = await initalizeZep(nodeData, options) - const key = memoryKey ?? 
'chat_history' - const memoryResult = await zep.loadMemoryVariables({}) - return getBufferString(memoryResult[key], humanPrefix, aiPrefix) - } + return await initalizeZep(nodeData, options) } } @@ -169,40 +107,94 @@ const initalizeZep = async (nodeData: INodeData, options: ICommonObject): Promis const humanPrefix = nodeData.inputs?.humanPrefix as string const memoryKey = nodeData.inputs?.memoryKey as string const inputKey = nodeData.inputs?.inputKey as string - const sessionId = nodeData.inputs?.sessionId as string + const k = nodeData.inputs?.k as string const chatId = options?.chatId as string let isSessionIdUsingChatMessageId = false - if (!sessionId && chatId) isSessionIdUsingChatMessageId = true + let sessionId = '' + + if (!nodeData.inputs?.sessionId && chatId) { + isSessionIdUsingChatMessageId = true + sessionId = chatId + } else { + sessionId = nodeData.inputs?.sessionId + } const credentialData = await getCredentialData(nodeData.credential ?? '', options) const apiKey = getCredentialParam('apiKey', credentialData, nodeData) - const obj: ZepMemoryInput & Partial = { + const obj: ZepMemoryInput & ZepMemoryExtendedInput = { baseURL, - sessionId: sessionId ? sessionId : chatId, aiPrefix, humanPrefix, returnMessages: true, memoryKey, - inputKey + inputKey, + sessionId, + isSessionIdUsingChatMessageId, + k: k ? parseInt(k, 10) : undefined } if (apiKey) obj.apiKey = apiKey - if (isSessionIdUsingChatMessageId) obj.isSessionIdUsingChatMessageId = true return new ZepMemoryExtended(obj) } interface ZepMemoryExtendedInput { isSessionIdUsingChatMessageId: boolean + k?: number } class ZepMemoryExtended extends ZepMemory { - isSessionIdUsingChatMessageId? 
= false + isSessionIdUsingChatMessageId = false + lastN?: number - constructor(fields: ZepMemoryInput & Partial) { + constructor(fields: ZepMemoryInput & ZepMemoryExtendedInput) { super(fields) this.isSessionIdUsingChatMessageId = fields.isSessionIdUsingChatMessageId + this.lastN = fields.k + } + + async loadMemoryVariables(values: InputValues, overrideSessionId = ''): Promise { + if (overrideSessionId) { + super.sessionId = overrideSessionId + } + return super.loadMemoryVariables({ ...values, lastN: this.lastN }) + } + + async saveContext(inputValues: InputValues, outputValues: OutputValues, overrideSessionId = ''): Promise { + if (overrideSessionId) { + super.sessionId = overrideSessionId + } + return super.saveContext(inputValues, outputValues) + } + + async clear(overrideSessionId = ''): Promise { + if (overrideSessionId) { + super.sessionId = overrideSessionId + } + return super.clear() + } + + async getChatMessages(overrideSessionId = ''): Promise { + const id = overrideSessionId ?? this.sessionId + const memoryVariables = await this.loadMemoryVariables({}, id) + const baseMessages = memoryVariables[this.memoryKey] + return convertBaseMessagetoIMessage(baseMessages) + } + + async addChatMessages(msgArray: { text: string; type: MessageType }[], overrideSessionId = ''): Promise { + const id = overrideSessionId ?? this.sessionId + const input = msgArray.find((msg) => msg.type === 'userMessage') + const output = msgArray.find((msg) => msg.type === 'apiMessage') + const inputValues = { [this.inputKey ?? 'input']: input?.text } + const outputValues = { output: output?.text } + + await this.saveContext(inputValues, outputValues, id) + } + + async clearChatMessages(overrideSessionId = ''): Promise { + const id = overrideSessionId ?? 
this.sessionId + await this.clear(id) } } diff --git a/packages/components/nodes/responsesynthesizer/CompactRefine/CompactRefine.ts b/packages/components/nodes/responsesynthesizer/CompactRefine/CompactRefine.ts new file mode 100644 index 00000000..db998e1f --- /dev/null +++ b/packages/components/nodes/responsesynthesizer/CompactRefine/CompactRefine.ts @@ -0,0 +1,75 @@ +import { INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface' +import { ResponseSynthesizerClass } from '../base' + +class CompactRefine_LlamaIndex implements INode { + label: string + name: string + version: number + description: string + type: string + icon: string + category: string + baseClasses: string[] + tags: string[] + inputs: INodeParams[] + outputs: INodeOutputsValue[] + + constructor() { + this.label = 'Compact and Refine' + this.name = 'compactrefineLlamaIndex' + this.version = 1.0 + this.type = 'CompactRefine' + this.icon = 'compactrefine.svg' + this.category = 'Response Synthesizer' + this.description = + 'CompactRefine is a slight variation of Refine that first compacts the text chunks into the smallest possible number of chunks.' + this.baseClasses = [this.type, 'ResponseSynthesizer'] + this.tags = ['LlamaIndex'] + this.inputs = [ + { + label: 'Refine Prompt', + name: 'refinePrompt', + type: 'string', + rows: 4, + default: `The original query is as follows: {query} +We have provided an existing answer: {existingAnswer} +We have the opportunity to refine the existing answer (only if needed) with some more context below. +------------ +{context} +------------ +Given the new context, refine the original answer to better answer the query. If the context isn't useful, return the original answer. +Refined Answer:`, + warning: `Prompt can contains no variables, or up to 3 variables. 
Variables must be {existingAnswer}, {context} and {query}`, + optional: true + }, + { + label: 'Text QA Prompt', + name: 'textQAPrompt', + type: 'string', + rows: 4, + default: `Context information is below. +--------------------- +{context} +--------------------- +Given the context information and not prior knowledge, answer the query. +Query: {query} +Answer:`, + warning: `Prompt can contains no variables, or up to 2 variables. Variables must be {context} and {query}`, + optional: true + } + ] + } + + async init(nodeData: INodeData): Promise { + const refinePrompt = nodeData.inputs?.refinePrompt as string + const textQAPrompt = nodeData.inputs?.textQAPrompt as string + + const refinePromptTemplate = ({ context = '', existingAnswer = '', query = '' }) => + refinePrompt.replace('{existingAnswer}', existingAnswer).replace('{context}', context).replace('{query}', query) + const textQAPromptTemplate = ({ context = '', query = '' }) => textQAPrompt.replace('{context}', context).replace('{query}', query) + + return new ResponseSynthesizerClass({ textQAPromptTemplate, refinePromptTemplate, type: 'CompactAndRefine' }) + } +} + +module.exports = { nodeClass: CompactRefine_LlamaIndex } diff --git a/packages/components/nodes/responsesynthesizer/CompactRefine/compactrefine.svg b/packages/components/nodes/responsesynthesizer/CompactRefine/compactrefine.svg new file mode 100644 index 00000000..9ea95529 --- /dev/null +++ b/packages/components/nodes/responsesynthesizer/CompactRefine/compactrefine.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/packages/components/nodes/responsesynthesizer/Refine/Refine.ts b/packages/components/nodes/responsesynthesizer/Refine/Refine.ts new file mode 100644 index 00000000..267bc208 --- /dev/null +++ b/packages/components/nodes/responsesynthesizer/Refine/Refine.ts @@ -0,0 +1,75 @@ +import { INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface' +import { ResponseSynthesizerClass } from '../base' + +class 
Refine_LlamaIndex implements INode { + label: string + name: string + version: number + description: string + type: string + icon: string + category: string + baseClasses: string[] + tags: string[] + inputs: INodeParams[] + outputs: INodeOutputsValue[] + + constructor() { + this.label = 'Refine' + this.name = 'refineLlamaIndex' + this.version = 1.0 + this.type = 'Refine' + this.icon = 'refine.svg' + this.category = 'Response Synthesizer' + this.description = + 'Create and refine an answer by sequentially going through each retrieved text chunk. This makes a separate LLM call per Node. Good for more detailed answers.' + this.baseClasses = [this.type, 'ResponseSynthesizer'] + this.tags = ['LlamaIndex'] + this.inputs = [ + { + label: 'Refine Prompt', + name: 'refinePrompt', + type: 'string', + rows: 4, + default: `The original query is as follows: {query} +We have provided an existing answer: {existingAnswer} +We have the opportunity to refine the existing answer (only if needed) with some more context below. +------------ +{context} +------------ +Given the new context, refine the original answer to better answer the query. If the context isn't useful, return the original answer. +Refined Answer:`, + warning: `Prompt can contains no variables, or up to 3 variables. Variables must be {existingAnswer}, {context} and {query}`, + optional: true + }, + { + label: 'Text QA Prompt', + name: 'textQAPrompt', + type: 'string', + rows: 4, + default: `Context information is below. +--------------------- +{context} +--------------------- +Given the context information and not prior knowledge, answer the query. +Query: {query} +Answer:`, + warning: `Prompt can contains no variables, or up to 2 variables. 
Variables must be {context} and {query}`, + optional: true + } + ] + } + + async init(nodeData: INodeData): Promise { + const refinePrompt = nodeData.inputs?.refinePrompt as string + const textQAPrompt = nodeData.inputs?.textQAPrompt as string + + const refinePromptTemplate = ({ context = '', existingAnswer = '', query = '' }) => + refinePrompt.replace('{existingAnswer}', existingAnswer).replace('{context}', context).replace('{query}', query) + const textQAPromptTemplate = ({ context = '', query = '' }) => textQAPrompt.replace('{context}', context).replace('{query}', query) + + return new ResponseSynthesizerClass({ textQAPromptTemplate, refinePromptTemplate, type: 'Refine' }) + } +} + +module.exports = { nodeClass: Refine_LlamaIndex } diff --git a/packages/components/nodes/responsesynthesizer/Refine/refine.svg b/packages/components/nodes/responsesynthesizer/Refine/refine.svg new file mode 100644 index 00000000..1170c584 --- /dev/null +++ b/packages/components/nodes/responsesynthesizer/Refine/refine.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/packages/components/nodes/responsesynthesizer/SimpleResponseBuilder/SimpleResponseBuilder.ts b/packages/components/nodes/responsesynthesizer/SimpleResponseBuilder/SimpleResponseBuilder.ts new file mode 100644 index 00000000..cb880020 --- /dev/null +++ b/packages/components/nodes/responsesynthesizer/SimpleResponseBuilder/SimpleResponseBuilder.ts @@ -0,0 +1,35 @@ +import { INode, INodeOutputsValue, INodeParams } from '../../../src/Interface' +import { ResponseSynthesizerClass } from '../base' + +class SimpleResponseBuilder_LlamaIndex implements INode { + label: string + name: string + version: number + description: string + type: string + icon: string + category: string + baseClasses: string[] + tags: string[] + inputs: INodeParams[] + outputs: INodeOutputsValue[] + + constructor() { + this.label = 'Simple Response Builder' + this.name = 'simpleResponseBuilderLlamaIndex' + this.version = 1.0 + this.type = 
'SimpleResponseBuilder' + this.icon = 'simplerb.svg' + this.category = 'Response Synthesizer' + this.description = `Apply a query to a collection of text chunks, gathering the responses in an array, and return a combined string of all responses. Useful for individual queries on each text chunk.` + this.baseClasses = [this.type, 'ResponseSynthesizer'] + this.tags = ['LlamaIndex'] + this.inputs = [] + } + + async init(): Promise { + return new ResponseSynthesizerClass({ type: 'SimpleResponseBuilder' }) + } +} + +module.exports = { nodeClass: SimpleResponseBuilder_LlamaIndex } diff --git a/packages/components/nodes/responsesynthesizer/SimpleResponseBuilder/simplerb.svg b/packages/components/nodes/responsesynthesizer/SimpleResponseBuilder/simplerb.svg new file mode 100644 index 00000000..6f04fdc9 --- /dev/null +++ b/packages/components/nodes/responsesynthesizer/SimpleResponseBuilder/simplerb.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/packages/components/nodes/responsesynthesizer/TreeSummarize/TreeSummarize.ts b/packages/components/nodes/responsesynthesizer/TreeSummarize/TreeSummarize.ts new file mode 100644 index 00000000..44872786 --- /dev/null +++ b/packages/components/nodes/responsesynthesizer/TreeSummarize/TreeSummarize.ts @@ -0,0 +1,56 @@ +import { INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface' +import { ResponseSynthesizerClass } from '../base' + +class TreeSummarize_LlamaIndex implements INode { + label: string + name: string + version: number + description: string + type: string + icon: string + category: string + baseClasses: string[] + tags: string[] + inputs: INodeParams[] + outputs: INodeOutputsValue[] + + constructor() { + this.label = 'TreeSummarize' + this.name = 'treeSummarizeLlamaIndex' + this.version = 1.0 + this.type = 'TreeSummarize' + this.icon = 'treesummarize.svg' + this.category = 'Response Synthesizer' + this.description = + 'Given a set of text chunks and the query, recursively construct a 
tree and return the root node as the response. Good for summarization purposes.' + this.baseClasses = [this.type, 'ResponseSynthesizer'] + this.tags = ['LlamaIndex'] + this.inputs = [ + { + label: 'Prompt', + name: 'prompt', + type: 'string', + rows: 4, + default: `Context information from multiple sources is below. +--------------------- +{context} +--------------------- +Given the information from multiple sources and not prior knowledge, answer the query. +Query: {query} +Answer:`, + warning: `Prompt can contains no variables, or up to 2 variables. Variables must be {context} and {query}`, + optional: true + } + ] + } + + async init(nodeData: INodeData): Promise { + const prompt = nodeData.inputs?.prompt as string + + const textQAPromptTemplate = ({ context = '', query = '' }) => prompt.replace('{context}', context).replace('{query}', query) + + return new ResponseSynthesizerClass({ textQAPromptTemplate, type: 'TreeSummarize' }) + } +} + +module.exports = { nodeClass: TreeSummarize_LlamaIndex } diff --git a/packages/components/nodes/responsesynthesizer/TreeSummarize/treesummarize.svg b/packages/components/nodes/responsesynthesizer/TreeSummarize/treesummarize.svg new file mode 100644 index 00000000..f81a3a53 --- /dev/null +++ b/packages/components/nodes/responsesynthesizer/TreeSummarize/treesummarize.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/packages/components/nodes/responsesynthesizer/base.ts b/packages/components/nodes/responsesynthesizer/base.ts new file mode 100644 index 00000000..68fd7f1a --- /dev/null +++ b/packages/components/nodes/responsesynthesizer/base.ts @@ -0,0 +1,11 @@ +export class ResponseSynthesizerClass { + type: string + textQAPromptTemplate?: any + refinePromptTemplate?: any + + constructor(params: { type: string; textQAPromptTemplate?: any; refinePromptTemplate?: any }) { + this.type = params.type + this.textQAPromptTemplate = params.textQAPromptTemplate + this.refinePromptTemplate = params.refinePromptTemplate + } +} diff 
--git a/packages/components/nodes/vectorstores/Pinecone/Pinecone_LlamaIndex.ts b/packages/components/nodes/vectorstores/Pinecone/Pinecone_LlamaIndex.ts new file mode 100644 index 00000000..683e6f25 --- /dev/null +++ b/packages/components/nodes/vectorstores/Pinecone/Pinecone_LlamaIndex.ts @@ -0,0 +1,366 @@ +import { + BaseNode, + Document, + Metadata, + VectorStore, + VectorStoreQuery, + VectorStoreQueryResult, + serviceContextFromDefaults, + storageContextFromDefaults, + VectorStoreIndex, + BaseEmbedding +} from 'llamaindex' +import { FetchResponse, Index, Pinecone, ScoredPineconeRecord } from '@pinecone-database/pinecone' +import { flatten } from 'lodash' +import { Document as LCDocument } from 'langchain/document' +import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface' +import { flattenObject, getCredentialData, getCredentialParam } from '../../../src/utils' + +class PineconeLlamaIndex_VectorStores implements INode { + label: string + name: string + version: number + description: string + type: string + icon: string + category: string + tags: string[] + baseClasses: string[] + inputs: INodeParams[] + credential: INodeParams + + constructor() { + this.label = 'Pinecone' + this.name = 'pineconeLlamaIndex' + this.version = 1.0 + this.type = 'Pinecone' + this.icon = 'pinecone.png' + this.category = 'Vector Stores' + this.description = `Upsert embedded data and perform similarity search upon query using Pinecone, a leading fully managed hosted vector database` + this.baseClasses = [this.type, 'VectorIndexRetriever'] + this.tags = ['LlamaIndex'] + this.credential = { + label: 'Connect Credential', + name: 'credential', + type: 'credential', + credentialNames: ['pineconeApi'] + } + this.inputs = [ + { + label: 'Document', + name: 'document', + type: 'Document', + list: true, + optional: true + }, + { + label: 'Chat Model', + name: 'model', + type: 'BaseChatModel_LlamaIndex' + }, + { + label: 'Embeddings', + name: 'embeddings', + type: 
'BaseEmbedding_LlamaIndex' + }, + { + label: 'Pinecone Index', + name: 'pineconeIndex', + type: 'string' + }, + { + label: 'Pinecone Namespace', + name: 'pineconeNamespace', + type: 'string', + placeholder: 'my-first-namespace', + additionalParams: true, + optional: true + }, + { + label: 'Pinecone Metadata Filter', + name: 'pineconeMetadataFilter', + type: 'json', + optional: true, + additionalParams: true + }, + { + label: 'Top K', + name: 'topK', + description: 'Number of top results to fetch. Default to 4', + placeholder: '4', + type: 'number', + additionalParams: true, + optional: true + } + ] + } + + //@ts-ignore + vectorStoreMethods = { + async upsert(nodeData: INodeData, options: ICommonObject): Promise { + const indexName = nodeData.inputs?.pineconeIndex as string + const pineconeNamespace = nodeData.inputs?.pineconeNamespace as string + const docs = nodeData.inputs?.document as LCDocument[] + const embeddings = nodeData.inputs?.embeddings as BaseEmbedding + const model = nodeData.inputs?.model + + const credentialData = await getCredentialData(nodeData.credential ?? '', options) + const pineconeApiKey = getCredentialParam('pineconeApiKey', credentialData, nodeData) + const pineconeEnv = getCredentialParam('pineconeEnv', credentialData, nodeData) + + const pcvs = new PineconeVectorStore({ + indexName, + apiKey: pineconeApiKey, + environment: pineconeEnv, + namespace: pineconeNamespace + }) + + const flattenDocs = docs && docs.length ? 
flatten(docs) : [] + const finalDocs = [] + for (let i = 0; i < flattenDocs.length; i += 1) { + if (flattenDocs[i] && flattenDocs[i].pageContent) { + finalDocs.push(new LCDocument(flattenDocs[i])) + } + } + + const llamadocs: Document[] = [] + for (const doc of finalDocs) { + llamadocs.push(new Document({ text: doc.pageContent, metadata: doc.metadata })) + } + + const serviceContext = serviceContextFromDefaults({ llm: model, embedModel: embeddings }) + const storageContext = await storageContextFromDefaults({ vectorStore: pcvs }) + + try { + await VectorStoreIndex.fromDocuments(llamadocs, { serviceContext, storageContext }) + } catch (e) { + throw new Error(e) + } + } + } + + async init(nodeData: INodeData, _: string, options: ICommonObject): Promise { + const indexName = nodeData.inputs?.pineconeIndex as string + const pineconeNamespace = nodeData.inputs?.pineconeNamespace as string + const pineconeMetadataFilter = nodeData.inputs?.pineconeMetadataFilter + const embeddings = nodeData.inputs?.embeddings as BaseEmbedding + const model = nodeData.inputs?.model + const topK = nodeData.inputs?.topK as string + const k = topK ? parseFloat(topK) : 4 + + const credentialData = await getCredentialData(nodeData.credential ?? '', options) + const pineconeApiKey = getCredentialParam('pineconeApiKey', credentialData, nodeData) + const pineconeEnv = getCredentialParam('pineconeEnv', credentialData, nodeData) + + const obj: PineconeParams = { + indexName, + apiKey: pineconeApiKey, + environment: pineconeEnv + } + + if (pineconeNamespace) obj.namespace = pineconeNamespace + if (pineconeMetadataFilter) { + const metadatafilter = typeof pineconeMetadataFilter === 'object' ? 
pineconeMetadataFilter : JSON.parse(pineconeMetadataFilter) + obj.queryFilter = metadatafilter + } + + const pcvs = new PineconeVectorStore(obj) + + const serviceContext = serviceContextFromDefaults({ llm: model, embedModel: embeddings }) + const storageContext = await storageContextFromDefaults({ vectorStore: pcvs }) + + const index = await VectorStoreIndex.init({ + nodes: [], + storageContext, + serviceContext + }) + + const retriever = index.asRetriever() + retriever.similarityTopK = k + ;(retriever as any).serviceContext = serviceContext + + return retriever + } +} + +type PineconeParams = { + indexName: string + apiKey: string + environment: string + namespace?: string + chunkSize?: number + queryFilter?: object +} + +class PineconeVectorStore implements VectorStore { + storesText: boolean = true + db?: Pinecone + indexName: string + apiKey: string + environment: string + chunkSize: number + namespace?: string + queryFilter?: object + + constructor(params: PineconeParams) { + this.indexName = params?.indexName + this.apiKey = params?.apiKey + this.environment = params?.environment + this.namespace = params?.namespace ?? '' + this.chunkSize = params?.chunkSize ?? Number.parseInt(process.env.PINECONE_CHUNK_SIZE ?? '100') + this.queryFilter = params?.queryFilter ?? 
{} + } + + private async getDb(): Promise { + if (!this.db) { + this.db = new Pinecone({ + apiKey: this.apiKey, + environment: this.environment + }) + } + return Promise.resolve(this.db) + } + + client() { + return this.getDb() + } + + async index() { + const db: Pinecone = await this.getDb() + return db.Index(this.indexName) + } + + async clearIndex() { + const db: Pinecone = await this.getDb() + return await db.index(this.indexName).deleteAll() + } + + async add(embeddingResults: BaseNode[]): Promise { + if (embeddingResults.length == 0) { + return Promise.resolve([]) + } + + const idx: Index = await this.index() + const nodes = embeddingResults.map(this.nodeToRecord) + + for (let i = 0; i < nodes.length; i += this.chunkSize) { + const chunk = nodes.slice(i, i + this.chunkSize) + const result = await this.saveChunk(idx, chunk) + if (!result) { + return Promise.reject() + } + } + return Promise.resolve([]) + } + + protected async saveChunk(idx: Index, chunk: any) { + try { + const namespace = idx.namespace(this.namespace ?? '') + await namespace.upsert(chunk) + return true + } catch (err) { + return false + } + } + + async delete(refDocId: string): Promise { + const idx = await this.index() + const namespace = idx.namespace(this.namespace ?? '') + return namespace.deleteOne(refDocId) + } + + async query(query: VectorStoreQuery): Promise { + const queryOptions: any = { + vector: query.queryEmbedding, + topK: query.similarityTopK, + filter: this.queryFilter + } + + const idx = await this.index() + const namespace = idx.namespace(this.namespace ?? 
'') + const results = await namespace.query(queryOptions) + + const idList = results.matches.map((row) => row.id) + const records: FetchResponse = await namespace.fetch(idList) + const rows = Object.values(records.records) + + const nodes = rows.map((row) => { + return new Document({ + id_: row.id, + text: this.textFromResultRow(row), + metadata: this.metaWithoutText(row.metadata), + embedding: row.values + }) + }) + + const result = { + nodes: nodes, + similarities: results.matches.map((row) => row.score || 999), + ids: results.matches.map((row) => row.id) + } + + return Promise.resolve(result) + } + + /** + * Required by VectorStore interface. Currently ignored. + */ + persist(): Promise { + return Promise.resolve() + } + + textFromResultRow(row: ScoredPineconeRecord): string { + return row.metadata?.text ?? '' + } + + metaWithoutText(meta: Metadata): any { + return Object.keys(meta) + .filter((key) => key != 'text') + .reduce((acc: any, key: string) => { + acc[key] = meta[key] + return acc + }, {}) + } + + nodeToRecord(node: BaseNode) { + let id: any = node.id_.length ? 
node.id_ : null + return { + id: id, + values: node.getEmbedding(), + metadata: { + ...cleanupMetadata(node.metadata), + text: (node as any).text + } + } + } +} + +const cleanupMetadata = (nodeMetadata: ICommonObject) => { + // Pinecone doesn't support nested objects, so we flatten them + const documentMetadata: any = { ...nodeMetadata } + // preserve string arrays which are allowed + const stringArrays: Record = {} + for (const key of Object.keys(documentMetadata)) { + if (Array.isArray(documentMetadata[key]) && documentMetadata[key].every((el: any) => typeof el === 'string')) { + stringArrays[key] = documentMetadata[key] + delete documentMetadata[key] + } + } + const metadata: { + [key: string]: string | number | boolean | string[] | null + } = { + ...flattenObject(documentMetadata), + ...stringArrays + } + // Pinecone doesn't support null values, so we remove them + for (const key of Object.keys(metadata)) { + if (metadata[key] == null) { + delete metadata[key] + } else if (typeof metadata[key] === 'object' && Object.keys(metadata[key] as unknown as object).length === 0) { + delete metadata[key] + } + } + return metadata +} + +module.exports = { nodeClass: PineconeLlamaIndex_VectorStores } diff --git a/packages/components/nodes/vectorstores/SimpleStore/SimpleStore.ts b/packages/components/nodes/vectorstores/SimpleStore/SimpleStore.ts new file mode 100644 index 00000000..eeef6f69 --- /dev/null +++ b/packages/components/nodes/vectorstores/SimpleStore/SimpleStore.ts @@ -0,0 +1,124 @@ +import path from 'path' +import { flatten } from 'lodash' +import { storageContextFromDefaults, serviceContextFromDefaults, VectorStoreIndex, Document } from 'llamaindex' +import { Document as LCDocument } from 'langchain/document' +import { INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface' +import { getUserHome } from '../../../src' + +class SimpleStoreUpsert_LlamaIndex_VectorStores implements INode { + label: string + name: string + version: number + 
description: string + type: string + icon: string + category: string + baseClasses: string[] + tags: string[] + inputs: INodeParams[] + outputs: INodeOutputsValue[] + + constructor() { + this.label = 'SimpleStore' + this.name = 'simpleStoreLlamaIndex' + this.version = 1.0 + this.type = 'SimpleVectorStore' + this.icon = 'simplevs.svg' + this.category = 'Vector Stores' + this.description = 'Upsert embedded data to local path and perform similarity search' + this.baseClasses = [this.type, 'VectorIndexRetriever'] + this.tags = ['LlamaIndex'] + this.inputs = [ + { + label: 'Document', + name: 'document', + type: 'Document', + list: true, + optional: true + }, + { + label: 'Chat Model', + name: 'model', + type: 'BaseChatModel_LlamaIndex' + }, + { + label: 'Embeddings', + name: 'embeddings', + type: 'BaseEmbedding_LlamaIndex' + }, + { + label: 'Base Path to store', + name: 'basePath', + description: + 'Path to store persist embeddings indexes with persistence. If not specified, default to same path where database is stored', + type: 'string', + optional: true + }, + { + label: 'Top K', + name: 'topK', + description: 'Number of top results to fetch. Default to 4', + placeholder: '4', + type: 'number', + optional: true + } + ] + } + + //@ts-ignore + vectorStoreMethods = { + async upsert(nodeData: INodeData): Promise { + const basePath = nodeData.inputs?.basePath as string + const docs = nodeData.inputs?.document as LCDocument[] + const embeddings = nodeData.inputs?.embeddings + const model = nodeData.inputs?.model + + let filePath = '' + if (!basePath) filePath = path.join(getUserHome(), '.flowise', 'llamaindex') + else filePath = basePath + + const flattenDocs = docs && docs.length ? 
flatten(docs) : [] + const finalDocs = [] + for (let i = 0; i < flattenDocs.length; i += 1) { + finalDocs.push(new LCDocument(flattenDocs[i])) + } + + const llamadocs: Document[] = [] + for (const doc of finalDocs) { + llamadocs.push(new Document({ text: doc.pageContent, metadata: doc.metadata })) + } + + const serviceContext = serviceContextFromDefaults({ llm: model, embedModel: embeddings }) + const storageContext = await storageContextFromDefaults({ persistDir: filePath }) + + try { + await VectorStoreIndex.fromDocuments(llamadocs, { serviceContext, storageContext }) + } catch (e) { + throw new Error(e) + } + } + } + + async init(nodeData: INodeData): Promise { + const basePath = nodeData.inputs?.basePath as string + const embeddings = nodeData.inputs?.embeddings + const model = nodeData.inputs?.model + const topK = nodeData.inputs?.topK as string + const k = topK ? parseFloat(topK) : 4 + + let filePath = '' + if (!basePath) filePath = path.join(getUserHome(), '.flowise', 'llamaindex') + else filePath = basePath + + const serviceContext = serviceContextFromDefaults({ llm: model, embedModel: embeddings }) + const storageContext = await storageContextFromDefaults({ persistDir: filePath }) + + const index = await VectorStoreIndex.init({ storageContext, serviceContext }) + const retriever = index.asRetriever() + retriever.similarityTopK = k + + return retriever + } +} + +module.exports = { nodeClass: SimpleStoreUpsert_LlamaIndex_VectorStores } diff --git a/packages/components/nodes/vectorstores/SimpleStore/simplevs.svg b/packages/components/nodes/vectorstores/SimpleStore/simplevs.svg new file mode 100644 index 00000000..52c74432 --- /dev/null +++ b/packages/components/nodes/vectorstores/SimpleStore/simplevs.svg @@ -0,0 +1,6 @@ + + + + + + \ No newline at end of file diff --git a/packages/components/package.json b/packages/components/package.json index bea9a7a0..d1485b37 100644 --- a/packages/components/package.json +++ b/packages/components/package.json @@ -21,7 
+21,7 @@ "@aws-sdk/client-s3": "^3.427.0", "@dqbd/tiktoken": "^1.0.7", "@elastic/elasticsearch": "^8.9.0", - "@getzep/zep-js": "^0.6.3", + "@getzep/zep-js": "^0.9.0", "@gomomento/sdk": "^1.51.1", "@gomomento/sdk-core": "^1.51.1", "@google-ai/generativelanguage": "^0.2.1", @@ -54,6 +54,7 @@ "langfuse-langchain": "^1.0.31", "langsmith": "^0.0.32", "linkifyjs": "^4.1.1", + "llamaindex": "^0.0.30", "llmonitor": "^0.5.5", "mammoth": "^1.5.1", "moment": "^2.29.3", diff --git a/packages/components/src/Interface.ts b/packages/components/src/Interface.ts index 6752f944..53d72cb4 100644 --- a/packages/components/src/Interface.ts +++ b/packages/components/src/Interface.ts @@ -91,6 +91,7 @@ export interface INodeProperties { version: number category: string baseClasses: string[] + tags?: string[] description?: string filePath?: string badge?: string @@ -107,10 +108,6 @@ export interface INode extends INodeProperties { search: (nodeData: INodeData, options?: ICommonObject) => Promise delete: (nodeData: INodeData, options?: ICommonObject) => Promise } - memoryMethods?: { - clearSessionMemory: (nodeData: INodeData, options?: ICommonObject) => Promise - getChatMessages: (nodeData: INodeData, options?: ICommonObject) => Promise - } init?(nodeData: INodeData, input: string, options?: ICommonObject): Promise run?(nodeData: INodeData, input: string, options?: ICommonObject): Promise } diff --git a/packages/components/src/utils.ts b/packages/components/src/utils.ts index 404f7c75..ceeb402a 100644 --- a/packages/components/src/utils.ts +++ b/packages/components/src/utils.ts @@ -8,7 +8,7 @@ import { DataSource } from 'typeorm' import { ICommonObject, IDatabaseEntity, IMessage, INodeData } from './Interface' import { AES, enc } from 'crypto-js' import { ChatMessageHistory } from 'langchain/memory' -import { AIMessage, HumanMessage } from 'langchain/schema' +import { AIMessage, HumanMessage, BaseMessage } from 'langchain/schema' export const numberOrExpressionRegex = 
'^(\\d+\\.?\\d*|{{.*}})$' //return true if string consists only numbers OR expression {{}} export const notEmptyRegex = '(.|\\s)*\\S(.|\\s)*' //return true if string is not empty or blank @@ -587,3 +587,54 @@ export const convertSchemaToZod = (schema: string | object): ICommonObject => { throw new Error(e) } } + +/** + * Flatten nested object + * @param {ICommonObject} obj + * @param {string} parentKey + * @returns {ICommonObject} + */ +export const flattenObject = (obj: ICommonObject, parentKey?: string) => { + let result: any = {} + + Object.keys(obj).forEach((key) => { + const value = obj[key] + const _key = parentKey ? parentKey + '.' + key : key + if (typeof value === 'object') { + result = { ...result, ...flattenObject(value, _key) } + } else { + result[_key] = value + } + }) + + return result +} + +/** + * Convert BaseMessage to IMessage + * @param {ICommonObject} obj + * @param {string} parentKey + * @returns {ICommonObject} + */ +export const convertBaseMessagetoIMessage = (messages: BaseMessage[]): IMessage[] => { + const formatmessages: IMessage[] = [] + for (const m of messages) { + if (m._getType() === 'human') { + formatmessages.push({ + message: m.content as string, + type: 'userMessage' + }) + } else if (m._getType() === 'ai') { + formatmessages.push({ + message: m.content as string, + type: 'apiMessage' + }) + } else if (m._getType() === 'system') { + formatmessages.push({ + message: m.content as string, + type: 'apiMessage' + }) + } + } + return formatmessages +} diff --git a/packages/server/marketplaces/chatflows/Context Chat Engine.json b/packages/server/marketplaces/chatflows/Context Chat Engine.json new file mode 100644 index 00000000..971aeea5 --- /dev/null +++ b/packages/server/marketplaces/chatflows/Context Chat Engine.json @@ -0,0 +1,855 @@ +{ + "description": "Answer question based on retrieved documents (context) with built-in memory to remember conversation using LlamaIndex", + "badge": "NEW", + "nodes": [ + { + "width": 300, + 
"height": 438, + "id": "textFile_0", + "position": { + "x": 221.215421786192, + "y": 94.91489477412404 + }, + "type": "customNode", + "data": { + "id": "textFile_0", + "label": "Text File", + "version": 3, + "name": "textFile", + "type": "Document", + "baseClasses": ["Document"], + "category": "Document Loaders", + "description": "Load data from text files", + "inputParams": [ + { + "label": "Txt File", + "name": "txtFile", + "type": "file", + "fileType": ".txt, .html, .aspx, .asp, .cpp, .c, .cs, .css, .go, .h, .java, .js, .less, .ts, .php, .proto, .python, .py, .rst, .ruby, .rb, .rs, .scala, .sc, .scss, .sol, .sql, .swift, .markdown, .md, .tex, .ltx, .vb, .xml", + "id": "textFile_0-input-txtFile-file" + }, + { + "label": "Metadata", + "name": "metadata", + "type": "json", + "optional": true, + "additionalParams": true, + "id": "textFile_0-input-metadata-json" + } + ], + "inputAnchors": [ + { + "label": "Text Splitter", + "name": "textSplitter", + "type": "TextSplitter", + "optional": true, + "id": "textFile_0-input-textSplitter-TextSplitter" + } + ], + "inputs": { + "textSplitter": "{{recursiveCharacterTextSplitter_0.data.instance}}", + "metadata": "" + }, + "outputAnchors": [ + { + "name": "output", + "label": "Output", + "type": "options", + "options": [ + { + "id": "textFile_0-output-document-Document", + "name": "document", + "label": "Document", + "type": "Document" + }, + { + "id": "textFile_0-output-text-string|json", + "name": "text", + "label": "Text", + "type": "string | json" + } + ], + "default": "document" + } + ], + "outputs": { + "output": "document" + }, + "selected": false + }, + "selected": false, + "positionAbsolute": { + "x": 221.215421786192, + "y": 94.91489477412404 + }, + "dragging": false + }, + { + "width": 300, + "height": 429, + "id": "recursiveCharacterTextSplitter_0", + "position": { + "x": -203.4868320229876, + "y": 101.32475976329766 + }, + "type": "customNode", + "data": { + "id": "recursiveCharacterTextSplitter_0", + "label": 
"Recursive Character Text Splitter", + "version": 2, + "name": "recursiveCharacterTextSplitter", + "type": "RecursiveCharacterTextSplitter", + "baseClasses": ["RecursiveCharacterTextSplitter", "TextSplitter", "BaseDocumentTransformer", "Runnable"], + "category": "Text Splitters", + "description": "Split documents recursively by different characters - starting with \"\\n\\n\", then \"\\n\", then \" \"", + "inputParams": [ + { + "label": "Chunk Size", + "name": "chunkSize", + "type": "number", + "default": 1000, + "optional": true, + "id": "recursiveCharacterTextSplitter_0-input-chunkSize-number" + }, + { + "label": "Chunk Overlap", + "name": "chunkOverlap", + "type": "number", + "optional": true, + "id": "recursiveCharacterTextSplitter_0-input-chunkOverlap-number" + }, + { + "label": "Custom Separators", + "name": "separators", + "type": "string", + "rows": 4, + "description": "Array of custom separators to determine when to split the text, will override the default separators", + "placeholder": "[\"|\", \"##\", \">\", \"-\"]", + "additionalParams": true, + "optional": true, + "id": "recursiveCharacterTextSplitter_0-input-separators-string" + } + ], + "inputAnchors": [], + "inputs": { + "chunkSize": 1000, + "chunkOverlap": "", + "separators": "" + }, + "outputAnchors": [ + { + "id": "recursiveCharacterTextSplitter_0-output-recursiveCharacterTextSplitter-RecursiveCharacterTextSplitter|TextSplitter|BaseDocumentTransformer|Runnable", + "name": "recursiveCharacterTextSplitter", + "label": "RecursiveCharacterTextSplitter", + "type": "RecursiveCharacterTextSplitter | TextSplitter | BaseDocumentTransformer | Runnable" + } + ], + "outputs": {}, + "selected": false + }, + "selected": false, + "positionAbsolute": { + "x": -203.4868320229876, + "y": 101.32475976329766 + }, + "dragging": false + }, + { + "width": 300, + "height": 334, + "id": "openAIEmbedding_LlamaIndex_0", + "position": { + "x": 176.27434578083106, + "y": 953.3664298122493 + }, + "type": "customNode", + 
"data": { + "id": "openAIEmbedding_LlamaIndex_0", + "label": "OpenAI Embedding", + "version": 1, + "name": "openAIEmbedding_LlamaIndex", + "type": "OpenAIEmbedding", + "baseClasses": ["OpenAIEmbedding", "BaseEmbedding_LlamaIndex", "BaseEmbedding"], + "tags": ["LlamaIndex"], + "category": "Embeddings", + "description": "OpenAI Embedding with LlamaIndex implementation", + "inputParams": [ + { + "label": "Connect Credential", + "name": "credential", + "type": "credential", + "credentialNames": ["openAIApi"], + "id": "openAIEmbedding_LlamaIndex_0-input-credential-credential" + }, + { + "label": "Timeout", + "name": "timeout", + "type": "number", + "optional": true, + "additionalParams": true, + "id": "openAIEmbedding_LlamaIndex_0-input-timeout-number" + }, + { + "label": "BasePath", + "name": "basepath", + "type": "string", + "optional": true, + "additionalParams": true, + "id": "openAIEmbedding_LlamaIndex_0-input-basepath-string" + } + ], + "inputAnchors": [], + "inputs": { + "timeout": "", + "basepath": "" + }, + "outputAnchors": [ + { + "id": "openAIEmbedding_LlamaIndex_0-output-openAIEmbedding_LlamaIndex-OpenAIEmbedding|BaseEmbedding_LlamaIndex|BaseEmbedding", + "name": "openAIEmbedding_LlamaIndex", + "label": "OpenAIEmbedding", + "type": "OpenAIEmbedding | BaseEmbedding_LlamaIndex | BaseEmbedding" + } + ], + "outputs": {}, + "selected": false + }, + "selected": false, + "positionAbsolute": { + "x": 176.27434578083106, + "y": 953.3664298122493 + }, + "dragging": false + }, + { + "width": 300, + "height": 585, + "id": "pineconeLlamaIndex_0", + "position": { + "x": 609.3087433345761, + "y": 488.2141798951578 + }, + "type": "customNode", + "data": { + "id": "pineconeLlamaIndex_0", + "label": "Pinecone", + "version": 1, + "name": "pineconeLlamaIndex", + "type": "Pinecone", + "baseClasses": ["Pinecone", "VectorIndexRetriever"], + "tags": ["LlamaIndex"], + "category": "Vector Stores", + "description": "Upsert embedded data and perform similarity search upon query using 
Pinecone, a leading fully managed hosted vector database", + "inputParams": [ + { + "label": "Connect Credential", + "name": "credential", + "type": "credential", + "credentialNames": ["pineconeApi"], + "id": "pineconeLlamaIndex_0-input-credential-credential" + }, + { + "label": "Pinecone Index", + "name": "pineconeIndex", + "type": "string", + "id": "pineconeLlamaIndex_0-input-pineconeIndex-string" + }, + { + "label": "Pinecone Namespace", + "name": "pineconeNamespace", + "type": "string", + "placeholder": "my-first-namespace", + "additionalParams": true, + "optional": true, + "id": "pineconeLlamaIndex_0-input-pineconeNamespace-string" + }, + { + "label": "Pinecone Metadata Filter", + "name": "pineconeMetadataFilter", + "type": "json", + "optional": true, + "additionalParams": true, + "id": "pineconeLlamaIndex_0-input-pineconeMetadataFilter-json" + }, + { + "label": "Top K", + "name": "topK", + "description": "Number of top results to fetch. Default to 4", + "placeholder": "4", + "type": "number", + "additionalParams": true, + "optional": true, + "id": "pineconeLlamaIndex_0-input-topK-number" + } + ], + "inputAnchors": [ + { + "label": "Document", + "name": "document", + "type": "Document", + "list": true, + "optional": true, + "id": "pineconeLlamaIndex_0-input-document-Document" + }, + { + "label": "Chat Model", + "name": "model", + "type": "BaseChatModel_LlamaIndex", + "id": "pineconeLlamaIndex_0-input-model-BaseChatModel_LlamaIndex" + }, + { + "label": "Embeddings", + "name": "embeddings", + "type": "BaseEmbedding_LlamaIndex", + "id": "pineconeLlamaIndex_0-input-embeddings-BaseEmbedding_LlamaIndex" + } + ], + "inputs": { + "document": ["{{textFile_0.data.instance}}"], + "model": "{{chatOpenAI_LlamaIndex_1.data.instance}}", + "embeddings": "{{openAIEmbedding_LlamaIndex_0.data.instance}}", + "pineconeIndex": "", + "pineconeNamespace": "", + "pineconeMetadataFilter": "", + "topK": "" + }, + "outputAnchors": [ + { + "id": 
"pineconeLlamaIndex_0-output-pineconeLlamaIndex-Pinecone|VectorIndexRetriever", + "name": "pineconeLlamaIndex", + "label": "Pinecone", + "type": "Pinecone | VectorIndexRetriever" + } + ], + "outputs": {}, + "selected": false + }, + "selected": false, + "positionAbsolute": { + "x": 609.3087433345761, + "y": 488.2141798951578 + }, + "dragging": false + }, + { + "width": 300, + "height": 529, + "id": "chatOpenAI_LlamaIndex_1", + "position": { + "x": -195.15244974578656, + "y": 584.9467028201428 + }, + "type": "customNode", + "data": { + "id": "chatOpenAI_LlamaIndex_1", + "label": "ChatOpenAI", + "version": 1, + "name": "chatOpenAI_LlamaIndex", + "type": "ChatOpenAI", + "baseClasses": ["ChatOpenAI", "BaseChatModel_LlamaIndex"], + "tags": ["LlamaIndex"], + "category": "Chat Models", + "description": "Wrapper around OpenAI Chat LLM with LlamaIndex implementation", + "inputParams": [ + { + "label": "Connect Credential", + "name": "credential", + "type": "credential", + "credentialNames": ["openAIApi"], + "id": "chatOpenAI_LlamaIndex_1-input-credential-credential" + }, + { + "label": "Model Name", + "name": "modelName", + "type": "options", + "options": [ + { + "label": "gpt-4", + "name": "gpt-4" + }, + { + "label": "gpt-4-1106-preview", + "name": "gpt-4-1106-preview" + }, + { + "label": "gpt-4-vision-preview", + "name": "gpt-4-vision-preview" + }, + { + "label": "gpt-4-0613", + "name": "gpt-4-0613" + }, + { + "label": "gpt-4-32k", + "name": "gpt-4-32k" + }, + { + "label": "gpt-4-32k-0613", + "name": "gpt-4-32k-0613" + }, + { + "label": "gpt-3.5-turbo", + "name": "gpt-3.5-turbo" + }, + { + "label": "gpt-3.5-turbo-1106", + "name": "gpt-3.5-turbo-1106" + }, + { + "label": "gpt-3.5-turbo-0613", + "name": "gpt-3.5-turbo-0613" + }, + { + "label": "gpt-3.5-turbo-16k", + "name": "gpt-3.5-turbo-16k" + }, + { + "label": "gpt-3.5-turbo-16k-0613", + "name": "gpt-3.5-turbo-16k-0613" + } + ], + "default": "gpt-3.5-turbo", + "optional": true, + "id": 
"chatOpenAI_LlamaIndex_1-input-modelName-options" + }, + { + "label": "Temperature", + "name": "temperature", + "type": "number", + "step": 0.1, + "default": 0.9, + "optional": true, + "id": "chatOpenAI_LlamaIndex_1-input-temperature-number" + }, + { + "label": "Max Tokens", + "name": "maxTokens", + "type": "number", + "step": 1, + "optional": true, + "additionalParams": true, + "id": "chatOpenAI_LlamaIndex_1-input-maxTokens-number" + }, + { + "label": "Top Probability", + "name": "topP", + "type": "number", + "step": 0.1, + "optional": true, + "additionalParams": true, + "id": "chatOpenAI_LlamaIndex_1-input-topP-number" + }, + { + "label": "Timeout", + "name": "timeout", + "type": "number", + "step": 1, + "optional": true, + "additionalParams": true, + "id": "chatOpenAI_LlamaIndex_1-input-timeout-number" + } + ], + "inputAnchors": [], + "inputs": { + "modelName": "gpt-3.5-turbo-16k", + "temperature": 0.9, + "maxTokens": "", + "topP": "", + "timeout": "" + }, + "outputAnchors": [ + { + "id": "chatOpenAI_LlamaIndex_1-output-chatOpenAI_LlamaIndex-ChatOpenAI|BaseChatModel_LlamaIndex", + "name": "chatOpenAI_LlamaIndex", + "label": "ChatOpenAI", + "type": "ChatOpenAI | BaseChatModel_LlamaIndex" + } + ], + "outputs": {}, + "selected": false + }, + "selected": false, + "positionAbsolute": { + "x": -195.15244974578656, + "y": 584.9467028201428 + }, + "dragging": false + }, + { + "width": 300, + "height": 513, + "id": "contextChatEngine_0", + "position": { + "x": 1550.2553933740128, + "y": 270.7914631777829 + }, + "type": "customNode", + "data": { + "id": "contextChatEngine_0", + "label": "Context Chat Engine", + "version": 1, + "name": "contextChatEngine", + "type": "ContextChatEngine", + "baseClasses": ["ContextChatEngine"], + "tags": ["LlamaIndex"], + "category": "Engine", + "description": "Answer question based on retrieved documents (context) with built-in memory to remember conversation", + "inputParams": [ + { + "label": "System Message", + "name": 
"systemMessagePrompt", + "type": "string", + "rows": 4, + "optional": true, + "placeholder": "I want you to act as a document that I am having a conversation with. Your name is \"AI Assistant\". You will provide me with answers from the given info. If the answer is not included, say exactly \"Hmm, I am not sure.\" and stop after that. Refuse to answer any question not about the info. Never break character.", + "id": "contextChatEngine_0-input-systemMessagePrompt-string" + } + ], + "inputAnchors": [ + { + "label": "Chat Model", + "name": "model", + "type": "BaseChatModel_LlamaIndex", + "id": "contextChatEngine_0-input-model-BaseChatModel_LlamaIndex" + }, + { + "label": "Vector Store Retriever", + "name": "vectorStoreRetriever", + "type": "VectorIndexRetriever", + "id": "contextChatEngine_0-input-vectorStoreRetriever-VectorIndexRetriever" + }, + { + "label": "Memory", + "name": "memory", + "type": "BaseChatMemory", + "id": "contextChatEngine_0-input-memory-BaseChatMemory" + } + ], + "inputs": { + "model": "{{chatOpenAI_LlamaIndex_2.data.instance}}", + "vectorStoreRetriever": "{{pineconeLlamaIndex_0.data.instance}}", + "memory": "{{RedisBackedChatMemory_0.data.instance}}", + "systemMessagePrompt": "" + }, + "outputAnchors": [ + { + "id": "contextChatEngine_0-output-contextChatEngine-ContextChatEngine", + "name": "contextChatEngine", + "label": "ContextChatEngine", + "type": "ContextChatEngine" + } + ], + "outputs": {}, + "selected": false + }, + "selected": false, + "positionAbsolute": { + "x": 1550.2553933740128, + "y": 270.7914631777829 + }, + "dragging": false + }, + { + "width": 300, + "height": 329, + "id": "RedisBackedChatMemory_0", + "position": { + "x": 1081.252815805786, + "y": 990.1701092562037 + }, + "type": "customNode", + "data": { + "id": "RedisBackedChatMemory_0", + "label": "Redis-Backed Chat Memory", + "version": 2, + "name": "RedisBackedChatMemory", + "type": "RedisBackedChatMemory", + "baseClasses": ["RedisBackedChatMemory", "BaseChatMemory", 
"BaseMemory"], + "category": "Memory", + "description": "Summarizes the conversation and stores the memory in Redis server", + "inputParams": [ + { + "label": "Connect Credential", + "name": "credential", + "type": "credential", + "optional": true, + "credentialNames": ["redisCacheApi", "redisCacheUrlApi"], + "id": "RedisBackedChatMemory_0-input-credential-credential" + }, + { + "label": "Session Id", + "name": "sessionId", + "type": "string", + "description": "If not specified, a random id will be used. Learn more", + "default": "", + "additionalParams": true, + "optional": true, + "id": "RedisBackedChatMemory_0-input-sessionId-string" + }, + { + "label": "Session Timeouts", + "name": "sessionTTL", + "type": "number", + "description": "Omit this parameter to make sessions never expire", + "additionalParams": true, + "optional": true, + "id": "RedisBackedChatMemory_0-input-sessionTTL-number" + }, + { + "label": "Memory Key", + "name": "memoryKey", + "type": "string", + "default": "chat_history", + "additionalParams": true, + "id": "RedisBackedChatMemory_0-input-memoryKey-string" + } + ], + "inputAnchors": [], + "inputs": { + "sessionId": "", + "sessionTTL": "", + "memoryKey": "chat_history" + }, + "outputAnchors": [ + { + "id": "RedisBackedChatMemory_0-output-RedisBackedChatMemory-RedisBackedChatMemory|BaseChatMemory|BaseMemory", + "name": "RedisBackedChatMemory", + "label": "RedisBackedChatMemory", + "type": "RedisBackedChatMemory | BaseChatMemory | BaseMemory" + } + ], + "outputs": {}, + "selected": false + }, + "selected": false, + "dragging": false, + "positionAbsolute": { + "x": 1081.252815805786, + "y": 990.1701092562037 + } + }, + { + "width": 300, + "height": 529, + "id": "chatOpenAI_LlamaIndex_2", + "position": { + "x": 1015.1605888108386, + "y": -38.31143117572401 + }, + "type": "customNode", + "data": { + "id": "chatOpenAI_LlamaIndex_2", + "label": "ChatOpenAI", + "version": 1, + "name": "chatOpenAI_LlamaIndex", + "type": "ChatOpenAI", + "baseClasses": 
["ChatOpenAI", "BaseChatModel_LlamaIndex"], + "tags": ["LlamaIndex"], + "category": "Chat Models", + "description": "Wrapper around OpenAI Chat LLM with LlamaIndex implementation", + "inputParams": [ + { + "label": "Connect Credential", + "name": "credential", + "type": "credential", + "credentialNames": ["openAIApi"], + "id": "chatOpenAI_LlamaIndex_2-input-credential-credential" + }, + { + "label": "Model Name", + "name": "modelName", + "type": "options", + "options": [ + { + "label": "gpt-4", + "name": "gpt-4" + }, + { + "label": "gpt-4-1106-preview", + "name": "gpt-4-1106-preview" + }, + { + "label": "gpt-4-vision-preview", + "name": "gpt-4-vision-preview" + }, + { + "label": "gpt-4-0613", + "name": "gpt-4-0613" + }, + { + "label": "gpt-4-32k", + "name": "gpt-4-32k" + }, + { + "label": "gpt-4-32k-0613", + "name": "gpt-4-32k-0613" + }, + { + "label": "gpt-3.5-turbo", + "name": "gpt-3.5-turbo" + }, + { + "label": "gpt-3.5-turbo-1106", + "name": "gpt-3.5-turbo-1106" + }, + { + "label": "gpt-3.5-turbo-0613", + "name": "gpt-3.5-turbo-0613" + }, + { + "label": "gpt-3.5-turbo-16k", + "name": "gpt-3.5-turbo-16k" + }, + { + "label": "gpt-3.5-turbo-16k-0613", + "name": "gpt-3.5-turbo-16k-0613" + } + ], + "default": "gpt-3.5-turbo", + "optional": true, + "id": "chatOpenAI_LlamaIndex_2-input-modelName-options" + }, + { + "label": "Temperature", + "name": "temperature", + "type": "number", + "step": 0.1, + "default": 0.9, + "optional": true, + "id": "chatOpenAI_LlamaIndex_2-input-temperature-number" + }, + { + "label": "Max Tokens", + "name": "maxTokens", + "type": "number", + "step": 1, + "optional": true, + "additionalParams": true, + "id": "chatOpenAI_LlamaIndex_2-input-maxTokens-number" + }, + { + "label": "Top Probability", + "name": "topP", + "type": "number", + "step": 0.1, + "optional": true, + "additionalParams": true, + "id": "chatOpenAI_LlamaIndex_2-input-topP-number" + }, + { + "label": "Timeout", + "name": "timeout", + "type": "number", + "step": 1, + 
"optional": true, + "additionalParams": true, + "id": "chatOpenAI_LlamaIndex_2-input-timeout-number" + } + ], + "inputAnchors": [], + "inputs": { + "modelName": "gpt-3.5-turbo", + "temperature": 0.9, + "maxTokens": "", + "topP": "", + "timeout": "" + }, + "outputAnchors": [ + { + "id": "chatOpenAI_LlamaIndex_2-output-chatOpenAI_LlamaIndex-ChatOpenAI|BaseChatModel_LlamaIndex", + "name": "chatOpenAI_LlamaIndex", + "label": "ChatOpenAI", + "type": "ChatOpenAI | BaseChatModel_LlamaIndex" + } + ], + "outputs": {}, + "selected": false + }, + "selected": false, + "positionAbsolute": { + "x": 1015.1605888108386, + "y": -38.31143117572401 + }, + "dragging": false + } + ], + "edges": [ + { + "source": "recursiveCharacterTextSplitter_0", + "sourceHandle": "recursiveCharacterTextSplitter_0-output-recursiveCharacterTextSplitter-RecursiveCharacterTextSplitter|TextSplitter|BaseDocumentTransformer|Runnable", + "target": "textFile_0", + "targetHandle": "textFile_0-input-textSplitter-TextSplitter", + "type": "buttonedge", + "id": "recursiveCharacterTextSplitter_0-recursiveCharacterTextSplitter_0-output-recursiveCharacterTextSplitter-RecursiveCharacterTextSplitter|TextSplitter|BaseDocumentTransformer|Runnable-textFile_0-textFile_0-input-textSplitter-TextSplitter", + "data": { + "label": "" + } + }, + { + "source": "textFile_0", + "sourceHandle": "textFile_0-output-document-Document", + "target": "pineconeLlamaIndex_0", + "targetHandle": "pineconeLlamaIndex_0-input-document-Document", + "type": "buttonedge", + "id": "textFile_0-textFile_0-output-document-Document-pineconeLlamaIndex_0-pineconeLlamaIndex_0-input-document-Document", + "data": { + "label": "" + } + }, + { + "source": "chatOpenAI_LlamaIndex_1", + "sourceHandle": "chatOpenAI_LlamaIndex_1-output-chatOpenAI_LlamaIndex-ChatOpenAI|BaseChatModel_LlamaIndex", + "target": "pineconeLlamaIndex_0", + "targetHandle": "pineconeLlamaIndex_0-input-model-BaseChatModel_LlamaIndex", + "type": "buttonedge", + "id": 
"chatOpenAI_LlamaIndex_1-chatOpenAI_LlamaIndex_1-output-chatOpenAI_LlamaIndex-ChatOpenAI|BaseChatModel_LlamaIndex-pineconeLlamaIndex_0-pineconeLlamaIndex_0-input-model-BaseChatModel_LlamaIndex", + "data": { + "label": "" + } + }, + { + "source": "openAIEmbedding_LlamaIndex_0", + "sourceHandle": "openAIEmbedding_LlamaIndex_0-output-openAIEmbedding_LlamaIndex-OpenAIEmbedding|BaseEmbedding_LlamaIndex|BaseEmbedding", + "target": "pineconeLlamaIndex_0", + "targetHandle": "pineconeLlamaIndex_0-input-embeddings-BaseEmbedding_LlamaIndex", + "type": "buttonedge", + "id": "openAIEmbedding_LlamaIndex_0-openAIEmbedding_LlamaIndex_0-output-openAIEmbedding_LlamaIndex-OpenAIEmbedding|BaseEmbedding_LlamaIndex|BaseEmbedding-pineconeLlamaIndex_0-pineconeLlamaIndex_0-input-embeddings-BaseEmbedding_LlamaIndex", + "data": { + "label": "" + } + }, + { + "source": "pineconeLlamaIndex_0", + "sourceHandle": "pineconeLlamaIndex_0-output-pineconeLlamaIndex-Pinecone|VectorIndexRetriever", + "target": "contextChatEngine_0", + "targetHandle": "contextChatEngine_0-input-vectorStoreRetriever-VectorIndexRetriever", + "type": "buttonedge", + "id": "pineconeLlamaIndex_0-pineconeLlamaIndex_0-output-pineconeLlamaIndex-Pinecone|VectorIndexRetriever-contextChatEngine_0-contextChatEngine_0-input-vectorStoreRetriever-VectorIndexRetriever", + "data": { + "label": "" + } + }, + { + "source": "RedisBackedChatMemory_0", + "sourceHandle": "RedisBackedChatMemory_0-output-RedisBackedChatMemory-RedisBackedChatMemory|BaseChatMemory|BaseMemory", + "target": "contextChatEngine_0", + "targetHandle": "contextChatEngine_0-input-memory-BaseChatMemory", + "type": "buttonedge", + "id": "RedisBackedChatMemory_0-RedisBackedChatMemory_0-output-RedisBackedChatMemory-RedisBackedChatMemory|BaseChatMemory|BaseMemory-contextChatEngine_0-contextChatEngine_0-input-memory-BaseChatMemory", + "data": { + "label": "" + } + }, + { + "source": "chatOpenAI_LlamaIndex_2", + "sourceHandle": 
"chatOpenAI_LlamaIndex_2-output-chatOpenAI_LlamaIndex-ChatOpenAI|BaseChatModel_LlamaIndex", + "target": "contextChatEngine_0", + "targetHandle": "contextChatEngine_0-input-model-BaseChatModel_LlamaIndex", + "type": "buttonedge", + "id": "chatOpenAI_LlamaIndex_2-chatOpenAI_LlamaIndex_2-output-chatOpenAI_LlamaIndex-ChatOpenAI|BaseChatModel_LlamaIndex-contextChatEngine_0-contextChatEngine_0-input-model-BaseChatModel_LlamaIndex", + "data": { + "label": "" + } + } + ] +} diff --git a/packages/server/marketplaces/chatflows/Long Term Memory.json b/packages/server/marketplaces/chatflows/Long Term Memory.json index c508b480..2973594f 100644 --- a/packages/server/marketplaces/chatflows/Long Term Memory.json +++ b/packages/server/marketplaces/chatflows/Long Term Memory.json @@ -205,7 +205,7 @@ "data": { "id": "ZepMemory_0", "label": "Zep Memory", - "version": 1, + "version": 2, "name": "ZepMemory", "type": "ZepMemory", "baseClasses": ["ZepMemory", "BaseChatMemory", "BaseMemory"], @@ -228,13 +228,6 @@ "default": "http://127.0.0.1:8000", "id": "ZepMemory_0-input-baseURL-string" }, - { - "label": "Auto Summary", - "name": "autoSummary", - "type": "boolean", - "default": true, - "id": "ZepMemory_0-input-autoSummary-boolean" - }, { "label": "Session Id", "name": "sessionId", @@ -252,15 +245,8 @@ "default": "10", "step": 1, "description": "Window of size k to surface the last k back-and-forths to use as memory.", - "id": "ZepMemory_0-input-k-number" - }, - { - "label": "Auto Summary Template", - "name": "autoSummaryTemplate", - "type": "string", - "default": "This is the summary of the following conversation:\n{summary}", "additionalParams": true, - "id": "ZepMemory_0-input-autoSummaryTemplate-string" + "id": "ZepMemory_0-input-k-number" }, { "label": "AI Prefix", @@ -306,10 +292,8 @@ "inputAnchors": [], "inputs": { "baseURL": "http://127.0.0.1:8000", - "autoSummary": true, "sessionId": "", "k": "10", - "autoSummaryTemplate": "This is the summary of the following 
conversation:\n{summary}", "aiPrefix": "ai", "humanPrefix": "human", "memoryKey": "chat_history", diff --git a/packages/server/marketplaces/chatflows/Query Engine.json b/packages/server/marketplaces/chatflows/Query Engine.json new file mode 100644 index 00000000..921ff1d6 --- /dev/null +++ b/packages/server/marketplaces/chatflows/Query Engine.json @@ -0,0 +1,509 @@ +{ + "description": "Stateless query engine designed to answer question over your data using LlamaIndex", + "badge": "NEW", + "nodes": [ + { + "width": 300, + "height": 382, + "id": "queryEngine_0", + "position": { + "x": 1407.9610494306783, + "y": 241.12144405808692 + }, + "type": "customNode", + "data": { + "id": "queryEngine_0", + "label": "Query Engine", + "version": 1, + "name": "queryEngine", + "type": "QueryEngine", + "baseClasses": ["QueryEngine"], + "tags": ["LlamaIndex"], + "category": "Engine", + "description": "Simple query engine built to answer question over your data, without memory", + "inputParams": [ + { + "label": "Return Source Documents", + "name": "returnSourceDocuments", + "type": "boolean", + "optional": true, + "id": "queryEngine_0-input-returnSourceDocuments-boolean" + } + ], + "inputAnchors": [ + { + "label": "Vector Store Retriever", + "name": "vectorStoreRetriever", + "type": "VectorIndexRetriever", + "id": "queryEngine_0-input-vectorStoreRetriever-VectorIndexRetriever" + }, + { + "label": "Response Synthesizer", + "name": "responseSynthesizer", + "type": "ResponseSynthesizer", + "description": "ResponseSynthesizer is responsible for sending the query, nodes, and prompt templates to the LLM to generate a response. 
See more", + "optional": true, + "id": "queryEngine_0-input-responseSynthesizer-ResponseSynthesizer" + } + ], + "inputs": { + "vectorStoreRetriever": "{{pineconeLlamaIndex_0.data.instance}}", + "responseSynthesizer": "{{compactrefineLlamaIndex_0.data.instance}}", + "returnSourceDocuments": true + }, + "outputAnchors": [ + { + "id": "queryEngine_0-output-queryEngine-QueryEngine", + "name": "queryEngine", + "label": "QueryEngine", + "type": "QueryEngine" + } + ], + "outputs": {}, + "selected": false + }, + "selected": false, + "positionAbsolute": { + "x": 1407.9610494306783, + "y": 241.12144405808692 + }, + "dragging": false + }, + { + "width": 300, + "height": 585, + "id": "pineconeLlamaIndex_0", + "position": { + "x": 977.3886641397302, + "y": -261.2253031641797 + }, + "type": "customNode", + "data": { + "id": "pineconeLlamaIndex_0", + "label": "Pinecone", + "version": 1, + "name": "pineconeLlamaIndex", + "type": "Pinecone", + "baseClasses": ["Pinecone", "VectorIndexRetriever"], + "tags": ["LlamaIndex"], + "category": "Vector Stores", + "description": "Upsert embedded data and perform similarity search upon query using Pinecone, a leading fully managed hosted vector database", + "inputParams": [ + { + "label": "Connect Credential", + "name": "credential", + "type": "credential", + "credentialNames": ["pineconeApi"], + "id": "pineconeLlamaIndex_0-input-credential-credential" + }, + { + "label": "Pinecone Index", + "name": "pineconeIndex", + "type": "string", + "id": "pineconeLlamaIndex_0-input-pineconeIndex-string" + }, + { + "label": "Pinecone Namespace", + "name": "pineconeNamespace", + "type": "string", + "placeholder": "my-first-namespace", + "additionalParams": true, + "optional": true, + "id": "pineconeLlamaIndex_0-input-pineconeNamespace-string" + }, + { + "label": "Pinecone Metadata Filter", + "name": "pineconeMetadataFilter", + "type": "json", + "optional": true, + "additionalParams": true, + "id": "pineconeLlamaIndex_0-input-pineconeMetadataFilter-json" + 
}, + { + "label": "Top K", + "name": "topK", + "description": "Number of top results to fetch. Default to 4", + "placeholder": "4", + "type": "number", + "additionalParams": true, + "optional": true, + "id": "pineconeLlamaIndex_0-input-topK-number" + } + ], + "inputAnchors": [ + { + "label": "Document", + "name": "document", + "type": "Document", + "list": true, + "optional": true, + "id": "pineconeLlamaIndex_0-input-document-Document" + }, + { + "label": "Chat Model", + "name": "model", + "type": "BaseChatModel_LlamaIndex", + "id": "pineconeLlamaIndex_0-input-model-BaseChatModel_LlamaIndex" + }, + { + "label": "Embeddings", + "name": "embeddings", + "type": "BaseEmbedding_LlamaIndex", + "id": "pineconeLlamaIndex_0-input-embeddings-BaseEmbedding_LlamaIndex" + } + ], + "inputs": { + "document": "", + "model": "{{chatAnthropic_LlamaIndex_0.data.instance}}", + "embeddings": "{{openAIEmbedding_LlamaIndex_0.data.instance}}", + "pineconeIndex": "", + "pineconeNamespace": "", + "pineconeMetadataFilter": "", + "topK": "" + }, + "outputAnchors": [ + { + "id": "pineconeLlamaIndex_0-output-pineconeLlamaIndex-Pinecone|VectorIndexRetriever", + "name": "pineconeLlamaIndex", + "label": "Pinecone", + "type": "Pinecone | VectorIndexRetriever" + } + ], + "outputs": {}, + "selected": false + }, + "selected": false, + "positionAbsolute": { + "x": 977.3886641397302, + "y": -261.2253031641797 + }, + "dragging": false + }, + { + "width": 300, + "height": 334, + "id": "openAIEmbedding_LlamaIndex_0", + "position": { + "x": 529.8690713844503, + "y": -18.955726653613254 + }, + "type": "customNode", + "data": { + "id": "openAIEmbedding_LlamaIndex_0", + "label": "OpenAI Embedding", + "version": 1, + "name": "openAIEmbedding_LlamaIndex", + "type": "OpenAIEmbedding", + "baseClasses": ["OpenAIEmbedding", "BaseEmbedding_LlamaIndex", "BaseEmbedding"], + "tags": ["LlamaIndex"], + "category": "Embeddings", + "description": "OpenAI Embedding with LlamaIndex implementation", + "inputParams": [ + { + 
"label": "Connect Credential", + "name": "credential", + "type": "credential", + "credentialNames": ["openAIApi"], + "id": "openAIEmbedding_LlamaIndex_0-input-credential-credential" + }, + { + "label": "Timeout", + "name": "timeout", + "type": "number", + "optional": true, + "additionalParams": true, + "id": "openAIEmbedding_LlamaIndex_0-input-timeout-number" + }, + { + "label": "BasePath", + "name": "basepath", + "type": "string", + "optional": true, + "additionalParams": true, + "id": "openAIEmbedding_LlamaIndex_0-input-basepath-string" + } + ], + "inputAnchors": [], + "inputs": { + "timeout": "", + "basepath": "" + }, + "outputAnchors": [ + { + "id": "openAIEmbedding_LlamaIndex_0-output-openAIEmbedding_LlamaIndex-OpenAIEmbedding|BaseEmbedding_LlamaIndex|BaseEmbedding", + "name": "openAIEmbedding_LlamaIndex", + "label": "OpenAIEmbedding", + "type": "OpenAIEmbedding | BaseEmbedding_LlamaIndex | BaseEmbedding" + } + ], + "outputs": {}, + "selected": false + }, + "selected": false, + "positionAbsolute": { + "x": 529.8690713844503, + "y": -18.955726653613254 + }, + "dragging": false + }, + { + "width": 300, + "height": 749, + "id": "compactrefineLlamaIndex_0", + "position": { + "x": 170.71031618977543, + "y": -33.83233752386292 + }, + "type": "customNode", + "data": { + "id": "compactrefineLlamaIndex_0", + "label": "Compact and Refine", + "version": 1, + "name": "compactrefineLlamaIndex", + "type": "CompactRefine", + "baseClasses": ["CompactRefine", "ResponseSynthesizer"], + "tags": ["LlamaIndex"], + "category": "Response Synthesizer", + "description": "CompactRefine is a slight variation of Refine that first compacts the text chunks into the smallest possible number of chunks.", + "inputParams": [ + { + "label": "Refine Prompt", + "name": "refinePrompt", + "type": "string", + "rows": 4, + "default": "The original query is as follows: {query}\nWe have provided an existing answer: {existingAnswer}\nWe have the opportunity to refine the existing answer (only if needed) 
with some more context below.\n------------\n{context}\n------------\nGiven the new context, refine the original answer to better answer the query. If the context isn't useful, return the original answer.\nRefined Answer:", + "warning": "Prompt can contain no variables, or up to 3 variables. Variables must be {existingAnswer}, {context} and {query}", + "optional": true, + "id": "compactrefineLlamaIndex_0-input-refinePrompt-string" + }, + { + "label": "Text QA Prompt", + "name": "textQAPrompt", + "type": "string", + "rows": 4, + "default": "Context information is below.\n---------------------\n{context}\n---------------------\nGiven the context information and not prior knowledge, answer the query.\nQuery: {query}\nAnswer:", + "warning": "Prompt can contain no variables, or up to 2 variables. Variables must be {context} and {query}", + "optional": true, + "id": "compactrefineLlamaIndex_0-input-textQAPrompt-string" + } + ], + "inputAnchors": [], + "inputs": { + "refinePrompt": "The original query is as follows: {query}\nWe have provided an existing answer: {existingAnswer}\nWe have the opportunity to refine the existing answer (only if needed) with some more context below.\n------------\n{context}\n------------\nGiven the new context, refine the original answer to better answer the query. 
If the context isn't useful, return the original answer.\nRefined Answer:", + "textQAPrompt": "Context information:\n\n{context}\n\nGiven the context information and not prior knowledge, answer the query.\nQuery: {query}" + }, + "outputAnchors": [ + { + "id": "compactrefineLlamaIndex_0-output-compactrefineLlamaIndex-CompactRefine|ResponseSynthesizer", + "name": "compactrefineLlamaIndex", + "label": "CompactRefine", + "type": "CompactRefine | ResponseSynthesizer" + } + ], + "outputs": {}, + "selected": false + }, + "selected": false, + "positionAbsolute": { + "x": 170.71031618977543, + "y": -33.83233752386292 + }, + "dragging": false + }, + { + "width": 300, + "height": 529, + "id": "chatAnthropic_LlamaIndex_0", + "position": { + "x": 521.3530883359147, + "y": -584.8241219614786 + }, + "type": "customNode", + "data": { + "id": "chatAnthropic_LlamaIndex_0", + "label": "ChatAnthropic", + "version": 1, + "name": "chatAnthropic_LlamaIndex", + "type": "ChatAnthropic", + "baseClasses": ["ChatAnthropic", "BaseChatModel_LlamaIndex"], + "tags": ["LlamaIndex"], + "category": "Chat Models", + "description": "Wrapper around ChatAnthropic LLM with LlamaIndex implementation", + "inputParams": [ + { + "label": "Connect Credential", + "name": "credential", + "type": "credential", + "credentialNames": ["anthropicApi"], + "id": "chatAnthropic_LlamaIndex_0-input-credential-credential" + }, + { + "label": "Model Name", + "name": "modelName", + "type": "options", + "options": [ + { + "label": "claude-2", + "name": "claude-2", + "description": "Claude 2 latest major version, automatically get updates to the model as they are released" + }, + { + "label": "claude-2.1", + "name": "claude-2.1", + "description": "Claude 2 latest full version" + }, + { + "label": "claude-instant-1", + "name": "claude-instant-1", + "description": "Claude Instant latest major version, automatically get updates to the model as they are released" + }, + { + "label": "claude-v1", + "name": "claude-v1" + }, + { + 
"label": "claude-v1-100k", + "name": "claude-v1-100k" + }, + { + "label": "claude-v1.0", + "name": "claude-v1.0" + }, + { + "label": "claude-v1.2", + "name": "claude-v1.2" + }, + { + "label": "claude-v1.3", + "name": "claude-v1.3" + }, + { + "label": "claude-v1.3-100k", + "name": "claude-v1.3-100k" + }, + { + "label": "claude-instant-v1", + "name": "claude-instant-v1" + }, + { + "label": "claude-instant-v1-100k", + "name": "claude-instant-v1-100k" + }, + { + "label": "claude-instant-v1.0", + "name": "claude-instant-v1.0" + }, + { + "label": "claude-instant-v1.1", + "name": "claude-instant-v1.1" + }, + { + "label": "claude-instant-v1.1-100k", + "name": "claude-instant-v1.1-100k" + } + ], + "default": "claude-2", + "optional": true, + "id": "chatAnthropic_LlamaIndex_0-input-modelName-options" + }, + { + "label": "Temperature", + "name": "temperature", + "type": "number", + "step": 0.1, + "default": 0.9, + "optional": true, + "id": "chatAnthropic_LlamaIndex_0-input-temperature-number" + }, + { + "label": "Max Tokens", + "name": "maxTokensToSample", + "type": "number", + "step": 1, + "optional": true, + "additionalParams": true, + "id": "chatAnthropic_LlamaIndex_0-input-maxTokensToSample-number" + }, + { + "label": "Top P", + "name": "topP", + "type": "number", + "step": 0.1, + "optional": true, + "additionalParams": true, + "id": "chatAnthropic_LlamaIndex_0-input-topP-number" + } + ], + "inputAnchors": [], + "inputs": { + "modelName": "claude-2", + "temperature": 0.9, + "maxTokensToSample": "", + "topP": "" + }, + "outputAnchors": [ + { + "id": "chatAnthropic_LlamaIndex_0-output-chatAnthropic_LlamaIndex-ChatAnthropic|BaseChatModel_LlamaIndex", + "name": "chatAnthropic_LlamaIndex", + "label": "ChatAnthropic", + "type": "ChatAnthropic | BaseChatModel_LlamaIndex" + } + ], + "outputs": {}, + "selected": false + }, + "selected": false, + "positionAbsolute": { + "x": 521.3530883359147, + "y": -584.8241219614786 + }, + "dragging": false + } + ], + "edges": [ + { + "source": 
"pineconeLlamaIndex_0", + "sourceHandle": "pineconeLlamaIndex_0-output-pineconeLlamaIndex-Pinecone|VectorIndexRetriever", + "target": "queryEngine_0", + "targetHandle": "queryEngine_0-input-vectorStoreRetriever-VectorIndexRetriever", + "type": "buttonedge", + "id": "pineconeLlamaIndex_0-pineconeLlamaIndex_0-output-pineconeLlamaIndex-Pinecone|VectorIndexRetriever-queryEngine_0-queryEngine_0-input-vectorStoreRetriever-VectorIndexRetriever", + "data": { + "label": "" + } + }, + { + "source": "openAIEmbedding_LlamaIndex_0", + "sourceHandle": "openAIEmbedding_LlamaIndex_0-output-openAIEmbedding_LlamaIndex-OpenAIEmbedding|BaseEmbedding_LlamaIndex|BaseEmbedding", + "target": "pineconeLlamaIndex_0", + "targetHandle": "pineconeLlamaIndex_0-input-embeddings-BaseEmbedding_LlamaIndex", + "type": "buttonedge", + "id": "openAIEmbedding_LlamaIndex_0-openAIEmbedding_LlamaIndex_0-output-openAIEmbedding_LlamaIndex-OpenAIEmbedding|BaseEmbedding_LlamaIndex|BaseEmbedding-pineconeLlamaIndex_0-pineconeLlamaIndex_0-input-embeddings-BaseEmbedding_LlamaIndex", + "data": { + "label": "" + } + }, + { + "source": "compactrefineLlamaIndex_0", + "sourceHandle": "compactrefineLlamaIndex_0-output-compactrefineLlamaIndex-CompactRefine|ResponseSynthesizer", + "target": "queryEngine_0", + "targetHandle": "queryEngine_0-input-responseSynthesizer-ResponseSynthesizer", + "type": "buttonedge", + "id": "compactrefineLlamaIndex_0-compactrefineLlamaIndex_0-output-compactrefineLlamaIndex-CompactRefine|ResponseSynthesizer-queryEngine_0-queryEngine_0-input-responseSynthesizer-ResponseSynthesizer", + "data": { + "label": "" + } + }, + { + "source": "chatAnthropic_LlamaIndex_0", + "sourceHandle": "chatAnthropic_LlamaIndex_0-output-chatAnthropic_LlamaIndex-ChatAnthropic|BaseChatModel_LlamaIndex", + "target": "pineconeLlamaIndex_0", + "targetHandle": "pineconeLlamaIndex_0-input-model-BaseChatModel_LlamaIndex", + "type": "buttonedge", + "id": 
"chatAnthropic_LlamaIndex_0-chatAnthropic_LlamaIndex_0-output-chatAnthropic_LlamaIndex-ChatAnthropic|BaseChatModel_LlamaIndex-pineconeLlamaIndex_0-pineconeLlamaIndex_0-input-model-BaseChatModel_LlamaIndex", + "data": { + "label": "" + } + } + ] +} diff --git a/packages/server/marketplaces/chatflows/Simple Chat Engine.json b/packages/server/marketplaces/chatflows/Simple Chat Engine.json new file mode 100644 index 00000000..b3854a51 --- /dev/null +++ b/packages/server/marketplaces/chatflows/Simple Chat Engine.json @@ -0,0 +1,270 @@ +{ + "description": "Simple chat engine to handle back and forth conversations using LlamaIndex", + "badge": "NEW", + "nodes": [ + { + "width": 300, + "height": 462, + "id": "simpleChatEngine_0", + "position": { + "x": 1210.127368000538, + "y": 324.98110560103896 + }, + "type": "customNode", + "data": { + "id": "simpleChatEngine_0", + "label": "Simple Chat Engine", + "version": 1, + "name": "simpleChatEngine", + "type": "SimpleChatEngine", + "baseClasses": ["SimpleChatEngine"], + "tags": ["LlamaIndex"], + "category": "Engine", + "description": "Simple engine to handle back and forth conversations", + "inputParams": [ + { + "label": "System Message", + "name": "systemMessagePrompt", + "type": "string", + "rows": 4, + "optional": true, + "placeholder": "You are a helpful assistant", + "id": "simpleChatEngine_0-input-systemMessagePrompt-string" + } + ], + "inputAnchors": [ + { + "label": "Chat Model", + "name": "model", + "type": "BaseChatModel_LlamaIndex", + "id": "simpleChatEngine_0-input-model-BaseChatModel_LlamaIndex" + }, + { + "label": "Memory", + "name": "memory", + "type": "BaseChatMemory", + "id": "simpleChatEngine_0-input-memory-BaseChatMemory" + } + ], + "inputs": { + "model": "{{azureChatOpenAI_LlamaIndex_0.data.instance}}", + "memory": "{{bufferMemory_0.data.instance}}", + "systemMessagePrompt": "You are a helpful assistant." 
+ }, + "outputAnchors": [ + { + "id": "simpleChatEngine_0-output-simpleChatEngine-SimpleChatEngine", + "name": "simpleChatEngine", + "label": "SimpleChatEngine", + "type": "SimpleChatEngine" + } + ], + "outputs": {}, + "selected": false + }, + "selected": false, + "dragging": false, + "positionAbsolute": { + "x": 1210.127368000538, + "y": 324.98110560103896 + } + }, + { + "width": 300, + "height": 376, + "id": "bufferMemory_0", + "position": { + "x": 393.9823478014782, + "y": 415.7414943210391 + }, + "type": "customNode", + "data": { + "id": "bufferMemory_0", + "label": "Buffer Memory", + "version": 1, + "name": "bufferMemory", + "type": "BufferMemory", + "baseClasses": ["BufferMemory", "BaseChatMemory", "BaseMemory"], + "category": "Memory", + "description": "Remembers previous conversational back and forths directly", + "inputParams": [ + { + "label": "Memory Key", + "name": "memoryKey", + "type": "string", + "default": "chat_history", + "id": "bufferMemory_0-input-memoryKey-string" + }, + { + "label": "Input Key", + "name": "inputKey", + "type": "string", + "default": "input", + "id": "bufferMemory_0-input-inputKey-string" + } + ], + "inputAnchors": [], + "inputs": { + "memoryKey": "chat_history", + "inputKey": "input" + }, + "outputAnchors": [ + { + "id": "bufferMemory_0-output-bufferMemory-BufferMemory|BaseChatMemory|BaseMemory", + "name": "bufferMemory", + "label": "BufferMemory", + "type": "BufferMemory | BaseChatMemory | BaseMemory" + } + ], + "outputs": {}, + "selected": false + }, + "selected": false, + "positionAbsolute": { + "x": 393.9823478014782, + "y": 415.7414943210391 + }, + "dragging": false + }, + { + "width": 300, + "height": 529, + "id": "azureChatOpenAI_LlamaIndex_0", + "position": { + "x": 746.5530862509605, + "y": -54.107978373323306 + }, + "type": "customNode", + "data": { + "id": "azureChatOpenAI_LlamaIndex_0", + "label": "AzureChatOpenAI", + "version": 1, + "name": "azureChatOpenAI_LlamaIndex", + "type": "AzureChatOpenAI", + 
"baseClasses": ["AzureChatOpenAI", "BaseChatModel_LlamaIndex"], + "tags": ["LlamaIndex"], + "category": "Chat Models", + "description": "Wrapper around Azure OpenAI Chat LLM with LlamaIndex implementation", + "inputParams": [ + { + "label": "Connect Credential", + "name": "credential", + "type": "credential", + "credentialNames": ["azureOpenAIApi"], + "id": "azureChatOpenAI_LlamaIndex_0-input-credential-credential" + }, + { + "label": "Model Name", + "name": "modelName", + "type": "options", + "options": [ + { + "label": "gpt-4", + "name": "gpt-4" + }, + { + "label": "gpt-4-32k", + "name": "gpt-4-32k" + }, + { + "label": "gpt-3.5-turbo", + "name": "gpt-3.5-turbo" + }, + { + "label": "gpt-3.5-turbo-16k", + "name": "gpt-3.5-turbo-16k" + } + ], + "default": "gpt-3.5-turbo-16k", + "optional": true, + "id": "azureChatOpenAI_LlamaIndex_0-input-modelName-options" + }, + { + "label": "Temperature", + "name": "temperature", + "type": "number", + "step": 0.1, + "default": 0.9, + "optional": true, + "id": "azureChatOpenAI_LlamaIndex_0-input-temperature-number" + }, + { + "label": "Max Tokens", + "name": "maxTokens", + "type": "number", + "step": 1, + "optional": true, + "additionalParams": true, + "id": "azureChatOpenAI_LlamaIndex_0-input-maxTokens-number" + }, + { + "label": "Top Probability", + "name": "topP", + "type": "number", + "step": 0.1, + "optional": true, + "additionalParams": true, + "id": "azureChatOpenAI_LlamaIndex_0-input-topP-number" + }, + { + "label": "Timeout", + "name": "timeout", + "type": "number", + "step": 1, + "optional": true, + "additionalParams": true, + "id": "azureChatOpenAI_LlamaIndex_0-input-timeout-number" + } + ], + "inputAnchors": [], + "inputs": { + "modelName": "gpt-3.5-turbo-16k", + "temperature": 0.9, + "maxTokens": "", + "topP": "", + "timeout": "" + }, + "outputAnchors": [ + { + "id": "azureChatOpenAI_LlamaIndex_0-output-azureChatOpenAI_LlamaIndex-AzureChatOpenAI|BaseChatModel_LlamaIndex", + "name": "azureChatOpenAI_LlamaIndex", + 
"label": "AzureChatOpenAI", + "type": "AzureChatOpenAI | BaseChatModel_LlamaIndex" + } + ], + "outputs": {}, + "selected": false + }, + "selected": false, + "positionAbsolute": { + "x": 746.5530862509605, + "y": -54.107978373323306 + }, + "dragging": false + } + ], + "edges": [ + { + "source": "bufferMemory_0", + "sourceHandle": "bufferMemory_0-output-bufferMemory-BufferMemory|BaseChatMemory|BaseMemory", + "target": "simpleChatEngine_0", + "targetHandle": "simpleChatEngine_0-input-memory-BaseChatMemory", + "type": "buttonedge", + "id": "bufferMemory_0-bufferMemory_0-output-bufferMemory-BufferMemory|BaseChatMemory|BaseMemory-simpleChatEngine_0-simpleChatEngine_0-input-memory-BaseChatMemory", + "data": { + "label": "" + } + }, + { + "source": "azureChatOpenAI_LlamaIndex_0", + "sourceHandle": "azureChatOpenAI_LlamaIndex_0-output-azureChatOpenAI_LlamaIndex-AzureChatOpenAI|BaseChatModel_LlamaIndex", + "target": "simpleChatEngine_0", + "targetHandle": "simpleChatEngine_0-input-model-BaseChatModel_LlamaIndex", + "type": "buttonedge", + "id": "azureChatOpenAI_LlamaIndex_0-azureChatOpenAI_LlamaIndex_0-output-azureChatOpenAI_LlamaIndex-AzureChatOpenAI|BaseChatModel_LlamaIndex-simpleChatEngine_0-simpleChatEngine_0-input-model-BaseChatModel_LlamaIndex", + "data": { + "label": "" + } + } + ] +} diff --git a/packages/server/marketplaces/chatflows/WebPage QnA.json b/packages/server/marketplaces/chatflows/WebPage QnA.json index 9b1119b9..be077f97 100644 --- a/packages/server/marketplaces/chatflows/WebPage QnA.json +++ b/packages/server/marketplaces/chatflows/WebPage QnA.json @@ -589,7 +589,7 @@ "label": "Session Id", "name": "sessionId", "type": "string", - "description": "If not specified, the first CHAT_MESSAGE_ID will be used as sessionId", + "description": "If not specified, a random id will be used. 
Learn more", "default": "", "additionalParams": true, "optional": true, diff --git a/packages/server/src/index.ts b/packages/server/src/index.ts index d87d2c0a..78cc4260 100644 --- a/packages/server/src/index.ts +++ b/packages/server/src/index.ts @@ -37,13 +37,12 @@ import { databaseEntities, transformToCredentialEntity, decryptCredentialData, - clearAllSessionMemory, replaceInputsWithConfig, getEncryptionKey, - checkMemorySessionId, - clearSessionMemoryFromViewMessageDialog, + replaceMemorySessionId, getUserHome, - replaceChatHistory + replaceChatHistory, + clearSessionMemory } from './utils' import { cloneDeep, omit, uniqWith, isEqual } from 'lodash' import { getDataSource } from './DataSource' @@ -387,7 +386,12 @@ export class App { const endingNodeData = nodes.find((nd) => nd.id === endingNodeId)?.data if (!endingNodeData) return res.status(500).send(`Ending node ${endingNodeId} data not found`) - if (endingNodeData && endingNodeData.category !== 'Chains' && endingNodeData.category !== 'Agents') { + if ( + endingNodeData && + endingNodeData.category !== 'Chains' && + endingNodeData.category !== 'Agents' && + endingNodeData.category !== 'Engine' + ) { return res.status(500).send(`Ending node must be either a Chain or Agent`) } @@ -472,18 +476,15 @@ export class App { const parsedFlowData: IReactFlowObject = JSON.parse(flowData) const nodes = parsedFlowData.nodes - if (isClearFromViewMessageDialog) { - await clearSessionMemoryFromViewMessageDialog( - nodes, - this.nodesPool.componentNodes, - chatId, - this.AppDataSource, - sessionId, - memoryType - ) - } else { - await clearAllSessionMemory(nodes, this.nodesPool.componentNodes, chatId, this.AppDataSource, sessionId) - } + await clearSessionMemory( + nodes, + this.nodesPool.componentNodes, + chatId, + this.AppDataSource, + sessionId, + memoryType, + isClearFromViewMessageDialog + ) const deleteOptions: FindOptionsWhere = { chatflowid, chatId } if (memoryType) deleteOptions.memoryType = memoryType @@ -1377,7 
+1378,13 @@ export class App { const endingNodeData = nodes.find((nd) => nd.id === endingNodeId)?.data if (!endingNodeData) return res.status(500).send(`Ending node ${endingNodeId} data not found`) - if (endingNodeData && endingNodeData.category !== 'Chains' && endingNodeData.category !== 'Agents' && !isUpsert) { + if ( + endingNodeData && + endingNodeData.category !== 'Chains' && + endingNodeData.category !== 'Agents' && + endingNodeData.category !== 'Engine' && + !isUpsert + ) { return res.status(500).send(`Ending node must be either a Chain or Agent`) } @@ -1396,7 +1403,9 @@ export class App { isStreamValid = isFlowValidForStream(nodes, endingNodeData) - let chatHistory: IMessage[] | string = incomingInput.history + let chatHistory: IMessage[] = incomingInput.history + + // If chatHistory is empty, and sessionId/chatId is present, replace it if ( endingNodeData.inputs?.memory && !incomingInput.history && @@ -1437,8 +1446,10 @@ export class App { const nodeToExecute = reactFlowNodes.find((node: IReactFlowNode) => node.id === endingNodeId) if (!nodeToExecute) return res.status(404).send(`Node ${endingNodeId} not found`) - if (incomingInput.overrideConfig) + if (incomingInput.overrideConfig) { nodeToExecute.data = replaceInputsWithConfig(nodeToExecute.data, incomingInput.overrideConfig) + } + const reactFlowNodeData: INodeData = resolveVariables( nodeToExecute.data, reactFlowNodes, @@ -1458,19 +1469,11 @@ export class App { logger.debug(`[server]: Running ${nodeToExecuteData.label} (${nodeToExecuteData.id})`) let sessionId = undefined - if (nodeToExecuteData.instance) sessionId = checkMemorySessionId(nodeToExecuteData.instance, chatId) - - const memoryNode = this.findMemoryLabel(nodes, edges) - const memoryType = memoryNode?.data.label - - let chatHistory: IMessage[] | string = incomingInput.history - if (memoryNode && !incomingInput.history && (incomingInput.chatId || incomingInput.overrideConfig?.sessionId)) { - chatHistory = await replaceChatHistory(memoryNode, 
incomingInput, this.AppDataSource, databaseEntities, logger) - } + if (nodeToExecuteData.instance) sessionId = replaceMemorySessionId(nodeToExecuteData.instance, chatId) let result = isStreamValid ? await nodeInstance.run(nodeToExecuteData, incomingInput.question, { - chatHistory, + chatHistory: incomingInput.history, socketIO, socketIOClientId: incomingInput.socketIOClientId, logger, @@ -1480,7 +1483,7 @@ export class App { chatId }) : await nodeInstance.run(nodeToExecuteData, incomingInput.question, { - chatHistory, + chatHistory: incomingInput.history, logger, appDataSource: this.AppDataSource, databaseEntities, @@ -1495,6 +1498,9 @@ export class App { sessionId = result.assistant.threadId } + const memoryNode = this.findMemoryLabel(nodes, edges) + const memoryType = memoryNode?.data.label + const userMessage: Omit = { role: 'userMessage', content: incomingInput.question, diff --git a/packages/server/src/utils/index.ts b/packages/server/src/utils/index.ts index 0b1e62d2..0b37b608 100644 --- a/packages/server/src/utils/index.ts +++ b/packages/server/src/utils/index.ts @@ -217,7 +217,7 @@ export const buildLangchain = async ( depthQueue: IDepthQueue, componentNodes: IComponentNodes, question: string, - chatHistory: IMessage[] | string, + chatHistory: IMessage[], chatId: string, chatflowid: string, appDataSource: DataSource, @@ -324,22 +324,30 @@ export const buildLangchain = async ( } /** - * Clear all session memories on the canvas + * Clear session memories * @param {IReactFlowNode[]} reactFlowNodes * @param {IComponentNodes} componentNodes * @param {string} chatId * @param {DataSource} appDataSource * @param {string} sessionId + * @param {string} memoryType + * @param {string} isClearFromViewMessageDialog */ -export const clearAllSessionMemory = async ( +export const clearSessionMemory = async ( reactFlowNodes: IReactFlowNode[], componentNodes: IComponentNodes, chatId: string, appDataSource: DataSource, - sessionId?: string + sessionId?: string, + memoryType?: 
string, + isClearFromViewMessageDialog?: string ) => { for (const node of reactFlowNodes) { if (node.data.category !== 'Memory' && node.data.type !== 'OpenAIAssistant') continue + + // Only clear specific session memory from View Message Dialog UI + if (isClearFromViewMessageDialog && memoryType && node.data.label !== memoryType) continue + const nodeInstanceFilePath = componentNodes[node.data.name].filePath as string const nodeModule = await import(nodeInstanceFilePath) const newNodeInstance = new nodeModule.nodeClass() @@ -348,42 +356,10 @@ export const clearAllSessionMemory = async ( node.data.inputs.sessionId = sessionId } - if (newNodeInstance.memoryMethods && newNodeInstance.memoryMethods.clearSessionMemory) { - await newNodeInstance.memoryMethods.clearSessionMemory(node.data, { chatId, appDataSource, databaseEntities, logger }) - } - } -} + const initializedInstance = await newNodeInstance.init(node.data, '', { chatId, appDataSource, databaseEntities, logger }) -/** - * Clear specific session memory from View Message Dialog UI - * @param {IReactFlowNode[]} reactFlowNodes - * @param {IComponentNodes} componentNodes - * @param {string} chatId - * @param {DataSource} appDataSource - * @param {string} sessionId - * @param {string} memoryType - */ -export const clearSessionMemoryFromViewMessageDialog = async ( - reactFlowNodes: IReactFlowNode[], - componentNodes: IComponentNodes, - chatId: string, - appDataSource: DataSource, - sessionId?: string, - memoryType?: string -) => { - if (!sessionId) return - for (const node of reactFlowNodes) { - if (node.data.category !== 'Memory' && node.data.type !== 'OpenAIAssistant') continue - if (memoryType && node.data.label !== memoryType) continue - const nodeInstanceFilePath = componentNodes[node.data.name].filePath as string - const nodeModule = await import(nodeInstanceFilePath) - const newNodeInstance = new nodeModule.nodeClass() - - if (sessionId && node.data.inputs) node.data.inputs.sessionId = sessionId - - if 
(newNodeInstance.memoryMethods && newNodeInstance.memoryMethods.clearSessionMemory) { - await newNodeInstance.memoryMethods.clearSessionMemory(node.data, { chatId, appDataSource, databaseEntities, logger }) - return + if (initializedInstance.clearChatMessages) { + await initializedInstance.clearChatMessages() } } } @@ -400,7 +376,7 @@ export const getVariableValue = ( paramValue: string, reactFlowNodes: IReactFlowNode[], question: string, - chatHistory: IMessage[] | string, + chatHistory: IMessage[], isAcceptVariable = false ) => { let returnVal = paramValue @@ -433,10 +409,7 @@ export const getVariableValue = ( } if (isAcceptVariable && variableFullPath === CHAT_HISTORY_VAR_PREFIX) { - variableDict[`{{${variableFullPath}}}`] = handleEscapeCharacters( - typeof chatHistory === 'string' ? chatHistory : convertChatHistoryToText(chatHistory), - false - ) + variableDict[`{{${variableFullPath}}}`] = handleEscapeCharacters(convertChatHistoryToText(chatHistory), false) } // Split by first occurrence of '.' 
to get just nodeId @@ -479,7 +452,7 @@ export const resolveVariables = ( reactFlowNodeData: INodeData, reactFlowNodes: IReactFlowNode[], question: string, - chatHistory: IMessage[] | string + chatHistory: IMessage[] ): INodeData => { let flowNodeData = cloneDeep(reactFlowNodeData) const types = 'inputs' @@ -558,7 +531,7 @@ export const isStartNodeDependOnInput = (startingNodes: IReactFlowNode[], nodes: if (inputVariables.length > 0) return true } } - const whitelistNodeNames = ['vectorStoreToDocument', 'autoGPT'] + const whitelistNodeNames = ['vectorStoreToDocument', 'autoGPT', 'chatPromptTemplate', 'promptTemplate'] //If these nodes are found, chatflow cannot be reused for (const node of nodes) { if (whitelistNodeNames.includes(node.data.name)) return true } @@ -706,7 +679,15 @@ export const findAvailableConfigs = (reactFlowNodes: IReactFlowNode[], component */ export const isFlowValidForStream = (reactFlowNodes: IReactFlowNode[], endingNodeData: INodeData) => { const streamAvailableLLMs = { - 'Chat Models': ['azureChatOpenAI', 'chatOpenAI', 'chatAnthropic', 'chatOllama', 'awsChatBedrock'], + 'Chat Models': [ + 'azureChatOpenAI', + 'chatOpenAI', + 'chatOpenAI_LlamaIndex', + 'chatAnthropic', + 'chatAnthropic_LlamaIndex', + 'chatOllama', + 'awsChatBedrock' + ], LLMs: ['azureOpenAI', 'openAI', 'ollama'] } @@ -729,6 +710,9 @@ export const isFlowValidForStream = (reactFlowNodes: IReactFlowNode[], endingNod // Agent that are available to stream const whitelistAgents = ['openAIFunctionAgent', 'csvAgent', 'airtableAgent', 'conversationalRetrievalAgent'] isValidChainOrAgent = whitelistAgents.includes(endingNodeData.name) + } else if (endingNodeData.category === 'Engine') { + const whitelistEngine = ['contextChatEngine', 'simpleChatEngine'] + isValidChainOrAgent = whitelistEngine.includes(endingNodeData.name) } // If no output parser, flow is available to stream @@ -866,7 +850,7 @@ export const redactCredentialWithPasswordType = ( * @param {any} instance * @param {string} 
chatId */ -export const checkMemorySessionId = (instance: any, chatId: string): string | undefined => { +export const replaceMemorySessionId = (instance: any, chatId: string): string | undefined => { if (instance.memory && instance.memory.isSessionIdUsingChatMessageId && chatId) { instance.memory.sessionId = chatId instance.memory.chatHistory.sessionId = chatId @@ -893,7 +877,7 @@ export const replaceChatHistory = async ( appDataSource: DataSource, databaseEntities: IDatabaseEntity, logger: any -): Promise => { +): Promise => { const nodeInstanceFilePath = memoryNode.data.filePath as string const nodeModule = await import(nodeInstanceFilePath) const newNodeInstance = new nodeModule.nodeClass() @@ -902,14 +886,12 @@ export const replaceChatHistory = async ( memoryNode.data.inputs.sessionId = incomingInput.overrideConfig.sessionId } - if (newNodeInstance.memoryMethods && newNodeInstance.memoryMethods.getChatMessages) { - return await newNodeInstance.memoryMethods.getChatMessages(memoryNode.data, { - chatId: incomingInput.chatId, - appDataSource, - databaseEntities, - logger - }) - } + const initializedInstance = await newNodeInstance.init(memoryNode.data, '', { + chatId: incomingInput.chatId, + appDataSource, + databaseEntities, + logger + }) - return '' + return await initializedInstance.getChatMessages() } diff --git a/packages/ui/src/assets/images/llamaindex.png b/packages/ui/src/assets/images/llamaindex.png new file mode 100644 index 0000000000000000000000000000000000000000..139c33eb027b7ea654e151a4c32ef6a5506259af GIT binary patch literal 28343 zcmd3Mg;QKlur;=@IE%YO@C0{vch@Dz;;w-NXK{CT2<{HS32wpNT?2#=;PLyu_eZ>{ zy|p#BcJIucKBuSqbVsPE$fBbVqrkwxpv%iiX~4k1a=l-iK*aYuy&-QO-oIeoG-M@U zYNkj}-UkTQ;>zMMF!gb$|G2eX}N$Ks}8w;XGV0h~NOP+x*r9$5G3FJkl$@;}4mJ$=_i(cn63LVI{} z!p3OvWjS+@oCF-z>}uz)>Qn#f)0)#&=0;aj8qNnO|C`%YMB5Dqb=vo;?-S7HvmTGf zXOphSOVR1>tDZ**DJ}5Pjki!%Zr!~wd7Sn99$<3srU}htMu=Q>eEa{vz+t1WOhuS{ zdh!Qq=E$B6(Z95L;s2J#$`v<)jq-F7G#T6a{}=n;op}y#aYMRFve*y_unT$I(ePe) 
zwjDyhP6AEKEjYMNo5_;o&Rb}SaUbS&d)SlNk5mhnwM?EYa1)WAISL> zo{Unba(#c6`hxGg9dSXPz2-r=R#N8de@Ek92>rL#db3*UAj&G!YQ8Rghl^o_onGhp z-)?3p3i94=;^uOV1uGIy3o3rfj~z9duVfUIzdi5V7=7%@uZ9UJSDTm@_F7k<@O|t& zX2lc|lf_(@2=z-Crq`=N;ZMeHZT^PZeRkngx!d4BOA^JFg0*tUtu4Ow9f37i!f_ynQ#n|4mQ}+;HJ{wQ}sz z)}B|`)m|?Gk5U21$TlUJuwOun)0%=CY801%ht9@2QAT>WDiOpW&DSdz(;b$vVe4kF>?-#W zItwOf($-)}IS%}CiGNlpm{N#>DamM5fPrzfS)bFOj;c8Xdl6)*yS$Tfl&mgSHWI-s`ZUlApyIFlj@<~k*(#buGt-nxH)wW|v zRbI6kM@D|%>(^6r^|Q+@1zB&s4vP%L%z)=nU#0UgKTO2NS?ocJo(Y+fZ zZj%|EK(^Ml`)v_{had1_u6INQ3BV^?rgSY;(Of3JHvy|I%v15Nd+;HWfB)Y*MUcdaLhrs(Bf&J^GDV+T5Q;Siq6;eo^SH%=BKfatG^}Icox7JP{dJE+??*bfRlE*9s zmzq_?NZZ>7T1kuJ{H_OnKeT@39V%iMoVfQEvg<=#C1%iO;>uFdWXo%1JcF5nvD0nC zF73piD2#hiy((IBblF>l(_R=gc3BNk8U~`o_u>Nz?|JN zeqK>v{=z)Dr%%^YRs}OCrTaqBd8E_va{QGXqsFm%iYOX`M-gWJyBl|^S*|}o^O5I2oFjZJUhG0r}*~vd@?iTpLkf!6=!X^ z`dc3M?m-VYuVIY?#60mG3?IF7WZ=xF@DBS*H2D|88|5>{*X@Hsq=}tGkQ(P}XYRjW-zo-vbo+ zUfuUptDXLWLq+PKFpY|~K?hoP8Xm09FP$ebU0V@aY03*bjU#(5HZJ0vh{scX7r*ktmi*%ZZ_4pZLpLtq^mDv z_xJAVbb}Izvva17;Ta_On@bu6VqOG z^t`LXxl)u2BjAY}DX|17Q{bV5eoj6;&X=<&s$9Iqjq>UEJi_y2=E8Imo(V-JXicsfpNiLXYEZ2adw6Jd!St+*rjw|}H^WvZ+{lUVfomp3)Kk`hW_H3!m2^?aL&= zTjZ3fkR8Xh_u-_!)ZO=pp+xr)GTC>pF;eO+L=I?uHF))hq_}2G8Fxtdytaos@{0w- zza-K>BJ(5_6Z{DoD&)##oue7aE0~=Hq5O=yzJrisLQutnQ&PtXj@J*EanZw1e&psX zZ1slrCjhk-SnShv*UXUi49MNx*K)Cr%2JxY4YM_^YzP|>Ekuu?L;c{L-y zn3(w_{Ic%G#m{fU`5s@9pb!IpW|YqR;Kr@MVq!={W2nNQI08i))m>c1yP;s&?vc(gE?D9i)}V~%6=mx?$K zUE9`sM3r#(ADtXos??o3p?M3zb;#CzX(4L^Q*MdXZ#S>6{rzjR_WI&}mP^Lf_J!i& z>a0i*X#>>0UX>fwYsw;Bd&%CE1}N zLJdPjDug&hE>!+*y`cs^6N$p+w6e}dq+f}JxTGxCC)ULK0MB6bk6_L_j&2`&g#+XJ z#21XxCarATeS=iGJ>LT-`U*8?a`*5bdI_Le^>Fq4NRVH5dw_dLATy*U?(eA`S^nB? zml~In{~BUNkzz0B_KkN}KDStCHNCSMexVkvlsX;>9wop9nDSRl8eL=#B;)@FS}9i0 zb*A;(=w{wkhm@v(;U2vq-{Xt;op&-19#?rJsG?ad=!bj&8G;pexd%XNIp|({--sd! 
zR?NS;Qlzn4P+D02ImIH*TGPeD-#_qN~Gw+L(0S*`2+ESffSEa%){zAXGtTfAUOVtvLGo)+G z{yA@EY5|Yon;m4_+ zSKtddXuNiz<@dp5X>SH%>pKPRArwP?^U>tyyQkiaq>G=$v#5H^B48M zy{Y(48*Ztr;lN8hWfC$nN&KR+1`H_UQJxK$?{Q!WgFcY@Z~=-?Fm`~bL`UNL(VzOq znB_D7w4m1?w%MD?@_IaAU_aH0`3MEQ2awK>FOPEt#z>4r@`O9clmk+N7FCD5*Hm&0 zQFUmX9vCXdfvinf*0*Z-I=Sr?KU@|ji4u|um{!dw+x&kbcnrnh9o;kun#`n%o9vJ& z&ppTXgtmA+d=!-(Dr}^7BnmM`T3#YSqqZU?h*G^>J`GNH5y1lQIa_}dE%EiGi_xsU z^IpaI2vJtMS)CUZ^V(|76X;kLfCs3w;$!2jgc~}oACT+(K;X3p)QV;o=hoFoyZxHo zvk`qq<`8t`bwCy}eQBJ${U#xFF4>?bFW|oLDI|74;3q|Yd_sZ%aW95Jl0`v`*LITU zVFhAbSq^*mR3idj_?nZTQa9NKF56v!`A*^!bIX_n)+Zg_2_Gu^!#*zZQiXJXeD?KE z<-_bs`%N+`EW%J4KYIa&=<6JJ)~%JCN)X6$rbQC`8GEU{; zf6cpbQ<@<^TCQ%Is7z*v^Y5&?bA;WS&M4`|Vy+w`s+9vUfDR<{S+#iyw=j z=+2(n))`ni+AcEFkT0IWhZ){!PL^!(9;E+=V`a0I0?D7%xZtyis@>Lbg;3~_)Zri> z3(k}H$icQEztZj+pI`f4ggcaa?!uxsS^8OH(T2okbp9d!!@b0U{&8t;>;>-GInVPu z_}Jg~3vOt4$EYo>A8;4XS1wiX>HON6KPnMQVuS?&jZs4n@S}y|r#vi)!toeDbWrNm z2J=!cPGy>NwY0TF&zX>k0r83Gwm_-xlG!8km+!H0K#%7aMb*3~!boXWS|PJdwNYGP z93F@CsRXnM9dH3L#Nq~V4%-YkR0ehCVu^)2)Uy#={6~c5+}#Rz%&%ALJvo4Cs{#iE zsdXc93+!av|78iR4skIJ12y)@j8kyDLh-WC`_{ihIV6-Nv(SIz3u8aN0bStILtOw^ zZq%GU>2La0Pg%IiTeE zGr18Bt`LqrFrD38pNh1a&x?)-hz%$s;~@NBA5ov>KG|B(E`5TSx1fd z;6#8!4c(5^Loa)s3l^+r#!%}bGo;jCciqHhCW;-Y#*~_GvGA>Eetz@aXcG57+JbNp zd?s(JVUQgOS#&YQ#EetzqU7lWZ0ai0eE-eEVD{3Ak5FoU@S{;U`g?lIrq*tXu{tIuqVcf zFR3+Mat9VYd#j#MGny3c{q}Xf;O;8V7)`Wi9YqPygZ6KEW#_bow~)hs-bG8n$3V}> zjEdc^4*`lwBdG*MMgZ=b1C+#wpvhC;^eH4|V4r5>Zm+<2coS`2%;sajRe8FAOjN=JvdB3;;v&f z>%~2hyRK42?2#ilMucSx%$a4vH$jUqjdMK`Wo$Xc+3H;KTchs98VmCqk|2Gf@d3@Z zrhqc^_unAMEmy2FOdj59d9pU$Ba&!zyARkQV9u89%6q=5l$E`!K8pzJgx(kbd^&`D zVLf-KZy}~e2Ob|niMl=lSqMNFtIpWK1V~!u?jTUsYMWK zL)=5u!`8}2Nlg+^Q}>6(ICgvUFKtZUJpv3b!u7Dh^=btHLss5Fyqn@|=#ZSW+#uq> zyigQYlnT^eZ7@e8q#bQDQTy7HPyr%qma3n6e{m1)f4}bQX`NHsYP=$B(TuGw4ey_>lX_jB0>6 zWI;ZAo(`=N<74Jv3xBLzdEzJ$XYmfgWt6g)NkC?ViiWE741aiJotM1dMsw3pV#qZQBVft z5TL|}M2Sv%#+|u-Umb{gNUUkFAt(U+sz|*3NeZPO0TsO;QC*ATBA;zF5P7td(nn`v zUwa#iw=^xWsXJ&S&-`{`qUz>#r~0!Vo)K 
zZ`JB21|&r+_-yNnDL-M+61|FBM4L7jn`u`J0jSKGgu0SPQq@+Zr$bU@jS-zHQqqJZ zU}o7uCKl6LW-y{wagK6}w_d|V^JP^;q;JYW{UpkP-#)W@Cl$k%;38~Yw(~Eh z0>+1y18Gntf;*@!sPW4OeHL-}gi^A~bcd&BQX;4}WhdKulw>55y;`&nU$7KgV5K)# z3hI0+9fxMuP$=d(qLwFi;bI658Ip*~#?HA8tnnGU`BaHim`lqb>`4Wop#V_tzy9oh z_4-6dFg^z4*$&@INw!~qIoTG7J5%y7*oOPVAq}~jgEg3|nPe_!Yj9*pWHM3ov2^!| zVV;Xe-|(628l4vvYK(TftJM=LF#>MJw?cm+jCMyw2Iojcpll9*OFS^!MZvFS>I32l_8H3K6L)NVum=6<=H{Qg5__n*ai8 zks}|J=oG&_y4WR`yimI3Qmtnq)j4&b!+3wMl*0n&z^%n7u(dOu`>JA`wIMV$r^rWT zd~#ZoKdW@&nAURMMnN@Z_I%&cfUsA>jWjopsHp!pEuKCt`Z8?Ou?_dX3}bw^_tj?p ztf~lP2#%>ugA`yg5HpMdALqDTnmJ_B_dH0?6 ztA^G3P+J&n0ySAF4&0{HGy&)=&V=_cgP?xoQ-qiS14OgDg}?CaL!>E+bYs>8jGUs< zEA2K0=^3RrKbg5i{cF=X z9R-KU%wa%kW9?lgmrx~jak~YM2dR)~P7-9|A8ZxdM%DS^O(;C*i+WQnMLrM;m*l|m z0wc8<${;of{HRl%{wI2Z6^`+VX*F*iW!+L)8<&0Rm^`11@b761UkjP{AMJUZeO@AweKT)nsxjxXVnyU~A~1ORy`eVqx}g)T)oPNB!3R3uTTzIc1DXyD zhOL5)SAS`vQQ*}t@O>8vNdM7(aveyVZR-)}M{6{RRN4b-2%<4qt%0jY4Z_k;<$7Q}b_I)H%@(E!w1WfbOh;(EY z@$d*{w?0x3gH5mKbk2)FGL_wc%ji!@cybd#k1#9x#{9qIGN8q z9nq7V^7ORSl}jSUJi$1S_mUiWT4oq^m8lJ1+)#aj2k4lF$KC)gNyv}brnL75QSCQg zQ&vfoia=-~`YQ7SAq7_2>uS>=E;2{+Ei<}~aQq8u^aI>;)Il^~+%V};w&IHvJ`j3e zR2aj0Z)nT9E3-rI5uwMX4{wR*Wsz24lzCTzYk}C1`6k!?%FKHq3b4erzfKF$P1zcL zYcnS3FW99DvK*2>{9)gLlBTDE#g&EV!f~(MClf?}XU7xi`pcnyOKxVn??i}PtbF3F zBZ&T>U4$p1T}Y}xo{H94C>rT$CA>}h9Eh{KxWX$I9DZ499DY?2Xxv&iv?w3yn$YjV zLeYmqBd~P10(qYwaUEA|$sS33z$p;}^XQ-0AUjd5m>(`4d$LP-1hv#TC7+z;jf7rx ztwE;#ZNZwBIF9)Kg+gbdNoADxaE{^UGTslk)xi{imq78-T>x`+|K2oKRSTT;px*hi zWEYwXPGfAdvm&p@x-ZH7`Wxl_xu`;gTDB`)%Yv&uh{xLP3}x}>FY^2TSoowvee(Y@ zcmgd!U}n@m+auZU@i5I(Gn2FvZ<5(fXi~+8(fJYAE+*up1|F3md`qPX@O^Nvsm6I1o~7pRkEyk)`dbi4Aa z4Ud!_y|0rC9%p0=#?fk?;MAc{PM6U&!9%6rgd}%>(uL~n37~_E;XShRL{gP&5t;qW z!J57WuBEb(A=K4|(Gp3go_lon^J5Z2lbIP-Dy5qAK$)OaYuFiACW-usmxhFVShC2D zMePVd=UH-V;}BG}Eiy-2-#U@qOJiM_z|bK8z-_5Q+{-KjyyKEP0NTuBL5T7n9M2}- zBMflqro~qLfMCu0t 
z^|E(=(%z$87W4^+pQO@w&29>ADaks1fnrej<6H>c3J@>1r)iMoXbtw@Q;nQ>pxCtUEJ*n(Y^5gvImVahs+_x;5>2B&cd+`^k^%;os)eLZy0lnWf70m;5jIvSj1oUz5=uq<7B{b8fW*R^AisRG@DLzG|F$%KFgies2kq}#-#jy&k zJIkS8=I{3vxgQ_DaJ4=25HEW`{|dJLyhkhuj=G7BO!>JGMO=30NwWgTB=$+^dAaB< zb^3aMqop*hUAyFoSk~iPE_8nkxSb~d_x0~V+$-?~$?|~i*E5q($BkI%mfS(5=7>7j zT*+Pl;GMO;k9nhpb&m1-=`lfVPXvaZMG_ApV>oFg5@2#3z@h4A`wIF<)XlXm;Z64H!sv{y|!?gi}!%Y0|)&D+=zCChzo;0@*aFa@l zhA$#?GtTt7#cz5X2}1IV?oFMl-M}y}BY176|ACuI4~(V#=q2JN)(O z2r&>@Sk{@S8spsXL5JxsrA_XgMDWNS!g6LxF@eRhBOQyS#A z;wt=+>kSvYlbs&RL1j(@USM6@ZYbX+7o0n$E;J|zRt<4V4>G(uH&k20!1x4dVzOep zT~0)YM=#1DJ%TM3pICqSC79De5xIh?vL_{YOT!asD-v{m<|NUkBA{xz$j}*wZ z?B7SRq16!X*!$w}yN}}4%YA=FP>Nyg9s?4rIEiCHol_9;gv_)}{x!;k;aI2!D_^to zrb7r-YU;ur#xpe4!=JCa0Oof6h1=ALybbzkwNi6vf)$4alYRC9Mxa;k%eh4=Okv0k zkf7g2N?+f89FAUxY$OV+#7r_8h?dqz&%S^P#~$U{A*eL?TENg^*<$MCBAy?#t7pck zlm$dul~gOb-tZ6frb{wz`uqm}JMi1z#-_JXVceY^Kf%aMf+owEwQCx;J9i(?bk`D6 z)ADL4ki+`5 z0JRN0L?CvaQOnurOi6&^c~p`R!_E9!`kZeXk4owM>>)@M8H%N6J*PG-!;*0On46gXytW*H0l&=inn!PO{A7Plb3XZB)8vnJ$%vSlBp=FAyUQ<9o=V0x!6pmQhd81vg@9fgI- zRgm&@dF#+=m2sNMZg!Xg$;|aoE_n^vR?HjqSwki(9y1WT8^S1%D0`*084i@NwcCVj z*4zgOmHO}};WE_}nSVIYM9-<@mlFk8SY!;_5T-R3tO zo}tB47)oML-SAGQd4AH7BUyd!a^IZ9hhHSHbyZEIegWHLQ`IJ<145MqLoIm-Ql3ZjgC0mG8lk zD1XB_ZM$IQr-yLP(5AXmUX1Q6%|7HjA6$@G>B0jHqkn*Za}66Ad{oXZvu5k2b+g&m zBgq2&E8H%L!7#<43{f7uU}2({OPcx#2Ei{;#rd-gsO>N{L{NPL5>^L0?0}e>4jkbn zO7V%tj*T)F)zy&J#5x9707GaJfdasBCs&oMFX3umqdeFWr+Qa%M&Rb~uf7y{*jTmM zjxhl3j2?%EUOeH-NkrVp9kCc7Yr+_ikB}0u1L>;L<%&#a?7FnEFg#CLvx`b?8IeF7 zkASea>9-qo--|JQrk&57Esc&cGgKA~pZ6}W#Ps%LxRe-M8}~41#>DL7O~`l*U;{O& zBsxTbk?UuDxO~WygXNO0oEtdTc%Cl5{*e8;v1`H&uWDvKBMCN)8#bk-sYiUq9k7A6G;7Ht>KS{!;8KO-~a&skpu5HFHFP2F6Oq&905M@$Hw4lzFR)%k|3p+M_xzC`EqTtjuxtHd2Vthen#>BVAGxP|E z+pS1Wh*bdLO}4;EIo&*|TpwC8xkK>Nyvj%uJFL7b_ z$G~v|8`K&xW+DA0Q}cqA7Gj|zM3ay&Wkt0ZU zS?eRQt!Hgz)pPV-#EIkOhBKd&0|t+ZtZW6`_GQ7Et8?2R4X7+h7@DDQ(sgJD@J=4|e6rhGIHckuvwc zke5+=$&j(z(SDC)mbf$)5Af8*=73=z$WdsukYtXV-*+HgG-a(@a8LK)-bN?w^H-0L 
z-8-P}w;h0S`Ij4;OCDLK@opDnQjBTWa*suYHiF(T>19$XaRH-6PbEDiC{i`2o(>b# z8`ELdXGU)IkYyu`*$SdsCHT-RGIIf!?2!i>>QUNejtCmS4*Lboz|OWunylhN+}3g$VHt_Fwc8z5RX||`(Z(Lqu32H-rUyyUw4{2hC^ea zqyWI>q7DmJ7vY*&T8d9u03a3Y55;6*1DIQlQQ4363ox$={;V^&vHv=t(+6BJ6NRlcG!pnDaMiISQ*y7HX#KcmlMWi z$arqsDAQ4K6-DodoZBNHPU56T%x?qyVXQ+Z6Iy`IjP+}|r5bmGM{Y+iTT`ufQHB}B zH<7DD?_FR=YF==Z4E^YRnJDHi}`F0Z^`Xf|rZe6M&EoVwoG zV3pGyY#_q7J#PTDs&!;n#C*KAf_vddGux!1`y+5ei`eD&^41gIOHVjjEV$4xlYfN8 zknn`5V4;WE+3iXXL_73yxr%%-=kMl&bg1Y1qAECGaG8VnL{rsY0WIks11kgBM;sS_ zMjGcU86F(Yljgaa$$hD0$)`FUs~H|Fa3fgyfn;)-PmDA*u?vd>W>BdIY1`uG)wcVR zf+UHf5TvAnu3Ri&W2=o6%fM?|t(G&PIM*W5ob~k({V3kofCvu`CLum{3=EC*#QnpB z@hb~nr3Xc=Sx9g`>_n_?>eFCQI+d!!)puM=TlRM$BPZAqJ~o{&mll zqasvn&(mMbSveAD8LjsX^Jd-I@0Ijiq)QwHsmm4D;$NgYl0tdv2x>+HJ07w>iLJA; z9Tl*Z@ZbJ*B+=PTC~tEP_IIRvKXyU$qB>q+2V5;F+}6hJl4u}}a(E_j_(%N2KcoRp zp>nJr^mG}ReHJ5NTzvGLvDoTMI#kpfa}}&~5_`ZZ?kvA>@tNBYqA4 zLIc0nU$5!@XDgP)&0DxSK+lW^&B#SkZ%B%jjITL8=+5ADpy0Daw7WM07$2#AiqFVl z+|#is)YkxC&ag$K|F2QGKtR{&tKi8G^r-m0jP!qoaY=~vpQo#97go#F@#NpVl6xa< zkqDBP+$_&6UIfw?l2bt&-zwaC&m11yl{_&+rW&zTd$n3JASwQ|f^*(LFZSkfzS}!@ zr3USno2S$;L|E8C;+_UAwmC$#^cqZl*)@tEi^&|97FwT-Ooo)+YpZtDR?q=i@>Mu& z9`U|pZe*)1eirrQCb0Lew_vut1=>j$>tHb<%DX@TLdU_Q?P)64v-TdI$D@Zq{jToA z%lIcNC>>lK^9lG-cax4GcEvotrsP*RyC zZH^;xTI81U({2?c!(Ef&SQ*ZK8czz1U-`LNfY8yFthj)spP|gMa_#Uk&R>Eshvei( zGB;>Hx>HTkFY0V{iTv>QaYa==K9gv{+`4M>yob68mqP(n{{ z6HK7zd)Dkr1f(2p6J&wyMFdd4>iKsEZ^Tqr`ANTF}#LVa;kaU{7*im_?-P ze^UTyP9=v(uH&L9E)SAK#Hm#4tH$fJRb58SoyUH7n0GK-b0=Vh_e!`(EU?H&2iVkT zLrXVa`d2$Ta!e=GeW;XE%uNcL_kJ0?I{@`l6O&Puqw0W2A^{jbzUHcR_X|qmC3%0e zkIG-=a{F*Wdu@w>Lv-)O^W~Z~{ISBGu;#`Z{fT8se@G&C7mIzCTwA8uPG1RY6wK-@ zFNhaA=Cmc-Jz2a%L^}UCv&oDEi*&;aM%<3pkBy`3cmcVkp49hi01Ld!DIWa?hJhM2 z24}SAx_@8ob+RS5_0j+S(ad<`jKbl;Z&R9`Eon4dS6o7u9GuZV4NLTs77M`|=}P^h z^1)3|R|I>9@5qJFLEe}Rb!(yaB5ta_BE5hNZ@9yM_j4yPyBh%cr@*@XKW_u&iD(KT zC!Ga*B~TPo`e?d;JqXb}tU+8HUT&WL`imAM$dcrjgH)2SEwnREv&1Fy6K@Oh zj^uee@$CUxXH|>?%X@NWto!ClqkmM>1SJXEE-xug`Yq5~@)5ichoEQzP_@@N4Z$=o 
zb?C2vfQf~@k~zoj`&51k7>mr%#VWtTo>fU@R0 z5HOmpn=P#8ld}x#>SJI#7!Z)D;ha}G2e5J;IZh7E!%bPQ7Bnd=-`k)4V|e;eTY3z2 z29}ZRb%&Jda+`u(PyLug<}OmmCDabRM{FD#a*mfsbyAcfA26fu4; zjPb5ti!wT51T1Hd6rVYBbXR^#i^xYiST(IwBaMT0!tt&%+1PzM`Xhh*1^i!soJ!GYxbgepqItXIILg(st10`cgAABk1pG z5n25lttI2(@?xz%4s#4I1!{T<7Pp>BpAYBKCkd0ixJ)zVv+?Raj5etFAWfUk)zUATr&|t{XHb8lm9UE z1%CJ#$PBbIU~ML>lT=EP7M{@3kk;n#1Db+#$$PlS=2NS?RC~ zm@S|_Pal%yT)UErA054b74Hm!)w9O%>4>Y2V*FqJ=jJjrM-# z;WbweJa9{%+D)wM5#xBoWSvSh14oQTOx(@!HN0&~#_ZBZk{<>xf3z=Vcxvb=GZ5f% z-tbRDGi}D9l!j!p399`69^O0o&}%$&8qw@F6hxhOBZ>UbKYs)B z>*v%LBIYqy!Plf9hF_faREM1Njy1Z?BK_Lw)XDreTN!odV$NJWOHd@{ES!mYx{Ko# z1HDm=EG^R;eHHz?Yx)n-SmX^G@4|f5)6Bmnng^Z?H9Ld2uq@oPAY)x9)Um$~XG2|3 zG#)jFjLm}&7-jOn#3q1?Q0_FA>FQjLsUB3TLwGoTONzY}Nfc0fzpXW)L{h4p9GKqj z5rDy+7MT$NP4BY?%!7Lvvi_XGR|z*dPKMMIwX6+xBz`t;_&zGaZv)*M7R%wbzdOJa znW~{Sb}EM1-w7V|QS!$29etRVz?(ZwJ%f%=eGr(%A z$A=C7@owB5lLCV6dqi_^twSF$NM160!J1J!IUq6tgFA-KDm?RCGCdw63f=|n5oHHx zXlbQ*>km7%L`VE5HUPuPqnzn}mcHzaOf$*p0aI)SrsczKA2hvyrSp6GiVzOXnt_Y*Ll9D4-2h6 z&?1{S~k*lNML6I$EFUFb#8Av;+0Ct)3<0tFI3vp!o6(9=z*#9EsrLC?N2_qFLk zR*z8-*3w#ZbW?6igwSJhN(m_53+SObV{D zza(C_ES{kYWB>yq6bQ|!PR<7#8lG%RPP*{1^2q$$>G6mcKqFwAIAfJvx%zuTJeg`t z3LB1ZR;@9Amoc{;;o(r&e?0`Q)I{W?8L@8vS;Fh&>|zg*ipTo34LDX5?g zn0Jr&RIL)1^m*@Jfj)ByHYP8DEJL3B8ty?DX%qT;dydEU-2Ny3mjl_8{y}M!@Y=kT zQu95N8oBuk`40_G0tg69GBSSA9|t7v`IT39+A_WjsByAbmarO9xa40PE5kIJa+RsPEF75eIzUUDEf%mq&B7jh;mpWf`&aWk;`QwN7izBm+qx= z=`K)8)NG4QxR)cV&yS=xp`9uUV-(d)1{`Mz*Q~j9dpIhBj*(sI=-=Ci}l+Q$aKEo)LZBIysAhKJRkEL_=on9+u z2<>8LQ!k-4-~7E~>yI~xHrmo8losgp5c&qjs=16SUT(7W*Hl>z_;|0DDV}Sw_6I%x z^|GQ~Bg=v8ic8qO*8 zfRgdiVxdyYztIUH1P59ZYBK)D42F1|7)5NW$^K0u=SW@B@RnK=O-VFc&|nc5&DR3G{NG(YdlT^QxgH4~ zv9jvCsnE0K-`_ZSMNh7aM)5q4wt?Paw75-3cBmrj3g%&TB4!w%xvuYR^+_#lWD{9< zD>iG+5E4QVCm4^j8r5{EjS8ba_} zf8Xo&*b3hMiXuMU@ev1z<8fl)Hhv$n6wHbwx~;^=dzJ~{+5*0}<1ePT)D*j|VNv<$ zXrA7QMn9-Fg?22Ps7+wW@I#5jI{q$CGd=hyfG`)1;7UyeEFCHD2UM)kI|J{EN5m44 zHZNQ>ox9dQQ{o#WjpQu`F%MrTZ99Ld0T^lN*$Z;HCcK^YBrNFdd~zM&cgH3N^6D9F_~nV>4%)%!EzAd1tr8oRdE 
zL_ld4hW)OiZ0cw8yaEI!TRV+}vj8H`F&mu9UutiqF*4h&k_+n^;Fhi+{LG$p^}x`u z$8uxlvwOcYs}YJ&4@5FfLzY=ym2RcpKfG^&Dlt7{VWX?>3t9)$t0^kZwlnJ)F;SNn zx!`dw*!*^terbyc3*Id=Bhl}?I2D|&`UHn3O>hVBX#6Iy;I7WDnW`tl3u-iNrisxL zOUi^(cMDJQ?rFd}+H0Kn+{4hjN+8T8+bKeXRs7TPydB@!Km#I*?o%igSHVd z=lid-p9N>P8}6z;95->UaJ}4ki7383xb@16d=!#?a#hcz514`Z!=#16<^l~hIgJ^d zFq0}}ay&mXmc`?4C3AoukZwlw`SIcDRLNLV=PDNN-dKHs3y-t-tVp>|Y@0`+tt@dSFT}4d3FjTtx?$!Tg zF$i))NMr-hEXJaHN`dQp$Kr`Bm+AS7e9|iRCQB(G_&$a*qGZG+zrXZCK~QIc)=^qz zNNHYqRLI^0hx5r=()?liyr7=RXN#^Rcbk-8u+kv*jB1ND*w4%1e>(fBu(rBrTZ$KV zr&w_(xH}ZL7AqDYgdzn36j~@A912B(yR{T4?hY*k4FoUlt_6ym{O7xm_qz}GF;CfR zuf66PbIviwWLX!ck~h8du9~6LY`>Wdyk)x|R?g(G{**A5_j&NR2%(v^j&%g>T{q;J zB8go%y^NB*Xes){dsW&x-zyv%K4wFS*Qps#BT5BG>3K^zTpm@S8SUr!B7iXz-CSW% zEDZR@1;ew6aKqlRs-^<#`2j(7i>fq3KU|U*j9V0n1p#G8eCe0q0WpnQi<8%{;>)q9 z2AF+|f=RJrqUe!rtLa5__!dXl&jJE`t6}YigsS>#gEK zF_WkQu$p+^T2QaOla?W^2PO=vk56Q9cJ*-U6}iPx*Jq*I#JN@|O^?OrmL&~v<8hML z7dZt>ujjmWAw(5um3vWVDoQ1Izn`Ep+=Vs#+|T<}WUOZ27Xwvd$GOO0rGR29Jr$Zz zYy&2@XLZD6DMzftj%vRoml@w_WriD2kNL&pyr;(p!*Cxv|l~OkG}rO#~3@ms}m^(|smz{&RV@p^9m!pHkaiP5CCo z+Ytz}rj^$v$0w(VQLpH7!dFYqF;d3Di-Mi`dbIAs0dKKZ!#XtG0-uI8xLv1Zwe$$aL%ru@ezBlrXy z*M&1XbL|H=r;!S;i@#M0Lu>8Rc16F9$3*FMgQGIo(&ng1Mdbx!GyJ6?_bw@~^cMns z>0DIW^WAXgrq;R|0T%#o;4VRjxPP?ZC3=Jr_r9hN`M|mn9y%*<(5|7I*?@$Y6cB~K zL+jah3efSt^@#O9#0$mRWbq+tz1lpY6St-mqQ~a>_|t8%!Qm5ws(FS)o!xJB3QvMI zhqk4bqiYEf*apUNdqz}3F;el)E0jZ9937dC`;}ay%`=XIk?uJiv}DGPM<$APp+}a^ z(aZaGU_(^O7l}`Y(wy`K$0n(LgO#n-+I_sAhIo5EvJPx4Pot#AzcnNMUq)*yIh;mX z@3eY6&thRZuavBXZJEJiQjKz8EPPkR*%cTrgnM9($V{WiJ`0>9^Z;g>s4w&N`92_b zygAH+_t?T1t$BlhmDPHMIwA+0f#+xY)tlJ;(vV?3Qc+(YD*BxY5B}PT?8|aoI%pN; za5_XZ==Nof!c{%BXz66j`4WW(OJ*$T^Z*)Ue@z}|ZDY7(_s|q`1D19L`7-fk-hOeH-&yeW8EI&!>xE^^JX<8wg)1IkI z66zCek0{jn`qgxIeS&hh#>5uL6o}bd^yakz+~ZYx^It`HG5_sslrDDSng`7X+1DhK z2^BWvfE!H>T2r`f&MWz{*Na*lSu4L{kdB1c)cBS8q94e zDnobiW%Iunp@h8Kz~kz-w6a~%QlhiU_@OMt{|s6`r7X#0pAQl)5@8x{kMePm6Aw9$ z{IPVNO=#0uC~ZFTdO&fCUj^UnI>-lm?8#DdoIFc&;^PDf90M}&77YK(!g%e{T$4 
z0znkm|8RqLp!4QmT{-4JB~3yU2CMoKw;_ki071GRn=oXtTNk}yj3O#SVA7*4nimAC z-e44C4K6%}H+^6DHl7HsU^eFvS>etC0!*V$13=K~j}I2){RIAZ zM(UC|5*Q1d6WGHTfr^aGgb)Z1XaU;kGeZdgOjoi1d#*g=$-hU;VkD_=n{ z2F+4Gno_YIJ+{t-%(~-5W>*bFc(;4e1F>pKN~v;<^%E=Lqw}p|$`sN4VCK#eiz+Kr z$F)Z(k1i+LFrnJ`?O+v&65RH6!AUca<9&`J(_YQ~<@xO20)A2euMQ6gw@oh{jCK`5 z`>t>UlAe6+S|mYA$={sG++z13xv)Yd!5pAnw)0h~vONTv01c@3_q7?;#BIDactir*)iUB6 zm3U|A1}-q33R6cl?t{35hz?@qj>qZ|u~NpH_du~VR8>>HW;%NkwCWTaYn zcEER|tPzef5iyrBGmrPcg1ceFIwv_DXJ-=h+aG!}EwQ-C^re4yvL`PTO{8VfBoTN! zrX&ned#){{3ZJWh%~(1#NQvtBxpc4tq3ChEAd#iGAufv8Gxr$*MoKdJ3cO*hF^cMl zNoZDAGGMWic*8G()e!GOzBl4@*Hm14K6VGZpG903_3$F+_Q<9b@w8{Uw>}h*;#Y}1VNV3N7EoF;XVWM-taAI9uU_73csT9W7JooYN)kiuJY#w@-h-? zka3j60)MC6h|fUMf}DtyxJ5WbtGxCnevDCv5dTTO0!e{TtfAyKF3snS>6iSScOtq9 zcewEZ+X(M7;?J>T_k-0F87ub+yS+nCHgpgF#wQOUL(XSYQh1zX6np#X_Dfa0X(UgU zoG*~C*JLG_M!0O{W=hUNynhG~$l3L@fIBVqpt<5zNOOPz(E(BxyWP-GCyY_@0#AbTh{)6GzBM4}AO@?d!gxRt24+8&Vso z$2j6G*(pyRHL&x|XDT?HU~e20Ft|SVA5KU#{0xf0a?i#)-J+B!m79*dYW&N_ zR8-V;&ty(Hw@;xYHvV3j%XeV^ESF79#;nS9Fs7l1?Etw72Wo|Jzd5 zsWSewNo@4lE)Cf6K*6-6{`;tntJaNkpy_>4|MfD($%=Z?n01WLUlilMKQzYMH~N?1 zQo9%?^XS{p+7+7+6M@Zmk6Ly!Zi@0|*tSOo9-_`<7;fPCIpLh#-h_d^4E~EG6^}N0 zXHdOdvhNljVzTc0Zr5?FPhy_$sc(pd3k5I z`FTOVHYCkhK}*=Z{@|gY8YlDNB{x;h>ooQhfHRL^v415Yi)B9g5I*3mi2xP1f|SbE z5(DLqS&iA|@P#wgod1~p*_72cEY*B?6>=&V*A+uaB|X@-`O`KY-BcJd!|e654RU;k zWf2lUPIDwKi$0(5b=LERca;GD29hR}pX$w>^2W(v19q*L=k@NZodj zb9BC>MFAD{u?J6lC4f}UlFE}(g7PJ~dTYf?#~W4Kf6@v#sMe?>b2iH5EoA(glS*F) z>k_%+WT>0(5fB?y-V`MREdI+#d-1|c_281b zF|>-bl>EgmFQ&e+4t2`lL`F{3%5lFBYO^%$eX}VmhC7ZEzq$d3;}!660PNMoP}cMTn2fqdFq>XgVyQ;bv)yKf8734{s^id1>)g zB<%C1s?r>#XcS4>yJp3dP@nzvLuQ~RgsH$QGG`8ojHMX)>8p#dD|ohvMSb-V^}lo| z2_=A3$k)sXDpYj>0z&WeVQcE4DUg9U#5*7rK@ny;Z@Ob|4hC75g-KKz1XNum2(kZPuXIs~?^s)+xREUwJOyLx~3#P>uYvSyQ+r zU{mY`N>ZNGTo(QinW{wuWH%qOW^4Jf?!o0%_6mx^_T&~>Ek9$ zyDcrj@+ERdC9Jut%nAkOl!Vh_T1w0OV5T5aPn$7Z9O1qiay#=(JFLnim=dm7C5oW5 zj{%q#y{fTV#=)vZX1Z?MO9S*E<8o@LeA|2PrW5`c36qodVr(Zw<;z0jtD;bCJ>2BR 
zwG>a!CT80A8;uXe(=~^WEAT|(t`LM7jzYCk`3ErvI4Ey9z^mpo5Ip2nDx^vCHKte-%twS zbYS2a{0S7a$bKS|(rAAk5|2}Olz_4-i}RM`WxZ+SP(J<#59K2T^fLjrb!pk#JfKs0(H@gtbrGU(;*qZ(lX!|nMGTBl)@?;my+hJ+ zJh~v~rwTwnqkWK7^@FMNy{j|gkm?SOujf4yqrDsn3;vwZS~`qZGBlzra+`UMs_}FF*XqU0Io(g<7J)?#9nG-Hmefq zmBy%ot<5O%fPP?o{{Bf6N&FvPH6!t;qt>5-pGv0AX`$EjGCO?yp#r}Iq4mEyarkCA zDnt(Uf9o<)wg2TSo7tR&1x*8%1x~}%RCRVCZ{95hWfehmzGAp7Vao^Ru-z|{QsLUr z%uJjR#6T_BczaQjplQm09O5}ug;Iun=6OFQ&N+8-P=f8$QK=#?Y^qBd$Q?|8s!b{OO=HGt-nJOVO%6CBd%)xkJ3AM?WH>r3aW-W{v4P!1&8H3ZWDd?xYf=^ z=>AlFiRqwVHi7q*T)af=_qfi~gwrET;PZN-^V+?CmIXP$R8dhVfu*=qso!9nL5&TB zzT-k$Y&S|9CH+Bc*}sk%JllXw(G*qxcifiednlu|rujgY;@gYAfUQpWvRw=aym`%& z0=JxG2Kgy6vO-x|LE`Y)He6bUUewz_Me>PI*SgH=XV*tHbu+)?Z3>dQZYiv8_o>02 z!1@9h3jyy477)AE&;VOQADY>SB1rkpal6&PoO9&~(cX>_q~#38Yo1H3LzhLE8SoHe zxq;pee=X-NVlVdaD8S^X$OLX?GcTacY*tizalZCE+%YdhPu?};SAQ#Iaix82XwYk#56NQi!o9m zA{V*h-_#RpDhxkns2JX9$>=fU6Pn5<@$Y0XKHeO)9Qf|9bVjl!aqa9{HxOTo`io27 z{`MTEJj}AVmG;>g@d#`~nIWKf-o^%cxsxfPE*U+ZZ!bspaDgo6Pg@acJfoxY({BbD zUbla8drYMU-<&*U4q8j<%>&=`s|{#`z_3h@H+CzCNd~ofvyCweakk zHVRdxwx5?_Pw9x?JKus1^cI3v-D3vfICcyFeYy;ce^N_J!2(&OvLi_`v{ml|-Nbu= z6+JW2=KKWp46XPJU$)&k&$4^*KD3^%qi^a7FgTm z`7n{h_}5T2v(j4TC>#|Q6I-}O5D3v65O4ezK6i5?i-UGQe3Fg4njI_!vrC2(&fLcL z$MxhOfgf}6=CQJ-h+rEDCzLutJC>r4bF*CO+NP1YzlbbA?;NoP_~_Okug!|1@vA5L zTgX`rM*9g0cgGs`JdpRKBIUN|_pu7L0!@TVy3wkpb%^;XRGNPZ`z|sRw5EM=;VPc; z9Y{s-bE{uRAVBp;=vtyg=sr&M=f8@s0Yj+~DhvASM3 z8Tj{_ZF;dz7g^eXa{UmO{Prrs6{>a9OPVJv8zq1udtEp-f2{z%A(zbR21|9=w8fkj^a$ygL=XS{$iiOs^q)%J48R(9oA>VGTT=SWCu%uC2jI2J{V zZ#y7MjYn!ZKh?{SC2n0{B&~a!#LMx=muB)yI%Z9T&sN3|SEl%e9Gg(Wgb5-J_ACHO zlxF5EtUutL@H6(p`?}x$65b^LD&sOPMTzlmi%xW%Sql|?c?@mdKM)Z6V~pXxK9PY* zn)`o55OGpAK9Cx292=*w@QV0$3$dcYc=4GY_)9!h^5vxawZiMnV+e^2!5?Gr zk`~U~UxMyJR(~C$rw*2J^RDohKlCUbwah=i>$SP3Vthj88MHW9lWw64f0dbj%YMW9 zDmV+95h)TzY=|%_&}4X;Gy8;EE_sL(fsYbLI03|ADPFbshACxrEHvM@p2YHvGu_=L zr{A2U@|q;E(zo<~TH)o{h1M>4y8 zTbq%!G07);`=b!*dR=TJXAfOiJo|%sm9@9{(tv0yG$9mcI4PaJRnb1^K>>UiYqRb7 
zt))4Bi=zPpriU*S1dXWQOSdkDO9g179W|cZXBhFMagC@98Pzi9456BTz`}87t;abo z%E8F~g&(OxG={v0LpaUDvdx3j-Tux~8-a^bZUmmoXy!5)t!AR;7gb>ze1kammA)X{ zsMf^$vAF@~zhiNJL;5rzb^wP*#1Pr3Ms3^&zT2#DreV+7_*m_{j~Wq-SM*$0Hz5{gen5D3dH9l;e`pIPAQvae zwC$=_yBqkB)ir*+Au1sua4_ZImQ_cHr5t%HIvwV*j(>ctyvWT9hF(j3`0sHX8}aZi z0A5W$um#>gdawm8;Z39RPsy@09jE6P=Y#VkZz9A=Igb;zZTOZ(x4>tcv7-N`H>RT1 zFyvT)14bd+fGhI*yPxW&@Xc4j43;fLJC46KAaUhAdD$(RD|ZV+duxJ$*#f$`9P%4R zthn#>7*vVV7@%cZN!5wG5YBRgZ-h3BSg5!~&@M&=amK5Ejzo~*$5fy6-t(2%`0G(c z2oTv6Mi_HSF5dz=F&DY#My$6B_WNw|2efx2pSjIS>wM}%l2n>f@dy6n>nB*pQWhsv zT>Y6Ls+(X_8gaXj96tz_qiQ&dy}T*PD~{~C`{aE$l)uTOWbbmP;aJ%x-Unhzd znuUg{sLeHeHPu60&YAZ%Dd-OGC$RG!w`9DM_PLd`bls^)23*)p2}p#r<($X>YM^Vq$K~sMA4Ms)$;vlYg z94D5nPxoCshje)nB-a0euMGrD8-XKIN2;gbn+r^brox%}#EAQ8+|-}`&eV&Ob?A0s zeK{s1&o#gx_e|@6qpTEEqvoC#6~4L6VryWrZBe&%#+>hhO6CPmPs`r^l0XQJ^WduW zQt0~_f3uJ5w-Uw>nXX^UsJ-ys>iwRw4Y4yD36XH5-T{Gq-LthKvRh(@ok@fQHV-!DGm|EHT>|k6Sg^ zGz_I=N=)?jxitFlQ-i387xXh9sZN^9@G-SiW4xAz-zuol0*}uDlHvSqvyNix7*81C z)~Zjm+DfyfU}ocYuI~)`{Ovljaul_s=%~_016ZWYPp~+b7TuO?`?1qnNjjH;#nW(J zZ*O+Hkp=bKN2DHKZgK9BOf>eLdHuT`G<4{#XhZ(+JOPR#8VxF|I&RRu(;&$DwTVdK z1vx}xGa$3q0-a;@xMlw>7<=Ern{FBuBHR^0ozfuZ>|&LvP%Kg9g2_}4A!9l zx1kOWO-e1Nsg!|2Fg`S^CvsYQi!6spL7SaQp0dxkAN3|#YatR5GIPpx)Rkw#6v^ec z+EzU06X$r2f;rzxxl8W({t_SiyA*kyr&qfnPTU$Feyx&%02 zQ>+Tx`6W40}-kmdV^*DcS0ejKJ zJg8B;*SS}fXX7>#u|*ZHp>}Y?sdw@wh!!TAoM;ZWC5Lvf3yGb z27aCJxg=_wYL#+$TsZJBjvyq?N#EW9pKVcdL9Xlh?fC%j3#{le(#dU-Lv;~ z`41SQ(gQQ6}r?;L2qLJ>gX>;zixTFrJFfo-LPSCzc zPE);i$)$ z8Q&{A^P*q>kfl2-|0VYV!ye&x)*s@GJ`sD-l#N7X)Gm>%6Hz_C@PMREaoQ9|;7c#H zPa>5L3NnRie29;_HfVefmjvI>MeP?j+N8V6o?R&ve&y_Z)w0CO^y#bbkKKA@CqA{` z;H8)1Xj`Wu8Ufr^&a)RMSZ)|$+9G5q6ZwCCbS86iEr{zntxrihQ5Mys9ha<>e$5z3 zkTc}MjO{V=P%>lvzlj6zN)iqioi5Et9Ad4eoAUd4-W}3r0+ASyFa;pL{^~6$c!1Mc zO=}LKd7edjFtK!T$S*n$#UOuRM9mGz{j}0eD*t-duGwG!fiJFFGN5K5`9Q{>Ev1x7 z2F(&5nJNvctbQ&BmHEq1(D%11?4vIe#evRBAhuQuf!yCYr#)Vqn;`HzSJ;4SU}WZZ zEWrdGLE7z)N*AtN$`lFg<$pVy40e(h->Kl9y%WUi_ox$+Qill2vfF^YE@Vw(YsZXo 
zmg{$N?duP!B#dN}k_$L;=fi#8yoHrfCp_^7UcC=WhYEBCFb?pkMw=xAM+l_s3jBGO zP$)4hRAOlG;~n^vsUxd!IZ&Lk+@fbE7S<6_SLXbP3l>CO@-fVeb%%`19M9gz{ zB`C#rIWSHd$9JUd*QOhtj(k>WonT;C=i|VWR{B_1H}|Z*P7QB`D)oSU1E*?Qx1-Qz zyE)amI;y+_?ek6$`m^uxH*oszpu|E`j)0mwcJU7P!}&$M-(#F^hjgd~b}Y#U?L~ab zQanqi2k+C|Cd8jGVgnJ1>KNL}y8B+foX@wUtsd9r#dB+WK6mycF$PZUk;#*enDITg zJQQ^%eCrwG2lawkXsE>Ri-D-t@8ve|zqUX6wfqkz0^(Gn-lg!{S-<96f~~U-w6%Q! z&}>wnX-Q11GY{3m#V7PhzZl<@TMlWjlfM()X>zVPuiu4Yr%SACr${n2A3#v(;0st( zy~2-kAV&rk-z~ck;GTvTFt(XE=`&vzpNKl0m@i~cWIe*MyAD2q|9w)>v`px*;<%*` zxpMX+3EciY1U}w`j0C~MWvl{5(60WRp{Yr?vHzj@YONz^;JX!-PKBjDc`Jk*yPbXVOmBi8MTb^7vp^J{n2=q=&Bq*|_v zN{cHO7~ZR8kA&>L$6N7x&Jst+$HFW<1BU}P31Rd2CXAfr4(}nAlftHlxvr8AkJHtY zIm7`>R~-K~eH1@Bk%vNC>giwcO)5pJaz^;?KIg5Z{l%SjQQzD=(TGHW2VaC8ez02E zL0Vt^fr^5bjQl03)*r7gg&l!jcI$!{$z5X%Lp2`P5j)Pum%Q7tyYZ@my}-sm>Z7~v z#EF8ZqxABPMj2IH_UZ-qeCrmSk_cL!u`CwX?+WT{Lm`QX9OCwzrV%)npwgwGafQGBc%ya7L&xu?5G@ad;NBg%~t zKYresavi9B>!pJFhvKN;(jo)tti=mgn|;@tMl+~-DZuc zq{IANlqj4tEl-6vCB~aBvEwZ0(Ohp_xA%TopHWh^gScZfM(@D|xYX6m>cuBr;+i)2 zw00r-&`WLtU&m;4p9Q3uQfSNnIZJe28G2-A_w{IN$u17d+;V#1Q5{2N$rDlGnHh8@ zMI{P|^NDDfnMepaI9O4V(>PAH>xvB!KQ4U;aFtYzo)FhkZ~XN$4z$YP_|uv0g~H6# zj*A$I2v)JW{@nn$D6l^l6crgZDrkWv=m%fRaay^-+o~e)jBzf)j%+FNebJude*2sa z#Ezm_rCWU5uFs0?dedaYXXxt}MK#u;m1<3%3-Tl;fSkReZ zozSw=j6LS~lA`1+`gtbIKgpOb-G#b6f7!v7(iR-m_JubjmYNB#^Sni&EW`2GmE*%n z0pp3k0y_}{l`sUwLyiMSIM7)B<{!>%j7YaxufJ}-N|bmO+4!QzCT9QBSZKFVA0$fx zT~48ip)>6By2_KpL$~E;l!fPQ;*`jR(|}h96f}m|oGxyfs61(T)eq#3`Fm~L{9T)) zj4fK#m^xtyyjlGeqyBhY%`GAP&e9-GNbOUr^GL0I{gE8g`uX6#B4>!$I*LZ@RFx_o zbpAT?e#|~4tB-LEsh)rp>(4Wz^3X-CHKUCplW_$XkzIaG9kyb6=!U~3G>TsQw! 
znk%`RFSrX@ai8}tQsf^*Qgg;LTfAR%es#b7omXU~aH(G&DrWrsCYJD^Qb=As{b}>> z?iAw1_ETk*gaYNY}K&8@t zr4&)#6~q~M$n=CG?K}I|fMJsSnHi1AiQ%DFJ-nf)Ry}2!B|L!A_U1`t8OEK*dkM_=-+@1X?CY5;l4d=CqjnI5^09w!qGxd!`m zQ>+Ts&escN=RV(#KfyS<9vVv1Y`)ha>6s6tOe5TiIv!B?F)v`}{lxJP9JODYU6j`@ z0iR)yhqX_d`m(nr?_`b$64Fq?z2}{b$ps>!3F6Jznj?MvLI~K1$K!g$Ph^vSd5Zu( zG)T;TCaNMr5H_7GY2!5g(M>u%hkyh!?qxfqF9}L^K$6>SbqeVGm{bU8k??h&i{y7o zp4#$;l`Jvw7qzfk%w>!20CSca-`X6MZ?X7|afiQ zwCv+WK3-|)48iE^#r}A$0{V-@GDowWh=!2|G1SPj+=G!;yafwR(f{V2$(^a_{Wm?V z^5P3(2!7Wm1p(r+%{hVR#M&zsepM?KA*9`yvJ(vww^+`e4eCrHnCGrS0Y!2;}nx1Nv IvTf-90OlTzSO5S3 literal 0 HcmV?d00001 diff --git a/packages/ui/src/ui-component/dialog/NodeInfoDialog.js b/packages/ui/src/ui-component/dialog/NodeInfoDialog.js index 6f3bec5d..a5dbd411 100644 --- a/packages/ui/src/ui-component/dialog/NodeInfoDialog.js +++ b/packages/ui/src/ui-component/dialog/NodeInfoDialog.js @@ -132,6 +132,35 @@ const NodeInfoDialog = ({ show, dialogProps, onCancel }) => { )} + {dialogProps.data.tags && + dialogProps.data.tags.length && + dialogProps.data.tags.map((tag, index) => ( +
+ + {tag.toLowerCase()} + +
+ ))} diff --git a/packages/ui/src/utils/genericHelper.js b/packages/ui/src/utils/genericHelper.js index b34d6c72..69491fbc 100644 --- a/packages/ui/src/utils/genericHelper.js +++ b/packages/ui/src/utils/genericHelper.js @@ -286,6 +286,7 @@ export const generateExportFlowData = (flowData) => { name: node.data.name, type: node.data.type, baseClasses: node.data.baseClasses, + tags: node.data.tags, category: node.data.category, description: node.data.description, inputParams: node.data.inputParams, diff --git a/packages/ui/src/views/canvas/AddNodes.js b/packages/ui/src/views/canvas/AddNodes.js index 7bf3e7ff..ea813df5 100644 --- a/packages/ui/src/views/canvas/AddNodes.js +++ b/packages/ui/src/views/canvas/AddNodes.js @@ -22,7 +22,9 @@ import { Popper, Stack, Typography, - Chip + Chip, + Tab, + Tabs } from '@mui/material' import ExpandMoreIcon from '@mui/icons-material/ExpandMore' @@ -36,12 +38,20 @@ import { StyledFab } from 'ui-component/button/StyledFab' // icons import { IconPlus, IconSearch, IconMinus, IconX } from '@tabler/icons' +import LlamaindexPNG from 'assets/images/llamaindex.png' +import LangChainPNG from 'assets/images/langchain.png' // const import { baseURL } from 'store/constant' import { SET_COMPONENT_NODES } from 'store/actions' // ==============================|| ADD NODES||============================== // +function a11yProps(index) { + return { + id: `attachment-tab-${index}`, + 'aria-controls': `attachment-tabpanel-${index}` + } +} const AddNodes = ({ nodesData, node }) => { const theme = useTheme() @@ -52,6 +62,7 @@ const AddNodes = ({ nodesData, node }) => { const [nodes, setNodes] = useState({}) const [open, setOpen] = useState(false) const [categoryExpanded, setCategoryExpanded] = useState({}) + const [tabValue, setTabValue] = useState(0) const anchorRef = useRef(null) const prevOpen = useRef(open) @@ -86,6 +97,11 @@ const AddNodes = ({ nodesData, node }) => { } } + const handleTabChange = (event, newValue) => { + setTabValue(newValue) + 
filterSearch(searchValue, newValue) + } + const getSearchedNodes = (value) => { const passed = nodesData.filter((nd) => { const passesQuery = nd.name.toLowerCase().includes(value.toLowerCase()) @@ -95,23 +111,34 @@ const AddNodes = ({ nodesData, node }) => { return passed } - const filterSearch = (value) => { + const filterSearch = (value, newTabValue) => { setSearchValue(value) setTimeout(() => { if (value) { const returnData = getSearchedNodes(value) - groupByCategory(returnData, true) + groupByCategory(returnData, newTabValue ?? tabValue, true) scrollTop() } else if (value === '') { - groupByCategory(nodesData) + groupByCategory(nodesData, newTabValue ?? tabValue) scrollTop() } }, 500) } - const groupByCategory = (nodes, isFilter) => { + const groupByTags = (nodes, newTabValue = 0) => { + const langchainNodes = nodes.filter((nd) => !nd.tags) + const llmaindexNodes = nodes.filter((nd) => nd.tags && nd.tags.includes('LlamaIndex')) + if (newTabValue === 0) { + return langchainNodes + } else { + return llmaindexNodes + } + } + + const groupByCategory = (nodes, newTabValue, isFilter) => { + const taggedNodes = groupByTags(nodes, newTabValue) const accordianCategories = {} - const result = nodes.reduce(function (r, a) { + const result = taggedNodes.reduce(function (r, a) { r[a.category] = r[a.category] || [] r[a.category].push(a) accordianCategories[a.category] = isFilter ? true : false @@ -244,13 +271,61 @@ const AddNodes = ({ nodesData, node }) => { 'aria-label': 'weight' }} /> + + {['LangChain', 'LlamaIndex'].map((item, index) => ( + + {item} + + } + iconPosition='start' + key={index} + label={ + item === 'LlamaIndex' ? ( + <> +

{item}

+   + + + ) : ( +

{item}

+ ) + } + {...a11yProps(index)} + >
+ ))} +
{ ps.current = el }} - style={{ height: '100%', maxHeight: 'calc(100vh - 320px)', overflowX: 'hidden' }} + style={{ height: '100%', maxHeight: 'calc(100vh - 380px)', overflowX: 'hidden' }} > ({ background: theme.palette.card.main, @@ -179,9 +180,25 @@ const CanvasNode = ({ data }) => { {data.label} +
+ {data.tags && data.tags.includes('LlamaIndex') && ( + <> +
+ LlamaIndex +
+ + )} {warningMessage && ( <> -
{warningMessage}} placement='top'> diff --git a/packages/ui/src/views/marketplaces/MarketplaceCanvasNode.js b/packages/ui/src/views/marketplaces/MarketplaceCanvasNode.js index 8ec5ada3..44cb75e8 100644 --- a/packages/ui/src/views/marketplaces/MarketplaceCanvasNode.js +++ b/packages/ui/src/views/marketplaces/MarketplaceCanvasNode.js @@ -13,6 +13,7 @@ import AdditionalParamsDialog from 'ui-component/dialog/AdditionalParamsDialog' // const import { baseURL } from 'store/constant' +import LlamaindexPNG from 'assets/images/llamaindex.png' const CardWrapper = styled(MainCard)(({ theme }) => ({ background: theme.palette.card.main, @@ -87,6 +88,23 @@ const MarketplaceCanvasNode = ({ data }) => { {data.label} +
+ {data.tags && data.tags.includes('LlamaIndex') && ( + <> +
+ LlamaIndex +
+ + )} {(data.inputAnchors.length > 0 || data.inputParams.length > 0) && ( <> From 2279ffd57dea7e29df3dda8364262708c915980d Mon Sep 17 00:00:00 2001 From: Henry Date: Wed, 24 Jan 2024 14:13:44 +0000 Subject: [PATCH 02/45] update conversational retrieval agent --- .../ConversationalRetrievalAgent.ts | 3 --- 1 file changed, 3 deletions(-) diff --git a/packages/components/nodes/agents/ConversationalRetrievalAgent/ConversationalRetrievalAgent.ts b/packages/components/nodes/agents/ConversationalRetrievalAgent/ConversationalRetrievalAgent.ts index b238456a..4cd13d13 100644 --- a/packages/components/nodes/agents/ConversationalRetrievalAgent/ConversationalRetrievalAgent.ts +++ b/packages/components/nodes/agents/ConversationalRetrievalAgent/ConversationalRetrievalAgent.ts @@ -70,9 +70,6 @@ class ConversationalRetrievalAgent_Agents implements INode { async run(nodeData: INodeData, input: string, options: ICommonObject): Promise { const memory = nodeData.inputs?.memory as FlowiseMemory const executor = prepareAgent(nodeData, { sessionId: this.sessionId, chatId: options.chatId, input }, options.chatHistory) - - executor.memory = memory - const loggerHandler = new ConsoleCallbackHandler(options.logger) const callbacks = await additionalCallbacks(nodeData, options) From 1fb3e25f5327ff16d791b4aca11278fbebf5a012 Mon Sep 17 00:00:00 2001 From: Henry Date: Thu, 25 Jan 2024 21:57:56 +0000 Subject: [PATCH 03/45] update linting issues --- .../ConversationalRetrievalAgent.ts | 1 + .../chatmodels/ChatAnthropic/ChatAnthropic.ts | 63 ++++++++++++++++++- .../nodes/chatmodels/ChatAnthropic/utils.ts | 61 ------------------ .../chatflows/Context Chat Engine.json | 10 ++- 4 files changed, 71 insertions(+), 64 deletions(-) delete mode 100644 packages/components/nodes/chatmodels/ChatAnthropic/utils.ts diff --git a/packages/components/nodes/agents/ConversationalRetrievalAgent/ConversationalRetrievalAgent.ts 
b/packages/components/nodes/agents/ConversationalRetrievalAgent/ConversationalRetrievalAgent.ts index 4cd13d13..406a156f 100644 --- a/packages/components/nodes/agents/ConversationalRetrievalAgent/ConversationalRetrievalAgent.ts +++ b/packages/components/nodes/agents/ConversationalRetrievalAgent/ConversationalRetrievalAgent.ts @@ -70,6 +70,7 @@ class ConversationalRetrievalAgent_Agents implements INode { async run(nodeData: INodeData, input: string, options: ICommonObject): Promise { const memory = nodeData.inputs?.memory as FlowiseMemory const executor = prepareAgent(nodeData, { sessionId: this.sessionId, chatId: options.chatId, input }, options.chatHistory) + const loggerHandler = new ConsoleCallbackHandler(options.logger) const callbacks = await additionalCallbacks(nodeData, options) diff --git a/packages/components/nodes/chatmodels/ChatAnthropic/ChatAnthropic.ts b/packages/components/nodes/chatmodels/ChatAnthropic/ChatAnthropic.ts index 794058bd..599578f5 100644 --- a/packages/components/nodes/chatmodels/ChatAnthropic/ChatAnthropic.ts +++ b/packages/components/nodes/chatmodels/ChatAnthropic/ChatAnthropic.ts @@ -3,7 +3,6 @@ import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../ import { AnthropicInput, ChatAnthropic } from 'langchain/chat_models/anthropic' import { BaseCache } from 'langchain/schema' import { BaseLLMParams } from 'langchain/llms/base' -import { availableModels } from './utils' class ChatAnthropic_ChatModels implements INode { label: string @@ -43,7 +42,67 @@ class ChatAnthropic_ChatModels implements INode { label: 'Model Name', name: 'modelName', type: 'options', - options: [...availableModels], + options: [ + { + label: 'claude-2', + name: 'claude-2', + description: 'Claude 2 latest major version, automatically get updates to the model as they are released' + }, + { + label: 'claude-2.1', + name: 'claude-2.1', + description: 'Claude 2 latest full version' + }, + { + label: 'claude-instant-1', + name: 'claude-instant-1', 
+ description: 'Claude Instant latest major version, automatically get updates to the model as they are released' + }, + { + label: 'claude-v1', + name: 'claude-v1' + }, + { + label: 'claude-v1-100k', + name: 'claude-v1-100k' + }, + { + label: 'claude-v1.0', + name: 'claude-v1.0' + }, + { + label: 'claude-v1.2', + name: 'claude-v1.2' + }, + { + label: 'claude-v1.3', + name: 'claude-v1.3' + }, + { + label: 'claude-v1.3-100k', + name: 'claude-v1.3-100k' + }, + { + label: 'claude-instant-v1', + name: 'claude-instant-v1' + }, + { + label: 'claude-instant-v1-100k', + name: 'claude-instant-v1-100k' + }, + { + label: 'claude-instant-v1.0', + name: 'claude-instant-v1.0' + }, + { + label: 'claude-instant-v1.1', + name: 'claude-instant-v1.1' + }, + { + label: 'claude-instant-v1.1-100k', + name: 'claude-instant-v1.1-100k' + } + ], default: 'claude-2', optional: true }, diff --git a/packages/components/nodes/chatmodels/ChatAnthropic/utils.ts b/packages/components/nodes/chatmodels/ChatAnthropic/utils.ts deleted file mode 100644 index 209996a6..00000000 --- a/packages/components/nodes/chatmodels/ChatAnthropic/utils.ts +++ /dev/null @@ -1,61 +0,0 @@ -export const availableModels = [ - { - label: 'claude-2', - name: 'claude-2', - description: 'Claude 2 latest major version, automatically get updates to the model as they are released' - }, - { - label: 'claude-2.1', - name: 'claude-2.1', - description: 'Claude 2 latest full version' - }, - { - label: 'claude-instant-1', - name: 'claude-instant-1', - description: 'Claude Instant latest major version, automatically get updates to the model as they are released' - }, - { - label: 'claude-v1', - name: 'claude-v1' - }, - { - label: 'claude-v1-100k', - name: 'claude-v1-100k' - }, - { - label: 'claude-v1.0', - name: 'claude-v1.0' - }, - { - label: 'claude-v1.2', - name: 'claude-v1.2' - }, - { - label: 'claude-v1.3', - name: 'claude-v1.3' - }, - { - label: 'claude-v1.3-100k', - name: 'claude-v1.3-100k' - }, - { - label: 
'claude-instant-v1', - name: 'claude-instant-v1' - }, - { - label: 'claude-instant-v1-100k', - name: 'claude-instant-v1-100k' - }, - { - label: 'claude-instant-v1.0', - name: 'claude-instant-v1.0' - }, - { - label: 'claude-instant-v1.1', - name: 'claude-instant-v1.1' - }, - { - label: 'claude-instant-v1.1-100k', - name: 'claude-instant-v1.1-100k' - } -] diff --git a/packages/server/marketplaces/chatflows/Context Chat Engine.json b/packages/server/marketplaces/chatflows/Context Chat Engine.json index e4fdd4f5..7608a550 100644 --- a/packages/server/marketplaces/chatflows/Context Chat Engine.json +++ b/packages/server/marketplaces/chatflows/Context Chat Engine.json @@ -495,6 +495,13 @@ "category": "Engine", "description": "Answer question based on retrieved documents (context) with built-in memory to remember conversation", "inputParams": [ + { + "label": "Return Source Documents", + "name": "returnSourceDocuments", + "type": "boolean", + "optional": true, + "id": "contextChatEngine_0-input-returnSourceDocuments-boolean" + }, { "label": "System Message", "name": "systemMessagePrompt", @@ -529,7 +536,8 @@ "model": "{{chatOpenAI_LlamaIndex_2.data.instance}}", "vectorStoreRetriever": "{{pineconeLlamaIndex_0.data.instance}}", "memory": "{{RedisBackedChatMemory_0.data.instance}}", - "systemMessagePrompt": "" + "systemMessagePrompt": "", + "returnSourceDocuments": true }, "outputAnchors": [ { From 3d670fec817e8d7b0d4b6de5ee262ffc5b64efa8 Mon Sep 17 00:00:00 2001 From: Henry Date: Fri, 26 Jan 2024 00:22:24 +0000 Subject: [PATCH 04/45] update query engine --- .../nodes/engine/QueryEngine/QueryEngine.ts | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/packages/components/nodes/engine/QueryEngine/QueryEngine.ts b/packages/components/nodes/engine/QueryEngine/QueryEngine.ts index 14774703..bd6e040d 100644 --- a/packages/components/nodes/engine/QueryEngine/QueryEngine.ts +++ b/packages/components/nodes/engine/QueryEngine/QueryEngine.ts @@ -68,13 
+68,15 @@ class QueryEngine_LlamaIndex implements INode { const vectorStoreRetriever = nodeData.inputs?.vectorStoreRetriever const responseSynthesizerObj = nodeData.inputs?.responseSynthesizer + let queryEngine = new RetrieverQueryEngine(vectorStoreRetriever) + if (responseSynthesizerObj) { if (responseSynthesizerObj.type === 'TreeSummarize') { const responseSynthesizer = new ResponseSynthesizer({ responseBuilder: new TreeSummarize(vectorStoreRetriever.serviceContext, responseSynthesizerObj.textQAPromptTemplate), serviceContext: vectorStoreRetriever.serviceContext }) - return new RetrieverQueryEngine(vectorStoreRetriever, responseSynthesizer) + queryEngine = new RetrieverQueryEngine(vectorStoreRetriever, responseSynthesizer) } else if (responseSynthesizerObj.type === 'CompactAndRefine') { const responseSynthesizer = new ResponseSynthesizer({ responseBuilder: new CompactAndRefine( @@ -84,7 +86,7 @@ class QueryEngine_LlamaIndex implements INode { ), serviceContext: vectorStoreRetriever.serviceContext }) - return new RetrieverQueryEngine(vectorStoreRetriever, responseSynthesizer) + queryEngine = new RetrieverQueryEngine(vectorStoreRetriever, responseSynthesizer) } else if (responseSynthesizerObj.type === 'Refine') { const responseSynthesizer = new ResponseSynthesizer({ responseBuilder: new Refine( @@ -94,18 +96,16 @@ class QueryEngine_LlamaIndex implements INode { ), serviceContext: vectorStoreRetriever.serviceContext }) - return new RetrieverQueryEngine(vectorStoreRetriever, responseSynthesizer) + queryEngine = new RetrieverQueryEngine(vectorStoreRetriever, responseSynthesizer) } else if (responseSynthesizerObj.type === 'SimpleResponseBuilder') { const responseSynthesizer = new ResponseSynthesizer({ responseBuilder: new SimpleResponseBuilder(vectorStoreRetriever.serviceContext), serviceContext: vectorStoreRetriever.serviceContext }) - return new RetrieverQueryEngine(vectorStoreRetriever, responseSynthesizer) + queryEngine = new 
RetrieverQueryEngine(vectorStoreRetriever, responseSynthesizer) } } - const queryEngine = new RetrieverQueryEngine(vectorStoreRetriever) - let text = '' let sourceDocuments: ICommonObject[] = [] let sourceNodes: BaseNode[] = [] From 21c47d8049c920631c1c53fb057b696fee319fca Mon Sep 17 00:00:00 2001 From: Henry Date: Sun, 28 Jan 2024 23:46:55 +0000 Subject: [PATCH 05/45] add subquery engine --- .../ChatOpenAI/ChatOpenAI_LlamaIndex.ts | 8 + .../OpenAIEmbedding_LlamaIndex.ts | 25 ++- .../SubQuestionQueryEngine.ts | 193 ++++++++++++++++++ .../SubQuestionQueryEngine/subQueryEngine.svg | 1 + .../tools/QueryEngineTool/QueryEngineTool.ts | 68 ++++++ .../tools/QueryEngineTool/queryEngineTool.svg | 1 + .../Pinecone/Pinecone_LlamaIndex.ts | 37 +++- .../vectorstores/SimpleStore/SimpleStore.ts | 27 ++- .../chatflows/Context Chat Engine.json | 64 +++++- .../marketplaces/chatflows/Query Engine.json | 48 ++++- packages/server/src/utils/index.ts | 2 +- 11 files changed, 453 insertions(+), 21 deletions(-) create mode 100644 packages/components/nodes/engine/SubQuestionQueryEngine/SubQuestionQueryEngine.ts create mode 100644 packages/components/nodes/engine/SubQuestionQueryEngine/subQueryEngine.svg create mode 100644 packages/components/nodes/tools/QueryEngineTool/QueryEngineTool.ts create mode 100644 packages/components/nodes/tools/QueryEngineTool/queryEngineTool.svg diff --git a/packages/components/nodes/chatmodels/ChatOpenAI/ChatOpenAI_LlamaIndex.ts b/packages/components/nodes/chatmodels/ChatOpenAI/ChatOpenAI_LlamaIndex.ts index 58b40823..8b3567a6 100644 --- a/packages/components/nodes/chatmodels/ChatOpenAI/ChatOpenAI_LlamaIndex.ts +++ b/packages/components/nodes/chatmodels/ChatOpenAI/ChatOpenAI_LlamaIndex.ts @@ -41,6 +41,14 @@ class ChatOpenAI_LlamaIndex_LLMs implements INode { label: 'gpt-4', name: 'gpt-4' }, + { + label: 'gpt-4-turbo-preview', + name: 'gpt-4-turbo-preview' + }, + { + label: 'gpt-4-0125-preview', + name: 'gpt-4-0125-preview' + }, { label: 'gpt-4-1106-preview', 
name: 'gpt-4-1106-preview' diff --git a/packages/components/nodes/embeddings/OpenAIEmbedding/OpenAIEmbedding_LlamaIndex.ts b/packages/components/nodes/embeddings/OpenAIEmbedding/OpenAIEmbedding_LlamaIndex.ts index dfd6bbf5..960197fe 100644 --- a/packages/components/nodes/embeddings/OpenAIEmbedding/OpenAIEmbedding_LlamaIndex.ts +++ b/packages/components/nodes/embeddings/OpenAIEmbedding/OpenAIEmbedding_LlamaIndex.ts @@ -32,6 +32,27 @@ class OpenAIEmbedding_LlamaIndex_Embeddings implements INode { credentialNames: ['openAIApi'] } this.inputs = [ + { + label: 'Model Name', + name: 'modelName', + type: 'options', + options: [ + { + label: 'text-embedding-3-large', + name: 'text-embedding-3-large' + }, + { + label: 'text-embedding-3-small', + name: 'text-embedding-3-small' + }, + { + label: 'text-embedding-ada-002', + name: 'text-embedding-ada-002' + } + ], + default: 'text-embedding-ada-002', + optional: true + }, { label: 'Timeout', name: 'timeout', @@ -51,12 +72,14 @@ class OpenAIEmbedding_LlamaIndex_Embeddings implements INode { async init(nodeData: INodeData, _: string, options: ICommonObject): Promise { const timeout = nodeData.inputs?.timeout as string + const modelName = nodeData.inputs?.modelName as string const credentialData = await getCredentialData(nodeData.credential ?? 
'', options) const openAIApiKey = getCredentialParam('openAIApiKey', credentialData, nodeData) const obj: Partial = { - apiKey: openAIApiKey + apiKey: openAIApiKey, + model: modelName } if (timeout) obj.timeout = parseInt(timeout, 10) diff --git a/packages/components/nodes/engine/SubQuestionQueryEngine/SubQuestionQueryEngine.ts b/packages/components/nodes/engine/SubQuestionQueryEngine/SubQuestionQueryEngine.ts new file mode 100644 index 00000000..a872c0a2 --- /dev/null +++ b/packages/components/nodes/engine/SubQuestionQueryEngine/SubQuestionQueryEngine.ts @@ -0,0 +1,193 @@ +import { flatten } from 'lodash' +import { ICommonObject, INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface' +import { + TreeSummarize, + SimpleResponseBuilder, + Refine, + BaseEmbedding, + ResponseSynthesizer, + CompactAndRefine, + QueryEngineTool, + LLMQuestionGenerator, + SubQuestionQueryEngine, + BaseNode, + Metadata, + serviceContextFromDefaults +} from 'llamaindex' +import { reformatSourceDocuments } from '../EngineUtils' + +class SubQuestionQueryEngine_LlamaIndex implements INode { + label: string + name: string + version: number + description: string + type: string + icon: string + category: string + baseClasses: string[] + tags: string[] + inputs: INodeParams[] + outputs: INodeOutputsValue[] + sessionId?: string + + constructor(fields?: { sessionId?: string }) { + this.label = 'Sub Question Query Engine' + this.name = 'subQuestionQueryEngine' + this.version = 1.0 + this.type = 'SubQuestionQueryEngine' + this.icon = 'subQueryEngine.svg' + this.category = 'Engine' + this.description = + 'Breaks complex query into sub questions for each relevant data source, then gather all the intermediate reponses and synthesizes a final response' + this.baseClasses = [this.type] + this.tags = ['LlamaIndex'] + this.inputs = [ + { + label: 'QueryEngine Tools', + name: 'queryEngineTools', + type: 'QueryEngineTool', + list: true + }, + { + label: 'Chat Model', + name: 'model', 
+ type: 'BaseChatModel_LlamaIndex' + }, + { + label: 'Embeddings', + name: 'embeddings', + type: 'BaseEmbedding_LlamaIndex' + }, + { + label: 'Response Synthesizer', + name: 'responseSynthesizer', + type: 'ResponseSynthesizer', + description: + 'ResponseSynthesizer is responsible for sending the query, nodes, and prompt templates to the LLM to generate a response. See
more', + optional: true + }, + { + label: 'Return Source Documents', + name: 'returnSourceDocuments', + type: 'boolean', + optional: true + } + ] + this.sessionId = fields?.sessionId + } + + async init(): Promise { + return null + } + + async run(nodeData: INodeData, input: string, options: ICommonObject): Promise { + const returnSourceDocuments = nodeData.inputs?.returnSourceDocuments as boolean + const embeddings = nodeData.inputs?.embeddings as BaseEmbedding + const model = nodeData.inputs?.model + + const serviceContext = serviceContextFromDefaults({ + llm: model, + embedModel: embeddings + }) + + let queryEngineTools = nodeData.inputs?.queryEngineTools as QueryEngineTool[] + queryEngineTools = flatten(queryEngineTools) + + let queryEngine = SubQuestionQueryEngine.fromDefaults({ + serviceContext, + queryEngineTools, + questionGen: new LLMQuestionGenerator({ llm: model }) + }) + + const responseSynthesizerObj = nodeData.inputs?.responseSynthesizer + if (responseSynthesizerObj) { + if (responseSynthesizerObj.type === 'TreeSummarize') { + const responseSynthesizer = new ResponseSynthesizer({ + responseBuilder: new TreeSummarize(serviceContext, responseSynthesizerObj.textQAPromptTemplate), + serviceContext + }) + queryEngine = SubQuestionQueryEngine.fromDefaults({ + responseSynthesizer, + serviceContext, + queryEngineTools, + questionGen: new LLMQuestionGenerator({ llm: model }) + }) + } else if (responseSynthesizerObj.type === 'CompactAndRefine') { + const responseSynthesizer = new ResponseSynthesizer({ + responseBuilder: new CompactAndRefine( + serviceContext, + responseSynthesizerObj.textQAPromptTemplate, + responseSynthesizerObj.refinePromptTemplate + ), + serviceContext + }) + queryEngine = SubQuestionQueryEngine.fromDefaults({ + responseSynthesizer, + serviceContext, + queryEngineTools, + questionGen: new LLMQuestionGenerator({ llm: model }) + }) + } else if (responseSynthesizerObj.type === 'Refine') { + const responseSynthesizer = new ResponseSynthesizer({ + 
responseBuilder: new Refine( + serviceContext, + responseSynthesizerObj.textQAPromptTemplate, + responseSynthesizerObj.refinePromptTemplate + ), + serviceContext + }) + queryEngine = SubQuestionQueryEngine.fromDefaults({ + responseSynthesizer, + serviceContext, + queryEngineTools, + questionGen: new LLMQuestionGenerator({ llm: model }) + }) + } else if (responseSynthesizerObj.type === 'SimpleResponseBuilder') { + const responseSynthesizer = new ResponseSynthesizer({ + responseBuilder: new SimpleResponseBuilder(serviceContext), + serviceContext + }) + queryEngine = SubQuestionQueryEngine.fromDefaults({ + responseSynthesizer, + serviceContext, + queryEngineTools, + questionGen: new LLMQuestionGenerator({ llm: model }) + }) + } + } + + let text = '' + let sourceDocuments: ICommonObject[] = [] + let sourceNodes: BaseNode[] = [] + let isStreamingStarted = false + const isStreamingEnabled = options.socketIO && options.socketIOClientId + + if (isStreamingEnabled) { + const stream = await queryEngine.query({ query: input, stream: true }) + for await (const chunk of stream) { + text += chunk.response + if (chunk.sourceNodes) sourceNodes = chunk.sourceNodes + if (!isStreamingStarted) { + isStreamingStarted = true + options.socketIO.to(options.socketIOClientId).emit('start', chunk.response) + } + + options.socketIO.to(options.socketIOClientId).emit('token', chunk.response) + } + + if (returnSourceDocuments) { + sourceDocuments = reformatSourceDocuments(sourceNodes) + options.socketIO.to(options.socketIOClientId).emit('sourceDocuments', sourceDocuments) + } + } else { + const response = await queryEngine.query({ query: input }) + text = response?.response + sourceDocuments = reformatSourceDocuments(response?.sourceNodes ?? 
[]) + } + + if (returnSourceDocuments) return { text, sourceDocuments } + else return { text } + } +} + +module.exports = { nodeClass: SubQuestionQueryEngine_LlamaIndex } diff --git a/packages/components/nodes/engine/SubQuestionQueryEngine/subQueryEngine.svg b/packages/components/nodes/engine/SubQuestionQueryEngine/subQueryEngine.svg new file mode 100644 index 00000000..b94c20b5 --- /dev/null +++ b/packages/components/nodes/engine/SubQuestionQueryEngine/subQueryEngine.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/packages/components/nodes/tools/QueryEngineTool/QueryEngineTool.ts b/packages/components/nodes/tools/QueryEngineTool/QueryEngineTool.ts new file mode 100644 index 00000000..163eff76 --- /dev/null +++ b/packages/components/nodes/tools/QueryEngineTool/QueryEngineTool.ts @@ -0,0 +1,68 @@ +import { INode, INodeData, INodeParams } from '../../../src/Interface' +import { VectorStoreIndex } from 'llamaindex' + +class QueryEngine_Tools implements INode { + label: string + name: string + version: number + description: string + type: string + icon: string + category: string + tags: string[] + baseClasses: string[] + inputs?: INodeParams[] + + constructor() { + this.label = 'QueryEngine Tool' + this.name = 'queryEngineToolLlamaIndex' + this.version = 1.0 + this.type = 'QueryEngineTool' + this.icon = 'queryEngineTool.svg' + this.category = 'Tools' + this.tags = ['LlamaIndex'] + this.description = 'Tool used to invoke query engine' + this.baseClasses = [this.type] + this.inputs = [ + { + label: 'Vector Store Index', + name: 'vectorStoreIndex', + type: 'VectorStoreIndex' + }, + { + label: 'Tool Name', + name: 'toolName', + type: 'string', + description: 'Tool name must be small capital letter with underscore. 
Ex: my_tool' + }, + { + label: 'Tool Description', + name: 'toolDesc', + type: 'string', + rows: 4 + } + ] + } + + async init(nodeData: INodeData): Promise { + const vectorStoreIndex = nodeData.inputs?.vectorStoreIndex as VectorStoreIndex + const toolName = nodeData.inputs?.toolName as string + const toolDesc = nodeData.inputs?.toolDesc as string + const queryEngineTool = { + queryEngine: vectorStoreIndex.asQueryEngine({ + preFilters: { + ...(vectorStoreIndex as any).metadatafilter + } + }), + metadata: { + name: toolName, + description: toolDesc + }, + vectorStoreIndex + } + + return queryEngineTool + } +} + +module.exports = { nodeClass: QueryEngine_Tools } diff --git a/packages/components/nodes/tools/QueryEngineTool/queryEngineTool.svg b/packages/components/nodes/tools/QueryEngineTool/queryEngineTool.svg new file mode 100644 index 00000000..d49d8375 --- /dev/null +++ b/packages/components/nodes/tools/QueryEngineTool/queryEngineTool.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/packages/components/nodes/vectorstores/Pinecone/Pinecone_LlamaIndex.ts b/packages/components/nodes/vectorstores/Pinecone/Pinecone_LlamaIndex.ts index a584fede..c0b2e5c1 100644 --- a/packages/components/nodes/vectorstores/Pinecone/Pinecone_LlamaIndex.ts +++ b/packages/components/nodes/vectorstores/Pinecone/Pinecone_LlamaIndex.ts @@ -13,7 +13,7 @@ import { import { FetchResponse, Index, Pinecone, ScoredPineconeRecord } from '@pinecone-database/pinecone' import { flatten } from 'lodash' import { Document as LCDocument } from 'langchain/document' -import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface' +import { ICommonObject, INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface' import { flattenObject, getCredentialData, getCredentialParam } from '../../../src/utils' class PineconeLlamaIndex_VectorStores implements INode { @@ -28,6 +28,7 @@ class PineconeLlamaIndex_VectorStores implements INode { baseClasses: string[] 
inputs: INodeParams[] credential: INodeParams + outputs: INodeOutputsValue[] constructor() { this.label = 'Pinecone' @@ -93,6 +94,18 @@ class PineconeLlamaIndex_VectorStores implements INode { optional: true } ] + this.outputs = [ + { + label: 'Pinecone Retriever', + name: 'retriever', + baseClasses: this.baseClasses + }, + { + label: 'Pinecone Vector Store Index', + name: 'vectorStore', + baseClasses: [this.type, 'VectorStoreIndex'] + } + ] } //@ts-ignore @@ -155,8 +168,10 @@ class PineconeLlamaIndex_VectorStores implements INode { } if (pineconeNamespace) obj.namespace = pineconeNamespace + + let metadatafilter = {} if (pineconeMetadataFilter) { - const metadatafilter = typeof pineconeMetadataFilter === 'object' ? pineconeMetadataFilter : JSON.parse(pineconeMetadataFilter) + metadatafilter = typeof pineconeMetadataFilter === 'object' ? pineconeMetadataFilter : JSON.parse(pineconeMetadataFilter) obj.queryFilter = metadatafilter } @@ -171,11 +186,21 @@ class PineconeLlamaIndex_VectorStores implements INode { serviceContext }) - const retriever = index.asRetriever() - retriever.similarityTopK = k - ;(retriever as any).serviceContext = serviceContext + const output = nodeData.outputs?.output as string - return retriever + if (output === 'retriever') { + const retriever = index.asRetriever() + retriever.similarityTopK = k + ;(retriever as any).serviceContext = serviceContext + return retriever + } else if (output === 'vectorStore') { + ;(index as any).k = k + if (metadatafilter) { + ;(index as any).metadatafilter = metadatafilter + } + return index + } + return index } } diff --git a/packages/components/nodes/vectorstores/SimpleStore/SimpleStore.ts b/packages/components/nodes/vectorstores/SimpleStore/SimpleStore.ts index eeef6f69..36c383e9 100644 --- a/packages/components/nodes/vectorstores/SimpleStore/SimpleStore.ts +++ b/packages/components/nodes/vectorstores/SimpleStore/SimpleStore.ts @@ -63,6 +63,18 @@ class SimpleStoreUpsert_LlamaIndex_VectorStores implements 
INode { optional: true } ] + this.outputs = [ + { + label: 'SimpleStore Retriever', + name: 'retriever', + baseClasses: this.baseClasses + }, + { + label: 'SimpleStore Vector Store Index', + name: 'vectorStore', + baseClasses: [this.type, 'VectorStoreIndex'] + } + ] } //@ts-ignore @@ -114,10 +126,19 @@ class SimpleStoreUpsert_LlamaIndex_VectorStores implements INode { const storageContext = await storageContextFromDefaults({ persistDir: filePath }) const index = await VectorStoreIndex.init({ storageContext, serviceContext }) - const retriever = index.asRetriever() - retriever.similarityTopK = k - return retriever + const output = nodeData.outputs?.output as string + + if (output === 'retriever') { + const retriever = index.asRetriever() + retriever.similarityTopK = k + ;(retriever as any).serviceContext = serviceContext + return retriever + } else if (output === 'vectorStore') { + ;(index as any).k = k + return index + } + return index } } diff --git a/packages/server/marketplaces/chatflows/Context Chat Engine.json b/packages/server/marketplaces/chatflows/Context Chat Engine.json index 7608a550..475c6b3a 100644 --- a/packages/server/marketplaces/chatflows/Context Chat Engine.json +++ b/packages/server/marketplaces/chatflows/Context Chat Engine.json @@ -181,6 +181,28 @@ "credentialNames": ["openAIApi"], "id": "openAIEmbedding_LlamaIndex_0-input-credential-credential" }, + { + "label": "Model Name", + "name": "modelName", + "type": "options", + "options": [ + { + "label": "text-embedding-3-large", + "name": "text-embedding-3-large" + }, + { + "label": "text-embedding-3-small", + "name": "text-embedding-3-small" + }, + { + "label": "text-embedding-ada-002", + "name": "text-embedding-ada-002" + } + ], + "default": "text-embedding-ada-002", + "optional": true, + "id": "openAIEmbedding_LlamaIndex_0-input-modelName-options" + }, { "label": "Timeout", "name": "timeout", @@ -315,13 +337,29 @@ }, "outputAnchors": [ { - "id": 
"pineconeLlamaIndex_0-output-pineconeLlamaIndex-Pinecone|VectorIndexRetriever", - "name": "pineconeLlamaIndex", - "label": "Pinecone", - "type": "Pinecone | VectorIndexRetriever" + "name": "output", + "label": "Output", + "type": "options", + "options": [ + { + "id": "pineconeLlamaIndex_0-output-retriever-Pinecone|VectorIndexRetriever", + "name": "retriever", + "label": "Pinecone Retriever", + "type": "Pinecone | VectorIndexRetriever" + }, + { + "id": "pineconeLlamaIndex_0-output-retriever-Pinecone|VectorStoreIndex", + "name": "vectorStore", + "label": "Pinecone Vector Store Index", + "type": "Pinecone | VectorStoreIndex" + } + ], + "default": "retriever" } ], - "outputs": {}, + "outputs": { + "output": "retriever" + }, "selected": false }, "selected": false, @@ -367,6 +405,14 @@ "label": "gpt-4", "name": "gpt-4" }, + { + "label": "gpt-4-turbo-preview", + "name": "gpt-4-turbo-preview" + }, + { + "label": "gpt-4-0125-preview", + "name": "gpt-4-0125-preview" + }, { "label": "gpt-4-1106-preview", "name": "gpt-4-1106-preview" @@ -672,6 +718,14 @@ "label": "gpt-4", "name": "gpt-4" }, + { + "label": "gpt-4-turbo-preview", + "name": "gpt-4-turbo-preview" + }, + { + "label": "gpt-4-0125-preview", + "name": "gpt-4-0125-preview" + }, { "label": "gpt-4-1106-preview", "name": "gpt-4-1106-preview" diff --git a/packages/server/marketplaces/chatflows/Query Engine.json b/packages/server/marketplaces/chatflows/Query Engine.json index 625097cc..82553333 100644 --- a/packages/server/marketplaces/chatflows/Query Engine.json +++ b/packages/server/marketplaces/chatflows/Query Engine.json @@ -163,13 +163,29 @@ }, "outputAnchors": [ { - "id": "pineconeLlamaIndex_0-output-pineconeLlamaIndex-Pinecone|VectorIndexRetriever", - "name": "pineconeLlamaIndex", - "label": "Pinecone", - "type": "Pinecone | VectorIndexRetriever" + "name": "output", + "label": "Output", + "type": "options", + "options": [ + { + "id": "pineconeLlamaIndex_0-output-retriever-Pinecone|VectorIndexRetriever", + "name": 
"retriever", + "label": "Pinecone Retriever", + "type": "Pinecone | VectorIndexRetriever" + }, + { + "id": "pineconeLlamaIndex_0-output-retriever-Pinecone|VectorStoreIndex", + "name": "vectorStore", + "label": "Pinecone Vector Store Index", + "type": "Pinecone | VectorStoreIndex" + } + ], + "default": "retriever" } ], - "outputs": {}, + "outputs": { + "output": "retriever" + }, "selected": false }, "selected": false, @@ -206,6 +222,28 @@ "credentialNames": ["openAIApi"], "id": "openAIEmbedding_LlamaIndex_0-input-credential-credential" }, + { + "label": "Model Name", + "name": "modelName", + "type": "options", + "options": [ + { + "label": "text-embedding-3-large", + "name": "text-embedding-3-large" + }, + { + "label": "text-embedding-3-small", + "name": "text-embedding-3-small" + }, + { + "label": "text-embedding-ada-002", + "name": "text-embedding-ada-002" + } + ], + "default": "text-embedding-ada-002", + "optional": true, + "id": "openAIEmbedding_LlamaIndex_0-input-modelName-options" + }, { "label": "Timeout", "name": "timeout", diff --git a/packages/server/src/utils/index.ts b/packages/server/src/utils/index.ts index d0063343..9a14429d 100644 --- a/packages/server/src/utils/index.ts +++ b/packages/server/src/utils/index.ts @@ -844,7 +844,7 @@ export const isFlowValidForStream = (reactFlowNodes: IReactFlowNode[], endingNod const whitelistAgents = ['openAIFunctionAgent', 'csvAgent', 'airtableAgent', 'conversationalRetrievalAgent'] isValidChainOrAgent = whitelistAgents.includes(endingNodeData.name) } else if (endingNodeData.category === 'Engine') { - const whitelistEngine = ['contextChatEngine', 'simpleChatEngine', 'queryEngine'] + const whitelistEngine = ['contextChatEngine', 'simpleChatEngine', 'queryEngine', 'subQuestionQueryEngine'] isValidChainOrAgent = whitelistEngine.includes(endingNodeData.name) } From 8d62adec2fbb8449ce650b0489e5ba00c7101326 Mon Sep 17 00:00:00 2001 From: melon Date: Thu, 1 Feb 2024 14:19:39 +0800 Subject: [PATCH 06/45] Refactor session ID 
assignment in App class --- packages/server/src/index.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/server/src/index.ts b/packages/server/src/index.ts index 045e40dd..d58660f0 100644 --- a/packages/server/src/index.ts +++ b/packages/server/src/index.ts @@ -1440,7 +1440,7 @@ export class App { chatType, chatId, memoryType: memoryType ?? (chatId ? IsNull() : undefined), - sessionId: sessionId ?? (chatId ? IsNull() : undefined), + sessionId: sessionId ?? undefined, createdDate: toDate && fromDate ? Between(fromDate, toDate) : undefined }, order: { From 6013743705cbc98ef4ce819b013f9314add66a16 Mon Sep 17 00:00:00 2001 From: Henry Date: Sat, 3 Feb 2024 02:14:43 +0000 Subject: [PATCH 07/45] add subquestion query engine marketplace template --- .../chatflows/SubQuestion Query Engine.json | 1201 +++++++++++++++++ 1 file changed, 1201 insertions(+) create mode 100644 packages/server/marketplaces/chatflows/SubQuestion Query Engine.json diff --git a/packages/server/marketplaces/chatflows/SubQuestion Query Engine.json b/packages/server/marketplaces/chatflows/SubQuestion Query Engine.json new file mode 100644 index 00000000..f14607da --- /dev/null +++ b/packages/server/marketplaces/chatflows/SubQuestion Query Engine.json @@ -0,0 +1,1201 @@ +{ + "description": "Breaks down query into sub questions for each relevant data source, then combine into final response", + "badge": "NEW", + "nodes": [ + { + "width": 300, + "height": 749, + "id": "compactrefineLlamaIndex_0", + "position": { + "x": -1214.7329938486841, + "y": 56.52482754447425 + }, + "type": "customNode", + "data": { + "id": "compactrefineLlamaIndex_0", + "label": "Compact and Refine", + "version": 1, + "name": "compactrefineLlamaIndex", + "type": "CompactRefine", + "baseClasses": ["CompactRefine", "ResponseSynthesizer"], + "tags": ["LlamaIndex"], + "category": "Response Synthesizer", + "description": "CompactRefine is a slight variation of Refine that first compacts the text chunks into the 
smallest possible number of chunks.", + "inputParams": [ + { + "label": "Refine Prompt", + "name": "refinePrompt", + "type": "string", + "rows": 4, + "default": "The original query is as follows: {query}\nWe have provided an existing answer: {existingAnswer}\nWe have the opportunity to refine the existing answer (only if needed) with some more context below.\n------------\n{context}\n------------\nGiven the new context, refine the original answer to better answer the query. If the context isn't useful, return the original answer.\nRefined Answer:", + "warning": "Prompt can contains no variables, or up to 3 variables. Variables must be {existingAnswer}, {context} and {query}", + "optional": true, + "id": "compactrefineLlamaIndex_0-input-refinePrompt-string" + }, + { + "label": "Text QA Prompt", + "name": "textQAPrompt", + "type": "string", + "rows": 4, + "default": "Context information is below.\n---------------------\n{context}\n---------------------\nGiven the context information and not prior knowledge, answer the query.\nQuery: {query}\nAnswer:", + "warning": "Prompt can contains no variables, or up to 2 variables. Variables must be {context} and {query}", + "optional": true, + "id": "compactrefineLlamaIndex_0-input-textQAPrompt-string" + } + ], + "inputAnchors": [], + "inputs": { + "refinePrompt": "A user has selected a set of SEC filing documents and has asked a question about them.\nThe SEC documents have the following titles:\n- Apple Inc (APPL) FORM 10K 2022\n- Tesla Inc (TSLA) FORM 10K 2022\nThe original query is as follows: {query}\nWe have provided an existing answer: {existingAnswer}\nWe have the opportunity to refine the existing answer (only if needed) with some more context below.\n------------\n{context}\n------------\nGiven the new context, refine the original answer to better answer the query. 
If the context isn't useful, return the original answer.\nRefined Answer:", + "textQAPrompt": "A user has selected a set of SEC filing documents and has asked a question about them.\nThe SEC documents have the following titles:\n- Apple Inc (APPL) FORM 10K 2022\n- Tesla Inc (TSLA) FORM 10K 2022\nContext information is below.\n---------------------\n{context}\n---------------------\nGiven the context information and not prior knowledge, answer the query.\nQuery: {query}\nAnswer:" + }, + "outputAnchors": [ + { + "id": "compactrefineLlamaIndex_0-output-compactrefineLlamaIndex-CompactRefine|ResponseSynthesizer", + "name": "compactrefineLlamaIndex", + "label": "CompactRefine", + "type": "CompactRefine | ResponseSynthesizer" + } + ], + "outputs": {}, + "selected": false + }, + "selected": false, + "positionAbsolute": { + "x": -1214.7329938486841, + "y": 56.52482754447425 + }, + "dragging": false + }, + { + "width": 300, + "height": 611, + "id": "pineconeLlamaIndex_0", + "position": { + "x": 37.23548045607484, + "y": -119.7364648743818 + }, + "type": "customNode", + "data": { + "id": "pineconeLlamaIndex_0", + "label": "Pinecone", + "version": 1, + "name": "pineconeLlamaIndex", + "type": "Pinecone", + "baseClasses": ["Pinecone", "VectorIndexRetriever"], + "tags": ["LlamaIndex"], + "category": "Vector Stores", + "description": "Upsert embedded data and perform similarity search upon query using Pinecone, a leading fully managed hosted vector database", + "inputParams": [ + { + "label": "Connect Credential", + "name": "credential", + "type": "credential", + "credentialNames": ["pineconeApi"], + "id": "pineconeLlamaIndex_0-input-credential-credential" + }, + { + "label": "Pinecone Index", + "name": "pineconeIndex", + "type": "string", + "id": "pineconeLlamaIndex_0-input-pineconeIndex-string" + }, + { + "label": "Pinecone Namespace", + "name": "pineconeNamespace", + "type": "string", + "placeholder": "my-first-namespace", + "additionalParams": true, + "optional": true, + "id": 
"pineconeLlamaIndex_0-input-pineconeNamespace-string" + }, + { + "label": "Pinecone Metadata Filter", + "name": "pineconeMetadataFilter", + "type": "json", + "optional": true, + "additionalParams": true, + "id": "pineconeLlamaIndex_0-input-pineconeMetadataFilter-json" + }, + { + "label": "Top K", + "name": "topK", + "description": "Number of top results to fetch. Default to 4", + "placeholder": "4", + "type": "number", + "additionalParams": true, + "optional": true, + "id": "pineconeLlamaIndex_0-input-topK-number" + } + ], + "inputAnchors": [ + { + "label": "Document", + "name": "document", + "type": "Document", + "list": true, + "optional": true, + "id": "pineconeLlamaIndex_0-input-document-Document" + }, + { + "label": "Chat Model", + "name": "model", + "type": "BaseChatModel_LlamaIndex", + "id": "pineconeLlamaIndex_0-input-model-BaseChatModel_LlamaIndex" + }, + { + "label": "Embeddings", + "name": "embeddings", + "type": "BaseEmbedding_LlamaIndex", + "id": "pineconeLlamaIndex_0-input-embeddings-BaseEmbedding_LlamaIndex" + } + ], + "inputs": { + "document": [], + "model": "{{chatOpenAI_LlamaIndex_0.data.instance}}", + "embeddings": "{{openAIEmbedding_LlamaIndex_0.data.instance}}", + "pineconeIndex": "flowiseindex", + "pineconeNamespace": "pinecone-form10k", + "pineconeMetadataFilter": "{\"source\":\"tesla\"}", + "topK": "" + }, + "outputAnchors": [ + { + "name": "output", + "label": "Output", + "type": "options", + "options": [ + { + "id": "pineconeLlamaIndex_0-output-retriever-Pinecone|VectorIndexRetriever", + "name": "retriever", + "label": "Pinecone Retriever", + "type": "Pinecone | VectorIndexRetriever" + }, + { + "id": "pineconeLlamaIndex_0-output-vectorStore-Pinecone|VectorStoreIndex", + "name": "vectorStore", + "label": "Pinecone Vector Store Index", + "type": "Pinecone | VectorStoreIndex" + } + ], + "default": "retriever" + } + ], + "outputs": { + "output": "vectorStore" + }, + "selected": false + }, + "selected": false, + "positionAbsolute": { + "x": 
37.23548045607484, + "y": -119.7364648743818 + }, + "dragging": false + }, + { + "width": 300, + "height": 529, + "id": "chatOpenAI_LlamaIndex_0", + "position": { + "x": -455.232655468177, + "y": -711.0080711676725 + }, + "type": "customNode", + "data": { + "id": "chatOpenAI_LlamaIndex_0", + "label": "ChatOpenAI", + "version": 1, + "name": "chatOpenAI_LlamaIndex", + "type": "ChatOpenAI", + "baseClasses": ["ChatOpenAI", "BaseChatModel_LlamaIndex", "BaseLLM"], + "tags": ["LlamaIndex"], + "category": "Chat Models", + "description": "Wrapper around OpenAI Chat LLM specific for LlamaIndex", + "inputParams": [ + { + "label": "Connect Credential", + "name": "credential", + "type": "credential", + "credentialNames": ["openAIApi"], + "id": "chatOpenAI_LlamaIndex_0-input-credential-credential" + }, + { + "label": "Model Name", + "name": "modelName", + "type": "options", + "options": [ + { + "label": "gpt-4", + "name": "gpt-4" + }, + { + "label": "gpt-4-1106-preview", + "name": "gpt-4-1106-preview" + }, + { + "label": "gpt-4-vision-preview", + "name": "gpt-4-vision-preview" + }, + { + "label": "gpt-4-0613", + "name": "gpt-4-0613" + }, + { + "label": "gpt-4-32k", + "name": "gpt-4-32k" + }, + { + "label": "gpt-4-32k-0613", + "name": "gpt-4-32k-0613" + }, + { + "label": "gpt-3.5-turbo", + "name": "gpt-3.5-turbo" + }, + { + "label": "gpt-3.5-turbo-1106", + "name": "gpt-3.5-turbo-1106" + }, + { + "label": "gpt-3.5-turbo-0613", + "name": "gpt-3.5-turbo-0613" + }, + { + "label": "gpt-3.5-turbo-16k", + "name": "gpt-3.5-turbo-16k" + }, + { + "label": "gpt-3.5-turbo-16k-0613", + "name": "gpt-3.5-turbo-16k-0613" + } + ], + "default": "gpt-3.5-turbo", + "optional": true, + "id": "chatOpenAI_LlamaIndex_0-input-modelName-options" + }, + { + "label": "Temperature", + "name": "temperature", + "type": "number", + "step": 0.1, + "default": 0.9, + "optional": true, + "id": "chatOpenAI_LlamaIndex_0-input-temperature-number" + }, + { + "label": "Max Tokens", + "name": "maxTokens", + "type": 
"number", + "step": 1, + "optional": true, + "additionalParams": true, + "id": "chatOpenAI_LlamaIndex_0-input-maxTokens-number" + }, + { + "label": "Top Probability", + "name": "topP", + "type": "number", + "step": 0.1, + "optional": true, + "additionalParams": true, + "id": "chatOpenAI_LlamaIndex_0-input-topP-number" + }, + { + "label": "Timeout", + "name": "timeout", + "type": "number", + "step": 1, + "optional": true, + "additionalParams": true, + "id": "chatOpenAI_LlamaIndex_0-input-timeout-number" + } + ], + "inputAnchors": [], + "inputs": { + "modelName": "gpt-3.5-turbo-16k", + "temperature": "0", + "maxTokens": "", + "topP": "", + "timeout": "" + }, + "outputAnchors": [ + { + "id": "chatOpenAI_LlamaIndex_0-output-chatOpenAI_LlamaIndex-ChatOpenAI|BaseChatModel_LlamaIndex|BaseLLM", + "name": "chatOpenAI_LlamaIndex", + "label": "ChatOpenAI", + "type": "ChatOpenAI | BaseChatModel_LlamaIndex | BaseLLM" + } + ], + "outputs": {}, + "selected": false + }, + "selected": false, + "positionAbsolute": { + "x": -455.232655468177, + "y": -711.0080711676725 + }, + "dragging": false + }, + { + "width": 300, + "height": 334, + "id": "openAIEmbedding_LlamaIndex_0", + "position": { + "x": -451.0082548287243, + "y": -127.15143353229783 + }, + "type": "customNode", + "data": { + "id": "openAIEmbedding_LlamaIndex_0", + "label": "OpenAI Embedding", + "version": 1, + "name": "openAIEmbedding_LlamaIndex", + "type": "OpenAIEmbedding", + "baseClasses": ["OpenAIEmbedding", "BaseEmbedding_LlamaIndex", "BaseEmbedding"], + "tags": ["LlamaIndex"], + "category": "Embeddings", + "description": "OpenAI Embedding specific for LlamaIndex", + "inputParams": [ + { + "label": "Connect Credential", + "name": "credential", + "type": "credential", + "credentialNames": ["openAIApi"], + "id": "openAIEmbedding_LlamaIndex_0-input-credential-credential" + }, + { + "label": "Timeout", + "name": "timeout", + "type": "number", + "optional": true, + "additionalParams": true, + "id": 
"openAIEmbedding_LlamaIndex_0-input-timeout-number" + }, + { + "label": "BasePath", + "name": "basepath", + "type": "string", + "optional": true, + "additionalParams": true, + "id": "openAIEmbedding_LlamaIndex_0-input-basepath-string" + } + ], + "inputAnchors": [], + "inputs": { + "timeout": "", + "basepath": "" + }, + "outputAnchors": [ + { + "id": "openAIEmbedding_LlamaIndex_0-output-openAIEmbedding_LlamaIndex-OpenAIEmbedding|BaseEmbedding_LlamaIndex|BaseEmbedding", + "name": "openAIEmbedding_LlamaIndex", + "label": "OpenAIEmbedding", + "type": "OpenAIEmbedding | BaseEmbedding_LlamaIndex | BaseEmbedding" + } + ], + "outputs": {}, + "selected": false + }, + "selected": false, + "dragging": false, + "positionAbsolute": { + "x": -451.0082548287243, + "y": -127.15143353229783 + } + }, + { + "width": 300, + "height": 511, + "id": "queryEngineToolLlamaIndex_0", + "position": { + "x": 460.37559236135905, + "y": -565.6224030941121 + }, + "type": "customNode", + "data": { + "id": "queryEngineToolLlamaIndex_0", + "label": "QueryEngine Tool", + "version": 1, + "name": "queryEngineToolLlamaIndex", + "type": "QueryEngineTool", + "baseClasses": ["QueryEngineTool"], + "tags": ["LlamaIndex"], + "category": "Tools", + "description": "Execute actions using ChatGPT Plugin Url", + "inputParams": [ + { + "label": "Tool Name", + "name": "toolName", + "type": "string", + "description": "Tool name must be small capital letter with underscore. 
Ex: my_tool", + "id": "queryEngineToolLlamaIndex_0-input-toolName-string" + }, + { + "label": "Tool Description", + "name": "toolDesc", + "type": "string", + "rows": 4, + "id": "queryEngineToolLlamaIndex_0-input-toolDesc-string" + } + ], + "inputAnchors": [ + { + "label": "Vector Store Index", + "name": "vectorStoreIndex", + "type": "VectorStoreIndex", + "id": "queryEngineToolLlamaIndex_0-input-vectorStoreIndex-VectorStoreIndex" + } + ], + "inputs": { + "vectorStoreIndex": "{{pineconeLlamaIndex_1.data.instance}}", + "toolName": "apple_tool", + "toolDesc": "A SEC Form 10K filing describing the financials of Apple Inc (APPL) for the 2022 time period." + }, + "outputAnchors": [ + { + "id": "queryEngineToolLlamaIndex_0-output-queryEngineToolLlamaIndex-QueryEngineTool", + "name": "queryEngineToolLlamaIndex", + "label": "QueryEngineTool", + "type": "QueryEngineTool" + } + ], + "outputs": {}, + "selected": false + }, + "selected": false, + "positionAbsolute": { + "x": 460.37559236135905, + "y": -565.6224030941121 + }, + "dragging": false + }, + { + "width": 300, + "height": 611, + "id": "pineconeLlamaIndex_1", + "position": { + "x": 42.17855025460784, + "y": -839.8824444107056 + }, + "type": "customNode", + "data": { + "id": "pineconeLlamaIndex_1", + "label": "Pinecone", + "version": 1, + "name": "pineconeLlamaIndex", + "type": "Pinecone", + "baseClasses": ["Pinecone", "VectorIndexRetriever"], + "tags": ["LlamaIndex"], + "category": "Vector Stores", + "description": "Upsert embedded data and perform similarity search upon query using Pinecone, a leading fully managed hosted vector database", + "inputParams": [ + { + "label": "Connect Credential", + "name": "credential", + "type": "credential", + "credentialNames": ["pineconeApi"], + "id": "pineconeLlamaIndex_1-input-credential-credential" + }, + { + "label": "Pinecone Index", + "name": "pineconeIndex", + "type": "string", + "id": "pineconeLlamaIndex_1-input-pineconeIndex-string" + }, + { + "label": "Pinecone Namespace", + 
"name": "pineconeNamespace", + "type": "string", + "placeholder": "my-first-namespace", + "additionalParams": true, + "optional": true, + "id": "pineconeLlamaIndex_1-input-pineconeNamespace-string" + }, + { + "label": "Pinecone Metadata Filter", + "name": "pineconeMetadataFilter", + "type": "json", + "optional": true, + "additionalParams": true, + "id": "pineconeLlamaIndex_1-input-pineconeMetadataFilter-json" + }, + { + "label": "Top K", + "name": "topK", + "description": "Number of top results to fetch. Default to 4", + "placeholder": "4", + "type": "number", + "additionalParams": true, + "optional": true, + "id": "pineconeLlamaIndex_1-input-topK-number" + } + ], + "inputAnchors": [ + { + "label": "Document", + "name": "document", + "type": "Document", + "list": true, + "optional": true, + "id": "pineconeLlamaIndex_1-input-document-Document" + }, + { + "label": "Chat Model", + "name": "model", + "type": "BaseChatModel_LlamaIndex", + "id": "pineconeLlamaIndex_1-input-model-BaseChatModel_LlamaIndex" + }, + { + "label": "Embeddings", + "name": "embeddings", + "type": "BaseEmbedding_LlamaIndex", + "id": "pineconeLlamaIndex_1-input-embeddings-BaseEmbedding_LlamaIndex" + } + ], + "inputs": { + "document": [], + "model": "{{chatOpenAI_LlamaIndex_0.data.instance}}", + "embeddings": "{{openAIEmbedding_LlamaIndex_0.data.instance}}", + "pineconeIndex": "flowiseindex", + "pineconeNamespace": "pinecone-form10k", + "pineconeMetadataFilter": "{\"source\":\"apple\"}", + "topK": "" + }, + "outputAnchors": [ + { + "name": "output", + "label": "Output", + "type": "options", + "options": [ + { + "id": "pineconeLlamaIndex_1-output-retriever-Pinecone|VectorIndexRetriever", + "name": "retriever", + "label": "Pinecone Retriever", + "type": "Pinecone | VectorIndexRetriever" + }, + { + "id": "pineconeLlamaIndex_1-output-vectorStore-Pinecone|VectorStoreIndex", + "name": "vectorStore", + "label": "Pinecone Vector Store Index", + "type": "Pinecone | VectorStoreIndex" + } + ], + "default": 
"retriever" + } + ], + "outputs": { + "output": "vectorStore" + }, + "selected": false + }, + "selected": false, + "positionAbsolute": { + "x": 42.17855025460784, + "y": -839.8824444107056 + }, + "dragging": false + }, + { + "width": 300, + "height": 511, + "id": "queryEngineToolLlamaIndex_1", + "position": { + "x": 462.16721384216123, + "y": -17.750065363429798 + }, + "type": "customNode", + "data": { + "id": "queryEngineToolLlamaIndex_1", + "label": "QueryEngine Tool", + "version": 1, + "name": "queryEngineToolLlamaIndex", + "type": "QueryEngineTool", + "baseClasses": ["QueryEngineTool"], + "tags": ["LlamaIndex"], + "category": "Tools", + "description": "Execute actions using ChatGPT Plugin Url", + "inputParams": [ + { + "label": "Tool Name", + "name": "toolName", + "type": "string", + "description": "Tool name must be small capital letter with underscore. Ex: my_tool", + "id": "queryEngineToolLlamaIndex_1-input-toolName-string" + }, + { + "label": "Tool Description", + "name": "toolDesc", + "type": "string", + "rows": 4, + "id": "queryEngineToolLlamaIndex_1-input-toolDesc-string" + } + ], + "inputAnchors": [ + { + "label": "Vector Store Index", + "name": "vectorStoreIndex", + "type": "VectorStoreIndex", + "id": "queryEngineToolLlamaIndex_1-input-vectorStoreIndex-VectorStoreIndex" + } + ], + "inputs": { + "vectorStoreIndex": "{{pineconeLlamaIndex_0.data.instance}}", + "toolName": "tesla_tool", + "toolDesc": "A SEC Form 10K filing describing the financials of Tesla Inc (TSLA) for the 2022 time period." 
+ }, + "outputAnchors": [ + { + "id": "queryEngineToolLlamaIndex_1-output-queryEngineToolLlamaIndex-QueryEngineTool", + "name": "queryEngineToolLlamaIndex", + "label": "QueryEngineTool", + "type": "QueryEngineTool" + } + ], + "outputs": {}, + "selected": false + }, + "selected": false, + "positionAbsolute": { + "x": 462.16721384216123, + "y": -17.750065363429798 + }, + "dragging": false + }, + { + "width": 300, + "height": 484, + "id": "subQuestionQueryEngine_0", + "position": { + "x": 982.7583030231563, + "y": 349.50858200305896 + }, + "type": "customNode", + "data": { + "id": "subQuestionQueryEngine_0", + "label": "Sub Question Query Engine", + "version": 1, + "name": "subQuestionQueryEngine", + "type": "SubQuestionQueryEngine", + "baseClasses": ["SubQuestionQueryEngine"], + "tags": ["LlamaIndex"], + "category": "Engine", + "description": "Simple query engine built to answer question over your data, without memory", + "inputParams": [ + { + "label": "Return Source Documents", + "name": "returnSourceDocuments", + "type": "boolean", + "optional": true, + "id": "subQuestionQueryEngine_0-input-returnSourceDocuments-boolean" + } + ], + "inputAnchors": [ + { + "label": "QueryEngine Tools", + "name": "queryEngineTools", + "type": "QueryEngineTool", + "list": true, + "id": "subQuestionQueryEngine_0-input-queryEngineTools-QueryEngineTool" + }, + { + "label": "Chat Model", + "name": "model", + "type": "BaseChatModel_LlamaIndex", + "id": "subQuestionQueryEngine_0-input-model-BaseChatModel_LlamaIndex" + }, + { + "label": "Embeddings", + "name": "embeddings", + "type": "BaseEmbedding_LlamaIndex", + "id": "subQuestionQueryEngine_0-input-embeddings-BaseEmbedding_LlamaIndex" + }, + { + "label": "Response Synthesizer", + "name": "responseSynthesizer", + "type": "ResponseSynthesizer", + "description": "ResponseSynthesizer is responsible for sending the query, nodes, and prompt templates to the LLM to generate a response. 
See more", + "optional": true, + "id": "subQuestionQueryEngine_0-input-responseSynthesizer-ResponseSynthesizer" + } + ], + "inputs": { + "queryEngineTools": ["{{queryEngineToolLlamaIndex_1.data.instance}}", "{{queryEngineToolLlamaIndex_0.data.instance}}"], + "model": "{{chatOpenAI_LlamaIndex_1.data.instance}}", + "embeddings": "{{openAIEmbedding_LlamaIndex_1.data.instance}}", + "responseSynthesizer": "{{compactrefineLlamaIndex_0.data.instance}}", + "returnSourceDocuments": true + }, + "outputAnchors": [ + { + "id": "subQuestionQueryEngine_0-output-subQuestionQueryEngine-SubQuestionQueryEngine", + "name": "subQuestionQueryEngine", + "label": "SubQuestionQueryEngine", + "type": "SubQuestionQueryEngine" + } + ], + "outputs": {}, + "selected": false + }, + "selected": false, + "positionAbsolute": { + "x": 982.7583030231563, + "y": 349.50858200305896 + }, + "dragging": false + }, + { + "width": 300, + "height": 529, + "id": "chatOpenAI_LlamaIndex_1", + "position": { + "x": -846.9087470244615, + "y": 23.446501495097493 + }, + "type": "customNode", + "data": { + "id": "chatOpenAI_LlamaIndex_1", + "label": "ChatOpenAI", + "version": 1, + "name": "chatOpenAI_LlamaIndex", + "type": "ChatOpenAI", + "baseClasses": ["ChatOpenAI", "BaseChatModel_LlamaIndex", "BaseLLM"], + "tags": ["LlamaIndex"], + "category": "Chat Models", + "description": "Wrapper around OpenAI Chat LLM specific for LlamaIndex", + "inputParams": [ + { + "label": "Connect Credential", + "name": "credential", + "type": "credential", + "credentialNames": ["openAIApi"], + "id": "chatOpenAI_LlamaIndex_1-input-credential-credential" + }, + { + "label": "Model Name", + "name": "modelName", + "type": "options", + "options": [ + { + "label": "gpt-4", + "name": "gpt-4" + }, + { + "label": "gpt-4-1106-preview", + "name": "gpt-4-1106-preview" + }, + { + "label": "gpt-4-vision-preview", + "name": "gpt-4-vision-preview" + }, + { + "label": "gpt-4-0613", + "name": "gpt-4-0613" + }, + { + "label": "gpt-4-32k", + "name": 
"gpt-4-32k" + }, + { + "label": "gpt-4-32k-0613", + "name": "gpt-4-32k-0613" + }, + { + "label": "gpt-3.5-turbo", + "name": "gpt-3.5-turbo" + }, + { + "label": "gpt-3.5-turbo-1106", + "name": "gpt-3.5-turbo-1106" + }, + { + "label": "gpt-3.5-turbo-0613", + "name": "gpt-3.5-turbo-0613" + }, + { + "label": "gpt-3.5-turbo-16k", + "name": "gpt-3.5-turbo-16k" + }, + { + "label": "gpt-3.5-turbo-16k-0613", + "name": "gpt-3.5-turbo-16k-0613" + } + ], + "default": "gpt-3.5-turbo", + "optional": true, + "id": "chatOpenAI_LlamaIndex_1-input-modelName-options" + }, + { + "label": "Temperature", + "name": "temperature", + "type": "number", + "step": 0.1, + "default": 0.9, + "optional": true, + "id": "chatOpenAI_LlamaIndex_1-input-temperature-number" + }, + { + "label": "Max Tokens", + "name": "maxTokens", + "type": "number", + "step": 1, + "optional": true, + "additionalParams": true, + "id": "chatOpenAI_LlamaIndex_1-input-maxTokens-number" + }, + { + "label": "Top Probability", + "name": "topP", + "type": "number", + "step": 0.1, + "optional": true, + "additionalParams": true, + "id": "chatOpenAI_LlamaIndex_1-input-topP-number" + }, + { + "label": "Timeout", + "name": "timeout", + "type": "number", + "step": 1, + "optional": true, + "additionalParams": true, + "id": "chatOpenAI_LlamaIndex_1-input-timeout-number" + } + ], + "inputAnchors": [], + "inputs": { + "modelName": "gpt-3.5-turbo-16k", + "temperature": "0", + "maxTokens": "", + "topP": "", + "timeout": "" + }, + "outputAnchors": [ + { + "id": "chatOpenAI_LlamaIndex_1-output-chatOpenAI_LlamaIndex-ChatOpenAI|BaseChatModel_LlamaIndex|BaseLLM", + "name": "chatOpenAI_LlamaIndex", + "label": "ChatOpenAI", + "type": "ChatOpenAI | BaseChatModel_LlamaIndex | BaseLLM" + } + ], + "outputs": {}, + "selected": false + }, + "selected": false, + "positionAbsolute": { + "x": -846.9087470244615, + "y": 23.446501495097493 + }, + "dragging": false + }, + { + "width": 300, + "height": 334, + "id": "openAIEmbedding_LlamaIndex_1", + 
"position": { + "x": -437.3136244622061, + "y": 329.99986619821175 + }, + "type": "customNode", + "data": { + "id": "openAIEmbedding_LlamaIndex_1", + "label": "OpenAI Embedding", + "version": 1, + "name": "openAIEmbedding_LlamaIndex", + "type": "OpenAIEmbedding", + "baseClasses": ["OpenAIEmbedding", "BaseEmbedding_LlamaIndex", "BaseEmbedding"], + "tags": ["LlamaIndex"], + "category": "Embeddings", + "description": "OpenAI Embedding specific for LlamaIndex", + "inputParams": [ + { + "label": "Connect Credential", + "name": "credential", + "type": "credential", + "credentialNames": ["openAIApi"], + "id": "openAIEmbedding_LlamaIndex_1-input-credential-credential" + }, + { + "label": "Timeout", + "name": "timeout", + "type": "number", + "optional": true, + "additionalParams": true, + "id": "openAIEmbedding_LlamaIndex_1-input-timeout-number" + }, + { + "label": "BasePath", + "name": "basepath", + "type": "string", + "optional": true, + "additionalParams": true, + "id": "openAIEmbedding_LlamaIndex_1-input-basepath-string" + } + ], + "inputAnchors": [], + "inputs": { + "timeout": "", + "basepath": "" + }, + "outputAnchors": [ + { + "id": "openAIEmbedding_LlamaIndex_1-output-openAIEmbedding_LlamaIndex-OpenAIEmbedding|BaseEmbedding_LlamaIndex|BaseEmbedding", + "name": "openAIEmbedding_LlamaIndex", + "label": "OpenAIEmbedding", + "type": "OpenAIEmbedding | BaseEmbedding_LlamaIndex | BaseEmbedding" + } + ], + "outputs": {}, + "selected": false + }, + "selected": false, + "dragging": false, + "positionAbsolute": { + "x": -437.3136244622061, + "y": 329.99986619821175 + } + }, + { + "width": 300, + "height": 82, + "id": "stickyNote_0", + "position": { + "x": 35.90892935132143, + "y": -936.1282632923861 + }, + "type": "stickyNote", + "data": { + "id": "stickyNote_0", + "label": "Sticky Note", + "version": 1, + "name": "stickyNote", + "type": "StickyNote", + "baseClasses": ["StickyNote"], + "category": "Utilities", + "description": "Add a sticky note", + "inputParams": [ + { + 
"label": "", + "name": "note", + "type": "string", + "rows": 1, + "placeholder": "Type something here", + "optional": true, + "id": "stickyNote_0-input-note-string" + } + ], + "inputAnchors": [], + "inputs": { + "note": "Query previously upserted documents with corresponding metadata key value pair - \n{ source: \"apple\"}" + }, + "outputAnchors": [ + { + "id": "stickyNote_0-output-stickyNote-StickyNote", + "name": "stickyNote", + "label": "StickyNote", + "type": "StickyNote" + } + ], + "outputs": {}, + "selected": false + }, + "selected": false, + "positionAbsolute": { + "x": 35.90892935132143, + "y": -936.1282632923861 + }, + "dragging": false + }, + { + "width": 300, + "height": 82, + "id": "stickyNote_1", + "position": { + "x": 37.74909394815296, + "y": -215.17456133022054 + }, + "type": "stickyNote", + "data": { + "id": "stickyNote_1", + "label": "Sticky Note", + "version": 1, + "name": "stickyNote", + "type": "StickyNote", + "baseClasses": ["StickyNote"], + "category": "Utilities", + "description": "Add a sticky note", + "inputParams": [ + { + "label": "", + "name": "note", + "type": "string", + "rows": 1, + "placeholder": "Type something here", + "optional": true, + "id": "stickyNote_1-input-note-string" + } + ], + "inputAnchors": [], + "inputs": { + "note": "Query previously upserted documents with corresponding metadata key value pair - \n{ source: \"tesla\"}" + }, + "outputAnchors": [ + { + "id": "stickyNote_1-output-stickyNote-StickyNote", + "name": "stickyNote", + "label": "StickyNote", + "type": "StickyNote" + } + ], + "outputs": {}, + "selected": false + }, + "selected": false, + "positionAbsolute": { + "x": 37.74909394815296, + "y": -215.17456133022054 + }, + "dragging": false + }, + { + "width": 300, + "height": 163, + "id": "stickyNote_2", + "position": { + "x": 984.9543031068163, + "y": 171.04264459503852 + }, + "type": "stickyNote", + "data": { + "id": "stickyNote_2", + "label": "Sticky Note", + "version": 1, + "name": "stickyNote", + "type": 
"StickyNote", + "baseClasses": ["StickyNote"], + "category": "Utilities", + "description": "Add a sticky note", + "inputParams": [ + { + "label": "", + "name": "note", + "type": "string", + "rows": 1, + "placeholder": "Type something here", + "optional": true, + "id": "stickyNote_2-input-note-string" + } + ], + "inputAnchors": [], + "inputs": { + "note": "Break questions into subqueries, then retrieve corresponding context using queryengine tool.\n\nThis implementation does not contains memory, we can use OpenAI Agent to function call this flow" + }, + "outputAnchors": [ + { + "id": "stickyNote_2-output-stickyNote-StickyNote", + "name": "stickyNote", + "label": "StickyNote", + "type": "StickyNote" + } + ], + "outputs": {}, + "selected": false + }, + "selected": false, + "positionAbsolute": { + "x": 984.9543031068163, + "y": 171.04264459503852 + }, + "dragging": false + } + ], + "edges": [ + { + "source": "chatOpenAI_LlamaIndex_0", + "sourceHandle": "chatOpenAI_LlamaIndex_0-output-chatOpenAI_LlamaIndex-ChatOpenAI|BaseChatModel_LlamaIndex|BaseLLM", + "target": "pineconeLlamaIndex_1", + "targetHandle": "pineconeLlamaIndex_1-input-model-BaseChatModel_LlamaIndex", + "type": "buttonedge", + "id": "chatOpenAI_LlamaIndex_0-chatOpenAI_LlamaIndex_0-output-chatOpenAI_LlamaIndex-ChatOpenAI|BaseChatModel_LlamaIndex|BaseLLM-pineconeLlamaIndex_1-pineconeLlamaIndex_1-input-model-BaseChatModel_LlamaIndex" + }, + { + "source": "openAIEmbedding_LlamaIndex_0", + "sourceHandle": "openAIEmbedding_LlamaIndex_0-output-openAIEmbedding_LlamaIndex-OpenAIEmbedding|BaseEmbedding_LlamaIndex|BaseEmbedding", + "target": "pineconeLlamaIndex_1", + "targetHandle": "pineconeLlamaIndex_1-input-embeddings-BaseEmbedding_LlamaIndex", + "type": "buttonedge", + "id": "openAIEmbedding_LlamaIndex_0-openAIEmbedding_LlamaIndex_0-output-openAIEmbedding_LlamaIndex-OpenAIEmbedding|BaseEmbedding_LlamaIndex|BaseEmbedding-pineconeLlamaIndex_1-pineconeLlamaIndex_1-input-embeddings-BaseEmbedding_LlamaIndex" + }, + { + 
"source": "pineconeLlamaIndex_1", + "sourceHandle": "pineconeLlamaIndex_1-output-vectorStore-Pinecone|VectorStoreIndex", + "target": "queryEngineToolLlamaIndex_0", + "targetHandle": "queryEngineToolLlamaIndex_0-input-vectorStoreIndex-VectorStoreIndex", + "type": "buttonedge", + "id": "pineconeLlamaIndex_1-pineconeLlamaIndex_1-output-vectorStore-Pinecone|VectorStoreIndex-queryEngineToolLlamaIndex_0-queryEngineToolLlamaIndex_0-input-vectorStoreIndex-VectorStoreIndex" + }, + { + "source": "pineconeLlamaIndex_0", + "sourceHandle": "pineconeLlamaIndex_0-output-vectorStore-Pinecone|VectorStoreIndex", + "target": "queryEngineToolLlamaIndex_1", + "targetHandle": "queryEngineToolLlamaIndex_1-input-vectorStoreIndex-VectorStoreIndex", + "type": "buttonedge", + "id": "pineconeLlamaIndex_0-pineconeLlamaIndex_0-output-vectorStore-Pinecone|VectorStoreIndex-queryEngineToolLlamaIndex_1-queryEngineToolLlamaIndex_1-input-vectorStoreIndex-VectorStoreIndex" + }, + { + "source": "queryEngineToolLlamaIndex_1", + "sourceHandle": "queryEngineToolLlamaIndex_1-output-queryEngineToolLlamaIndex-QueryEngineTool", + "target": "subQuestionQueryEngine_0", + "targetHandle": "subQuestionQueryEngine_0-input-queryEngineTools-QueryEngineTool", + "type": "buttonedge", + "id": "queryEngineToolLlamaIndex_1-queryEngineToolLlamaIndex_1-output-queryEngineToolLlamaIndex-QueryEngineTool-subQuestionQueryEngine_0-subQuestionQueryEngine_0-input-queryEngineTools-QueryEngineTool" + }, + { + "source": "queryEngineToolLlamaIndex_0", + "sourceHandle": "queryEngineToolLlamaIndex_0-output-queryEngineToolLlamaIndex-QueryEngineTool", + "target": "subQuestionQueryEngine_0", + "targetHandle": "subQuestionQueryEngine_0-input-queryEngineTools-QueryEngineTool", + "type": "buttonedge", + "id": "queryEngineToolLlamaIndex_0-queryEngineToolLlamaIndex_0-output-queryEngineToolLlamaIndex-QueryEngineTool-subQuestionQueryEngine_0-subQuestionQueryEngine_0-input-queryEngineTools-QueryEngineTool" + }, + { + "source": 
"chatOpenAI_LlamaIndex_1", + "sourceHandle": "chatOpenAI_LlamaIndex_1-output-chatOpenAI_LlamaIndex-ChatOpenAI|BaseChatModel_LlamaIndex|BaseLLM", + "target": "subQuestionQueryEngine_0", + "targetHandle": "subQuestionQueryEngine_0-input-model-BaseChatModel_LlamaIndex", + "type": "buttonedge", + "id": "chatOpenAI_LlamaIndex_1-chatOpenAI_LlamaIndex_1-output-chatOpenAI_LlamaIndex-ChatOpenAI|BaseChatModel_LlamaIndex|BaseLLM-subQuestionQueryEngine_0-subQuestionQueryEngine_0-input-model-BaseChatModel_LlamaIndex" + }, + { + "source": "openAIEmbedding_LlamaIndex_1", + "sourceHandle": "openAIEmbedding_LlamaIndex_1-output-openAIEmbedding_LlamaIndex-OpenAIEmbedding|BaseEmbedding_LlamaIndex|BaseEmbedding", + "target": "subQuestionQueryEngine_0", + "targetHandle": "subQuestionQueryEngine_0-input-embeddings-BaseEmbedding_LlamaIndex", + "type": "buttonedge", + "id": "openAIEmbedding_LlamaIndex_1-openAIEmbedding_LlamaIndex_1-output-openAIEmbedding_LlamaIndex-OpenAIEmbedding|BaseEmbedding_LlamaIndex|BaseEmbedding-subQuestionQueryEngine_0-subQuestionQueryEngine_0-input-embeddings-BaseEmbedding_LlamaIndex" + }, + { + "source": "compactrefineLlamaIndex_0", + "sourceHandle": "compactrefineLlamaIndex_0-output-compactrefineLlamaIndex-CompactRefine|ResponseSynthesizer", + "target": "subQuestionQueryEngine_0", + "targetHandle": "subQuestionQueryEngine_0-input-responseSynthesizer-ResponseSynthesizer", + "type": "buttonedge", + "id": "compactrefineLlamaIndex_0-compactrefineLlamaIndex_0-output-compactrefineLlamaIndex-CompactRefine|ResponseSynthesizer-subQuestionQueryEngine_0-subQuestionQueryEngine_0-input-responseSynthesizer-ResponseSynthesizer" + }, + { + "source": "openAIEmbedding_LlamaIndex_0", + "sourceHandle": "openAIEmbedding_LlamaIndex_0-output-openAIEmbedding_LlamaIndex-OpenAIEmbedding|BaseEmbedding_LlamaIndex|BaseEmbedding", + "target": "pineconeLlamaIndex_0", + "targetHandle": "pineconeLlamaIndex_0-input-embeddings-BaseEmbedding_LlamaIndex", + "type": "buttonedge", + "id": 
"openAIEmbedding_LlamaIndex_0-openAIEmbedding_LlamaIndex_0-output-openAIEmbedding_LlamaIndex-OpenAIEmbedding|BaseEmbedding_LlamaIndex|BaseEmbedding-pineconeLlamaIndex_0-pineconeLlamaIndex_0-input-embeddings-BaseEmbedding_LlamaIndex" + }, + { + "source": "chatOpenAI_LlamaIndex_0", + "sourceHandle": "chatOpenAI_LlamaIndex_0-output-chatOpenAI_LlamaIndex-ChatOpenAI|BaseChatModel_LlamaIndex|BaseLLM", + "target": "pineconeLlamaIndex_0", + "targetHandle": "pineconeLlamaIndex_0-input-model-BaseChatModel_LlamaIndex", + "type": "buttonedge", + "id": "chatOpenAI_LlamaIndex_0-chatOpenAI_LlamaIndex_0-output-chatOpenAI_LlamaIndex-ChatOpenAI|BaseChatModel_LlamaIndex|BaseLLM-pineconeLlamaIndex_0-pineconeLlamaIndex_0-input-model-BaseChatModel_LlamaIndex" + } + ] +} From 288e451161fa206265d5830cb583f0b18f13307c Mon Sep 17 00:00:00 2001 From: Kenny Vaneetvelde Date: Sat, 3 Feb 2024 19:43:44 +0100 Subject: [PATCH 08/45] Improve flexibility of the structured output parser by allowing the user to input an example JSON and automatically converting it to a zod scheme --- .../StructuredOutputParser.ts | 48 +++----- packages/components/package.json | 1 + .../chatflows/Structured Output Parser.json | 114 +++++++----------- 3 files changed, 64 insertions(+), 99 deletions(-) diff --git a/packages/components/nodes/outputparsers/StructuredOutputParser/StructuredOutputParser.ts b/packages/components/nodes/outputparsers/StructuredOutputParser/StructuredOutputParser.ts index fc28fd1c..849d825d 100644 --- a/packages/components/nodes/outputparsers/StructuredOutputParser/StructuredOutputParser.ts +++ b/packages/components/nodes/outputparsers/StructuredOutputParser/StructuredOutputParser.ts @@ -1,8 +1,9 @@ -import { convertSchemaToZod, getBaseClasses, INode, INodeData, INodeParams } from '../../../src' +import { getBaseClasses, INode, INodeData, INodeParams } from '../../../src' import { BaseOutputParser } from 'langchain/schema/output_parser' import { StructuredOutputParser as 
LangchainStructuredOutputParser } from 'langchain/output_parsers' import { CATEGORY } from '../OutputParserHelpers' import { z } from 'zod' +import { jsonToZod } from 'json-to-zod' class StructuredOutputParser implements INode { label: string @@ -34,44 +35,31 @@ class StructuredOutputParser implements INode { description: 'In the event that the first call fails, will make another call to the model to fix any errors.' }, { - label: 'JSON Structure', - name: 'jsonStructure', - type: 'datagrid', - description: 'JSON structure for LLM to return', - datagrid: [ - { field: 'property', headerName: 'Property', editable: true }, - { - field: 'type', - headerName: 'Type', - type: 'singleSelect', - valueOptions: ['string', 'number', 'boolean'], - editable: true - }, - { field: 'description', headerName: 'Description', editable: true, flex: 1 } - ], - default: [ - { - property: 'answer', - type: 'string', - description: `answer to the user's question` - }, - { - property: 'source', - type: 'string', - description: `sources used to answer the question, should be websites` - } - ], + label: 'Example JSON', + name: 'exampleJson', + type: 'string', + description: 'Example JSON structure for LLM to return', + rows: 10, + default: '{"answer": "the answer", "followupQuestions": ["question1", "question2"]}', additionalParams: true } ] } async init(nodeData: INodeData): Promise { - const jsonStructure = nodeData.inputs?.jsonStructure as string + const exampleJson = nodeData.inputs?.exampleJson as string const autoFix = nodeData.inputs?.autofixParser as boolean + const jsonToZodString = jsonToZod(JSON.parse(exampleJson)) + const splitString = jsonToZodString.split('const schema = ') + const schemaString = splitString[1].trim() + + const fnString = `function proxyFn(z){ return ${schemaString} }` + const zodSchemaFunction = new Function('z', `return ${schemaString}`) + const zodSchema = zodSchemaFunction(z) + try { - const structuredOutputParser = 
LangchainStructuredOutputParser.fromZodSchema(z.object(convertSchemaToZod(jsonStructure))) + const structuredOutputParser = LangchainStructuredOutputParser.fromZodSchema(zodSchema) // NOTE: When we change Flowise to return a json response, the following has to be changed to: JsonStructuredOutputParser Object.defineProperty(structuredOutputParser, 'autoFix', { diff --git a/packages/components/package.json b/packages/components/package.json index bcb746b0..2efb2143 100644 --- a/packages/components/package.json +++ b/packages/components/package.json @@ -56,6 +56,7 @@ "html-to-text": "^9.0.5", "husky": "^8.0.3", "ioredis": "^5.3.2", + "json-to-zod": "1.1.2", "langchain": "^0.0.214", "langfuse": "2.0.2", "langfuse-langchain": "2.3.3", diff --git a/packages/server/marketplaces/chatflows/Structured Output Parser.json b/packages/server/marketplaces/chatflows/Structured Output Parser.json index 92336443..93bb96bb 100644 --- a/packages/server/marketplaces/chatflows/Structured Output Parser.json +++ b/packages/server/marketplaces/chatflows/Structured Output Parser.json @@ -4,7 +4,7 @@ "nodes": [ { "width": 300, - "height": 574, + "height": 576, "id": "chatOpenAI_0", "position": { "x": 845.3961479115309, @@ -17,7 +17,12 @@ "version": 3, "name": "chatOpenAI", "type": "ChatOpenAI", - "baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel", "Runnable"], + "baseClasses": [ + "ChatOpenAI", + "BaseChatModel", + "BaseLanguageModel", + "Runnable" + ], "category": "Chat Models", "description": "Wrapper around OpenAI large language models that use the Chat endpoint", "inputParams": [ @@ -25,7 +30,9 @@ "label": "Connect Credential", "name": "credential", "type": "credential", - "credentialNames": ["openAIApi"], + "credentialNames": [ + "openAIApi" + ], "id": "chatOpenAI_0-input-credential-credential" }, { @@ -202,7 +209,7 @@ }, { "width": 300, - "height": 456, + "height": 508, "id": "llmChain_0", "position": { "x": 1229.1699649849293, @@ -215,7 +222,11 @@ "version": 3, "name": 
"llmChain", "type": "LLMChain", - "baseClasses": ["LLMChain", "BaseChain", "Runnable"], + "baseClasses": [ + "LLMChain", + "BaseChain", + "Runnable" + ], "category": "Chains", "description": "Chain to run queries against LLMs", "inputParams": [ @@ -300,7 +311,7 @@ }, { "width": 300, - "height": 652, + "height": 690, "id": "chatPromptTemplate_0", "position": { "x": 501.1597501123828, @@ -313,7 +324,12 @@ "version": 1, "name": "chatPromptTemplate", "type": "ChatPromptTemplate", - "baseClasses": ["ChatPromptTemplate", "BaseChatPromptTemplate", "BasePromptTemplate", "Runnable"], + "baseClasses": [ + "ChatPromptTemplate", + "BaseChatPromptTemplate", + "BasePromptTemplate", + "Runnable" + ], "category": "Prompts", "description": "Schema to represent a chat prompt", "inputParams": [ @@ -369,11 +385,11 @@ }, { "width": 300, - "height": 328, + "height": 329, "id": "structuredOutputParser_0", "position": { - "x": 170.3869571939727, - "y": 343.9298288967859 + "x": 498.2326128526694, + "y": 566.5473204649535 }, "type": "customNode", "data": { @@ -382,7 +398,11 @@ "version": 1, "name": "structuredOutputParser", "type": "StructuredOutputParser", - "baseClasses": ["StructuredOutputParser", "BaseLLMOutputParser", "Runnable"], + "baseClasses": [ + "StructuredOutputParser", + "BaseLLMOutputParser", + "Runnable" + ], "category": "Output Parsers", "description": "Parse the output of an LLM call into a given (JSON) structure.", "inputParams": [ @@ -395,61 +415,20 @@ "id": "structuredOutputParser_0-input-autofixParser-boolean" }, { - "label": "JSON Structure", - "name": "jsonStructure", - "type": "datagrid", - "description": "JSON structure for LLM to return", - "datagrid": [ - { - "field": "property", - "headerName": "Property", - "editable": true - }, - { - "field": "type", - "headerName": "Type", - "type": "singleSelect", - "valueOptions": ["string", "number", "boolean"], - "editable": true - }, - { - "field": "description", - "headerName": "Description", - "editable": true, - 
"flex": 1 - } - ], - "default": [ - { - "property": "answer", - "type": "string", - "description": "answer to the user's question" - }, - { - "property": "source", - "type": "string", - "description": "sources used to answer the question, should be websites" - } - ], + "label": "Example JSON", + "name": "exampleJson", + "type": "string", + "description": "Example JSON structure for LLM to return", + "rows": 10, + "default": "{\"answer\": \"the answer\", \"followupQuestions\": [\"question1\", \"question2\"]}", "additionalParams": true, - "id": "structuredOutputParser_0-input-jsonStructure-datagrid" + "id": "structuredOutputParser_0-input-exampleJson-string" } ], "inputAnchors": [], "inputs": { "autofixParser": true, - "jsonStructure": [ - { - "property": "answer", - "type": "string", - "description": "answer to the user's question" - }, - { - "property": "source", - "type": "string", - "description": "sources used to answer the question, should be websites" - } - ] + "exampleJson": "{\"answer\": \"the answer\", \"followupQuestions\": [\"question1\", \"question2\"]}" }, "outputAnchors": [ { @@ -463,11 +442,11 @@ "selected": false }, "selected": false, + "dragging": false, "positionAbsolute": { - "x": 170.3869571939727, - "y": 343.9298288967859 - }, - "dragging": false + "x": 498.2326128526694, + "y": 566.5473204649535 + } } ], "edges": [ @@ -499,10 +478,7 @@ "target": "llmChain_0", "targetHandle": "llmChain_0-input-outputParser-BaseLLMOutputParser", "type": "buttonedge", - "id": "structuredOutputParser_0-structuredOutputParser_0-output-structuredOutputParser-StructuredOutputParser|BaseLLMOutputParser|Runnable-llmChain_0-llmChain_0-input-outputParser-BaseLLMOutputParser", - "data": { - "label": "" - } + "id": "structuredOutputParser_0-structuredOutputParser_0-output-structuredOutputParser-StructuredOutputParser|BaseLLMOutputParser|Runnable-llmChain_0-llmChain_0-input-outputParser-BaseLLMOutputParser" } ] -} +} \ No newline at end of file From 
95b251f02acfdb6e55f2d9f511732b262e0c0370 Mon Sep 17 00:00:00 2001 From: Kenny Vaneetvelde Date: Sat, 3 Feb 2024 21:22:49 +0100 Subject: [PATCH 09/45] Make it a separate node instead --- .../StructuredOutputParserAdvanced.ts | 80 +++ .../structure.svg | 8 + packages/components/package.json | 1 - .../Advanced Structured Output Parser.json | 463 ++++++++++++++++++ 4 files changed, 551 insertions(+), 1 deletion(-) create mode 100644 packages/components/nodes/outputparsers/StructuredOutputParserAdvanced/StructuredOutputParserAdvanced.ts create mode 100644 packages/components/nodes/outputparsers/StructuredOutputParserAdvanced/structure.svg create mode 100644 packages/server/marketplaces/chatflows/Advanced Structured Output Parser.json diff --git a/packages/components/nodes/outputparsers/StructuredOutputParserAdvanced/StructuredOutputParserAdvanced.ts b/packages/components/nodes/outputparsers/StructuredOutputParserAdvanced/StructuredOutputParserAdvanced.ts new file mode 100644 index 00000000..b0fad136 --- /dev/null +++ b/packages/components/nodes/outputparsers/StructuredOutputParserAdvanced/StructuredOutputParserAdvanced.ts @@ -0,0 +1,80 @@ +import { getBaseClasses, INode, INodeData, INodeParams } from '../../../src' +import { BaseOutputParser } from 'langchain/schema/output_parser' +import { StructuredOutputParser as LangchainStructuredOutputParser } from 'langchain/output_parsers' +import { CATEGORY } from '../OutputParserHelpers' +import { z } from 'zod' + +class AdvancedStructuredOutputParser implements INode { + label: string + name: string + version: number + description: string + type: string + icon: string + category: string + baseClasses: string[] + inputs: INodeParams[] + credential: INodeParams + + constructor() { + this.label = 'Advanced Structured Output Parser' + this.name = 'advancedStructuredOutputParser' + this.version = 1.0 + this.type = 'AdvancedStructuredOutputParser' + this.description = 'Parse the output of an LLM call into a given structure by 
providing a Zod schema.' + this.icon = 'structure.svg' + this.category = CATEGORY + this.baseClasses = [this.type, ...getBaseClasses(BaseOutputParser)] + this.inputs = [ + { + label: 'Autofix', + name: 'autofixParser', + type: 'boolean', + optional: true, + description: 'In the event that the first call fails, will make another call to the model to fix any errors.' + }, + { + label: 'Example JSON', + name: 'exampleJson', + type: 'string', + description: 'Zod schema for the output of the model', + rows: 10, + default: `z.object({ + title: z.string(), // Title of the movie as a string + yearOfRelease: z.number().int(), // Release year as an integer number, + genres: z.enum([ + "Action", "Comedy", "Drama", "Fantasy", "Horror", + "Mystery", "Romance", "Science Fiction", "Thriller", "Documentary" + ]).array().max(2), // Array of genres, max of 2 from the defined enum + shortDescription: z.string().max(500) // Short description, max 150 characters +})`, + additionalParams: true + } + ] + } + + async init(nodeData: INodeData): Promise { + const schemaString = nodeData.inputs?.exampleJson as string + const autoFix = nodeData.inputs?.autofixParser as boolean + + const zodSchemaFunction = new Function('z', `return ${schemaString}`) + const zodSchema = zodSchemaFunction(z) + + try { + const structuredOutputParser = LangchainStructuredOutputParser.fromZodSchema(zodSchema) + + // NOTE: When we change Flowise to return a json response, the following has to be changed to: JsonStructuredOutputParser + Object.defineProperty(structuredOutputParser, 'autoFix', { + enumerable: true, + configurable: true, + writable: true, + value: autoFix + }) + return structuredOutputParser + } catch (exception) { + throw new Error('Error parsing Zod Schema: ' + exception) + } + } +} + +module.exports = { nodeClass: AdvancedStructuredOutputParser } diff --git a/packages/components/nodes/outputparsers/StructuredOutputParserAdvanced/structure.svg 
b/packages/components/nodes/outputparsers/StructuredOutputParserAdvanced/structure.svg new file mode 100644 index 00000000..3875982d --- /dev/null +++ b/packages/components/nodes/outputparsers/StructuredOutputParserAdvanced/structure.svg @@ -0,0 +1,8 @@ + + + + + + + + diff --git a/packages/components/package.json b/packages/components/package.json index 2efb2143..bcb746b0 100644 --- a/packages/components/package.json +++ b/packages/components/package.json @@ -56,7 +56,6 @@ "html-to-text": "^9.0.5", "husky": "^8.0.3", "ioredis": "^5.3.2", - "json-to-zod": "1.1.2", "langchain": "^0.0.214", "langfuse": "2.0.2", "langfuse-langchain": "2.3.3", diff --git a/packages/server/marketplaces/chatflows/Advanced Structured Output Parser.json b/packages/server/marketplaces/chatflows/Advanced Structured Output Parser.json new file mode 100644 index 00000000..41cbb08f --- /dev/null +++ b/packages/server/marketplaces/chatflows/Advanced Structured Output Parser.json @@ -0,0 +1,463 @@ +{ + "nodes": [ + { + "width": 300, + "height": 508, + "id": "llmChain_0", + "position": { + "x": 1229.1699649849293, + "y": 245.55173505632646 + }, + "type": "customNode", + "data": { + "id": "llmChain_0", + "label": "LLM Chain", + "version": 3, + "name": "llmChain", + "type": "LLMChain", + "baseClasses": ["LLMChain", "BaseChain", "Runnable"], + "category": "Chains", + "description": "Chain to run queries against LLMs", + "inputParams": [ + { + "label": "Chain Name", + "name": "chainName", + "type": "string", + "placeholder": "Name Your Chain", + "optional": true, + "id": "llmChain_0-input-chainName-string" + } + ], + "inputAnchors": [ + { + "label": "Language Model", + "name": "model", + "type": "BaseLanguageModel", + "id": "llmChain_0-input-model-BaseLanguageModel" + }, + { + "label": "Prompt", + "name": "prompt", + "type": "BasePromptTemplate", + "id": "llmChain_0-input-prompt-BasePromptTemplate" + }, + { + "label": "Output Parser", + "name": "outputParser", + "type": "BaseLLMOutputParser", + 
"optional": true, + "id": "llmChain_0-input-outputParser-BaseLLMOutputParser" + }, + { + "label": "Input Moderation", + "description": "Detect text that could generate harmful output and prevent it from being sent to the language model", + "name": "inputModeration", + "type": "Moderation", + "optional": true, + "list": true, + "id": "llmChain_0-input-inputModeration-Moderation" + } + ], + "inputs": { + "model": "{{chatOpenAI_0.data.instance}}", + "prompt": "{{chatPromptTemplate_0.data.instance}}", + "outputParser": "{{advancedStructuredOutputParser_0.data.instance}}", + "chainName": "", + "inputModeration": "" + }, + "outputAnchors": [ + { + "name": "output", + "label": "Output", + "type": "options", + "options": [ + { + "id": "llmChain_0-output-llmChain-LLMChain|BaseChain|Runnable", + "name": "llmChain", + "label": "LLM Chain", + "type": "LLMChain | BaseChain | Runnable" + }, + { + "id": "llmChain_0-output-outputPrediction-string|json", + "name": "outputPrediction", + "label": "Output Prediction", + "type": "string | json" + } + ], + "default": "llmChain" + } + ], + "outputs": { + "output": "llmChain" + }, + "selected": false + }, + "positionAbsolute": { + "x": 1229.1699649849293, + "y": 245.55173505632646 + }, + "selected": false + }, + { + "width": 300, + "height": 690, + "id": "chatPromptTemplate_0", + "position": { + "x": 493.26582927222483, + "y": -156.20470841335592 + }, + "type": "customNode", + "data": { + "id": "chatPromptTemplate_0", + "label": "Chat Prompt Template", + "version": 1, + "name": "chatPromptTemplate", + "type": "ChatPromptTemplate", + "baseClasses": ["ChatPromptTemplate", "BaseChatPromptTemplate", "BasePromptTemplate", "Runnable"], + "category": "Prompts", + "description": "Schema to represent a chat prompt", + "inputParams": [ + { + "label": "System Message", + "name": "systemMessagePrompt", + "type": "string", + "rows": 4, + "placeholder": "You are a helpful assistant that translates {input_language} to {output_language}.", + "id": 
"chatPromptTemplate_0-input-systemMessagePrompt-string" + }, + { + "label": "Human Message", + "name": "humanMessagePrompt", + "type": "string", + "rows": 4, + "placeholder": "{text}", + "id": "chatPromptTemplate_0-input-humanMessagePrompt-string" + }, + { + "label": "Format Prompt Values", + "name": "promptValues", + "type": "json", + "optional": true, + "acceptVariable": true, + "list": true, + "id": "chatPromptTemplate_0-input-promptValues-json" + } + ], + "inputAnchors": [], + "inputs": { + "systemMessagePrompt": "This AI is designed to only output information in JSON format without exception. This AI can only output JSON and will never output any other text.\n\nWhen asked to correct itself, this AI will only output the corrected JSON and never any other text.", + "humanMessagePrompt": "{text}", + "promptValues": "" + }, + "outputAnchors": [ + { + "id": "chatPromptTemplate_0-output-chatPromptTemplate-ChatPromptTemplate|BaseChatPromptTemplate|BasePromptTemplate|Runnable", + "name": "chatPromptTemplate", + "label": "ChatPromptTemplate", + "type": "ChatPromptTemplate | BaseChatPromptTemplate | BasePromptTemplate | Runnable" + } + ], + "outputs": {}, + "selected": false + }, + "selected": false, + "positionAbsolute": { + "x": 493.26582927222483, + "y": -156.20470841335592 + }, + "dragging": false + }, + { + "width": 300, + "height": 329, + "id": "advancedStructuredOutputParser_0", + "position": { + "x": 494.20163170226266, + "y": 568.3420937517054 + }, + "type": "customNode", + "data": { + "id": "advancedStructuredOutputParser_0", + "label": "Advanced Structured Output Parser", + "version": 1, + "name": "advancedStructuredOutputParser", + "type": "AdvancedStructuredOutputParser", + "baseClasses": ["AdvancedStructuredOutputParser", "BaseLLMOutputParser", "Runnable"], + "category": "Output Parsers", + "description": "Parse the output of an LLM call into a given structure by providing a Zod schema.", + "inputParams": [ + { + "label": "Autofix", + "name": 
"autofixParser", + "type": "boolean", + "optional": true, + "description": "In the event that the first call fails, will make another call to the model to fix any errors.", + "id": "advancedStructuredOutputParser_0-input-autofixParser-boolean" + }, + { + "label": "Example JSON", + "name": "exampleJson", + "type": "string", + "description": "Zod schema for the output of the model", + "rows": 10, + "default": "z.object({\n title: z.string(), // Title of the movie as a string\n yearOfRelease: z.number().int(), // Release year as an integer number,\n genres: z.enum([\n \"Action\", \"Comedy\", \"Drama\", \"Fantasy\", \"Horror\",\n \"Mystery\", \"Romance\", \"Science Fiction\", \"Thriller\", \"Documentary\"\n ]).array().max(2), // Array of genres, max of 2 from the defined enum\n shortDescription: z.string().max(500) // Short description, max 150 characters\n})", + "additionalParams": true, + "id": "advancedStructuredOutputParser_0-input-exampleJson-string" + } + ], + "inputAnchors": [], + "inputs": { + "autofixParser": true, + "exampleJson": "z.object({\n title: z.string(), // Title of the movie as a string\n yearOfRelease: z.number().int(), // Release year as an integer number,\n genres: z.enum([\n \"Action\", \"Comedy\", \"Drama\", \"Fantasy\", \"Horror\",\n \"Mystery\", \"Romance\", \"Science Fiction\", \"Thriller\", \"Documentary\"\n ]).array().max(2), // Array of genres, max of 2 from the defined enum\n shortDescription: z.string().max(500) // Short description, max 150 characters\n})" + }, + "outputAnchors": [ + { + "id": "advancedStructuredOutputParser_0-output-advancedStructuredOutputParser-AdvancedStructuredOutputParser|BaseLLMOutputParser|Runnable", + "name": "advancedStructuredOutputParser", + "label": "AdvancedStructuredOutputParser", + "type": "AdvancedStructuredOutputParser | BaseLLMOutputParser | Runnable" + } + ], + "outputs": {}, + "selected": false + }, + "selected": false, + "positionAbsolute": { + "x": 494.20163170226266, + "y": 568.3420937517054 + 
}, + "dragging": false + }, + { + "width": 300, + "height": 576, + "id": "chatOpenAI_0", + "position": { + "x": 860.555928011636, + "y": -355.71028569475095 + }, + "type": "customNode", + "data": { + "id": "chatOpenAI_0", + "label": "ChatOpenAI", + "version": 3, + "name": "chatOpenAI", + "type": "ChatOpenAI", + "baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel", "Runnable"], + "category": "Chat Models", + "description": "Wrapper around OpenAI large language models that use the Chat endpoint", + "inputParams": [ + { + "label": "Connect Credential", + "name": "credential", + "type": "credential", + "credentialNames": ["openAIApi"], + "id": "chatOpenAI_0-input-credential-credential" + }, + { + "label": "Model Name", + "name": "modelName", + "type": "options", + "options": [ + { + "label": "gpt-4", + "name": "gpt-4" + }, + { + "label": "gpt-4-turbo-preview", + "name": "gpt-4-turbo-preview" + }, + { + "label": "gpt-4-0125-preview", + "name": "gpt-4-0125-preview" + }, + { + "label": "gpt-4-1106-preview", + "name": "gpt-4-1106-preview" + }, + { + "label": "gpt-4-vision-preview", + "name": "gpt-4-vision-preview" + }, + { + "label": "gpt-4-0613", + "name": "gpt-4-0613" + }, + { + "label": "gpt-4-32k", + "name": "gpt-4-32k" + }, + { + "label": "gpt-4-32k-0613", + "name": "gpt-4-32k-0613" + }, + { + "label": "gpt-3.5-turbo", + "name": "gpt-3.5-turbo" + }, + { + "label": "gpt-3.5-turbo-0125", + "name": "gpt-3.5-turbo-0125" + }, + { + "label": "gpt-3.5-turbo-1106", + "name": "gpt-3.5-turbo-1106" + }, + { + "label": "gpt-3.5-turbo-0613", + "name": "gpt-3.5-turbo-0613" + }, + { + "label": "gpt-3.5-turbo-16k", + "name": "gpt-3.5-turbo-16k" + }, + { + "label": "gpt-3.5-turbo-16k-0613", + "name": "gpt-3.5-turbo-16k-0613" + } + ], + "default": "gpt-3.5-turbo", + "optional": true, + "id": "chatOpenAI_0-input-modelName-options" + }, + { + "label": "Temperature", + "name": "temperature", + "type": "number", + "step": 0.1, + "default": 0.9, + "optional": true, + "id": 
"chatOpenAI_0-input-temperature-number" + }, + { + "label": "Max Tokens", + "name": "maxTokens", + "type": "number", + "step": 1, + "optional": true, + "additionalParams": true, + "id": "chatOpenAI_0-input-maxTokens-number" + }, + { + "label": "Top Probability", + "name": "topP", + "type": "number", + "step": 0.1, + "optional": true, + "additionalParams": true, + "id": "chatOpenAI_0-input-topP-number" + }, + { + "label": "Frequency Penalty", + "name": "frequencyPenalty", + "type": "number", + "step": 0.1, + "optional": true, + "additionalParams": true, + "id": "chatOpenAI_0-input-frequencyPenalty-number" + }, + { + "label": "Presence Penalty", + "name": "presencePenalty", + "type": "number", + "step": 0.1, + "optional": true, + "additionalParams": true, + "id": "chatOpenAI_0-input-presencePenalty-number" + }, + { + "label": "Timeout", + "name": "timeout", + "type": "number", + "step": 1, + "optional": true, + "additionalParams": true, + "id": "chatOpenAI_0-input-timeout-number" + }, + { + "label": "BasePath", + "name": "basepath", + "type": "string", + "optional": true, + "additionalParams": true, + "id": "chatOpenAI_0-input-basepath-string" + }, + { + "label": "BaseOptions", + "name": "baseOptions", + "type": "json", + "optional": true, + "additionalParams": true, + "id": "chatOpenAI_0-input-baseOptions-json" + } + ], + "inputAnchors": [ + { + "label": "Cache", + "name": "cache", + "type": "BaseCache", + "optional": true, + "id": "chatOpenAI_0-input-cache-BaseCache" + } + ], + "inputs": { + "cache": "", + "modelName": "", + "temperature": "0", + "maxTokens": "", + "topP": "", + "frequencyPenalty": "", + "presencePenalty": "", + "timeout": "", + "basepath": "http://localhost:8901/v1", + "baseOptions": "" + }, + "outputAnchors": [ + { + "id": "chatOpenAI_0-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel|Runnable", + "name": "chatOpenAI", + "label": "ChatOpenAI", + "type": "ChatOpenAI | BaseChatModel | BaseLanguageModel | Runnable" + } + ], + "outputs": 
{}, + "selected": false + }, + "selected": false, + "positionAbsolute": { + "x": 860.555928011636, + "y": -355.71028569475095 + }, + "dragging": false + } + ], + "edges": [ + { + "source": "chatPromptTemplate_0", + "sourceHandle": "chatPromptTemplate_0-output-chatPromptTemplate-ChatPromptTemplate|BaseChatPromptTemplate|BasePromptTemplate|Runnable", + "target": "llmChain_0", + "targetHandle": "llmChain_0-input-prompt-BasePromptTemplate", + "type": "buttonedge", + "id": "chatPromptTemplate_0-chatPromptTemplate_0-output-chatPromptTemplate-ChatPromptTemplate|BaseChatPromptTemplate|BasePromptTemplate|Runnable-llmChain_0-llmChain_0-input-prompt-BasePromptTemplate", + "data": { + "label": "" + } + }, + { + "source": "advancedStructuredOutputParser_0", + "sourceHandle": "advancedStructuredOutputParser_0-output-advancedStructuredOutputParser-AdvancedStructuredOutputParser|BaseLLMOutputParser|Runnable", + "target": "llmChain_0", + "targetHandle": "llmChain_0-input-outputParser-BaseLLMOutputParser", + "type": "buttonedge", + "id": "advancedStructuredOutputParser_0-advancedStructuredOutputParser_0-output-advancedStructuredOutputParser-AdvancedStructuredOutputParser|BaseLLMOutputParser|Runnable-llmChain_0-llmChain_0-input-outputParser-BaseLLMOutputParser" + }, + { + "source": "chatOpenAI_0", + "sourceHandle": "chatOpenAI_0-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel|Runnable", + "target": "llmChain_0", + "targetHandle": "llmChain_0-input-model-BaseLanguageModel", + "type": "buttonedge", + "id": "chatOpenAI_0-chatOpenAI_0-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel|Runnable-llmChain_0-llmChain_0-input-model-BaseLanguageModel" + } + ] +} From 62f70ab6efe46f6be2d2cddefc2cc12d8d008672 Mon Sep 17 00:00:00 2001 From: Kenny Vaneetvelde Date: Sat, 3 Feb 2024 21:25:26 +0100 Subject: [PATCH 10/45] Revert the changes to the original parser & template --- .../StructuredOutputParser.ts | 48 +++++--- .../chatflows/Structured Output Parser.json | 114 
+++++++++++------- 2 files changed, 99 insertions(+), 63 deletions(-) diff --git a/packages/components/nodes/outputparsers/StructuredOutputParser/StructuredOutputParser.ts b/packages/components/nodes/outputparsers/StructuredOutputParser/StructuredOutputParser.ts index 849d825d..fc28fd1c 100644 --- a/packages/components/nodes/outputparsers/StructuredOutputParser/StructuredOutputParser.ts +++ b/packages/components/nodes/outputparsers/StructuredOutputParser/StructuredOutputParser.ts @@ -1,9 +1,8 @@ -import { getBaseClasses, INode, INodeData, INodeParams } from '../../../src' +import { convertSchemaToZod, getBaseClasses, INode, INodeData, INodeParams } from '../../../src' import { BaseOutputParser } from 'langchain/schema/output_parser' import { StructuredOutputParser as LangchainStructuredOutputParser } from 'langchain/output_parsers' import { CATEGORY } from '../OutputParserHelpers' import { z } from 'zod' -import { jsonToZod } from 'json-to-zod' class StructuredOutputParser implements INode { label: string @@ -35,31 +34,44 @@ class StructuredOutputParser implements INode { description: 'In the event that the first call fails, will make another call to the model to fix any errors.' 
}, { - label: 'Example JSON', - name: 'exampleJson', - type: 'string', - description: 'Example JSON structure for LLM to return', - rows: 10, - default: '{"answer": "the answer", "followupQuestions": ["question1", "question2"]}', + label: 'JSON Structure', + name: 'jsonStructure', + type: 'datagrid', + description: 'JSON structure for LLM to return', + datagrid: [ + { field: 'property', headerName: 'Property', editable: true }, + { + field: 'type', + headerName: 'Type', + type: 'singleSelect', + valueOptions: ['string', 'number', 'boolean'], + editable: true + }, + { field: 'description', headerName: 'Description', editable: true, flex: 1 } + ], + default: [ + { + property: 'answer', + type: 'string', + description: `answer to the user's question` + }, + { + property: 'source', + type: 'string', + description: `sources used to answer the question, should be websites` + } + ], additionalParams: true } ] } async init(nodeData: INodeData): Promise { - const exampleJson = nodeData.inputs?.exampleJson as string + const jsonStructure = nodeData.inputs?.jsonStructure as string const autoFix = nodeData.inputs?.autofixParser as boolean - const jsonToZodString = jsonToZod(JSON.parse(exampleJson)) - const splitString = jsonToZodString.split('const schema = ') - const schemaString = splitString[1].trim() - - const fnString = `function proxyFn(z){ return ${schemaString} }` - const zodSchemaFunction = new Function('z', `return ${schemaString}`) - const zodSchema = zodSchemaFunction(z) - try { - const structuredOutputParser = LangchainStructuredOutputParser.fromZodSchema(zodSchema) + const structuredOutputParser = LangchainStructuredOutputParser.fromZodSchema(z.object(convertSchemaToZod(jsonStructure))) // NOTE: When we change Flowise to return a json response, the following has to be changed to: JsonStructuredOutputParser Object.defineProperty(structuredOutputParser, 'autoFix', { diff --git a/packages/server/marketplaces/chatflows/Structured Output Parser.json 
b/packages/server/marketplaces/chatflows/Structured Output Parser.json index 93bb96bb..92336443 100644 --- a/packages/server/marketplaces/chatflows/Structured Output Parser.json +++ b/packages/server/marketplaces/chatflows/Structured Output Parser.json @@ -4,7 +4,7 @@ "nodes": [ { "width": 300, - "height": 576, + "height": 574, "id": "chatOpenAI_0", "position": { "x": 845.3961479115309, @@ -17,12 +17,7 @@ "version": 3, "name": "chatOpenAI", "type": "ChatOpenAI", - "baseClasses": [ - "ChatOpenAI", - "BaseChatModel", - "BaseLanguageModel", - "Runnable" - ], + "baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel", "Runnable"], "category": "Chat Models", "description": "Wrapper around OpenAI large language models that use the Chat endpoint", "inputParams": [ @@ -30,9 +25,7 @@ "label": "Connect Credential", "name": "credential", "type": "credential", - "credentialNames": [ - "openAIApi" - ], + "credentialNames": ["openAIApi"], "id": "chatOpenAI_0-input-credential-credential" }, { @@ -209,7 +202,7 @@ }, { "width": 300, - "height": 508, + "height": 456, "id": "llmChain_0", "position": { "x": 1229.1699649849293, @@ -222,11 +215,7 @@ "version": 3, "name": "llmChain", "type": "LLMChain", - "baseClasses": [ - "LLMChain", - "BaseChain", - "Runnable" - ], + "baseClasses": ["LLMChain", "BaseChain", "Runnable"], "category": "Chains", "description": "Chain to run queries against LLMs", "inputParams": [ @@ -311,7 +300,7 @@ }, { "width": 300, - "height": 690, + "height": 652, "id": "chatPromptTemplate_0", "position": { "x": 501.1597501123828, @@ -324,12 +313,7 @@ "version": 1, "name": "chatPromptTemplate", "type": "ChatPromptTemplate", - "baseClasses": [ - "ChatPromptTemplate", - "BaseChatPromptTemplate", - "BasePromptTemplate", - "Runnable" - ], + "baseClasses": ["ChatPromptTemplate", "BaseChatPromptTemplate", "BasePromptTemplate", "Runnable"], "category": "Prompts", "description": "Schema to represent a chat prompt", "inputParams": [ @@ -385,11 +369,11 @@ }, { 
"width": 300, - "height": 329, + "height": 328, "id": "structuredOutputParser_0", "position": { - "x": 498.2326128526694, - "y": 566.5473204649535 + "x": 170.3869571939727, + "y": 343.9298288967859 }, "type": "customNode", "data": { @@ -398,11 +382,7 @@ "version": 1, "name": "structuredOutputParser", "type": "StructuredOutputParser", - "baseClasses": [ - "StructuredOutputParser", - "BaseLLMOutputParser", - "Runnable" - ], + "baseClasses": ["StructuredOutputParser", "BaseLLMOutputParser", "Runnable"], "category": "Output Parsers", "description": "Parse the output of an LLM call into a given (JSON) structure.", "inputParams": [ @@ -415,20 +395,61 @@ "id": "structuredOutputParser_0-input-autofixParser-boolean" }, { - "label": "Example JSON", - "name": "exampleJson", - "type": "string", - "description": "Example JSON structure for LLM to return", - "rows": 10, - "default": "{\"answer\": \"the answer\", \"followupQuestions\": [\"question1\", \"question2\"]}", + "label": "JSON Structure", + "name": "jsonStructure", + "type": "datagrid", + "description": "JSON structure for LLM to return", + "datagrid": [ + { + "field": "property", + "headerName": "Property", + "editable": true + }, + { + "field": "type", + "headerName": "Type", + "type": "singleSelect", + "valueOptions": ["string", "number", "boolean"], + "editable": true + }, + { + "field": "description", + "headerName": "Description", + "editable": true, + "flex": 1 + } + ], + "default": [ + { + "property": "answer", + "type": "string", + "description": "answer to the user's question" + }, + { + "property": "source", + "type": "string", + "description": "sources used to answer the question, should be websites" + } + ], "additionalParams": true, - "id": "structuredOutputParser_0-input-exampleJson-string" + "id": "structuredOutputParser_0-input-jsonStructure-datagrid" } ], "inputAnchors": [], "inputs": { "autofixParser": true, - "exampleJson": "{\"answer\": \"the answer\", \"followupQuestions\": [\"question1\", 
\"question2\"]}" + "jsonStructure": [ + { + "property": "answer", + "type": "string", + "description": "answer to the user's question" + }, + { + "property": "source", + "type": "string", + "description": "sources used to answer the question, should be websites" + } + ] }, "outputAnchors": [ { @@ -442,11 +463,11 @@ "selected": false }, "selected": false, - "dragging": false, "positionAbsolute": { - "x": 498.2326128526694, - "y": 566.5473204649535 - } + "x": 170.3869571939727, + "y": 343.9298288967859 + }, + "dragging": false } ], "edges": [ @@ -478,7 +499,10 @@ "target": "llmChain_0", "targetHandle": "llmChain_0-input-outputParser-BaseLLMOutputParser", "type": "buttonedge", - "id": "structuredOutputParser_0-structuredOutputParser_0-output-structuredOutputParser-StructuredOutputParser|BaseLLMOutputParser|Runnable-llmChain_0-llmChain_0-input-outputParser-BaseLLMOutputParser" + "id": "structuredOutputParser_0-structuredOutputParser_0-output-structuredOutputParser-StructuredOutputParser|BaseLLMOutputParser|Runnable-llmChain_0-llmChain_0-input-outputParser-BaseLLMOutputParser", + "data": { + "label": "" + } } ] -} \ No newline at end of file +} From 113415e2c91a24b9d8a46d66f4106430c8fc8338 Mon Sep 17 00:00:00 2001 From: Kenny Vaneetvelde Date: Sat, 3 Feb 2024 21:32:00 +0100 Subject: [PATCH 11/45] remove basepath value from template --- .../chatflows/Advanced Structured Output Parser.json | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/packages/server/marketplaces/chatflows/Advanced Structured Output Parser.json b/packages/server/marketplaces/chatflows/Advanced Structured Output Parser.json index 41cbb08f..a48f5b2a 100644 --- a/packages/server/marketplaces/chatflows/Advanced Structured Output Parser.json +++ b/packages/server/marketplaces/chatflows/Advanced Structured Output Parser.json @@ -1,4 +1,6 @@ { + "description": "Return response as a JSON structure as specified by a Zod schema", + "badge": "NEW", "nodes": [ { "width": 300, @@ -409,7 +411,7 
@@ "frequencyPenalty": "", "presencePenalty": "", "timeout": "", - "basepath": "http://localhost:8901/v1", + "basepath": "", "baseOptions": "" }, "outputAnchors": [ From 5da3e3cc3e62efbf9894f4eb46a6e3958d15ab35 Mon Sep 17 00:00:00 2001 From: Darien Kindlund Date: Sun, 4 Feb 2024 12:27:18 -0500 Subject: [PATCH 12/45] Adding proper TLS/SSL support to poolOptions so that the similaritySearchVectorWithScore function actually connects to Postgres via TLS/SSL when specified in the additionalConfig. --- packages/components/nodes/vectorstores/Postgres/Postgres.ts | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/packages/components/nodes/vectorstores/Postgres/Postgres.ts b/packages/components/nodes/vectorstores/Postgres/Postgres.ts index 4e8bae32..375728e8 100644 --- a/packages/components/nodes/vectorstores/Postgres/Postgres.ts +++ b/packages/components/nodes/vectorstores/Postgres/Postgres.ts @@ -253,7 +253,8 @@ const similaritySearchVectorWithScore = async ( port: postgresConnectionOptions.port, user: postgresConnectionOptions.username, password: postgresConnectionOptions.password, - database: postgresConnectionOptions.database + database: postgresConnectionOptions.database, + ssl: postgresConnectionOptions.extra?.ssl } const pool = new Pool(poolOptions) const conn = await pool.connect() From 5543ef3de457c72170e66203db8a09d99a3022e4 Mon Sep 17 00:00:00 2001 From: vinodkiran Date: Sun, 4 Feb 2024 14:29:43 -0500 Subject: [PATCH 13/45] Marketplace: Revamped UI --- packages/server/src/index.ts | 44 ++- packages/ui/craco.config.js | 3 +- packages/ui/src/api/marketplaces.js | 4 +- .../ui-component/table/MarketplaceTable.js | 177 +++++++++ packages/ui/src/views/marketplaces/index.js | 351 +++++++++++++----- 5 files changed, 477 insertions(+), 102 deletions(-) create mode 100644 packages/ui/src/ui-component/table/MarketplaceTable.js diff --git a/packages/server/src/index.ts b/packages/server/src/index.ts index dbb5717d..8878da94 100644 --- 
a/packages/server/src/index.ts +++ b/packages/server/src/index.ts @@ -1223,7 +1223,6 @@ export class App { // Marketplaces // ---------------------------------------- - // Get all chatflows for marketplaces this.app.get('/api/v1/marketplaces/chatflows', async (req: Request, res: Response) => { const marketplaceDir = path.join(__dirname, '..', 'marketplaces', 'chatflows') const jsonsInDir = fs.readdirSync(marketplaceDir).filter((file) => path.extname(file) === '.json') @@ -1250,6 +1249,49 @@ export class App { return res.json(templates) }) + // Get all chatflows for marketplaces + this.app.get('/api/v1/marketplaces/templates', async (req: Request, res: Response) => { + let marketplaceDir = path.join(__dirname, '..', 'marketplaces', 'chatflows') + let jsonsInDir = fs.readdirSync(marketplaceDir).filter((file) => path.extname(file) === '.json') + let templates: any[] = [] + jsonsInDir.forEach((file, index) => { + const filePath = path.join(__dirname, '..', 'marketplaces', 'chatflows', file) + const fileData = fs.readFileSync(filePath) + const fileDataObj = JSON.parse(fileData.toString()) + const template = { + id: index, + templateName: file.split('.json')[0], + flowData: fileData.toString(), + badge: fileDataObj?.badge, + type: 'Chatflow', + description: fileDataObj?.description || '' + } + templates.push(template) + }) + + marketplaceDir = path.join(__dirname, '..', 'marketplaces', 'tools') + jsonsInDir = fs.readdirSync(marketplaceDir).filter((file) => path.extname(file) === '.json') + jsonsInDir.forEach((file, index) => { + const filePath = path.join(__dirname, '..', 'marketplaces', 'tools', file) + const fileData = fs.readFileSync(filePath) + const fileDataObj = JSON.parse(fileData.toString()) + const template = { + ...fileDataObj, + id: index, + type: 'Tool', + templateName: file.split('.json')[0] + } + templates.push(template) + }) + const FlowiseDocsQnA = templates.find((tmp) => tmp.name === 'Flowise Docs QnA') + const FlowiseDocsQnAIndex = 
templates.findIndex((tmp) => tmp.name === 'Flowise Docs QnA') + if (FlowiseDocsQnA && FlowiseDocsQnAIndex > 0) { + templates.splice(FlowiseDocsQnAIndex, 1) + templates.unshift(FlowiseDocsQnA) + } + return res.json(templates.sort((a, b) => a.templateName.localeCompare(b.templateName))) + }) + // Get all tools for marketplaces this.app.get('/api/v1/marketplaces/tools', async (req: Request, res: Response) => { const marketplaceDir = path.join(__dirname, '..', 'marketplaces', 'tools') diff --git a/packages/ui/craco.config.js b/packages/ui/craco.config.js index 142305e0..093e5ece 100644 --- a/packages/ui/craco.config.js +++ b/packages/ui/craco.config.js @@ -10,7 +10,8 @@ module.exports = { } } ] - } + }, + ignoreWarnings: [/Failed to parse source map/] // Ignore warnings about source maps } } } diff --git a/packages/ui/src/api/marketplaces.js b/packages/ui/src/api/marketplaces.js index 3fd4ae87..bba914a7 100644 --- a/packages/ui/src/api/marketplaces.js +++ b/packages/ui/src/api/marketplaces.js @@ -2,8 +2,10 @@ import client from './client' const getAllChatflowsMarketplaces = () => client.get('/marketplaces/chatflows') const getAllToolsMarketplaces = () => client.get('/marketplaces/tools') +const getAllTemplatesFromMarketplaces = () => client.get('/marketplaces/templates') export default { getAllChatflowsMarketplaces, - getAllToolsMarketplaces + getAllToolsMarketplaces, + getAllTemplatesFromMarketplaces } diff --git a/packages/ui/src/ui-component/table/MarketplaceTable.js b/packages/ui/src/ui-component/table/MarketplaceTable.js new file mode 100644 index 00000000..135d19e9 --- /dev/null +++ b/packages/ui/src/ui-component/table/MarketplaceTable.js @@ -0,0 +1,177 @@ +import PropTypes from 'prop-types' +import { useNavigate } from 'react-router-dom' +import { styled } from '@mui/material/styles' +import Table from '@mui/material/Table' +import TableBody from '@mui/material/TableBody' +import TableCell, { tableCellClasses } from '@mui/material/TableCell' +import 
TableContainer from '@mui/material/TableContainer' +import TableHead from '@mui/material/TableHead' +import TableRow from '@mui/material/TableRow' +import Paper from '@mui/material/Paper' +import Chip from '@mui/material/Chip' +import { Button, Typography } from '@mui/material' + +const StyledTableCell = styled(TableCell)(({ theme }) => ({ + [`&.${tableCellClasses.head}`]: { + backgroundColor: theme.palette.common.black, + color: theme.palette.common.white + }, + [`&.${tableCellClasses.body}`]: { + fontSize: 14 + } +})) + +const StyledTableRow = styled(TableRow)(({ theme }) => ({ + '&:nth-of-type(odd)': { + backgroundColor: theme.palette.action.hover + }, + // hide last border + '&:last-child td, &:last-child th': { + border: 0 + } +})) + +export const MarketplaceTable = ({ data, images, filterFunction, filterByBadge, filterByType }) => { + const navigate = useNavigate() + const openTemplate = (selectedTemplate) => { + if (selectedTemplate.flowData) { + goToCanvas(selectedTemplate) + } else { + goToTool(selectedTemplate) + } + } + + const goToTool = (selectedTool) => { + const dialogProp = { + title: selectedTool.templateName, + type: 'TEMPLATE', + data: selectedTool + } + setToolDialogProps(dialogProp) + setShowToolDialog(true) + } + + const goToCanvas = (selectedChatflow) => { + navigate(`/marketplace/${selectedChatflow.id}`, { state: selectedChatflow }) + } + + return ( + <> + + + + + + Name + + + Type + + + Description + + + Nodes + + +   + + + + + {data + .filter(filterByBadge) + .filter(filterByType) + .filter(filterFunction) + .map((row, index) => ( + + + + + + + + {row.type} + + + + {row.description || ''} + + + + {row.type === 'Chatflow' && images[row.id] && ( +
+ {images[row.id] + .slice(0, images[row.id].length > 5 ? 5 : images[row.id].length) + .map((img) => ( +
+ +
+ ))} + {images[row.id].length > 5 && ( + + + {images[row.id].length - 5} More + + )} +
+ )} +
+ + + {row.badge && + row.badge + .split(';') + .map((tag, index) => ( + + ))} + + +
+ ))} +
+
+
+ + ) +} + +MarketplaceTable.propTypes = { + data: PropTypes.array, + images: PropTypes.object, + filterFunction: PropTypes.func, + filterByBadge: PropTypes.func, + filterByType: PropTypes.func +} diff --git a/packages/ui/src/views/marketplaces/index.js b/packages/ui/src/views/marketplaces/index.js index 665341c4..ac018663 100644 --- a/packages/ui/src/views/marketplaces/index.js +++ b/packages/ui/src/views/marketplaces/index.js @@ -4,9 +4,25 @@ import { useSelector } from 'react-redux' import PropTypes from 'prop-types' // material-ui -import { Grid, Box, Stack, Tabs, Tab, Badge } from '@mui/material' +import { + Grid, + Box, + Stack, + Badge, + Toolbar, + TextField, + InputAdornment, + ButtonGroup, + ToggleButton, + InputLabel, + FormControl, + Select, + OutlinedInput, + Checkbox, + ListItemText +} from '@mui/material' import { useTheme } from '@mui/material/styles' -import { IconHierarchy, IconTool } from '@tabler/icons' +import { IconLayoutGrid, IconList, IconSearch } from '@tabler/icons' // project imports import MainCard from 'ui-component/cards/MainCard' @@ -23,6 +39,10 @@ import useApi from 'hooks/useApi' // const import { baseURL } from 'store/constant' +import * as React from 'react' +import ToggleButtonGroup from '@mui/material/ToggleButtonGroup' +import { MarketplaceTable } from '../../ui-component/table/MarketplaceTable' +import MenuItem from '@mui/material/MenuItem' function TabPanel(props) { const { children, value, index, ...other } = props @@ -45,6 +65,18 @@ TabPanel.propTypes = { value: PropTypes.number.isRequired } +const ITEM_HEIGHT = 48 +const ITEM_PADDING_TOP = 8 +const badges = ['POPULAR', 'NEW'] +const types = ['Chatflow', 'Tool'] +const MenuProps = { + PaperProps: { + style: { + maxHeight: ITEM_HEIGHT * 4.5 + ITEM_PADDING_TOP, + width: 250 + } + } +} // ==============================|| Marketplace ||============================== // const Marketplace = () => { @@ -53,16 +85,62 @@ const Marketplace = () => { const theme = useTheme() const 
customization = useSelector((state) => state.customization) - const [isChatflowsLoading, setChatflowsLoading] = useState(true) - const [isToolsLoading, setToolsLoading] = useState(true) + const [isLoading, setLoading] = useState(true) const [images, setImages] = useState({}) - const tabItems = ['Chatflows', 'Tools'] - const [value, setValue] = useState(0) + const [showToolDialog, setShowToolDialog] = useState(false) const [toolDialogProps, setToolDialogProps] = useState({}) - const getAllChatflowsMarketplacesApi = useApi(marketplacesApi.getAllChatflowsMarketplaces) - const getAllToolsMarketplacesApi = useApi(marketplacesApi.getAllToolsMarketplaces) + const getAllTemplatesMarketplacesApi = useApi(marketplacesApi.getAllTemplatesFromMarketplaces) + + const [view, setView] = React.useState(localStorage.getItem('mpDisplayStyle') || 'card') + const [search, setSearch] = useState('') + + const [badgeFilter, setBadgeFilter] = useState([]) + const [typeFilter, setTypeFilter] = useState([]) + + const handleBadgeFilterChange = (event) => { + const { + target: { value } + } = event + setBadgeFilter( + // On autofill we get a stringified value. + typeof value === 'string' ? value.split(',') : value + ) + } + const handleTypeFilterChange = (event) => { + const { + target: { value } + } = event + setTypeFilter( + // On autofill we get a stringified value. + typeof value === 'string' ? value.split(',') : value + ) + } + + const handleViewChange = (event, nextView) => { + localStorage.setItem('mpDisplayStyle', nextView) + setView(nextView) + } + + const onSearchChange = (event) => { + setSearch(event.target.value) + } + + function filterFlows(data) { + return ( + data.templateName.toLowerCase().indexOf(search.toLowerCase()) > -1 || + (data.description && data.description.toLowerCase().indexOf(search.toLowerCase()) > -1) + ) + } + + function filterByBadge(data) { + return badgeFilter.length > 0 ? 
badgeFilter.includes(data.badge) : true + } + + function filterByType(data) { + return typeFilter.length > 0 ? typeFilter.includes(data.type) : true + } const onUseTemplate = (selectedTool) => { const dialogProp = { @@ -90,39 +168,33 @@ const Marketplace = () => { navigate(`/marketplace/${selectedChatflow.id}`, { state: selectedChatflow }) } - const handleChange = (event, newValue) => { - setValue(newValue) - } - useEffect(() => { - getAllChatflowsMarketplacesApi.request() - getAllToolsMarketplacesApi.request() + getAllTemplatesMarketplacesApi.request() // eslint-disable-next-line react-hooks/exhaustive-deps }, []) useEffect(() => { - setChatflowsLoading(getAllChatflowsMarketplacesApi.loading) - }, [getAllChatflowsMarketplacesApi.loading]) + setLoading(getAllTemplatesMarketplacesApi.loading) + }, [getAllTemplatesMarketplacesApi.loading]) useEffect(() => { - setToolsLoading(getAllToolsMarketplacesApi.loading) - }, [getAllToolsMarketplacesApi.loading]) - - useEffect(() => { - if (getAllChatflowsMarketplacesApi.data) { + if (getAllTemplatesMarketplacesApi.data) { try { - const chatflows = getAllChatflowsMarketplacesApi.data + const flows = getAllTemplatesMarketplacesApi.data + const images = {} - for (let i = 0; i < chatflows.length; i += 1) { - const flowDataStr = chatflows[i].flowData - const flowData = JSON.parse(flowDataStr) - const nodes = flowData.nodes || [] - images[chatflows[i].id] = [] - for (let j = 0; j < nodes.length; j += 1) { - const imageSrc = `${baseURL}/api/v1/node-icon/${nodes[j].data.name}` - if (!images[chatflows[i].id].includes(imageSrc)) { - images[chatflows[i].id].push(imageSrc) + for (let i = 0; i < flows.length; i += 1) { + if (flows[i].flowData) { + const flowDataStr = flows[i].flowData + const flowData = JSON.parse(flowDataStr) + const nodes = flowData.nodes || [] + images[flows[i].id] = [] + for (let j = 0; j < nodes.length; j += 1) { + const imageSrc = `${baseURL}/api/v1/node-icon/${nodes[j].data.name}` + if 
(!images[flows[i].id].includes(imageSrc)) { + images[flows[i].id].push(imageSrc) + } } } } @@ -131,80 +203,161 @@ const Marketplace = () => { console.error(e) } } - }, [getAllChatflowsMarketplacesApi.data]) + }, [getAllTemplatesMarketplacesApi.data]) return ( <> - -

Marketplace

-
- - {tabItems.map((item, index) => ( - : } - iconPosition='start' - label={{item}} + + +

Marketplace

+ + + + ) + }} /> - ))} -
- {tabItems.map((item, index) => ( - - {item === 'Chatflows' && ( - - {!isChatflowsLoading && - getAllChatflowsMarketplacesApi.data && - getAllChatflowsMarketplacesApi.data.map((data, index) => ( - - {data.badge && ( - + + + Type + + + + + + Tag + + + + + + + + + + + + + + + + + + + {!isLoading && (!view || view === 'card') && getAllTemplatesMarketplacesApi.data && ( + <> + + {getAllTemplatesMarketplacesApi.data + .filter(filterByBadge) + .filter(filterByType) + .filter(filterFlows) + .map((data, index) => ( + + {data.badge && ( + + {data.type === 'Chatflow' && ( goToCanvas(data)} data={data} images={images[data.id]} /> - - )} - {!data.badge && ( - goToCanvas(data)} data={data} images={images[data.id]} /> - )} - - ))} - - )} - {item === 'Tools' && ( - - {!isToolsLoading && - getAllToolsMarketplacesApi.data && - getAllToolsMarketplacesApi.data.map((data, index) => ( - - {data.badge && ( - - goToTool(data)} /> - - )} - {!data.badge && goToTool(data)} />} - - ))} - - )} - - ))} - {((!isChatflowsLoading && (!getAllChatflowsMarketplacesApi.data || getAllChatflowsMarketplacesApi.data.length === 0)) || - (!isToolsLoading && (!getAllToolsMarketplacesApi.data || getAllToolsMarketplacesApi.data.length === 0))) && ( + )} + {data.type === 'Tool' && goToTool(data)} />} + + )} + {!data.badge && data.type === 'Chatflow' && ( + goToCanvas(data)} data={data} images={images[data.id]} /> + )} + {!data.badge && data.type === 'Tool' && goToTool(data)} />} + + ))} + + + )} + {!isLoading && view === 'list' && getAllTemplatesMarketplacesApi.data && ( + + )} + + {!isLoading && (!getAllTemplatesMarketplacesApi.data || getAllTemplatesMarketplacesApi.data.length === 0) && ( Date: Sun, 4 Feb 2024 15:49:34 -0500 Subject: [PATCH 14/45] Marketplace: removing unused server API --- packages/server/src/index.ts | 47 +----------------------------------- 1 file changed, 1 insertion(+), 46 deletions(-) diff --git a/packages/server/src/index.ts b/packages/server/src/index.ts index 8878da94..e69e15ba 
100644 --- a/packages/server/src/index.ts +++ b/packages/server/src/index.ts @@ -1223,33 +1223,7 @@ export class App { // Marketplaces // ---------------------------------------- - this.app.get('/api/v1/marketplaces/chatflows', async (req: Request, res: Response) => { - const marketplaceDir = path.join(__dirname, '..', 'marketplaces', 'chatflows') - const jsonsInDir = fs.readdirSync(marketplaceDir).filter((file) => path.extname(file) === '.json') - const templates: any[] = [] - jsonsInDir.forEach((file, index) => { - const filePath = path.join(__dirname, '..', 'marketplaces', 'chatflows', file) - const fileData = fs.readFileSync(filePath) - const fileDataObj = JSON.parse(fileData.toString()) - const template = { - id: index, - name: file.split('.json')[0], - flowData: fileData.toString(), - badge: fileDataObj?.badge, - description: fileDataObj?.description || '' - } - templates.push(template) - }) - const FlowiseDocsQnA = templates.find((tmp) => tmp.name === 'Flowise Docs QnA') - const FlowiseDocsQnAIndex = templates.findIndex((tmp) => tmp.name === 'Flowise Docs QnA') - if (FlowiseDocsQnA && FlowiseDocsQnAIndex > 0) { - templates.splice(FlowiseDocsQnAIndex, 1) - templates.unshift(FlowiseDocsQnA) - } - return res.json(templates) - }) - - // Get all chatflows for marketplaces + // Get all templates for marketplaces this.app.get('/api/v1/marketplaces/templates', async (req: Request, res: Response) => { let marketplaceDir = path.join(__dirname, '..', 'marketplaces', 'chatflows') let jsonsInDir = fs.readdirSync(marketplaceDir).filter((file) => path.extname(file) === '.json') @@ -1292,25 +1266,6 @@ export class App { return res.json(templates.sort((a, b) => a.templateName.localeCompare(b.templateName))) }) - // Get all tools for marketplaces - this.app.get('/api/v1/marketplaces/tools', async (req: Request, res: Response) => { - const marketplaceDir = path.join(__dirname, '..', 'marketplaces', 'tools') - const jsonsInDir = fs.readdirSync(marketplaceDir).filter((file) => 
path.extname(file) === '.json') - const templates: any[] = [] - jsonsInDir.forEach((file, index) => { - const filePath = path.join(__dirname, '..', 'marketplaces', 'tools', file) - const fileData = fs.readFileSync(filePath) - const fileDataObj = JSON.parse(fileData.toString()) - const template = { - ...fileDataObj, - id: index, - templateName: file.split('.json')[0] - } - templates.push(template) - }) - return res.json(templates) - }) - // ---------------------------------------- // Variables // ---------------------------------------- From 011a0a75c332257278c8ae5d8b42d5a596f3060f Mon Sep 17 00:00:00 2001 From: Ilyes Tascou Date: Mon, 5 Feb 2024 17:20:05 +0100 Subject: [PATCH 15/45] add kotlin files to folder-loader --- packages/components/nodes/documentloaders/Folder/Folder.ts | 1 + 1 file changed, 1 insertion(+) diff --git a/packages/components/nodes/documentloaders/Folder/Folder.ts b/packages/components/nodes/documentloaders/Folder/Folder.ts index f5d0c640..f8346e3c 100644 --- a/packages/components/nodes/documentloaders/Folder/Folder.ts +++ b/packages/components/nodes/documentloaders/Folder/Folder.ts @@ -70,6 +70,7 @@ class Folder_DocumentLoaders implements INode { '.css': (path) => new TextLoader(path), '.go': (path) => new TextLoader(path), // Go '.h': (path) => new TextLoader(path), // C++ Header files + '.kt': (path) => new TextLoader(path), // Kotlin '.java': (path) => new TextLoader(path), // Java '.js': (path) => new TextLoader(path), // JavaScript '.less': (path) => new TextLoader(path), // Less files From dcacb02a4758b07d161d9a14c3fbae2a32e871aa Mon Sep 17 00:00:00 2001 From: Henry Date: Tue, 6 Feb 2024 03:01:47 +0800 Subject: [PATCH 16/45] =?UTF-8?q?=F0=9F=A5=B3=20flowise-components@1.6.0?= =?UTF-8?q?=20release?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- packages/components/package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/components/package.json 
b/packages/components/package.json index bcb746b0..62ffba94 100644 --- a/packages/components/package.json +++ b/packages/components/package.json @@ -1,6 +1,6 @@ { "name": "flowise-components", - "version": "1.5.3", + "version": "1.6.0", "description": "Flowiseai Components", "main": "dist/src/index", "types": "dist/src/index.d.ts", From 9189b7013127e6b533376ddad596e831a1cdb6eb Mon Sep 17 00:00:00 2001 From: Henry Date: Tue, 6 Feb 2024 03:02:16 +0800 Subject: [PATCH 17/45] =?UTF-8?q?=F0=9F=A5=B3=20flowise-ui@1.5.0=20release?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- packages/ui/package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/ui/package.json b/packages/ui/package.json index 68d78c95..eb3bebda 100644 --- a/packages/ui/package.json +++ b/packages/ui/package.json @@ -1,6 +1,6 @@ { "name": "flowise-ui", - "version": "1.4.9", + "version": "1.5.0", "license": "SEE LICENSE IN LICENSE.md", "homepage": "https://flowiseai.com", "author": { From 7faaf13ccc3705030a4dd87d05cd60e9f69772c3 Mon Sep 17 00:00:00 2001 From: Henry Date: Tue, 6 Feb 2024 03:02:52 +0800 Subject: [PATCH 18/45] =?UTF-8?q?=F0=9F=A5=B3=20flowise@1.5.0=20release?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- package.json | 2 +- packages/server/package.json | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/package.json b/package.json index 451f7855..5f5f5812 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "flowise", - "version": "1.4.12", + "version": "1.5.0", "private": true, "homepage": "https://flowiseai.com", "workspaces": [ diff --git a/packages/server/package.json b/packages/server/package.json index c7ed13ac..0d7dea77 100644 --- a/packages/server/package.json +++ b/packages/server/package.json @@ -1,6 +1,6 @@ { "name": "flowise", - "version": "1.4.12", + "version": "1.5.0", "description": "Flowiseai Server", "main": 
"dist/index", "types": "dist/index.d.ts", From 8990b78e104d20d511335844046b9fc81e7d8c94 Mon Sep 17 00:00:00 2001 From: vinodkiran Date: Mon, 5 Feb 2024 15:09:44 -0500 Subject: [PATCH 19/45] Marketplace : Added categories to chatflows --- .../chatflows/API Agent OpenAI.json | 1 + .../marketplaces/chatflows/API Agent.json | 1 + .../marketplaces/chatflows/Antonym.json | 1 + .../marketplaces/chatflows/AutoGPT.json | 1 + .../marketplaces/chatflows/BabyAGI.json | 1 + .../marketplaces/chatflows/CSV Agent.json | 1 + .../chatflows/Chat with a Podcast.json | 1 + .../marketplaces/chatflows/ChatGPTPlugin.json | 1 + .../marketplaces/chatflows/Claude LLM.json | 1 + .../chatflows/Context Chat Engine.json | 1 + .../chatflows/Conversational Agent.json | 1 + .../Conversational Retrieval Agent.json | 1 + .../Conversational Retrieval QA Chain.json | 1 + .../chatflows/Flowise Docs QnA.json | 1 + .../chatflows/HuggingFace LLM Chain.json | 1 + .../server/marketplaces/chatflows/IfElse.json | 1 + .../chatflows/Image Generation.json | 1 + .../chatflows/Input Moderation.json | 1 + .../chatflows/List Output Parser.json | 1 + .../marketplaces/chatflows/Local QnA.json | 1 + .../chatflows/Long Term Memory.json | 1 + .../chatflows/Metadata Filter.json | 1 + .../chatflows/Multi Prompt Chain.json | 1 + .../chatflows/Multi Retrieval QA Chain.json | 1 + .../chatflows/Multiple VectorDB.json | 1 + .../marketplaces/chatflows/OpenAI Agent.json | 1 + .../chatflows/OpenAI Assistant.json | 1 + .../Prompt Chaining with VectorStore.json | 1 + .../chatflows/Prompt Chaining.json | 1 + .../marketplaces/chatflows/Query Engine.json | 1 + .../marketplaces/chatflows/ReAct Agent.json | 1 + .../marketplaces/chatflows/Replicate LLM.json | 1 + .../marketplaces/chatflows/SQL DB Chain.json | 1 + .../marketplaces/chatflows/SQL Prompt.json | 1 + .../chatflows/Simple Chat Engine.json | 1 + .../chatflows/Simple Conversation Chain.json | 1 + .../chatflows/Simple LLM Chain.json | 1 + .../chatflows/Structured Output Parser.json 
| 1 + .../chatflows/SubQuestion Query Engine.json | 1 + .../marketplaces/chatflows/Translator.json | 1 + .../chatflows/Vectara RAG Chain.json | 2 + .../marketplaces/chatflows/WebBrowser.json | 1 + .../marketplaces/chatflows/WebPage QnA.json | 1 + packages/server/src/index.ts | 2 + .../ui-component/table/MarketplaceTable.js | 58 +++++++------------ packages/ui/src/views/marketplaces/index.js | 1 + 46 files changed, 67 insertions(+), 38 deletions(-) diff --git a/packages/server/marketplaces/chatflows/API Agent OpenAI.json b/packages/server/marketplaces/chatflows/API Agent OpenAI.json index 87f6d6d2..c0497a29 100644 --- a/packages/server/marketplaces/chatflows/API Agent OpenAI.json +++ b/packages/server/marketplaces/chatflows/API Agent OpenAI.json @@ -1,5 +1,6 @@ { "description": "Use OpenAI Function Agent and Chain to automatically decide which API to call, generating url and body request from conversation", + "categories": "Buffer Memory,ChainTool,API Chain,ChatOpenAI,OpenAI Function Agent,Langchain", "nodes": [ { "width": 300, diff --git a/packages/server/marketplaces/chatflows/API Agent.json b/packages/server/marketplaces/chatflows/API Agent.json index af99be9d..32469df9 100644 --- a/packages/server/marketplaces/chatflows/API Agent.json +++ b/packages/server/marketplaces/chatflows/API Agent.json @@ -1,5 +1,6 @@ { "description": "Given API docs, agent automatically decide which API to call, generating url and body request from conversation", + "categories": "Buffer Memory,ChainTool,API Chain,ChatOpenAI,Conversational Agent,Langchain", "nodes": [ { "width": 300, diff --git a/packages/server/marketplaces/chatflows/Antonym.json b/packages/server/marketplaces/chatflows/Antonym.json index ef997feb..1240bf30 100644 --- a/packages/server/marketplaces/chatflows/Antonym.json +++ b/packages/server/marketplaces/chatflows/Antonym.json @@ -1,5 +1,6 @@ { "description": "Output antonym of given user input using few-shot prompt template built with examples", + "categories": "Few 
Shot Prompt,ChatOpenAI,LLM Chain,Langchain", "nodes": [ { "width": 300, diff --git a/packages/server/marketplaces/chatflows/AutoGPT.json b/packages/server/marketplaces/chatflows/AutoGPT.json index 4edbf823..e03a7408 100644 --- a/packages/server/marketplaces/chatflows/AutoGPT.json +++ b/packages/server/marketplaces/chatflows/AutoGPT.json @@ -1,5 +1,6 @@ { "description": "Use AutoGPT - Autonomous agent with chain of thoughts for self-guided task completion", + "categories": "AutoGPT,SERP Tool,File Read/Write,ChatOpenAI,Pinecone,Langchain", "nodes": [ { "width": 300, diff --git a/packages/server/marketplaces/chatflows/BabyAGI.json b/packages/server/marketplaces/chatflows/BabyAGI.json index 3137d511..52198b90 100644 --- a/packages/server/marketplaces/chatflows/BabyAGI.json +++ b/packages/server/marketplaces/chatflows/BabyAGI.json @@ -1,5 +1,6 @@ { "description": "Use BabyAGI to create tasks and reprioritize for a given objective", + "categories": "BabyAGI,ChatOpenAI,Pinecone,Langchain", "nodes": [ { "width": 300, diff --git a/packages/server/marketplaces/chatflows/CSV Agent.json b/packages/server/marketplaces/chatflows/CSV Agent.json index e16377d2..d23e93ad 100644 --- a/packages/server/marketplaces/chatflows/CSV Agent.json +++ b/packages/server/marketplaces/chatflows/CSV Agent.json @@ -1,5 +1,6 @@ { "description": "Analyse and summarize CSV data", + "categories": "CSV Agent,ChatOpenAI,Langchain", "nodes": [ { "width": 300, diff --git a/packages/server/marketplaces/chatflows/Chat with a Podcast.json b/packages/server/marketplaces/chatflows/Chat with a Podcast.json index f8d8d26c..c1f216e6 100644 --- a/packages/server/marketplaces/chatflows/Chat with a Podcast.json +++ b/packages/server/marketplaces/chatflows/Chat with a Podcast.json @@ -1,5 +1,6 @@ { "description": "Engage with data sources such as YouTube Transcripts, Google, and more through intelligent Q&A interactions", + "categories": "Memory Vector Store,SearchAPI,ChatOpenAI,Conversational Retrieval QA 
Chain,Langchain", "nodes": [ { "width": 300, diff --git a/packages/server/marketplaces/chatflows/ChatGPTPlugin.json b/packages/server/marketplaces/chatflows/ChatGPTPlugin.json index 12bea993..d9390f57 100644 --- a/packages/server/marketplaces/chatflows/ChatGPTPlugin.json +++ b/packages/server/marketplaces/chatflows/ChatGPTPlugin.json @@ -1,5 +1,6 @@ { "description": "Use ChatGPT Plugins within LangChain abstractions with GET and POST Tools", + "categories": "ChatGPT Plugin,HTTP GET/POST,ChatOpenAI,MRKL Agent,Langchain", "nodes": [ { "width": 300, diff --git a/packages/server/marketplaces/chatflows/Claude LLM.json b/packages/server/marketplaces/chatflows/Claude LLM.json index 7b32de48..39d1e6c6 100644 --- a/packages/server/marketplaces/chatflows/Claude LLM.json +++ b/packages/server/marketplaces/chatflows/Claude LLM.json @@ -1,5 +1,6 @@ { "description": "Use Anthropic Claude with 200k context window to ingest whole document for QnA", + "categories": "Buffer Memory,Prompt Template,Conversation Chain,ChatAnthropic,Langchain", "nodes": [ { "width": 300, diff --git a/packages/server/marketplaces/chatflows/Context Chat Engine.json b/packages/server/marketplaces/chatflows/Context Chat Engine.json index 475c6b3a..26efcb1c 100644 --- a/packages/server/marketplaces/chatflows/Context Chat Engine.json +++ b/packages/server/marketplaces/chatflows/Context Chat Engine.json @@ -1,5 +1,6 @@ { "description": "Answer question based on retrieved documents (context) with built-in memory to remember conversation using LlamaIndex", + "categories": "Text File,Prompt Template,ChatOpenAI,Conversation Chain,Pinecone,LlamaIndex,Redis", "badge": "NEW", "nodes": [ { diff --git a/packages/server/marketplaces/chatflows/Conversational Agent.json b/packages/server/marketplaces/chatflows/Conversational Agent.json index 031a29c0..419e9a79 100644 --- a/packages/server/marketplaces/chatflows/Conversational Agent.json +++ b/packages/server/marketplaces/chatflows/Conversational Agent.json @@ -1,5 +1,6 @@ 
{ "description": "A conversational agent for a chat model which utilize chat specific prompts", + "categories": "Calculator Tool,Buffer Memory,SerpAPI,ChatOpenAI,Conversational Agent,Langchain", "nodes": [ { "width": 300, diff --git a/packages/server/marketplaces/chatflows/Conversational Retrieval Agent.json b/packages/server/marketplaces/chatflows/Conversational Retrieval Agent.json index 40c689f5..37dd3759 100644 --- a/packages/server/marketplaces/chatflows/Conversational Retrieval Agent.json +++ b/packages/server/marketplaces/chatflows/Conversational Retrieval Agent.json @@ -1,5 +1,6 @@ { "description": "Agent optimized for vector retrieval during conversation and answering questions based on previous dialogue.", + "categories": "Retriever Tool,Buffer Memory,ChatOpenAI,Conversational Retrieval Agent, Pinecone,Langchain", "badge": "POPULAR", "nodes": [ { diff --git a/packages/server/marketplaces/chatflows/Conversational Retrieval QA Chain.json b/packages/server/marketplaces/chatflows/Conversational Retrieval QA Chain.json index e73a9d28..61bc6e39 100644 --- a/packages/server/marketplaces/chatflows/Conversational Retrieval QA Chain.json +++ b/packages/server/marketplaces/chatflows/Conversational Retrieval QA Chain.json @@ -1,5 +1,6 @@ { "description": "Text file QnA using conversational retrieval QA chain", + "categories": "TextFile,ChatOpenAI,Conversational Retrieval QA Chain,Pinecone,Langchain", "badge": "POPULAR", "nodes": [ { diff --git a/packages/server/marketplaces/chatflows/Flowise Docs QnA.json b/packages/server/marketplaces/chatflows/Flowise Docs QnA.json index 6975fc68..6eca2944 100644 --- a/packages/server/marketplaces/chatflows/Flowise Docs QnA.json +++ b/packages/server/marketplaces/chatflows/Flowise Docs QnA.json @@ -1,5 +1,6 @@ { "description": "Flowise Docs Github QnA using conversational retrieval QA chain", + "categories": "Memory Vector Store,Github Loader,ChatOpenAI,Conversational Retrieval QA Chain,Langchain", "badge": "POPULAR", "nodes": [ { 
diff --git a/packages/server/marketplaces/chatflows/HuggingFace LLM Chain.json b/packages/server/marketplaces/chatflows/HuggingFace LLM Chain.json index 93009574..f87bf65a 100644 --- a/packages/server/marketplaces/chatflows/HuggingFace LLM Chain.json +++ b/packages/server/marketplaces/chatflows/HuggingFace LLM Chain.json @@ -1,5 +1,6 @@ { "description": "Simple LLM Chain using HuggingFace Inference API on falcon-7b-instruct model", + "categories": "HuggingFace,LLM Chain,Langchain", "nodes": [ { "width": 300, diff --git a/packages/server/marketplaces/chatflows/IfElse.json b/packages/server/marketplaces/chatflows/IfElse.json index f3fddebf..c856eac0 100644 --- a/packages/server/marketplaces/chatflows/IfElse.json +++ b/packages/server/marketplaces/chatflows/IfElse.json @@ -1,5 +1,6 @@ { "description": "Split flows based on if else condition", + "categories": "IfElse Function,ChatOpenAI,OpenAI,LLM Chain,Langchain", "badge": "new", "nodes": [ { diff --git a/packages/server/marketplaces/chatflows/Image Generation.json b/packages/server/marketplaces/chatflows/Image Generation.json index 7dafcedf..8b07ae86 100644 --- a/packages/server/marketplaces/chatflows/Image Generation.json +++ b/packages/server/marketplaces/chatflows/Image Generation.json @@ -1,6 +1,7 @@ { "description": "Generate image using Replicate Stability text-to-image generative AI model", "badge": "NEW", + "categories": "Replicate,ChatOpenAI,LLM Chain,Langchain", "nodes": [ { "width": 300, diff --git a/packages/server/marketplaces/chatflows/Input Moderation.json b/packages/server/marketplaces/chatflows/Input Moderation.json index ed823a21..cd7f2cb5 100644 --- a/packages/server/marketplaces/chatflows/Input Moderation.json +++ b/packages/server/marketplaces/chatflows/Input Moderation.json @@ -1,6 +1,7 @@ { "description": "Detect text that could generate harmful output and prevent it from being sent to the language model", "badge": "NEW", + "categories": "Moderation,ChatOpenAI,LLM Chain,Langchain", "nodes": [ { 
"width": 300, diff --git a/packages/server/marketplaces/chatflows/List Output Parser.json b/packages/server/marketplaces/chatflows/List Output Parser.json index eaf56dff..a67fb72b 100644 --- a/packages/server/marketplaces/chatflows/List Output Parser.json +++ b/packages/server/marketplaces/chatflows/List Output Parser.json @@ -1,6 +1,7 @@ { "description": "Return response as a list (array) instead of a string/text", "badge": "NEW", + "categories": "CSV Output Parser,ChatOpenAI,LLM Chain,Langchain", "nodes": [ { "width": 300, diff --git a/packages/server/marketplaces/chatflows/Local QnA.json b/packages/server/marketplaces/chatflows/Local QnA.json index 6f78cb05..11deae15 100644 --- a/packages/server/marketplaces/chatflows/Local QnA.json +++ b/packages/server/marketplaces/chatflows/Local QnA.json @@ -1,6 +1,7 @@ { "description": "QnA chain using Ollama local LLM, LocalAI embedding model, and Faiss local vector store", "badge": "POPULAR", + "categories": "Text File,ChatOllama,Conversational Retrieval QA Chain,Faiss,Langchain", "nodes": [ { "width": 300, diff --git a/packages/server/marketplaces/chatflows/Long Term Memory.json b/packages/server/marketplaces/chatflows/Long Term Memory.json index 1b3e48e1..ba2d2330 100644 --- a/packages/server/marketplaces/chatflows/Long Term Memory.json +++ b/packages/server/marketplaces/chatflows/Long Term Memory.json @@ -1,5 +1,6 @@ { "description": "Use long term memory like Zep to differentiate conversations between users with sessionId", + "categories": "ChatOpenAI,Conversational Retrieval QA Chain,Zep Memory,Qdrant,Langchain", "nodes": [ { "width": 300, diff --git a/packages/server/marketplaces/chatflows/Metadata Filter.json b/packages/server/marketplaces/chatflows/Metadata Filter.json index ef928854..13ca8745 100644 --- a/packages/server/marketplaces/chatflows/Metadata Filter.json +++ b/packages/server/marketplaces/chatflows/Metadata Filter.json @@ -1,5 +1,6 @@ { "description": "Upsert multiple files with metadata and filter by 
it using conversational retrieval QA chain", + "categories": "Text File,PDF File,ChatOpenAI,Conversational Retrieval QA Chain,Pinecone,Langchain", "badge": "POPULAR", "nodes": [ { diff --git a/packages/server/marketplaces/chatflows/Multi Prompt Chain.json b/packages/server/marketplaces/chatflows/Multi Prompt Chain.json index 314e24a6..89935fcd 100644 --- a/packages/server/marketplaces/chatflows/Multi Prompt Chain.json +++ b/packages/server/marketplaces/chatflows/Multi Prompt Chain.json @@ -1,5 +1,6 @@ { "description": "A chain that automatically picks an appropriate prompt from multiple prompts", + "categories": "ChatOpenAI,Multi Prompt Chain,Langchain", "nodes": [ { "width": 300, diff --git a/packages/server/marketplaces/chatflows/Multi Retrieval QA Chain.json b/packages/server/marketplaces/chatflows/Multi Retrieval QA Chain.json index 8c9e8537..63a02edb 100644 --- a/packages/server/marketplaces/chatflows/Multi Retrieval QA Chain.json +++ b/packages/server/marketplaces/chatflows/Multi Retrieval QA Chain.json @@ -1,5 +1,6 @@ { "description": "A chain that automatically picks an appropriate retriever from multiple different vector databases", + "categories": "ChatOpenAI,Multi Retrieval QA Chain,Pinecone,Chroma,Supabase,Langchain", "nodes": [ { "width": 300, diff --git a/packages/server/marketplaces/chatflows/Multiple VectorDB.json b/packages/server/marketplaces/chatflows/Multiple VectorDB.json index e5a16caa..7ffc040a 100644 --- a/packages/server/marketplaces/chatflows/Multiple VectorDB.json +++ b/packages/server/marketplaces/chatflows/Multiple VectorDB.json @@ -1,5 +1,6 @@ { "description": "Use the agent to choose between multiple different vector databases, with the ability to use other tools", + "categories": "Buffer Memory,ChatOpenAI,Chain Tool,Retrieval QA Chain,Redis,Faiss,Conversational Agent,Langchain", "nodes": [ { "width": 300, diff --git a/packages/server/marketplaces/chatflows/OpenAI Agent.json b/packages/server/marketplaces/chatflows/OpenAI Agent.json 
index e3e80dcc..c4cf11f2 100644 --- a/packages/server/marketplaces/chatflows/OpenAI Agent.json +++ b/packages/server/marketplaces/chatflows/OpenAI Agent.json @@ -1,5 +1,6 @@ { "description": "An agent that uses OpenAI's Function Calling functionality to pick the tool and args to call", + "categories": "Buffer Memory,Custom Tool, SerpAPI,OpenAI Function,Calculator Tool,ChatOpenAI,Langchain", "nodes": [ { "width": 300, diff --git a/packages/server/marketplaces/chatflows/OpenAI Assistant.json b/packages/server/marketplaces/chatflows/OpenAI Assistant.json index e9311c97..f1595fd6 100644 --- a/packages/server/marketplaces/chatflows/OpenAI Assistant.json +++ b/packages/server/marketplaces/chatflows/OpenAI Assistant.json @@ -1,5 +1,6 @@ { "description": "OpenAI Assistant that has instructions and can leverage models, tools, and knowledge to respond to user queries", + "categories": "Custom Tool, SerpAPI,OpenAI Assistant,Calculator Tool,Langchain", "badge": "NEW", "nodes": [ { diff --git a/packages/server/marketplaces/chatflows/Prompt Chaining with VectorStore.json b/packages/server/marketplaces/chatflows/Prompt Chaining with VectorStore.json index c2060e79..3b8507b9 100644 --- a/packages/server/marketplaces/chatflows/Prompt Chaining with VectorStore.json +++ b/packages/server/marketplaces/chatflows/Prompt Chaining with VectorStore.json @@ -1,5 +1,6 @@ { "description": "Use chat history to rephrase user question, and answer the rephrased question using retrieved docs from vector store", + "categories": "ChatOpenAI,LLM Chain,SingleStore,Langchain", "badge": "POPULAR", "nodes": [ { diff --git a/packages/server/marketplaces/chatflows/Prompt Chaining.json b/packages/server/marketplaces/chatflows/Prompt Chaining.json index 267d8222..3ed5f95c 100644 --- a/packages/server/marketplaces/chatflows/Prompt Chaining.json +++ b/packages/server/marketplaces/chatflows/Prompt Chaining.json @@ -1,5 +1,6 @@ { "description": "Use output from a chain as prompt for another chain", + 
"categories": "Custom Tool,OpenAI,LLM Chain,Langchain", "nodes": [ { "width": 300, diff --git a/packages/server/marketplaces/chatflows/Query Engine.json b/packages/server/marketplaces/chatflows/Query Engine.json index 82553333..5697735b 100644 --- a/packages/server/marketplaces/chatflows/Query Engine.json +++ b/packages/server/marketplaces/chatflows/Query Engine.json @@ -1,5 +1,6 @@ { "description": "Stateless query engine designed to answer question over your data using LlamaIndex", + "categories": "ChatAnthropic,Compact and Refine,Pinecone,LlamaIndex", "badge": "NEW", "nodes": [ { diff --git a/packages/server/marketplaces/chatflows/ReAct Agent.json b/packages/server/marketplaces/chatflows/ReAct Agent.json index e4a7fab8..9831bcf6 100644 --- a/packages/server/marketplaces/chatflows/ReAct Agent.json +++ b/packages/server/marketplaces/chatflows/ReAct Agent.json @@ -1,5 +1,6 @@ { "description": "An agent that uses ReAct logic to decide what action to take", + "categories": "Calculator Tool,SerpAPI,ChatOpenAI,MRKL Agent,Langchain", "nodes": [ { "width": 300, diff --git a/packages/server/marketplaces/chatflows/Replicate LLM.json b/packages/server/marketplaces/chatflows/Replicate LLM.json index 832e85c7..ef8ab7f2 100644 --- a/packages/server/marketplaces/chatflows/Replicate LLM.json +++ b/packages/server/marketplaces/chatflows/Replicate LLM.json @@ -1,5 +1,6 @@ { "description": "Use Replicate API that runs Llama 13b v2 model with LLMChain", + "categories": "Replicate,LLM Chain,Langchain", "nodes": [ { "width": 300, diff --git a/packages/server/marketplaces/chatflows/SQL DB Chain.json b/packages/server/marketplaces/chatflows/SQL DB Chain.json index 92e42178..3c143a33 100644 --- a/packages/server/marketplaces/chatflows/SQL DB Chain.json +++ b/packages/server/marketplaces/chatflows/SQL DB Chain.json @@ -1,5 +1,6 @@ { "description": "Answer questions over a SQL database", + "categories": "ChatOpenAI,Sql Database Chain,Langchain", "nodes": [ { "width": 300, diff --git 
a/packages/server/marketplaces/chatflows/SQL Prompt.json b/packages/server/marketplaces/chatflows/SQL Prompt.json index 406c2e52..e79a95d0 100644 --- a/packages/server/marketplaces/chatflows/SQL Prompt.json +++ b/packages/server/marketplaces/chatflows/SQL Prompt.json @@ -1,5 +1,6 @@ { "description": "Manually construct prompts to query a SQL database", + "categories": "IfElse Function,Variable Set/Get,Custom JS Function,ChatOpenAI,LLM Chain,Langchain", "badge": "new", "nodes": [ { diff --git a/packages/server/marketplaces/chatflows/Simple Chat Engine.json b/packages/server/marketplaces/chatflows/Simple Chat Engine.json index 630b6833..5510873d 100644 --- a/packages/server/marketplaces/chatflows/Simple Chat Engine.json +++ b/packages/server/marketplaces/chatflows/Simple Chat Engine.json @@ -1,5 +1,6 @@ { "description": "Simple chat engine to handle back and forth conversations using LlamaIndex", + "categories": "BufferMemory,AzureChatOpenAI,LlamaIndex", "badge": "NEW", "nodes": [ { diff --git a/packages/server/marketplaces/chatflows/Simple Conversation Chain.json b/packages/server/marketplaces/chatflows/Simple Conversation Chain.json index 1ffbee44..e6c934ac 100644 --- a/packages/server/marketplaces/chatflows/Simple Conversation Chain.json +++ b/packages/server/marketplaces/chatflows/Simple Conversation Chain.json @@ -1,5 +1,6 @@ { "description": "Basic example of Conversation Chain with built-in memory - works exactly like ChatGPT", + "categories": "Buffer Memory,ChatOpenAI,Conversation Chain,Langchain", "badge": "POPULAR", "nodes": [ { diff --git a/packages/server/marketplaces/chatflows/Simple LLM Chain.json b/packages/server/marketplaces/chatflows/Simple LLM Chain.json index f2e3a4a2..12298441 100644 --- a/packages/server/marketplaces/chatflows/Simple LLM Chain.json +++ b/packages/server/marketplaces/chatflows/Simple LLM Chain.json @@ -1,5 +1,6 @@ { "description": "Basic example of stateless (no memory) LLM Chain with a Prompt Template and LLM Model", + 
"categories": "OpenAI,LLM Chain,Langchain", "nodes": [ { "width": 300, diff --git a/packages/server/marketplaces/chatflows/Structured Output Parser.json b/packages/server/marketplaces/chatflows/Structured Output Parser.json index 92336443..20385a4b 100644 --- a/packages/server/marketplaces/chatflows/Structured Output Parser.json +++ b/packages/server/marketplaces/chatflows/Structured Output Parser.json @@ -1,5 +1,6 @@ { "description": "Return response as a specified JSON structure instead of a string/text", + "categories": "Structured Output Parser,ChatOpenAI,LLM Chain,Langchain", "badge": "NEW", "nodes": [ { diff --git a/packages/server/marketplaces/chatflows/SubQuestion Query Engine.json b/packages/server/marketplaces/chatflows/SubQuestion Query Engine.json index f14607da..2042625b 100644 --- a/packages/server/marketplaces/chatflows/SubQuestion Query Engine.json +++ b/packages/server/marketplaces/chatflows/SubQuestion Query Engine.json @@ -1,5 +1,6 @@ { "description": "Breaks down query into sub questions for each relevant data source, then combine into final response", + "categories": "Sub Question Query Engine,Sticky Note,QueryEngine Tool,Compact and Refine,ChatOpenAI,Pinecone,LlamaIndex", "badge": "NEW", "nodes": [ { diff --git a/packages/server/marketplaces/chatflows/Translator.json b/packages/server/marketplaces/chatflows/Translator.json index 0bf49252..118457ef 100644 --- a/packages/server/marketplaces/chatflows/Translator.json +++ b/packages/server/marketplaces/chatflows/Translator.json @@ -1,5 +1,6 @@ { "description": "Language translation using LLM Chain with a Chat Prompt Template and Chat Model", + "categories": "Chat Prompt Template,ChatOpenAI,LLM Chain,Langchain", "nodes": [ { "width": 300, diff --git a/packages/server/marketplaces/chatflows/Vectara RAG Chain.json b/packages/server/marketplaces/chatflows/Vectara RAG Chain.json index d3bb5bf8..82e341c4 100644 --- a/packages/server/marketplaces/chatflows/Vectara RAG Chain.json +++ 
b/packages/server/marketplaces/chatflows/Vectara RAG Chain.json @@ -1,4 +1,6 @@ { + "description": "QA chain for Vectara", + "categories": "Vectara QA Chain,Vectara,Langchain", "nodes": [ { "width": 300, diff --git a/packages/server/marketplaces/chatflows/WebBrowser.json b/packages/server/marketplaces/chatflows/WebBrowser.json index 2376e29e..75e3cf0a 100644 --- a/packages/server/marketplaces/chatflows/WebBrowser.json +++ b/packages/server/marketplaces/chatflows/WebBrowser.json @@ -1,5 +1,6 @@ { "description": "Conversational Agent with ability to visit a website and extract information", + "categories": "Buffer Memory,Web Browser,ChatOpenAI,Conversational Agent,Langchain", "nodes": [ { "width": 300, diff --git a/packages/server/marketplaces/chatflows/WebPage QnA.json b/packages/server/marketplaces/chatflows/WebPage QnA.json index a5a53233..089d195d 100644 --- a/packages/server/marketplaces/chatflows/WebPage QnA.json +++ b/packages/server/marketplaces/chatflows/WebPage QnA.json @@ -1,5 +1,6 @@ { "description": "Scrape web pages for QnA with long term memory Motorhead and return source documents", + "categories": "HtmlToMarkdown,Cheerio Web Scraper,ChatOpenAI,Redis,Pinecone,Langchain", "badge": "POPULAR", "nodes": [ { diff --git a/packages/server/src/index.ts b/packages/server/src/index.ts index e69e15ba..a65fabf3 100644 --- a/packages/server/src/index.ts +++ b/packages/server/src/index.ts @@ -1237,6 +1237,7 @@ export class App { templateName: file.split('.json')[0], flowData: fileData.toString(), badge: fileDataObj?.badge, + categories: fileDataObj?.categories, type: 'Chatflow', description: fileDataObj?.description || '' } @@ -1253,6 +1254,7 @@ export class App { ...fileDataObj, id: index, type: 'Tool', + categories: '', templateName: file.split('.json')[0] } templates.push(template) diff --git a/packages/ui/src/ui-component/table/MarketplaceTable.js b/packages/ui/src/ui-component/table/MarketplaceTable.js index 135d19e9..714f10b0 100644 --- 
a/packages/ui/src/ui-component/table/MarketplaceTable.js +++ b/packages/ui/src/ui-component/table/MarketplaceTable.js @@ -103,45 +103,27 @@ export const MarketplaceTable = ({ data, images, filterFunction, filterByBadge, - {row.type === 'Chatflow' && images[row.id] && ( -
- {images[row.id] - .slice(0, images[row.id].length > 5 ? 5 : images[row.id].length) - .map((img) => ( -
- -
+
+ {row.categories && + row.categories + .split(',') + .map((tag, index) => ( + ))} - {images[row.id].length > 5 && ( - - + {images[row.id].length - 5} More - - )} -
- )} +
diff --git a/packages/ui/src/views/marketplaces/index.js b/packages/ui/src/views/marketplaces/index.js index ac018663..f2055da6 100644 --- a/packages/ui/src/views/marketplaces/index.js +++ b/packages/ui/src/views/marketplaces/index.js @@ -129,6 +129,7 @@ const Marketplace = () => { function filterFlows(data) { return ( + data.categories?.toLowerCase().indexOf(search.toLowerCase()) > -1 || data.templateName.toLowerCase().indexOf(search.toLowerCase()) > -1 || (data.description && data.description.toLowerCase().indexOf(search.toLowerCase()) > -1) ) From 842d70bf0de1e8fb974bca2fa09aabd37adcfc23 Mon Sep 17 00:00:00 2001 From: vinodkiran Date: Mon, 5 Feb 2024 18:09:46 -0500 Subject: [PATCH 20/45] Marketplace: Adding filters and a collapsible panel show/hide --- .../chatflows/API Agent OpenAI.json | 1 + .../marketplaces/chatflows/API Agent.json | 1 + .../marketplaces/chatflows/Antonym.json | 1 + .../marketplaces/chatflows/AutoGPT.json | 1 + .../marketplaces/chatflows/BabyAGI.json | 1 + .../marketplaces/chatflows/CSV Agent.json | 1 + .../chatflows/Chat with a Podcast.json | 1 + .../marketplaces/chatflows/ChatGPTPlugin.json | 1 + .../marketplaces/chatflows/Claude LLM.json | 1 + .../chatflows/Context Chat Engine.json | 1 + .../chatflows/Conversational Agent.json | 1 + .../Conversational Retrieval Agent.json | 1 + .../Conversational Retrieval QA Chain.json | 1 + .../chatflows/Flowise Docs QnA.json | 1 + .../chatflows/HuggingFace LLM Chain.json | 1 + .../server/marketplaces/chatflows/IfElse.json | 1 + .../chatflows/Image Generation.json | 1 + .../chatflows/Input Moderation.json | 1 + .../chatflows/List Output Parser.json | 1 + .../marketplaces/chatflows/Local QnA.json | 1 + .../chatflows/Long Term Memory.json | 1 + .../chatflows/Metadata Filter.json | 1 + .../chatflows/Multi Prompt Chain.json | 1 + .../chatflows/Multi Retrieval QA Chain.json | 1 + .../chatflows/Multiple VectorDB.json | 1 + .../marketplaces/chatflows/OpenAI Agent.json | 1 + .../chatflows/OpenAI Assistant.json | 
1 + .../Prompt Chaining with VectorStore.json | 1 + .../chatflows/Prompt Chaining.json | 1 + .../marketplaces/chatflows/Query Engine.json | 1 + .../marketplaces/chatflows/ReAct Agent.json | 1 + .../marketplaces/chatflows/Replicate LLM.json | 1 + .../marketplaces/chatflows/SQL DB Chain.json | 1 + .../marketplaces/chatflows/SQL Prompt.json | 1 + .../chatflows/Simple Chat Engine.json | 1 + .../chatflows/Simple Conversation Chain.json | 1 + .../chatflows/Simple LLM Chain.json | 1 + .../chatflows/Structured Output Parser.json | 1 + .../chatflows/SubQuestion Query Engine.json | 1 + .../marketplaces/chatflows/Translator.json | 1 + .../chatflows/Vectara RAG Chain.json | 1 + .../marketplaces/chatflows/WebBrowser.json | 3 +- .../marketplaces/chatflows/WebPage QnA.json | 1 + .../tools/Add Hubspot Contact.json | 1 + .../tools/Create Airtable Record.json | 1 + .../tools/Get Current DateTime.json | 1 + .../marketplaces/tools/Get Stock Mover.json | 1 + .../marketplaces/tools/Make Webhook.json | 1 + .../tools/Send Discord Message.json | 1 + .../tools/Send Slack Message.json | 1 + .../tools/Send Teams Message.json | 1 + .../marketplaces/tools/SendGrid Email.json | 1 + packages/server/src/index.ts | 3 + .../ui-component/table/MarketplaceTable.js | 32 +++- packages/ui/src/views/marketplaces/index.js | 170 ++++++++++++------ 55 files changed, 199 insertions(+), 60 deletions(-) diff --git a/packages/server/marketplaces/chatflows/API Agent OpenAI.json b/packages/server/marketplaces/chatflows/API Agent OpenAI.json index c0497a29..621529fc 100644 --- a/packages/server/marketplaces/chatflows/API Agent OpenAI.json +++ b/packages/server/marketplaces/chatflows/API Agent OpenAI.json @@ -1,6 +1,7 @@ { "description": "Use OpenAI Function Agent and Chain to automatically decide which API to call, generating url and body request from conversation", "categories": "Buffer Memory,ChainTool,API Chain,ChatOpenAI,OpenAI Function Agent,Langchain", + "framework": "Langchain", "nodes": [ { "width": 300, 
diff --git a/packages/server/marketplaces/chatflows/API Agent.json b/packages/server/marketplaces/chatflows/API Agent.json index 32469df9..9d5a6c54 100644 --- a/packages/server/marketplaces/chatflows/API Agent.json +++ b/packages/server/marketplaces/chatflows/API Agent.json @@ -1,6 +1,7 @@ { "description": "Given API docs, agent automatically decide which API to call, generating url and body request from conversation", "categories": "Buffer Memory,ChainTool,API Chain,ChatOpenAI,Conversational Agent,Langchain", + "framework": "Langchain", "nodes": [ { "width": 300, diff --git a/packages/server/marketplaces/chatflows/Antonym.json b/packages/server/marketplaces/chatflows/Antonym.json index 1240bf30..97c5af71 100644 --- a/packages/server/marketplaces/chatflows/Antonym.json +++ b/packages/server/marketplaces/chatflows/Antonym.json @@ -1,6 +1,7 @@ { "description": "Output antonym of given user input using few-shot prompt template built with examples", "categories": "Few Shot Prompt,ChatOpenAI,LLM Chain,Langchain", + "framework": "Langchain", "nodes": [ { "width": 300, diff --git a/packages/server/marketplaces/chatflows/AutoGPT.json b/packages/server/marketplaces/chatflows/AutoGPT.json index e03a7408..c0ed0807 100644 --- a/packages/server/marketplaces/chatflows/AutoGPT.json +++ b/packages/server/marketplaces/chatflows/AutoGPT.json @@ -1,6 +1,7 @@ { "description": "Use AutoGPT - Autonomous agent with chain of thoughts for self-guided task completion", "categories": "AutoGPT,SERP Tool,File Read/Write,ChatOpenAI,Pinecone,Langchain", + "framework": "Langchain", "nodes": [ { "width": 300, diff --git a/packages/server/marketplaces/chatflows/BabyAGI.json b/packages/server/marketplaces/chatflows/BabyAGI.json index 52198b90..14976ad3 100644 --- a/packages/server/marketplaces/chatflows/BabyAGI.json +++ b/packages/server/marketplaces/chatflows/BabyAGI.json @@ -1,6 +1,7 @@ { "description": "Use BabyAGI to create tasks and reprioritize for a given objective", "categories": 
"BabyAGI,ChatOpenAI,Pinecone,Langchain", + "framework": "Langchain", "nodes": [ { "width": 300, diff --git a/packages/server/marketplaces/chatflows/CSV Agent.json b/packages/server/marketplaces/chatflows/CSV Agent.json index d23e93ad..3439625b 100644 --- a/packages/server/marketplaces/chatflows/CSV Agent.json +++ b/packages/server/marketplaces/chatflows/CSV Agent.json @@ -1,6 +1,7 @@ { "description": "Analyse and summarize CSV data", "categories": "CSV Agent,ChatOpenAI,Langchain", + "framework": "Langchain", "nodes": [ { "width": 300, diff --git a/packages/server/marketplaces/chatflows/Chat with a Podcast.json b/packages/server/marketplaces/chatflows/Chat with a Podcast.json index c1f216e6..c87b3f2c 100644 --- a/packages/server/marketplaces/chatflows/Chat with a Podcast.json +++ b/packages/server/marketplaces/chatflows/Chat with a Podcast.json @@ -1,6 +1,7 @@ { "description": "Engage with data sources such as YouTube Transcripts, Google, and more through intelligent Q&A interactions", "categories": "Memory Vector Store,SearchAPI,ChatOpenAI,Conversational Retrieval QA Chain,Langchain", + "framework": "Langchain", "nodes": [ { "width": 300, diff --git a/packages/server/marketplaces/chatflows/ChatGPTPlugin.json b/packages/server/marketplaces/chatflows/ChatGPTPlugin.json index d9390f57..3777b637 100644 --- a/packages/server/marketplaces/chatflows/ChatGPTPlugin.json +++ b/packages/server/marketplaces/chatflows/ChatGPTPlugin.json @@ -1,6 +1,7 @@ { "description": "Use ChatGPT Plugins within LangChain abstractions with GET and POST Tools", "categories": "ChatGPT Plugin,HTTP GET/POST,ChatOpenAI,MRKL Agent,Langchain", + "framework": "Langchain", "nodes": [ { "width": 300, diff --git a/packages/server/marketplaces/chatflows/Claude LLM.json b/packages/server/marketplaces/chatflows/Claude LLM.json index 39d1e6c6..a0fd4e1a 100644 --- a/packages/server/marketplaces/chatflows/Claude LLM.json +++ b/packages/server/marketplaces/chatflows/Claude LLM.json @@ -1,6 +1,7 @@ { 
"description": "Use Anthropic Claude with 200k context window to ingest whole document for QnA", "categories": "Buffer Memory,Prompt Template,Conversation Chain,ChatAnthropic,Langchain", + "framework": "Langchain", "nodes": [ { "width": 300, diff --git a/packages/server/marketplaces/chatflows/Context Chat Engine.json b/packages/server/marketplaces/chatflows/Context Chat Engine.json index 26efcb1c..15d3dade 100644 --- a/packages/server/marketplaces/chatflows/Context Chat Engine.json +++ b/packages/server/marketplaces/chatflows/Context Chat Engine.json @@ -1,6 +1,7 @@ { "description": "Answer question based on retrieved documents (context) with built-in memory to remember conversation using LlamaIndex", "categories": "Text File,Prompt Template,ChatOpenAI,Conversation Chain,Pinecone,LlamaIndex,Redis", + "framework": "LlamaIndex", "badge": "NEW", "nodes": [ { diff --git a/packages/server/marketplaces/chatflows/Conversational Agent.json b/packages/server/marketplaces/chatflows/Conversational Agent.json index 419e9a79..4cb736a0 100644 --- a/packages/server/marketplaces/chatflows/Conversational Agent.json +++ b/packages/server/marketplaces/chatflows/Conversational Agent.json @@ -1,6 +1,7 @@ { "description": "A conversational agent for a chat model which utilize chat specific prompts", "categories": "Calculator Tool,Buffer Memory,SerpAPI,ChatOpenAI,Conversational Agent,Langchain", + "framework": "Langchain", "nodes": [ { "width": 300, diff --git a/packages/server/marketplaces/chatflows/Conversational Retrieval Agent.json b/packages/server/marketplaces/chatflows/Conversational Retrieval Agent.json index 37dd3759..a4ec6b5b 100644 --- a/packages/server/marketplaces/chatflows/Conversational Retrieval Agent.json +++ b/packages/server/marketplaces/chatflows/Conversational Retrieval Agent.json @@ -2,6 +2,7 @@ "description": "Agent optimized for vector retrieval during conversation and answering questions based on previous dialogue.", "categories": "Retriever Tool,Buffer 
Memory,ChatOpenAI,Conversational Retrieval Agent, Pinecone,Langchain", "badge": "POPULAR", + "framework": "Langchain", "nodes": [ { "width": 300, diff --git a/packages/server/marketplaces/chatflows/Conversational Retrieval QA Chain.json b/packages/server/marketplaces/chatflows/Conversational Retrieval QA Chain.json index 61bc6e39..e360141d 100644 --- a/packages/server/marketplaces/chatflows/Conversational Retrieval QA Chain.json +++ b/packages/server/marketplaces/chatflows/Conversational Retrieval QA Chain.json @@ -2,6 +2,7 @@ "description": "Text file QnA using conversational retrieval QA chain", "categories": "TextFile,ChatOpenAI,Conversational Retrieval QA Chain,Pinecone,Langchain", "badge": "POPULAR", + "framework": "Langchain", "nodes": [ { "width": 300, diff --git a/packages/server/marketplaces/chatflows/Flowise Docs QnA.json b/packages/server/marketplaces/chatflows/Flowise Docs QnA.json index 6eca2944..31d65c48 100644 --- a/packages/server/marketplaces/chatflows/Flowise Docs QnA.json +++ b/packages/server/marketplaces/chatflows/Flowise Docs QnA.json @@ -2,6 +2,7 @@ "description": "Flowise Docs Github QnA using conversational retrieval QA chain", "categories": "Memory Vector Store,Github Loader,ChatOpenAI,Conversational Retrieval QA Chain,Langchain", "badge": "POPULAR", + "framework": "Langchain", "nodes": [ { "width": 300, diff --git a/packages/server/marketplaces/chatflows/HuggingFace LLM Chain.json b/packages/server/marketplaces/chatflows/HuggingFace LLM Chain.json index f87bf65a..6e7154b7 100644 --- a/packages/server/marketplaces/chatflows/HuggingFace LLM Chain.json +++ b/packages/server/marketplaces/chatflows/HuggingFace LLM Chain.json @@ -1,6 +1,7 @@ { "description": "Simple LLM Chain using HuggingFace Inference API on falcon-7b-instruct model", "categories": "HuggingFace,LLM Chain,Langchain", + "framework": "Langchain", "nodes": [ { "width": 300, diff --git a/packages/server/marketplaces/chatflows/IfElse.json 
b/packages/server/marketplaces/chatflows/IfElse.json index c856eac0..e3b66f44 100644 --- a/packages/server/marketplaces/chatflows/IfElse.json +++ b/packages/server/marketplaces/chatflows/IfElse.json @@ -1,6 +1,7 @@ { "description": "Split flows based on if else condition", "categories": "IfElse Function,ChatOpenAI,OpenAI,LLM Chain,Langchain", + "framework": "Langchain", "badge": "new", "nodes": [ { diff --git a/packages/server/marketplaces/chatflows/Image Generation.json b/packages/server/marketplaces/chatflows/Image Generation.json index 8b07ae86..46cb79ec 100644 --- a/packages/server/marketplaces/chatflows/Image Generation.json +++ b/packages/server/marketplaces/chatflows/Image Generation.json @@ -2,6 +2,7 @@ "description": "Generate image using Replicate Stability text-to-image generative AI model", "badge": "NEW", "categories": "Replicate,ChatOpenAI,LLM Chain,Langchain", + "framework": "Langchain", "nodes": [ { "width": 300, diff --git a/packages/server/marketplaces/chatflows/Input Moderation.json b/packages/server/marketplaces/chatflows/Input Moderation.json index cd7f2cb5..bd449777 100644 --- a/packages/server/marketplaces/chatflows/Input Moderation.json +++ b/packages/server/marketplaces/chatflows/Input Moderation.json @@ -2,6 +2,7 @@ "description": "Detect text that could generate harmful output and prevent it from being sent to the language model", "badge": "NEW", "categories": "Moderation,ChatOpenAI,LLM Chain,Langchain", + "framework": "Langchain", "nodes": [ { "width": 300, diff --git a/packages/server/marketplaces/chatflows/List Output Parser.json b/packages/server/marketplaces/chatflows/List Output Parser.json index a67fb72b..0eb269b4 100644 --- a/packages/server/marketplaces/chatflows/List Output Parser.json +++ b/packages/server/marketplaces/chatflows/List Output Parser.json @@ -2,6 +2,7 @@ "description": "Return response as a list (array) instead of a string/text", "badge": "NEW", "categories": "CSV Output Parser,ChatOpenAI,LLM Chain,Langchain", + 
"framework": "Langchain", "nodes": [ { "width": 300, diff --git a/packages/server/marketplaces/chatflows/Local QnA.json b/packages/server/marketplaces/chatflows/Local QnA.json index 11deae15..2637f259 100644 --- a/packages/server/marketplaces/chatflows/Local QnA.json +++ b/packages/server/marketplaces/chatflows/Local QnA.json @@ -2,6 +2,7 @@ "description": "QnA chain using Ollama local LLM, LocalAI embedding model, and Faiss local vector store", "badge": "POPULAR", "categories": "Text File,ChatOllama,Conversational Retrieval QA Chain,Faiss,Langchain", + "framework": "Langchain", "nodes": [ { "width": 300, diff --git a/packages/server/marketplaces/chatflows/Long Term Memory.json b/packages/server/marketplaces/chatflows/Long Term Memory.json index ba2d2330..c5681d3d 100644 --- a/packages/server/marketplaces/chatflows/Long Term Memory.json +++ b/packages/server/marketplaces/chatflows/Long Term Memory.json @@ -1,6 +1,7 @@ { "description": "Use long term memory like Zep to differentiate conversations between users with sessionId", "categories": "ChatOpenAI,Conversational Retrieval QA Chain,Zep Memory,Qdrant,Langchain", + "framework": "Langchain", "nodes": [ { "width": 300, diff --git a/packages/server/marketplaces/chatflows/Metadata Filter.json b/packages/server/marketplaces/chatflows/Metadata Filter.json index 13ca8745..38ad9211 100644 --- a/packages/server/marketplaces/chatflows/Metadata Filter.json +++ b/packages/server/marketplaces/chatflows/Metadata Filter.json @@ -2,6 +2,7 @@ "description": "Upsert multiple files with metadata and filter by it using conversational retrieval QA chain", "categories": "Text File,PDF File,ChatOpenAI,Conversational Retrieval QA Chain,Pinecone,Langchain", "badge": "POPULAR", + "framework": "Langchain", "nodes": [ { "width": 300, diff --git a/packages/server/marketplaces/chatflows/Multi Prompt Chain.json b/packages/server/marketplaces/chatflows/Multi Prompt Chain.json index 89935fcd..97cca308 100644 --- 
a/packages/server/marketplaces/chatflows/Multi Prompt Chain.json +++ b/packages/server/marketplaces/chatflows/Multi Prompt Chain.json @@ -1,6 +1,7 @@ { "description": "A chain that automatically picks an appropriate prompt from multiple prompts", "categories": "ChatOpenAI,Multi Prompt Chain,Langchain", + "framework": "Langchain", "nodes": [ { "width": 300, diff --git a/packages/server/marketplaces/chatflows/Multi Retrieval QA Chain.json b/packages/server/marketplaces/chatflows/Multi Retrieval QA Chain.json index 63a02edb..6b8f2c33 100644 --- a/packages/server/marketplaces/chatflows/Multi Retrieval QA Chain.json +++ b/packages/server/marketplaces/chatflows/Multi Retrieval QA Chain.json @@ -1,6 +1,7 @@ { "description": "A chain that automatically picks an appropriate retriever from multiple different vector databases", "categories": "ChatOpenAI,Multi Retrieval QA Chain,Pinecone,Chroma,Supabase,Langchain", + "framework": "Langchain", "nodes": [ { "width": 300, diff --git a/packages/server/marketplaces/chatflows/Multiple VectorDB.json b/packages/server/marketplaces/chatflows/Multiple VectorDB.json index 7ffc040a..e8141ad7 100644 --- a/packages/server/marketplaces/chatflows/Multiple VectorDB.json +++ b/packages/server/marketplaces/chatflows/Multiple VectorDB.json @@ -1,6 +1,7 @@ { "description": "Use the agent to choose between multiple different vector databases, with the ability to use other tools", "categories": "Buffer Memory,ChatOpenAI,Chain Tool,Retrieval QA Chain,Redis,Faiss,Conversational Agent,Langchain", + "framework": "Langchain", "nodes": [ { "width": 300, diff --git a/packages/server/marketplaces/chatflows/OpenAI Agent.json b/packages/server/marketplaces/chatflows/OpenAI Agent.json index c4cf11f2..6f35e595 100644 --- a/packages/server/marketplaces/chatflows/OpenAI Agent.json +++ b/packages/server/marketplaces/chatflows/OpenAI Agent.json @@ -1,6 +1,7 @@ { "description": "An agent that uses OpenAI's Function Calling functionality to pick the tool and args to 
call", "categories": "Buffer Memory,Custom Tool, SerpAPI,OpenAI Function,Calculator Tool,ChatOpenAI,Langchain", + "framework": "Langchain", "nodes": [ { "width": 300, diff --git a/packages/server/marketplaces/chatflows/OpenAI Assistant.json b/packages/server/marketplaces/chatflows/OpenAI Assistant.json index f1595fd6..73c01413 100644 --- a/packages/server/marketplaces/chatflows/OpenAI Assistant.json +++ b/packages/server/marketplaces/chatflows/OpenAI Assistant.json @@ -1,6 +1,7 @@ { "description": "OpenAI Assistant that has instructions and can leverage models, tools, and knowledge to respond to user queries", "categories": "Custom Tool, SerpAPI,OpenAI Assistant,Calculator Tool,Langchain", + "framework": "Langchain", "badge": "NEW", "nodes": [ { diff --git a/packages/server/marketplaces/chatflows/Prompt Chaining with VectorStore.json b/packages/server/marketplaces/chatflows/Prompt Chaining with VectorStore.json index 3b8507b9..d225e41a 100644 --- a/packages/server/marketplaces/chatflows/Prompt Chaining with VectorStore.json +++ b/packages/server/marketplaces/chatflows/Prompt Chaining with VectorStore.json @@ -2,6 +2,7 @@ "description": "Use chat history to rephrase user question, and answer the rephrased question using retrieved docs from vector store", "categories": "ChatOpenAI,LLM Chain,SingleStore,Langchain", "badge": "POPULAR", + "framework": "Langchain", "nodes": [ { "width": 300, diff --git a/packages/server/marketplaces/chatflows/Prompt Chaining.json b/packages/server/marketplaces/chatflows/Prompt Chaining.json index 3ed5f95c..42debac8 100644 --- a/packages/server/marketplaces/chatflows/Prompt Chaining.json +++ b/packages/server/marketplaces/chatflows/Prompt Chaining.json @@ -1,6 +1,7 @@ { "description": "Use output from a chain as prompt for another chain", "categories": "Custom Tool,OpenAI,LLM Chain,Langchain", + "framework": "Langchain", "nodes": [ { "width": 300, diff --git a/packages/server/marketplaces/chatflows/Query Engine.json 
b/packages/server/marketplaces/chatflows/Query Engine.json index 5697735b..b3a3c292 100644 --- a/packages/server/marketplaces/chatflows/Query Engine.json +++ b/packages/server/marketplaces/chatflows/Query Engine.json @@ -2,6 +2,7 @@ "description": "Stateless query engine designed to answer question over your data using LlamaIndex", "categories": "ChatAnthropic,Compact and Refine,Pinecone,LlamaIndex", "badge": "NEW", + "framework": "LlamaIndex", "nodes": [ { "width": 300, diff --git a/packages/server/marketplaces/chatflows/ReAct Agent.json b/packages/server/marketplaces/chatflows/ReAct Agent.json index 9831bcf6..5fd191fe 100644 --- a/packages/server/marketplaces/chatflows/ReAct Agent.json +++ b/packages/server/marketplaces/chatflows/ReAct Agent.json @@ -1,6 +1,7 @@ { "description": "An agent that uses ReAct logic to decide what action to take", "categories": "Calculator Tool,SerpAPI,ChatOpenAI,MRKL Agent,Langchain", + "framework": "Langchain", "nodes": [ { "width": 300, diff --git a/packages/server/marketplaces/chatflows/Replicate LLM.json b/packages/server/marketplaces/chatflows/Replicate LLM.json index ef8ab7f2..578983cf 100644 --- a/packages/server/marketplaces/chatflows/Replicate LLM.json +++ b/packages/server/marketplaces/chatflows/Replicate LLM.json @@ -1,6 +1,7 @@ { "description": "Use Replicate API that runs Llama 13b v2 model with LLMChain", "categories": "Replicate,LLM Chain,Langchain", + "framework": "Langchain", "nodes": [ { "width": 300, diff --git a/packages/server/marketplaces/chatflows/SQL DB Chain.json b/packages/server/marketplaces/chatflows/SQL DB Chain.json index 3c143a33..ec9d465d 100644 --- a/packages/server/marketplaces/chatflows/SQL DB Chain.json +++ b/packages/server/marketplaces/chatflows/SQL DB Chain.json @@ -1,6 +1,7 @@ { "description": "Answer questions over a SQL database", "categories": "ChatOpenAI,Sql Database Chain,Langchain", + "framework": "Langchain", "nodes": [ { "width": 300, diff --git 
a/packages/server/marketplaces/chatflows/SQL Prompt.json b/packages/server/marketplaces/chatflows/SQL Prompt.json index e79a95d0..8d2691c6 100644 --- a/packages/server/marketplaces/chatflows/SQL Prompt.json +++ b/packages/server/marketplaces/chatflows/SQL Prompt.json @@ -1,6 +1,7 @@ { "description": "Manually construct prompts to query a SQL database", "categories": "IfElse Function,Variable Set/Get,Custom JS Function,ChatOpenAI,LLM Chain,Langchain", + "framework": "Langchain", "badge": "new", "nodes": [ { diff --git a/packages/server/marketplaces/chatflows/Simple Chat Engine.json b/packages/server/marketplaces/chatflows/Simple Chat Engine.json index 5510873d..fd17ded1 100644 --- a/packages/server/marketplaces/chatflows/Simple Chat Engine.json +++ b/packages/server/marketplaces/chatflows/Simple Chat Engine.json @@ -1,6 +1,7 @@ { "description": "Simple chat engine to handle back and forth conversations using LlamaIndex", "categories": "BufferMemory,AzureChatOpenAI,LlamaIndex", + "framework": "LlamaIndex", "badge": "NEW", "nodes": [ { diff --git a/packages/server/marketplaces/chatflows/Simple Conversation Chain.json b/packages/server/marketplaces/chatflows/Simple Conversation Chain.json index e6c934ac..53cfeace 100644 --- a/packages/server/marketplaces/chatflows/Simple Conversation Chain.json +++ b/packages/server/marketplaces/chatflows/Simple Conversation Chain.json @@ -1,6 +1,7 @@ { "description": "Basic example of Conversation Chain with built-in memory - works exactly like ChatGPT", "categories": "Buffer Memory,ChatOpenAI,Conversation Chain,Langchain", + "framework": "Langchain", "badge": "POPULAR", "nodes": [ { diff --git a/packages/server/marketplaces/chatflows/Simple LLM Chain.json b/packages/server/marketplaces/chatflows/Simple LLM Chain.json index 12298441..36a5a8d8 100644 --- a/packages/server/marketplaces/chatflows/Simple LLM Chain.json +++ b/packages/server/marketplaces/chatflows/Simple LLM Chain.json @@ -1,6 +1,7 @@ { "description": "Basic example of 
stateless (no memory) LLM Chain with a Prompt Template and LLM Model", "categories": "OpenAI,LLM Chain,Langchain", + "framework": "Langchain", "nodes": [ { "width": 300, diff --git a/packages/server/marketplaces/chatflows/Structured Output Parser.json b/packages/server/marketplaces/chatflows/Structured Output Parser.json index 20385a4b..9801a90f 100644 --- a/packages/server/marketplaces/chatflows/Structured Output Parser.json +++ b/packages/server/marketplaces/chatflows/Structured Output Parser.json @@ -1,6 +1,7 @@ { "description": "Return response as a specified JSON structure instead of a string/text", "categories": "Structured Output Parser,ChatOpenAI,LLM Chain,Langchain", + "framework": "Langchain", "badge": "NEW", "nodes": [ { diff --git a/packages/server/marketplaces/chatflows/SubQuestion Query Engine.json b/packages/server/marketplaces/chatflows/SubQuestion Query Engine.json index 2042625b..620712c4 100644 --- a/packages/server/marketplaces/chatflows/SubQuestion Query Engine.json +++ b/packages/server/marketplaces/chatflows/SubQuestion Query Engine.json @@ -1,6 +1,7 @@ { "description": "Breaks down query into sub questions for each relevant data source, then combine into final response", "categories": "Sub Question Query Engine,Sticky Note,QueryEngine Tool,Compact and Refine,ChatOpenAI,Pinecone,LlamaIndex", + "framework": "LlamaIndex", "badge": "NEW", "nodes": [ { diff --git a/packages/server/marketplaces/chatflows/Translator.json b/packages/server/marketplaces/chatflows/Translator.json index 118457ef..5c8a3cc5 100644 --- a/packages/server/marketplaces/chatflows/Translator.json +++ b/packages/server/marketplaces/chatflows/Translator.json @@ -1,6 +1,7 @@ { "description": "Language translation using LLM Chain with a Chat Prompt Template and Chat Model", "categories": "Chat Prompt Template,ChatOpenAI,LLM Chain,Langchain", + "framework": "Langchain", "nodes": [ { "width": 300, diff --git a/packages/server/marketplaces/chatflows/Vectara RAG Chain.json 
b/packages/server/marketplaces/chatflows/Vectara RAG Chain.json index 82e341c4..2ef1474a 100644 --- a/packages/server/marketplaces/chatflows/Vectara RAG Chain.json +++ b/packages/server/marketplaces/chatflows/Vectara RAG Chain.json @@ -1,6 +1,7 @@ { "description": "QA chain for Vectara", "categories": "Vectara QA Chain,Vectara,Langchain", + "framework": "Langchain", "nodes": [ { "width": 300, diff --git a/packages/server/marketplaces/chatflows/WebBrowser.json b/packages/server/marketplaces/chatflows/WebBrowser.json index 75e3cf0a..232bd83e 100644 --- a/packages/server/marketplaces/chatflows/WebBrowser.json +++ b/packages/server/marketplaces/chatflows/WebBrowser.json @@ -1,6 +1,7 @@ { "description": "Conversational Agent with ability to visit a website and extract information", - "categories": "Buffer Memory,Web Browser,ChatOpenAI,Conversational Agent,Langchain", + "categories": "Buffer Memory,Web Browser,ChatOpenAI,Conversational Agent", + "framework": "Langchain", "nodes": [ { "width": 300, diff --git a/packages/server/marketplaces/chatflows/WebPage QnA.json b/packages/server/marketplaces/chatflows/WebPage QnA.json index 089d195d..50806161 100644 --- a/packages/server/marketplaces/chatflows/WebPage QnA.json +++ b/packages/server/marketplaces/chatflows/WebPage QnA.json @@ -1,6 +1,7 @@ { "description": "Scrape web pages for QnA with long term memory Motorhead and return source documents", "categories": "HtmlToMarkdown,Cheerio Web Scraper,ChatOpenAI,Redis,Pinecone,Langchain", + "framework": "Langchain", "badge": "POPULAR", "nodes": [ { diff --git a/packages/server/marketplaces/tools/Add Hubspot Contact.json b/packages/server/marketplaces/tools/Add Hubspot Contact.json index 584df4c3..f8715dcd 100644 --- a/packages/server/marketplaces/tools/Add Hubspot Contact.json +++ b/packages/server/marketplaces/tools/Add Hubspot Contact.json @@ -1,5 +1,6 @@ { "name": "add_contact_hubspot", + "framework": "Langchain", "description": "Add new contact to Hubspot", "color": 
"linear-gradient(rgb(85,198,123), rgb(0,230,99))", "iconSrc": "https://cdn.worldvectorlogo.com/logos/hubspot-1.svg", diff --git a/packages/server/marketplaces/tools/Create Airtable Record.json b/packages/server/marketplaces/tools/Create Airtable Record.json index c52c9199..5471b650 100644 --- a/packages/server/marketplaces/tools/Create Airtable Record.json +++ b/packages/server/marketplaces/tools/Create Airtable Record.json @@ -1,5 +1,6 @@ { "name": "add_airtable", + "framework": "Langchain", "description": "Add column1, column2 to Airtable", "color": "linear-gradient(rgb(125,71,222), rgb(128,102,23))", "iconSrc": "https://raw.githubusercontent.com/gilbarbara/logos/main/logos/airtable.svg", diff --git a/packages/server/marketplaces/tools/Get Current DateTime.json b/packages/server/marketplaces/tools/Get Current DateTime.json index b6860b30..b8279e33 100644 --- a/packages/server/marketplaces/tools/Get Current DateTime.json +++ b/packages/server/marketplaces/tools/Get Current DateTime.json @@ -1,5 +1,6 @@ { "name": "todays_date_time", + "framework": "Langchain", "description": "Useful to get todays day, date and time.", "color": "linear-gradient(rgb(117,118,129), rgb(230,10,250))", "iconSrc": "https://raw.githubusercontent.com/gilbarbara/logos/main/logos/javascript.svg", diff --git a/packages/server/marketplaces/tools/Get Stock Mover.json b/packages/server/marketplaces/tools/Get Stock Mover.json index 9108cc50..27d444b2 100644 --- a/packages/server/marketplaces/tools/Get Stock Mover.json +++ b/packages/server/marketplaces/tools/Get Stock Mover.json @@ -1,5 +1,6 @@ { "name": "get_stock_movers", + "framework": "Langchain", "description": "Get the stocks that has biggest price/volume moves, e.g. 
actives, gainers, losers, etc.", "iconSrc": "https://rapidapi.com/cdn/images?url=https://rapidapi-prod-apis.s3.amazonaws.com/9c/e743343bdd41edad39a3fdffd5b974/016c33699f51603ae6fe4420c439124b.png", "color": "linear-gradient(rgb(191,202,167), rgb(143,202,246))", diff --git a/packages/server/marketplaces/tools/Make Webhook.json b/packages/server/marketplaces/tools/Make Webhook.json index 24d00900..93e67a3f 100644 --- a/packages/server/marketplaces/tools/Make Webhook.json +++ b/packages/server/marketplaces/tools/Make Webhook.json @@ -1,5 +1,6 @@ { "name": "make_webhook", + "framework": "Langchain", "description": "Useful when you need to send message to Discord", "color": "linear-gradient(rgb(19,94,2), rgb(19,124,59))", "iconSrc": "https://github.com/FlowiseAI/Flowise/assets/26460777/517fdab2-8a6e-4781-b3c8-fb92cc78aa0b", diff --git a/packages/server/marketplaces/tools/Send Discord Message.json b/packages/server/marketplaces/tools/Send Discord Message.json index bbfaaa90..2d7adcac 100644 --- a/packages/server/marketplaces/tools/Send Discord Message.json +++ b/packages/server/marketplaces/tools/Send Discord Message.json @@ -1,5 +1,6 @@ { "name": "send_message_to_discord_channel", + "framework": "Langchain", "description": "Send message to Discord channel", "color": "linear-gradient(rgb(155,190,84), rgb(176,69,245))", "iconSrc": "https://raw.githubusercontent.com/gilbarbara/logos/main/logos/discord-icon.svg", diff --git a/packages/server/marketplaces/tools/Send Slack Message.json b/packages/server/marketplaces/tools/Send Slack Message.json index f15d4050..5516b69a 100644 --- a/packages/server/marketplaces/tools/Send Slack Message.json +++ b/packages/server/marketplaces/tools/Send Slack Message.json @@ -1,5 +1,6 @@ { "name": "send_message_to_slack_channel", + "framework": "Langchain", "description": "Send message to Slack channel", "color": "linear-gradient(rgb(155,190,84), rgb(176,69,245))", "iconSrc": 
"https://raw.githubusercontent.com/gilbarbara/logos/main/logos/slack-icon.svg", diff --git a/packages/server/marketplaces/tools/Send Teams Message.json b/packages/server/marketplaces/tools/Send Teams Message.json index 1af8111b..8ec32abd 100644 --- a/packages/server/marketplaces/tools/Send Teams Message.json +++ b/packages/server/marketplaces/tools/Send Teams Message.json @@ -1,5 +1,6 @@ { "name": "send_message_to_teams_channel", + "framework": "Langchain", "description": "Send message to Teams channel", "color": "linear-gradient(rgb(155,190,84), rgb(176,69,245))", "iconSrc": "https://raw.githubusercontent.com/gilbarbara/logos/main/logos/microsoft-teams.svg", diff --git a/packages/server/marketplaces/tools/SendGrid Email.json b/packages/server/marketplaces/tools/SendGrid Email.json index 8a6bf993..b454f2c5 100644 --- a/packages/server/marketplaces/tools/SendGrid Email.json +++ b/packages/server/marketplaces/tools/SendGrid Email.json @@ -1,5 +1,6 @@ { "name": "sendgrid_email", + "framework": "Langchain", "description": "Send email using SendGrid", "color": "linear-gradient(rgb(230,108,70), rgb(222,4,98))", "iconSrc": "https://raw.githubusercontent.com/gilbarbara/logos/main/logos/sendgrid-icon.svg", diff --git a/packages/server/src/index.ts b/packages/server/src/index.ts index a65fabf3..86595dc1 100644 --- a/packages/server/src/index.ts +++ b/packages/server/src/index.ts @@ -1237,6 +1237,7 @@ export class App { templateName: file.split('.json')[0], flowData: fileData.toString(), badge: fileDataObj?.badge, + framework: fileDataObj?.framework, categories: fileDataObj?.categories, type: 'Chatflow', description: fileDataObj?.description || '' @@ -1254,6 +1255,8 @@ export class App { ...fileDataObj, id: index, type: 'Tool', + framework: fileDataObj?.framework, + badge: fileDataObj?.badge, categories: '', templateName: file.split('.json')[0] } diff --git a/packages/ui/src/ui-component/table/MarketplaceTable.js b/packages/ui/src/ui-component/table/MarketplaceTable.js index 
714f10b0..4fe4aee6 100644 --- a/packages/ui/src/ui-component/table/MarketplaceTable.js +++ b/packages/ui/src/ui-component/table/MarketplaceTable.js @@ -10,6 +10,8 @@ import TableRow from '@mui/material/TableRow' import Paper from '@mui/material/Paper' import Chip from '@mui/material/Chip' import { Button, Typography } from '@mui/material' +import langchainPNG from 'assets/images/langchain.png' +import llamaIndexPNG from 'assets/images/llamaindex.png' const StyledTableCell = styled(TableCell)(({ theme }) => ({ [`&.${tableCellClasses.head}`]: { @@ -31,7 +33,7 @@ const StyledTableRow = styled(TableRow)(({ theme }) => ({ } })) -export const MarketplaceTable = ({ data, images, filterFunction, filterByBadge, filterByType }) => { +export const MarketplaceTable = ({ data, filterFunction, filterByBadge, filterByType, filterByFramework }) => { const navigate = useNavigate() const openTemplate = (selectedTemplate) => { if (selectedTemplate.flowData) { @@ -61,10 +63,13 @@ export const MarketplaceTable = ({ data, images, filterFunction, filterByBadge, + + {''} + Name - + Type @@ -83,9 +88,20 @@ export const MarketplaceTable = ({ data, images, filterFunction, filterByBadge, .filter(filterByBadge) .filter(filterByType) .filter(filterFunction) + .filter(filterByFramework) .map((row, index) => ( + + {row.framework === 'Langchain' && ( + langchain + )} + {row.framework === 'LlamaIndex' && ( + llamaIndex + )} + + + @@ -94,15 +110,15 @@ export const MarketplaceTable = ({ data, images, filterFunction, filterByBadge, - + {row.type} - + {row.description || ''} - +
- + {row.badge && row.badge @@ -152,8 +168,8 @@ export const MarketplaceTable = ({ data, images, filterFunction, filterByBadge, MarketplaceTable.propTypes = { data: PropTypes.array, - images: PropTypes.object, filterFunction: PropTypes.func, filterByBadge: PropTypes.func, - filterByType: PropTypes.func + filterByType: PropTypes.func, + filterByFramework: PropTypes.func } diff --git a/packages/ui/src/views/marketplaces/index.js b/packages/ui/src/views/marketplaces/index.js index f2055da6..68258a47 100644 --- a/packages/ui/src/views/marketplaces/index.js +++ b/packages/ui/src/views/marketplaces/index.js @@ -19,10 +19,11 @@ import { Select, OutlinedInput, Checkbox, - ListItemText + ListItemText, + Button } from '@mui/material' import { useTheme } from '@mui/material/styles' -import { IconLayoutGrid, IconList, IconSearch } from '@tabler/icons' +import { IconChevronsDown, IconChevronsUp, IconLayoutGrid, IconList, IconSearch } from '@tabler/icons' // project imports import MainCard from 'ui-component/cards/MainCard' @@ -69,6 +70,7 @@ const ITEM_HEIGHT = 48 const ITEM_PADDING_TOP = 8 const badges = ['POPULAR', 'NEW'] const types = ['Chatflow', 'Tool'] +const framework = ['Langchain', 'LlamaIndex'] const MenuProps = { PaperProps: { style: { @@ -98,7 +100,8 @@ const Marketplace = () => { const [badgeFilter, setBadgeFilter] = useState([]) const [typeFilter, setTypeFilter] = useState([]) - + const [frameworkFilter, setFrameworkFilter] = useState([]) + const [open, setOpen] = useState(false) const handleBadgeFilterChange = (event) => { const { target: { value } @@ -117,6 +120,15 @@ const Marketplace = () => { typeof value === 'string' ? value.split(',') : value ) } + const handleFrameworkFilterChange = (event) => { + const { + target: { value } + } = event + setFrameworkFilter( + // On autofill we get a stringified value. + typeof value === 'string' ? 
value.split(',') : value + ) + } const handleViewChange = (event, nextView) => { localStorage.setItem('mpDisplayStyle', nextView) @@ -143,6 +155,10 @@ const Marketplace = () => { return typeFilter.length > 0 ? typeFilter.includes(data.type) : true } + function filterByFramework(data) { + return frameworkFilter.length > 0 ? frameworkFilter.includes(data.framework) : true + } + const onUseTemplate = (selectedTool) => { const dialogProp = { title: 'Add New Tool', @@ -224,9 +240,11 @@ const Marketplace = () => {

Marketplace

{ ) }} /> - - - Type - - - - - - Tag - - - + @@ -313,6 +293,93 @@ const Marketplace = () => { + {open && ( + + + + + Tag + + + + + + Type + + + + + + Framework + + + + + + )} + {!isLoading && (!view || view === 'card') && getAllTemplatesMarketplacesApi.data && ( <> @@ -320,6 +387,7 @@ const Marketplace = () => { .filter(filterByBadge) .filter(filterByType) .filter(filterFlows) + .filter(filterByFramework) .map((data, index) => ( {data.badge && ( @@ -351,10 +419,10 @@ const Marketplace = () => { )} From 8e80b582bbb5a91f7c6383af23ee4343b56d5a9e Mon Sep 17 00:00:00 2001 From: chungyau97 Date: Tue, 6 Feb 2024 12:02:35 +0800 Subject: [PATCH 21/45] update return msg to correct url --- packages/server/src/index.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/server/src/index.ts b/packages/server/src/index.ts index dbb5717d..4d06f297 100644 --- a/packages/server/src/index.ts +++ b/packages/server/src/index.ts @@ -184,7 +184,7 @@ export class App { this.app.get('/api/v1/ip', (request, response) => { response.send({ ip: request.ip, - msg: 'See the returned IP address in the response. If it matches your current IP address ( which you can get by going to http://ip.nfriedly.com/ or https://api.ipify.org/ ), then the number of proxies is correct and the rate limiter should now work correctly. If not, increase the number of proxies by 1 until the IP address matches your own. Visit https://docs.flowiseai.com/deployment#rate-limit-setup-guide for more information.' + msg: 'Check the returned IP address in the response. If it matches your current IP address ( which you can get by going to http://ip.nfriedly.com/ or https://api.ipify.org/ ), then the number of proxies is correct and the rate limiter should now work correctly. If not, increase the number of proxies by 1 until the IP address matches your own. Visit https://docs.flowiseai.com/configuration/rate-limit#rate-limit-setup-guide for more information.' 
}) }) From 7486d33237c46bc0112fb7b3fb3e48f6cd1c89bf Mon Sep 17 00:00:00 2001 From: Ilango Date: Tue, 6 Feb 2024 10:36:27 +0530 Subject: [PATCH 22/45] Fix issue with relativeLinksMethod and limit not applying to manage links --- packages/server/src/index.ts | 3 ++- packages/ui/src/api/scraper.js | 6 +++--- .../src/ui-component/dialog/ManageScrapedLinksDialog.js | 2 +- packages/ui/src/views/canvas/NodeInputHandler.js | 8 ++++++-- 4 files changed, 12 insertions(+), 7 deletions(-) diff --git a/packages/server/src/index.ts b/packages/server/src/index.ts index dbb5717d..7ceba556 100644 --- a/packages/server/src/index.ts +++ b/packages/server/src/index.ts @@ -1148,8 +1148,9 @@ export class App { this.app.get('/api/v1/fetch-links', async (req: Request, res: Response) => { const url = decodeURIComponent(req.query.url as string) const relativeLinksMethod = req.query.relativeLinksMethod as string + const limit = parseInt(req.query.limit as string) if (process.env.DEBUG === 'true') console.info(`Start ${relativeLinksMethod}`) - const links: string[] = relativeLinksMethod === 'webCrawl' ? await webCrawl(url, 0) : await xmlScrape(url, 0) + const links: string[] = relativeLinksMethod === 'webCrawl' ? 
await webCrawl(url, limit) : await xmlScrape(url, limit) res.json({ status: 'OK', links }) }) diff --git a/packages/ui/src/api/scraper.js b/packages/ui/src/api/scraper.js index 382a9263..89333156 100644 --- a/packages/ui/src/api/scraper.js +++ b/packages/ui/src/api/scraper.js @@ -1,8 +1,8 @@ import client from './client' -const fetchAllLinks = (url, relativeLinksMethod) => - client.get(`/fetch-links?url=${encodeURIComponent(url)}&relativeLinksMethod=${relativeLinksMethod}`) +const fetchLinks = (url, relativeLinksMethod, relativeLinksLimit) => + client.get(`/fetch-links?url=${encodeURIComponent(url)}&relativeLinksMethod=${relativeLinksMethod}&limit=${relativeLinksLimit}`) export default { - fetchAllLinks + fetchLinks } diff --git a/packages/ui/src/ui-component/dialog/ManageScrapedLinksDialog.js b/packages/ui/src/ui-component/dialog/ManageScrapedLinksDialog.js index a707d82e..9a846ce9 100644 --- a/packages/ui/src/ui-component/dialog/ManageScrapedLinksDialog.js +++ b/packages/ui/src/ui-component/dialog/ManageScrapedLinksDialog.js @@ -53,7 +53,7 @@ const ManageScrapedLinksDialog = ({ show, dialogProps, onCancel, onSave }) => { const handleFetchLinks = async () => { setLoading(true) - const fetchLinksResp = await scraperApi.fetchAllLinks(url, 'webCrawl') + const fetchLinksResp = await scraperApi.fetchLinks(url, dialogProps.relativeLinksMethod, dialogProps.limit) if (fetchLinksResp.data) { setSelectedLinks(fetchLinksResp.data.links) } diff --git a/packages/ui/src/views/canvas/NodeInputHandler.js b/packages/ui/src/views/canvas/NodeInputHandler.js index bc877c9f..560fb34e 100644 --- a/packages/ui/src/views/canvas/NodeInputHandler.js +++ b/packages/ui/src/views/canvas/NodeInputHandler.js @@ -91,9 +91,11 @@ const NodeInputHandler = ({ inputAnchor, inputParam, data, disabled = false, isA } } - const onManageLinksDialogClicked = (url, selectedLinks) => { + const onManageLinksDialogClicked = (url, selectedLinks, relativeLinksMethod, limit) => { const dialogProps = { url, + 
relativeLinksMethod, + limit, selectedLinks, confirmButtonName: 'Save', cancelButtonName: 'Cancel' @@ -475,7 +477,9 @@ const NodeInputHandler = ({ inputAnchor, inputParam, data, disabled = false, isA onClick={() => onManageLinksDialogClicked( data.inputs[inputParam.name] ?? inputParam.default ?? '', - data.inputs.selectedLinks + data.inputs.selectedLinks, + data.inputs['relativeLinksMethod'] ?? 'webCrawl', + parseInt(data.inputs['limit']) ?? 0 ) } > From c2ae7e138cbf8a4355cafc109df5b7b5a0eb0a21 Mon Sep 17 00:00:00 2001 From: Ilango Date: Tue, 6 Feb 2024 14:40:19 +0530 Subject: [PATCH 23/45] Apply limit to selectedLinks even when relative links method is not specified --- packages/components/nodes/documentloaders/Cheerio/Cheerio.ts | 2 +- .../components/nodes/documentloaders/Playwright/Playwright.ts | 2 +- .../components/nodes/documentloaders/Puppeteer/Puppeteer.ts | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/packages/components/nodes/documentloaders/Cheerio/Cheerio.ts b/packages/components/nodes/documentloaders/Cheerio/Cheerio.ts index 3eba0ece..6af1f9a9 100644 --- a/packages/components/nodes/documentloaders/Cheerio/Cheerio.ts +++ b/packages/components/nodes/documentloaders/Cheerio/Cheerio.ts @@ -143,7 +143,7 @@ class Cheerio_DocumentLoaders implements INode { } else if (selectedLinks && selectedLinks.length > 0) { if (process.env.DEBUG === 'true') options.logger.info(`pages: ${JSON.stringify(selectedLinks)}, length: ${selectedLinks.length}`) - for (const page of selectedLinks) { + for (const page of selectedLinks.slice(0, limit)) { docs.push(...(await cheerioLoader(page))) } } else { diff --git a/packages/components/nodes/documentloaders/Playwright/Playwright.ts b/packages/components/nodes/documentloaders/Playwright/Playwright.ts index 2de166ce..2ba60d0f 100644 --- a/packages/components/nodes/documentloaders/Playwright/Playwright.ts +++ b/packages/components/nodes/documentloaders/Playwright/Playwright.ts @@ -184,7 +184,7 @@ class 
Playwright_DocumentLoaders implements INode { } else if (selectedLinks && selectedLinks.length > 0) { if (process.env.DEBUG === 'true') options.logger.info(`pages: ${JSON.stringify(selectedLinks)}, length: ${selectedLinks.length}`) - for (const page of selectedLinks) { + for (const page of selectedLinks.slice(0, limit)) { docs.push(...(await playwrightLoader(page))) } } else { diff --git a/packages/components/nodes/documentloaders/Puppeteer/Puppeteer.ts b/packages/components/nodes/documentloaders/Puppeteer/Puppeteer.ts index 3d28f310..1f8c8f3f 100644 --- a/packages/components/nodes/documentloaders/Puppeteer/Puppeteer.ts +++ b/packages/components/nodes/documentloaders/Puppeteer/Puppeteer.ts @@ -185,7 +185,7 @@ class Puppeteer_DocumentLoaders implements INode { } else if (selectedLinks && selectedLinks.length > 0) { if (process.env.DEBUG === 'true') options.logger.info(`pages: ${JSON.stringify(selectedLinks)}, length: ${selectedLinks.length}`) - for (const page of selectedLinks) { + for (const page of selectedLinks.slice(0, limit)) { docs.push(...(await puppeteerLoader(page))) } } else { From 90e6a804e462dc9ea1db6fd598840830b142364b Mon Sep 17 00:00:00 2001 From: Kenny Vaneetvelde Date: Tue, 6 Feb 2024 13:02:30 +0100 Subject: [PATCH 24/45] Make the zod schema a main parameter instead of an additional param --- .../StructuredOutputParserAdvanced.ts | 5 +- .../Advanced Structured Output Parser.json | 137 +++++++++--------- 2 files changed, 70 insertions(+), 72 deletions(-) diff --git a/packages/components/nodes/outputparsers/StructuredOutputParserAdvanced/StructuredOutputParserAdvanced.ts b/packages/components/nodes/outputparsers/StructuredOutputParserAdvanced/StructuredOutputParserAdvanced.ts index b0fad136..e7fe8ea7 100644 --- a/packages/components/nodes/outputparsers/StructuredOutputParserAdvanced/StructuredOutputParserAdvanced.ts +++ b/packages/components/nodes/outputparsers/StructuredOutputParserAdvanced/StructuredOutputParserAdvanced.ts @@ -46,9 +46,8 @@ class 
AdvancedStructuredOutputParser implements INode { "Action", "Comedy", "Drama", "Fantasy", "Horror", "Mystery", "Romance", "Science Fiction", "Thriller", "Documentary" ]).array().max(2), // Array of genres, max of 2 from the defined enum - shortDescription: z.string().max(500) // Short description, max 150 characters -})`, - additionalParams: true + shortDescription: z.string().max(500) // Short description, max 500 characters +})` } ] } diff --git a/packages/server/marketplaces/chatflows/Advanced Structured Output Parser.json b/packages/server/marketplaces/chatflows/Advanced Structured Output Parser.json index a48f5b2a..3fd71988 100644 --- a/packages/server/marketplaces/chatflows/Advanced Structured Output Parser.json +++ b/packages/server/marketplaces/chatflows/Advanced Structured Output Parser.json @@ -169,67 +169,6 @@ }, "dragging": false }, - { - "width": 300, - "height": 329, - "id": "advancedStructuredOutputParser_0", - "position": { - "x": 494.20163170226266, - "y": 568.3420937517054 - }, - "type": "customNode", - "data": { - "id": "advancedStructuredOutputParser_0", - "label": "Advanced Structured Output Parser", - "version": 1, - "name": "advancedStructuredOutputParser", - "type": "AdvancedStructuredOutputParser", - "baseClasses": ["AdvancedStructuredOutputParser", "BaseLLMOutputParser", "Runnable"], - "category": "Output Parsers", - "description": "Parse the output of an LLM call into a given structure by providing a Zod schema.", - "inputParams": [ - { - "label": "Autofix", - "name": "autofixParser", - "type": "boolean", - "optional": true, - "description": "In the event that the first call fails, will make another call to the model to fix any errors.", - "id": "advancedStructuredOutputParser_0-input-autofixParser-boolean" - }, - { - "label": "Example JSON", - "name": "exampleJson", - "type": "string", - "description": "Zod schema for the output of the model", - "rows": 10, - "default": "z.object({\n title: z.string(), // Title of the movie as a string\n 
yearOfRelease: z.number().int(), // Release year as an integer number,\n genres: z.enum([\n \"Action\", \"Comedy\", \"Drama\", \"Fantasy\", \"Horror\",\n \"Mystery\", \"Romance\", \"Science Fiction\", \"Thriller\", \"Documentary\"\n ]).array().max(2), // Array of genres, max of 2 from the defined enum\n shortDescription: z.string().max(500) // Short description, max 150 characters\n})", - "additionalParams": true, - "id": "advancedStructuredOutputParser_0-input-exampleJson-string" - } - ], - "inputAnchors": [], - "inputs": { - "autofixParser": true, - "exampleJson": "z.object({\n title: z.string(), // Title of the movie as a string\n yearOfRelease: z.number().int(), // Release year as an integer number,\n genres: z.enum([\n \"Action\", \"Comedy\", \"Drama\", \"Fantasy\", \"Horror\",\n \"Mystery\", \"Romance\", \"Science Fiction\", \"Thriller\", \"Documentary\"\n ]).array().max(2), // Array of genres, max of 2 from the defined enum\n shortDescription: z.string().max(500) // Short description, max 150 characters\n})" - }, - "outputAnchors": [ - { - "id": "advancedStructuredOutputParser_0-output-advancedStructuredOutputParser-AdvancedStructuredOutputParser|BaseLLMOutputParser|Runnable", - "name": "advancedStructuredOutputParser", - "label": "AdvancedStructuredOutputParser", - "type": "AdvancedStructuredOutputParser | BaseLLMOutputParser | Runnable" - } - ], - "outputs": {}, - "selected": false - }, - "selected": false, - "positionAbsolute": { - "x": 494.20163170226266, - "y": 568.3420937517054 - }, - "dragging": false - }, { "width": 300, "height": 576, @@ -431,6 +370,66 @@ "y": -355.71028569475095 }, "dragging": false + }, + { + "width": 300, + "height": 454, + "id": "advancedStructuredOutputParser_0", + "position": { + "x": 489.3637511211284, + "y": 580.0628053662244 + }, + "type": "customNode", + "data": { + "id": "advancedStructuredOutputParser_0", + "label": "Advanced Structured Output Parser", + "version": 1, + "name": "advancedStructuredOutputParser", + "type": 
"AdvancedStructuredOutputParser", + "baseClasses": ["AdvancedStructuredOutputParser", "BaseLLMOutputParser", "Runnable"], + "category": "Output Parsers", + "description": "Parse the output of an LLM call into a given structure by providing a Zod schema.", + "inputParams": [ + { + "label": "Autofix", + "name": "autofixParser", + "type": "boolean", + "optional": true, + "description": "In the event that the first call fails, will make another call to the model to fix any errors.", + "id": "advancedStructuredOutputParser_0-input-autofixParser-boolean" + }, + { + "label": "Example JSON", + "name": "exampleJson", + "type": "string", + "description": "Zod schema for the output of the model", + "rows": 10, + "default": "z.object({\n title: z.string(), // Title of the movie as a string\n yearOfRelease: z.number().int(), // Release year as an integer number,\n genres: z.enum([\n \"Action\", \"Comedy\", \"Drama\", \"Fantasy\", \"Horror\",\n \"Mystery\", \"Romance\", \"Science Fiction\", \"Thriller\", \"Documentary\"\n ]).array().max(2), // Array of genres, max of 2 from the defined enum\n shortDescription: z.string().max(500) // Short description, max 500 characters\n})", + "id": "advancedStructuredOutputParser_0-input-exampleJson-string" + } + ], + "inputAnchors": [], + "inputs": { + "autofixParser": "", + "exampleJson": "z.object({\n title: z.string(), // Title of the movie as a string\n yearOfRelease: z.number().int(), // Release year as an integer number,\n genres: z.enum([\n \"Action\", \"Comedy\", \"Drama\", \"Fantasy\", \"Horror\",\n \"Mystery\", \"Romance\", \"Science Fiction\", \"Thriller\", \"Documentary\"\n ]).array().max(2), // Array of genres, max of 2 from the defined enum\n shortDescription: z.string().max(500) // Short description, max 500 characters\n})" + }, + "outputAnchors": [ + { + "id": "advancedStructuredOutputParser_0-output-advancedStructuredOutputParser-AdvancedStructuredOutputParser|BaseLLMOutputParser|Runnable", + "name": 
"advancedStructuredOutputParser", + "label": "AdvancedStructuredOutputParser", + "type": "AdvancedStructuredOutputParser | BaseLLMOutputParser | Runnable" + } + ], + "outputs": {}, + "selected": false + }, + "selected": false, + "dragging": false, + "positionAbsolute": { + "x": 489.3637511211284, + "y": 580.0628053662244 + } } ], "edges": [ @@ -445,14 +444,6 @@ "label": "" } }, - { - "source": "advancedStructuredOutputParser_0", - "sourceHandle": "advancedStructuredOutputParser_0-output-advancedStructuredOutputParser-AdvancedStructuredOutputParser|BaseLLMOutputParser|Runnable", - "target": "llmChain_0", - "targetHandle": "llmChain_0-input-outputParser-BaseLLMOutputParser", - "type": "buttonedge", - "id": "advancedStructuredOutputParser_0-advancedStructuredOutputParser_0-output-advancedStructuredOutputParser-AdvancedStructuredOutputParser|BaseLLMOutputParser|Runnable-llmChain_0-llmChain_0-input-outputParser-BaseLLMOutputParser" - }, { "source": "chatOpenAI_0", "sourceHandle": "chatOpenAI_0-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel|Runnable", @@ -460,6 +451,14 @@ "targetHandle": "llmChain_0-input-model-BaseLanguageModel", "type": "buttonedge", "id": "chatOpenAI_0-chatOpenAI_0-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel|Runnable-llmChain_0-llmChain_0-input-model-BaseLanguageModel" + }, + { + "source": "advancedStructuredOutputParser_0", + "sourceHandle": "advancedStructuredOutputParser_0-output-advancedStructuredOutputParser-AdvancedStructuredOutputParser|BaseLLMOutputParser|Runnable", + "target": "llmChain_0", + "targetHandle": "llmChain_0-input-outputParser-BaseLLMOutputParser", + "type": "buttonedge", + "id": "advancedStructuredOutputParser_0-advancedStructuredOutputParser_0-output-advancedStructuredOutputParser-AdvancedStructuredOutputParser|BaseLLMOutputParser|Runnable-llmChain_0-llmChain_0-input-outputParser-BaseLLMOutputParser" } ] } From 2bb2a7588a0172a937bde34aefce5173e24f34a7 Mon Sep 17 00:00:00 2001 From: Ilyes Tascou 
Date: Tue, 6 Feb 2024 14:25:40 +0100 Subject: [PATCH 25/45] add recursive option for folder-loader --- .../nodes/documentloaders/Folder/Folder.ts | 96 +++++++++++-------- 1 file changed, 54 insertions(+), 42 deletions(-) diff --git a/packages/components/nodes/documentloaders/Folder/Folder.ts b/packages/components/nodes/documentloaders/Folder/Folder.ts index f8346e3c..ab770562 100644 --- a/packages/components/nodes/documentloaders/Folder/Folder.ts +++ b/packages/components/nodes/documentloaders/Folder/Folder.ts @@ -34,6 +34,12 @@ class Folder_DocumentLoaders implements INode { type: 'string', placeholder: '' }, + { + label: 'Recursive', + name: 'recursive', + type: 'boolean', + additionalParams: false + }, { label: 'Text Splitter', name: 'textSplitter', @@ -54,49 +60,55 @@ class Folder_DocumentLoaders implements INode { const textSplitter = nodeData.inputs?.textSplitter as TextSplitter const folderPath = nodeData.inputs?.folderPath as string const metadata = nodeData.inputs?.metadata + const recursive = nodeData.inputs?.recursive as boolean - const loader = new DirectoryLoader(folderPath, { - '.json': (path) => new JSONLoader(path), - '.txt': (path) => new TextLoader(path), - '.csv': (path) => new CSVLoader(path), - '.docx': (path) => new DocxLoader(path), - // @ts-ignore - '.pdf': (path) => new PDFLoader(path, { pdfjs: () => import('pdf-parse/lib/pdf.js/v1.10.100/build/pdf.js') }), - '.aspx': (path) => new TextLoader(path), - '.asp': (path) => new TextLoader(path), - '.cpp': (path) => new TextLoader(path), // C++ - '.c': (path) => new TextLoader(path), - '.cs': (path) => new TextLoader(path), - '.css': (path) => new TextLoader(path), - '.go': (path) => new TextLoader(path), // Go - '.h': (path) => new TextLoader(path), // C++ Header files - '.kt': (path) => new TextLoader(path), // Kotlin - '.java': (path) => new TextLoader(path), // Java - '.js': (path) => new TextLoader(path), // JavaScript - '.less': (path) => new TextLoader(path), // Less files - '.ts': (path) 
=> new TextLoader(path), // TypeScript - '.php': (path) => new TextLoader(path), // PHP - '.proto': (path) => new TextLoader(path), // Protocol Buffers - '.python': (path) => new TextLoader(path), // Python - '.py': (path) => new TextLoader(path), // Python - '.rst': (path) => new TextLoader(path), // reStructuredText - '.ruby': (path) => new TextLoader(path), // Ruby - '.rb': (path) => new TextLoader(path), // Ruby - '.rs': (path) => new TextLoader(path), // Rust - '.scala': (path) => new TextLoader(path), // Scala - '.sc': (path) => new TextLoader(path), // Scala - '.scss': (path) => new TextLoader(path), // Sass - '.sol': (path) => new TextLoader(path), // Solidity - '.sql': (path) => new TextLoader(path), //SQL - '.swift': (path) => new TextLoader(path), // Swift - '.markdown': (path) => new TextLoader(path), // Markdown - '.md': (path) => new TextLoader(path), // Markdown - '.tex': (path) => new TextLoader(path), // LaTeX - '.ltx': (path) => new TextLoader(path), // LaTeX - '.html': (path) => new TextLoader(path), // HTML - '.vb': (path) => new TextLoader(path), // Visual Basic - '.xml': (path) => new TextLoader(path) // XML - }) + console.log('Recursive: ', recursive) + const loader = new DirectoryLoader( + folderPath, + { + '.json': (path) => new JSONLoader(path), + '.txt': (path) => new TextLoader(path), + '.csv': (path) => new CSVLoader(path), + '.docx': (path) => new DocxLoader(path), + // @ts-ignore + '.pdf': (path) => new PDFLoader(path, { pdfjs: () => import('pdf-parse/lib/pdf.js/v1.10.100/build/pdf.js') }), + '.aspx': (path) => new TextLoader(path), + '.asp': (path) => new TextLoader(path), + '.cpp': (path) => new TextLoader(path), // C++ + '.c': (path) => new TextLoader(path), + '.cs': (path) => new TextLoader(path), + '.css': (path) => new TextLoader(path), + '.go': (path) => new TextLoader(path), // Go + '.h': (path) => new TextLoader(path), // C++ Header files + '.kt': (path) => new TextLoader(path), // Kotlin + '.java': (path) => new 
TextLoader(path), // Java + '.js': (path) => new TextLoader(path), // JavaScript + '.less': (path) => new TextLoader(path), // Less files + '.ts': (path) => new TextLoader(path), // TypeScript + '.php': (path) => new TextLoader(path), // PHP + '.proto': (path) => new TextLoader(path), // Protocol Buffers + '.python': (path) => new TextLoader(path), // Python + '.py': (path) => new TextLoader(path), // Python + '.rst': (path) => new TextLoader(path), // reStructuredText + '.ruby': (path) => new TextLoader(path), // Ruby + '.rb': (path) => new TextLoader(path), // Ruby + '.rs': (path) => new TextLoader(path), // Rust + '.scala': (path) => new TextLoader(path), // Scala + '.sc': (path) => new TextLoader(path), // Scala + '.scss': (path) => new TextLoader(path), // Sass + '.sol': (path) => new TextLoader(path), // Solidity + '.sql': (path) => new TextLoader(path), //SQL + '.swift': (path) => new TextLoader(path), // Swift + '.markdown': (path) => new TextLoader(path), // Markdown + '.md': (path) => new TextLoader(path), // Markdown + '.tex': (path) => new TextLoader(path), // LaTeX + '.ltx': (path) => new TextLoader(path), // LaTeX + '.html': (path) => new TextLoader(path), // HTML + '.vb': (path) => new TextLoader(path), // Visual Basic + '.xml': (path) => new TextLoader(path) // XML + }, + recursive + ) let docs = [] if (textSplitter) { From 19fb13baf05642a09e10a57ed302bca8e339e2dd Mon Sep 17 00:00:00 2001 From: Ilyes Tascou Date: Tue, 6 Feb 2024 14:36:32 +0100 Subject: [PATCH 26/45] fix for linting --- packages/components/nodes/documentloaders/Folder/Folder.ts | 1 - 1 file changed, 1 deletion(-) diff --git a/packages/components/nodes/documentloaders/Folder/Folder.ts b/packages/components/nodes/documentloaders/Folder/Folder.ts index ab770562..fb3db8e8 100644 --- a/packages/components/nodes/documentloaders/Folder/Folder.ts +++ b/packages/components/nodes/documentloaders/Folder/Folder.ts @@ -62,7 +62,6 @@ class Folder_DocumentLoaders implements INode { const metadata = 
nodeData.inputs?.metadata const recursive = nodeData.inputs?.recursive as boolean - console.log('Recursive: ', recursive) const loader = new DirectoryLoader( folderPath, { From f14039736dd5147418e2f28425f3ffc64347daa2 Mon Sep 17 00:00:00 2001 From: vinodkiran Date: Tue, 6 Feb 2024 11:29:50 -0500 Subject: [PATCH 27/45] Marketplace : removing icon column --- .../ui-component/table/MarketplaceTable.js | 23 ++++--------------- 1 file changed, 4 insertions(+), 19 deletions(-) diff --git a/packages/ui/src/ui-component/table/MarketplaceTable.js b/packages/ui/src/ui-component/table/MarketplaceTable.js index 4fe4aee6..ebbc8537 100644 --- a/packages/ui/src/ui-component/table/MarketplaceTable.js +++ b/packages/ui/src/ui-component/table/MarketplaceTable.js @@ -10,8 +10,6 @@ import TableRow from '@mui/material/TableRow' import Paper from '@mui/material/Paper' import Chip from '@mui/material/Chip' import { Button, Typography } from '@mui/material' -import langchainPNG from 'assets/images/langchain.png' -import llamaIndexPNG from 'assets/images/llamaindex.png' const StyledTableCell = styled(TableCell)(({ theme }) => ({ [`&.${tableCellClasses.head}`]: { @@ -63,9 +61,6 @@ export const MarketplaceTable = ({ data, filterFunction, filterByBadge, filterBy
- - {''} - Name @@ -92,16 +87,6 @@ export const MarketplaceTable = ({ data, filterFunction, filterByBadge, filterBy .map((row, index) => ( - - {row.framework === 'Langchain' && ( - langchain - )} - {row.framework === 'LlamaIndex' && ( - llamaIndex - )} - - - @@ -110,15 +95,15 @@ export const MarketplaceTable = ({ data, filterFunction, filterByBadge, filterBy - + {row.type} - + {row.description || ''} - +
- + {row.badge && row.badge From 4be28c4050135b6fc2354e43efccf30d3787acbc Mon Sep 17 00:00:00 2001 From: chungyau97 Date: Wed, 7 Feb 2024 19:32:48 +0800 Subject: [PATCH 28/45] add finish log --- packages/server/src/index.ts | 1 + 1 file changed, 1 insertion(+) diff --git a/packages/server/src/index.ts b/packages/server/src/index.ts index 7ceba556..bd44b739 100644 --- a/packages/server/src/index.ts +++ b/packages/server/src/index.ts @@ -1151,6 +1151,7 @@ export class App { const limit = parseInt(req.query.limit as string) if (process.env.DEBUG === 'true') console.info(`Start ${relativeLinksMethod}`) const links: string[] = relativeLinksMethod === 'webCrawl' ? await webCrawl(url, limit) : await xmlScrape(url, limit) + if (process.env.DEBUG === 'true') console.info(`Finish ${relativeLinksMethod}`) res.json({ status: 'OK', links }) }) From 08c07802f5680c6de447ee1577dda498ba67699c Mon Sep 17 00:00:00 2001 From: vinodkiran Date: Wed, 7 Feb 2024 07:08:55 -0500 Subject: [PATCH 29/45] Fix for Tool Opening. 
--- .../ui-component/table/MarketplaceTable.js | 22 ++++--------------- packages/ui/src/views/marketplaces/index.js | 2 ++ 2 files changed, 6 insertions(+), 18 deletions(-) diff --git a/packages/ui/src/ui-component/table/MarketplaceTable.js b/packages/ui/src/ui-component/table/MarketplaceTable.js index ebbc8537..3b66409b 100644 --- a/packages/ui/src/ui-component/table/MarketplaceTable.js +++ b/packages/ui/src/ui-component/table/MarketplaceTable.js @@ -1,5 +1,4 @@ import PropTypes from 'prop-types' -import { useNavigate } from 'react-router-dom' import { styled } from '@mui/material/styles' import Table from '@mui/material/Table' import TableBody from '@mui/material/TableBody' @@ -31,8 +30,7 @@ const StyledTableRow = styled(TableRow)(({ theme }) => ({ } })) -export const MarketplaceTable = ({ data, filterFunction, filterByBadge, filterByType, filterByFramework }) => { - const navigate = useNavigate() +export const MarketplaceTable = ({ data, filterFunction, filterByBadge, filterByType, filterByFramework, goToCanvas, goToTool }) => { const openTemplate = (selectedTemplate) => { if (selectedTemplate.flowData) { goToCanvas(selectedTemplate) @@ -41,20 +39,6 @@ export const MarketplaceTable = ({ data, filterFunction, filterByBadge, filterBy } } - const goToTool = (selectedTool) => { - const dialogProp = { - title: selectedTool.templateName, - type: 'TEMPLATE', - data: selectedTool - } - setToolDialogProps(dialogProp) - setShowToolDialog(true) - } - - const goToCanvas = (selectedChatflow) => { - navigate(`/marketplace/${selectedChatflow.id}`, { state: selectedChatflow }) - } - return ( <> @@ -156,5 +140,7 @@ MarketplaceTable.propTypes = { filterFunction: PropTypes.func, filterByBadge: PropTypes.func, filterByType: PropTypes.func, - filterByFramework: PropTypes.func + filterByFramework: PropTypes.func, + goToTool: PropTypes.func, + goToCanvas: PropTypes.func } diff --git a/packages/ui/src/views/marketplaces/index.js b/packages/ui/src/views/marketplaces/index.js index 
68258a47..e5a65cb9 100644 --- a/packages/ui/src/views/marketplaces/index.js +++ b/packages/ui/src/views/marketplaces/index.js @@ -423,6 +423,8 @@ const Marketplace = () => { filterByType={filterByType} filterByBadge={filterByBadge} filterByFramework={filterByFramework} + goToTool={goToTool} + goToCanvas={goToCanvas} /> )} From 3f0f0e4d28ff0aad1a74720153cf8235b1067161 Mon Sep 17 00:00:00 2001 From: Jared Tracy Date: Wed, 7 Feb 2024 20:18:06 -0600 Subject: [PATCH 30/45] DynamoDB Chat Memory fix Fixes #1624 Please note comment in class BufferMemoryExtended for further discussion, if necessary --- .../components/nodes/memory/DynamoDb/DynamoDb.ts | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/packages/components/nodes/memory/DynamoDb/DynamoDb.ts b/packages/components/nodes/memory/DynamoDb/DynamoDb.ts index 91c1d369..864ff7c8 100644 --- a/packages/components/nodes/memory/DynamoDb/DynamoDb.ts +++ b/packages/components/nodes/memory/DynamoDb/DynamoDb.ts @@ -142,18 +142,26 @@ interface DynamoDBSerializedChatMessage { } class BufferMemoryExtended extends FlowiseMemory implements MemoryMethods { + tableName = '' sessionId = '' dynamodbClient: DynamoDBClient + dynamoKey = '' + partitionKey = '' constructor(fields: BufferMemoryInput & BufferMemoryExtendedInput) { super(fields) this.sessionId = fields.sessionId this.dynamodbClient = fields.dynamodbClient + + // These fields are coming in on chatHistory, but should they be on the dynamodbClient instead? 
+ this.partitionKey = (fields?.chatHistory as unknown as { partitionKey: string }).partitionKey + this.dynamoKey = (fields?.chatHistory as unknown as { dynamoKey: string }).dynamoKey + this.tableName = (fields?.chatHistory as unknown as { tableName: string }).tableName } overrideDynamoKey(overrideSessionId = '') { - const existingDynamoKey = (this as any).dynamoKey - const partitionKey = (this as any).partitionKey + const existingDynamoKey = this.dynamoKey + const partitionKey = this.partitionKey let newDynamoKey: Record = {} @@ -210,7 +218,7 @@ class BufferMemoryExtended extends FlowiseMemory implements MemoryMethods { if (!this.dynamodbClient) return [] const dynamoKey = overrideSessionId ? this.overrideDynamoKey(overrideSessionId) : (this as any).dynamoKey - const tableName = (this as any).tableName + const tableName = this.tableName const messageAttributeName = (this as any).messageAttributeName const params: GetItemCommandInput = { From bc054d2fe1aa06de3fb44cc865fa39336041f103 Mon Sep 17 00:00:00 2001 From: Henry Date: Thu, 8 Feb 2024 11:55:30 +0800 Subject: [PATCH 31/45] add fix for override session id --- .../components/nodes/memory/MongoDBMemory/MongoDBMemory.ts | 6 +++--- .../nodes/memory/MotorheadMemory/MotorheadMemory.ts | 6 +++--- .../memory/RedisBackedChatMemory/RedisBackedChatMemory.ts | 6 +++--- .../UpstashRedisBackedChatMemory.ts | 6 +++--- packages/components/nodes/memory/ZepMemory/ZepMemory.ts | 6 +++--- 5 files changed, 15 insertions(+), 15 deletions(-) diff --git a/packages/components/nodes/memory/MongoDBMemory/MongoDBMemory.ts b/packages/components/nodes/memory/MongoDBMemory/MongoDBMemory.ts index b7309dcd..e2ee9f44 100644 --- a/packages/components/nodes/memory/MongoDBMemory/MongoDBMemory.ts +++ b/packages/components/nodes/memory/MongoDBMemory/MongoDBMemory.ts @@ -154,7 +154,7 @@ class BufferMemoryExtended extends FlowiseMemory implements MemoryMethods { async getChatMessages(overrideSessionId = '', returnBaseMessages = false): Promise { if 
(!this.collection) return [] - const id = overrideSessionId ?? this.sessionId + const id = overrideSessionId ? overrideSessionId : this.sessionId const document = await this.collection.findOne({ sessionId: id }) const messages = document?.messages || [] const baseMessages = messages.map(mapStoredMessageToChatMessage) @@ -164,7 +164,7 @@ class BufferMemoryExtended extends FlowiseMemory implements MemoryMethods { async addChatMessages(msgArray: { text: string; type: MessageType }[], overrideSessionId = ''): Promise { if (!this.collection) return - const id = overrideSessionId ?? this.sessionId + const id = overrideSessionId ? overrideSessionId : this.sessionId const input = msgArray.find((msg) => msg.type === 'userMessage') const output = msgArray.find((msg) => msg.type === 'apiMessage') @@ -196,7 +196,7 @@ class BufferMemoryExtended extends FlowiseMemory implements MemoryMethods { async clearChatMessages(overrideSessionId = ''): Promise { if (!this.collection) return - const id = overrideSessionId ?? this.sessionId + const id = overrideSessionId ? overrideSessionId : this.sessionId await this.collection.deleteOne({ sessionId: id }) await this.clear() } diff --git a/packages/components/nodes/memory/MotorheadMemory/MotorheadMemory.ts b/packages/components/nodes/memory/MotorheadMemory/MotorheadMemory.ts index 19506fc1..0b8f3800 100644 --- a/packages/components/nodes/memory/MotorheadMemory/MotorheadMemory.ts +++ b/packages/components/nodes/memory/MotorheadMemory/MotorheadMemory.ts @@ -141,7 +141,7 @@ class MotorheadMemoryExtended extends MotorheadMemory implements MemoryMethods { } async getChatMessages(overrideSessionId = '', returnBaseMessages = false): Promise { - const id = overrideSessionId ?? this.sessionId + const id = overrideSessionId ? 
overrideSessionId : this.sessionId try { const resp = await this.caller.call(fetch, `${this.url}/sessions/${id}/memory`, { //@ts-ignore @@ -172,7 +172,7 @@ class MotorheadMemoryExtended extends MotorheadMemory implements MemoryMethods { } async addChatMessages(msgArray: { text: string; type: MessageType }[], overrideSessionId = ''): Promise { - const id = overrideSessionId ?? this.sessionId + const id = overrideSessionId ? overrideSessionId : this.sessionId const input = msgArray.find((msg) => msg.type === 'userMessage') const output = msgArray.find((msg) => msg.type === 'apiMessage') const inputValues = { [this.inputKey ?? 'input']: input?.text } @@ -182,7 +182,7 @@ class MotorheadMemoryExtended extends MotorheadMemory implements MemoryMethods { } async clearChatMessages(overrideSessionId = ''): Promise { - const id = overrideSessionId ?? this.sessionId + const id = overrideSessionId ? overrideSessionId : this.sessionId await this.clear(id) } } diff --git a/packages/components/nodes/memory/RedisBackedChatMemory/RedisBackedChatMemory.ts b/packages/components/nodes/memory/RedisBackedChatMemory/RedisBackedChatMemory.ts index c54e07b5..965b6760 100644 --- a/packages/components/nodes/memory/RedisBackedChatMemory/RedisBackedChatMemory.ts +++ b/packages/components/nodes/memory/RedisBackedChatMemory/RedisBackedChatMemory.ts @@ -189,7 +189,7 @@ class BufferMemoryExtended extends FlowiseMemory implements MemoryMethods { async getChatMessages(overrideSessionId = '', returnBaseMessages = false): Promise { if (!this.redisClient) return [] - const id = overrideSessionId ?? this.sessionId + const id = overrideSessionId ? overrideSessionId : this.sessionId const rawStoredMessages = await this.redisClient.lrange(id, this.windowSize ? 
this.windowSize * -1 : 0, -1) const orderedMessages = rawStoredMessages.reverse().map((message) => JSON.parse(message)) const baseMessages = orderedMessages.map(mapStoredMessageToChatMessage) @@ -199,7 +199,7 @@ class BufferMemoryExtended extends FlowiseMemory implements MemoryMethods { async addChatMessages(msgArray: { text: string; type: MessageType }[], overrideSessionId = ''): Promise { if (!this.redisClient) return - const id = overrideSessionId ?? this.sessionId + const id = overrideSessionId ? overrideSessionId : this.sessionId const input = msgArray.find((msg) => msg.type === 'userMessage') const output = msgArray.find((msg) => msg.type === 'apiMessage') @@ -219,7 +219,7 @@ class BufferMemoryExtended extends FlowiseMemory implements MemoryMethods { async clearChatMessages(overrideSessionId = ''): Promise { if (!this.redisClient) return - const id = overrideSessionId ?? this.sessionId + const id = overrideSessionId ? overrideSessionId : this.sessionId await this.redisClient.del(id) await this.clear() } diff --git a/packages/components/nodes/memory/UpstashRedisBackedChatMemory/UpstashRedisBackedChatMemory.ts b/packages/components/nodes/memory/UpstashRedisBackedChatMemory/UpstashRedisBackedChatMemory.ts index 3d7f6dbf..98a704ab 100644 --- a/packages/components/nodes/memory/UpstashRedisBackedChatMemory/UpstashRedisBackedChatMemory.ts +++ b/packages/components/nodes/memory/UpstashRedisBackedChatMemory/UpstashRedisBackedChatMemory.ts @@ -114,7 +114,7 @@ class BufferMemoryExtended extends FlowiseMemory implements MemoryMethods { async getChatMessages(overrideSessionId = '', returnBaseMessages = false): Promise { if (!this.redisClient) return [] - const id = overrideSessionId ?? this.sessionId + const id = overrideSessionId ? 
overrideSessionId : this.sessionId const rawStoredMessages: StoredMessage[] = await this.redisClient.lrange(id, 0, -1) const orderedMessages = rawStoredMessages.reverse() const previousMessages = orderedMessages.filter((x): x is StoredMessage => x.type !== undefined && x.data.content !== undefined) @@ -125,7 +125,7 @@ class BufferMemoryExtended extends FlowiseMemory implements MemoryMethods { async addChatMessages(msgArray: { text: string; type: MessageType }[], overrideSessionId = ''): Promise { if (!this.redisClient) return - const id = overrideSessionId ?? this.sessionId + const id = overrideSessionId ? overrideSessionId : this.sessionId const input = msgArray.find((msg) => msg.type === 'userMessage') const output = msgArray.find((msg) => msg.type === 'apiMessage') @@ -145,7 +145,7 @@ class BufferMemoryExtended extends FlowiseMemory implements MemoryMethods { async clearChatMessages(overrideSessionId = ''): Promise { if (!this.redisClient) return - const id = overrideSessionId ?? this.sessionId + const id = overrideSessionId ? overrideSessionId : this.sessionId await this.redisClient.del(id) await this.clear() } diff --git a/packages/components/nodes/memory/ZepMemory/ZepMemory.ts b/packages/components/nodes/memory/ZepMemory/ZepMemory.ts index 597eee8a..360a76d4 100644 --- a/packages/components/nodes/memory/ZepMemory/ZepMemory.ts +++ b/packages/components/nodes/memory/ZepMemory/ZepMemory.ts @@ -163,14 +163,14 @@ class ZepMemoryExtended extends ZepMemory implements MemoryMethods { } async getChatMessages(overrideSessionId = '', returnBaseMessages = false): Promise { - const id = overrideSessionId ?? this.sessionId + const id = overrideSessionId ? overrideSessionId : this.sessionId const memoryVariables = await this.loadMemoryVariables({}, id) const baseMessages = memoryVariables[this.memoryKey] return returnBaseMessages ? 
baseMessages : convertBaseMessagetoIMessage(baseMessages) } async addChatMessages(msgArray: { text: string; type: MessageType }[], overrideSessionId = ''): Promise { - const id = overrideSessionId ?? this.sessionId + const id = overrideSessionId ? overrideSessionId : this.sessionId const input = msgArray.find((msg) => msg.type === 'userMessage') const output = msgArray.find((msg) => msg.type === 'apiMessage') const inputValues = { [this.inputKey ?? 'input']: input?.text } @@ -180,7 +180,7 @@ class ZepMemoryExtended extends ZepMemory implements MemoryMethods { } async clearChatMessages(overrideSessionId = ''): Promise { - const id = overrideSessionId ?? this.sessionId + const id = overrideSessionId ? overrideSessionId : this.sessionId await this.clear(id) } } From d7f9c0738151368b990ec24044528c51f4c7a036 Mon Sep 17 00:00:00 2001 From: Jared Tracy Date: Wed, 7 Feb 2024 22:07:12 -0600 Subject: [PATCH 32/45] return tableName, partiionKey, dynamoKey from initalizeDynamoDB --- .../nodes/memory/DynamoDb/DynamoDb.ts | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) diff --git a/packages/components/nodes/memory/DynamoDb/DynamoDb.ts b/packages/components/nodes/memory/DynamoDb/DynamoDb.ts index 864ff7c8..c2085b91 100644 --- a/packages/components/nodes/memory/DynamoDb/DynamoDb.ts +++ b/packages/components/nodes/memory/DynamoDb/DynamoDb.ts @@ -117,7 +117,10 @@ const initalizeDynamoDB = async (nodeData: INodeData, options: ICommonObject): P memoryKey: memoryKey ?? 
'chat_history', chatHistory: dynamoDb, sessionId, - dynamodbClient: client + dynamodbClient: client, + tableName, + partitionKey, + dynamoKey: { [partitionKey]: { S: sessionId } } }) return memory } @@ -125,6 +128,9 @@ const initalizeDynamoDB = async (nodeData: INodeData, options: ICommonObject): P interface BufferMemoryExtendedInput { dynamodbClient: DynamoDBClient sessionId: string + tableName: string + partitionKey: string + dynamoKey: Record } interface DynamoDBSerializedChatMessage { @@ -143,10 +149,10 @@ interface DynamoDBSerializedChatMessage { class BufferMemoryExtended extends FlowiseMemory implements MemoryMethods { tableName = '' + partitionKey = '' + dynamoKey: Record sessionId = '' dynamodbClient: DynamoDBClient - dynamoKey = '' - partitionKey = '' constructor(fields: BufferMemoryInput & BufferMemoryExtendedInput) { super(fields) @@ -154,9 +160,9 @@ class BufferMemoryExtended extends FlowiseMemory implements MemoryMethods { this.dynamodbClient = fields.dynamodbClient // These fields are coming in on chatHistory, but should they be on the dynamodbClient instead? 
- this.partitionKey = (fields?.chatHistory as unknown as { partitionKey: string }).partitionKey - this.dynamoKey = (fields?.chatHistory as unknown as { dynamoKey: string }).dynamoKey - this.tableName = (fields?.chatHistory as unknown as { tableName: string }).tableName + this.tableName = fields.tableName + this.partitionKey = fields.partitionKey + this.dynamoKey = fields.dynamoKey } overrideDynamoKey(overrideSessionId = '') { From d0b1980482968b3cc4c165f391a28f81c00cf518 Mon Sep 17 00:00:00 2001 From: Jared Tracy Date: Wed, 7 Feb 2024 22:08:41 -0600 Subject: [PATCH 33/45] Removes prior comment --- packages/components/nodes/memory/DynamoDb/DynamoDb.ts | 2 -- 1 file changed, 2 deletions(-) diff --git a/packages/components/nodes/memory/DynamoDb/DynamoDb.ts b/packages/components/nodes/memory/DynamoDb/DynamoDb.ts index c2085b91..02ecc31f 100644 --- a/packages/components/nodes/memory/DynamoDb/DynamoDb.ts +++ b/packages/components/nodes/memory/DynamoDb/DynamoDb.ts @@ -158,8 +158,6 @@ class BufferMemoryExtended extends FlowiseMemory implements MemoryMethods { super(fields) this.sessionId = fields.sessionId this.dynamodbClient = fields.dynamodbClient - - // These fields are coming in on chatHistory, but should they be on the dynamodbClient instead? 
this.tableName = fields.tableName this.partitionKey = fields.partitionKey this.dynamoKey = fields.dynamoKey From dd89af8a098e48ef42556593127ad6b4b4e0b560 Mon Sep 17 00:00:00 2001 From: Jared Tracy Date: Thu, 8 Feb 2024 09:04:46 -0600 Subject: [PATCH 34/45] refactor (this as any) usage --- .../nodes/memory/DynamoDb/DynamoDb.ts | 21 ++++++++++--------- 1 file changed, 11 insertions(+), 10 deletions(-) diff --git a/packages/components/nodes/memory/DynamoDb/DynamoDb.ts b/packages/components/nodes/memory/DynamoDb/DynamoDb.ts index 02ecc31f..22da396e 100644 --- a/packages/components/nodes/memory/DynamoDb/DynamoDb.ts +++ b/packages/components/nodes/memory/DynamoDb/DynamoDb.ts @@ -148,9 +148,10 @@ interface DynamoDBSerializedChatMessage { } class BufferMemoryExtended extends FlowiseMemory implements MemoryMethods { - tableName = '' - partitionKey = '' - dynamoKey: Record + private tableName = '' + private partitionKey = '' + private dynamoKey: Record + private messageAttributeName: string sessionId = '' dynamodbClient: DynamoDBClient @@ -221,9 +222,9 @@ class BufferMemoryExtended extends FlowiseMemory implements MemoryMethods { async getChatMessages(overrideSessionId = '', returnBaseMessages = false): Promise { if (!this.dynamodbClient) return [] - const dynamoKey = overrideSessionId ? this.overrideDynamoKey(overrideSessionId) : (this as any).dynamoKey + const dynamoKey = overrideSessionId ? this.overrideDynamoKey(overrideSessionId) : this.dynamoKey const tableName = this.tableName - const messageAttributeName = (this as any).messageAttributeName + const messageAttributeName = this.messageAttributeName const params: GetItemCommandInput = { TableName: tableName, @@ -248,9 +249,9 @@ class BufferMemoryExtended extends FlowiseMemory implements MemoryMethods { async addChatMessages(msgArray: { text: string; type: MessageType }[], overrideSessionId = ''): Promise { if (!this.dynamodbClient) return - const dynamoKey = overrideSessionId ? 
this.overrideDynamoKey(overrideSessionId) : (this as any).dynamoKey - const tableName = (this as any).tableName - const messageAttributeName = (this as any).messageAttributeName + const dynamoKey = overrideSessionId ? this.overrideDynamoKey(overrideSessionId) : this.dynamoKey + const tableName = this.tableName + const messageAttributeName = this.messageAttributeName const input = msgArray.find((msg) => msg.type === 'userMessage') const output = msgArray.find((msg) => msg.type === 'apiMessage') @@ -271,8 +272,8 @@ class BufferMemoryExtended extends FlowiseMemory implements MemoryMethods { async clearChatMessages(overrideSessionId = ''): Promise { if (!this.dynamodbClient) return - const dynamoKey = overrideSessionId ? this.overrideDynamoKey(overrideSessionId) : (this as any).dynamoKey - const tableName = (this as any).tableName + const dynamoKey = overrideSessionId ? this.overrideDynamoKey(overrideSessionId) : this.dynamoKey + const tableName = this.tableName const params: DeleteItemCommandInput = { TableName: tableName, From e110a49a32a4423e7d0127bd1f31f6995f84cdf0 Mon Sep 17 00:00:00 2001 From: Darien Kindlund Date: Thu, 8 Feb 2024 10:46:30 -0500 Subject: [PATCH 35/45] Migrating all TLS/SSL settings into additionalConfig so that there is consistency in how every function connects with Postgres in the same manner. 
--- .../nodes/vectorstores/Postgres/Postgres.ts | 19 +------------------ 1 file changed, 1 insertion(+), 18 deletions(-) diff --git a/packages/components/nodes/vectorstores/Postgres/Postgres.ts b/packages/components/nodes/vectorstores/Postgres/Postgres.ts index 375728e8..78e9cd75 100644 --- a/packages/components/nodes/vectorstores/Postgres/Postgres.ts +++ b/packages/components/nodes/vectorstores/Postgres/Postgres.ts @@ -60,13 +60,6 @@ class Postgres_VectorStores implements INode { name: 'database', type: 'string' }, - { - label: 'SSL Connection', - name: 'sslConnection', - type: 'boolean', - default: false, - optional: false - }, { label: 'Port', name: 'port', @@ -124,7 +117,6 @@ class Postgres_VectorStores implements INode { const docs = nodeData.inputs?.document as Document[] const embeddings = nodeData.inputs?.embeddings as Embeddings const additionalConfig = nodeData.inputs?.additionalConfig as string - const sslConnection = nodeData.inputs?.sslConnection as boolean let additionalConfiguration = {} if (additionalConfig) { @@ -143,7 +135,6 @@ class Postgres_VectorStores implements INode { username: user, password: password, database: nodeData.inputs?.database as string, - ssl: sslConnection } const args = { @@ -248,15 +239,7 @@ const similaritySearchVectorWithScore = async ( ORDER BY "_distance" ASC LIMIT $3;` - const poolOptions = { - host: postgresConnectionOptions.host, - port: postgresConnectionOptions.port, - user: postgresConnectionOptions.username, - password: postgresConnectionOptions.password, - database: postgresConnectionOptions.database, - ssl: postgresConnectionOptions.extra?.ssl - } - const pool = new Pool(poolOptions) + const pool = new Pool(postgresConnectionOptions) const conn = await pool.connect() const documents = await conn.query(queryString, [embeddingString, _filter, k]) From 9af4eaaa8c5a23e0421213bb9c82e49ff2bb2ee6 Mon Sep 17 00:00:00 2001 From: Darien Kindlund Date: Thu, 8 Feb 2024 10:53:33 -0500 Subject: [PATCH 36/45] Fixing linting 
issues. --- packages/components/nodes/vectorstores/Postgres/Postgres.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/components/nodes/vectorstores/Postgres/Postgres.ts b/packages/components/nodes/vectorstores/Postgres/Postgres.ts index 78e9cd75..5c5ef813 100644 --- a/packages/components/nodes/vectorstores/Postgres/Postgres.ts +++ b/packages/components/nodes/vectorstores/Postgres/Postgres.ts @@ -134,7 +134,7 @@ class Postgres_VectorStores implements INode { port: nodeData.inputs?.port as number, username: user, password: password, - database: nodeData.inputs?.database as string, + database: nodeData.inputs?.database as string } const args = { From 4a75396325d4bcba3aa589a5fc41e3912cbd3ee4 Mon Sep 17 00:00:00 2001 From: Darien Kindlund Date: Thu, 8 Feb 2024 11:26:45 -0500 Subject: [PATCH 37/45] So apparently TypeORMVectorStore requires the field 'username' while Pool expects it to be 'user'. Need to populate both to avoid error messages. --- packages/components/nodes/vectorstores/Postgres/Postgres.ts | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/packages/components/nodes/vectorstores/Postgres/Postgres.ts b/packages/components/nodes/vectorstores/Postgres/Postgres.ts index 5c5ef813..b3cc7918 100644 --- a/packages/components/nodes/vectorstores/Postgres/Postgres.ts +++ b/packages/components/nodes/vectorstores/Postgres/Postgres.ts @@ -189,7 +189,8 @@ class Postgres_VectorStores implements INode { type: 'postgres', host: nodeData.inputs?.host as string, port: nodeData.inputs?.port as number, - username: user, + username: user, // Required by TypeORMVectorStore + user: user, // Required by Pool in similaritySearchVectorWithScore password: password, database: nodeData.inputs?.database as string } From 907d5c7ef7a0124d6d4a3a44d33db521fcd5b49a Mon Sep 17 00:00:00 2001 From: Darien Kindlund Date: Thu, 8 Feb 2024 12:06:50 -0500 Subject: [PATCH 38/45] Bumping version since this will change might break legacy Postgres chatflows 
that used TLS/SSL. This will help users remind them to upgrade their corresponding nodes. --- packages/components/nodes/vectorstores/Postgres/Postgres.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/components/nodes/vectorstores/Postgres/Postgres.ts b/packages/components/nodes/vectorstores/Postgres/Postgres.ts index b3cc7918..be7784cc 100644 --- a/packages/components/nodes/vectorstores/Postgres/Postgres.ts +++ b/packages/components/nodes/vectorstores/Postgres/Postgres.ts @@ -24,7 +24,7 @@ class Postgres_VectorStores implements INode { constructor() { this.label = 'Postgres' this.name = 'postgres' - this.version = 2.0 + this.version = 3.0 this.type = 'Postgres' this.icon = 'postgres.svg' this.category = 'Vector Stores' From cf79176ca63fe47b570368a2caaadf8bf9ed4f50 Mon Sep 17 00:00:00 2001 From: chungyau97 Date: Fri, 9 Feb 2024 12:00:55 +0800 Subject: [PATCH 39/45] add missing steps into rate limit msg --- packages/server/src/index.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/server/src/index.ts b/packages/server/src/index.ts index 99bae048..48f93249 100644 --- a/packages/server/src/index.ts +++ b/packages/server/src/index.ts @@ -184,7 +184,7 @@ export class App { this.app.get('/api/v1/ip', (request, response) => { response.send({ ip: request.ip, - msg: 'Check the returned IP address in the response. If it matches your current IP address ( which you can get by going to http://ip.nfriedly.com/ or https://api.ipify.org/ ), then the number of proxies is correct and the rate limiter should now work correctly. If not, increase the number of proxies by 1 until the IP address matches your own. Visit https://docs.flowiseai.com/configuration/rate-limit#rate-limit-setup-guide for more information.' + msg: 'Check returned IP address in the response. 
If it matches your current IP address ( which you can get by going to http://ip.nfriedly.com/ or https://api.ipify.org/ ), then the number of proxies is correct and the rate limiter should now work correctly. If not, increase the number of proxies by 1 and restart Cloud-Hosted Flowise until the IP address matches your own. Visit https://docs.flowiseai.com/configuration/rate-limit#cloud-hosted-rate-limit-setup-guide for more information.' }) }) From caf54bf31b2025d02283d357c1750a20c1ab8d49 Mon Sep 17 00:00:00 2001 From: Henry Date: Fri, 9 Feb 2024 16:07:34 +0800 Subject: [PATCH 40/45] add document json output --- .../nodes/documentloaders/PlainText/PlainText.ts | 4 +++- .../nodes/documentloaders/Text/Text.ts | 4 +++- .../VectorStoreToDocument.ts | 2 ++ .../CohereRerankRetriever.ts | 4 +++- .../EmbeddingsFilterRetriever.ts | 4 +++- .../retrievers/HydeRetriever/HydeRetriever.ts | 4 +++- .../LLMFilterCompressionRetriever.ts | 4 +++- .../retrievers/RRFRetriever/RRFRetriever.ts | 4 +++- .../SimilarityThresholdRetriever.ts | 4 +++- .../marketplaces/chatflows/Claude LLM.json | 4 ++-- .../chatflows/Context Chat Engine.json | 8 ++++---- .../Conversational Retrieval QA Chain.json | 8 ++++---- .../server/marketplaces/chatflows/Local QnA.json | 8 ++++---- .../marketplaces/chatflows/Metadata Filter.json | 8 ++++---- .../chatflows/Multiple VectorDB.json | 16 ++++++++-------- packages/ui/src/utils/genericHelper.js | 3 +++ 16 files changed, 55 insertions(+), 34 deletions(-) diff --git a/packages/components/nodes/documentloaders/PlainText/PlainText.ts b/packages/components/nodes/documentloaders/PlainText/PlainText.ts index c2adceeb..c0c697a3 100644 --- a/packages/components/nodes/documentloaders/PlainText/PlainText.ts +++ b/packages/components/nodes/documentloaders/PlainText/PlainText.ts @@ -51,11 +51,13 @@ class PlainText_DocumentLoaders implements INode { { label: 'Document', name: 'document', - baseClasses: this.baseClasses + description: 'Array of document objects containing 
metadata and pageContent', + baseClasses: [...this.baseClasses, 'json'] }, { label: 'Text', name: 'text', + description: 'Concatenated string from pageContent of documents', baseClasses: ['string', 'json'] } ] diff --git a/packages/components/nodes/documentloaders/Text/Text.ts b/packages/components/nodes/documentloaders/Text/Text.ts index e41c5a9f..1eea709e 100644 --- a/packages/components/nodes/documentloaders/Text/Text.ts +++ b/packages/components/nodes/documentloaders/Text/Text.ts @@ -51,11 +51,13 @@ class Text_DocumentLoaders implements INode { { label: 'Document', name: 'document', - baseClasses: this.baseClasses + description: 'Array of document objects containing metadata and pageContent', + baseClasses: [...this.baseClasses, 'json'] }, { label: 'Text', name: 'text', + description: 'Concatenated string from pageContent of documents', baseClasses: ['string', 'json'] } ] diff --git a/packages/components/nodes/documentloaders/VectorStoreToDocument/VectorStoreToDocument.ts b/packages/components/nodes/documentloaders/VectorStoreToDocument/VectorStoreToDocument.ts index c087e000..27ef36f5 100644 --- a/packages/components/nodes/documentloaders/VectorStoreToDocument/VectorStoreToDocument.ts +++ b/packages/components/nodes/documentloaders/VectorStoreToDocument/VectorStoreToDocument.ts @@ -51,11 +51,13 @@ class VectorStoreToDocument_DocumentLoaders implements INode { { label: 'Document', name: 'document', + description: 'Array of document objects containing metadata and pageContent', baseClasses: [...this.baseClasses, 'json'] }, { label: 'Text', name: 'text', + description: 'Concatenated string from pageContent of documents', baseClasses: ['string', 'json'] } ] diff --git a/packages/components/nodes/retrievers/CohereRerankRetriever/CohereRerankRetriever.ts b/packages/components/nodes/retrievers/CohereRerankRetriever/CohereRerankRetriever.ts index 442fdc7a..5e92505e 100644 --- a/packages/components/nodes/retrievers/CohereRerankRetriever/CohereRerankRetriever.ts +++ 
b/packages/components/nodes/retrievers/CohereRerankRetriever/CohereRerankRetriever.ts @@ -94,11 +94,13 @@ class CohereRerankRetriever_Retrievers implements INode { { label: 'Document', name: 'document', - baseClasses: ['Document'] + description: 'Array of document objects containing metadata and pageContent', + baseClasses: ['Document', 'json'] }, { label: 'Text', name: 'text', + description: 'Concatenated string from pageContent of documents', baseClasses: ['string', 'json'] } ] diff --git a/packages/components/nodes/retrievers/EmbeddingsFilterRetriever/EmbeddingsFilterRetriever.ts b/packages/components/nodes/retrievers/EmbeddingsFilterRetriever/EmbeddingsFilterRetriever.ts index d1049fa4..16d40790 100644 --- a/packages/components/nodes/retrievers/EmbeddingsFilterRetriever/EmbeddingsFilterRetriever.ts +++ b/packages/components/nodes/retrievers/EmbeddingsFilterRetriever/EmbeddingsFilterRetriever.ts @@ -78,11 +78,13 @@ class EmbeddingsFilterRetriever_Retrievers implements INode { { label: 'Document', name: 'document', - baseClasses: ['Document'] + description: 'Array of document objects containing metadata and pageContent', + baseClasses: ['Document', 'json'] }, { label: 'Text', name: 'text', + description: 'Concatenated string from pageContent of documents', baseClasses: ['string', 'json'] } ] diff --git a/packages/components/nodes/retrievers/HydeRetriever/HydeRetriever.ts b/packages/components/nodes/retrievers/HydeRetriever/HydeRetriever.ts index 10fff764..a7cd9829 100644 --- a/packages/components/nodes/retrievers/HydeRetriever/HydeRetriever.ts +++ b/packages/components/nodes/retrievers/HydeRetriever/HydeRetriever.ts @@ -140,11 +140,13 @@ Passage:` { label: 'Document', name: 'document', - baseClasses: ['Document'] + description: 'Array of document objects containing metadata and pageContent', + baseClasses: ['Document', 'json'] }, { label: 'Text', name: 'text', + description: 'Concatenated string from pageContent of documents', baseClasses: ['string', 'json'] } ] 
diff --git a/packages/components/nodes/retrievers/LLMFilterRetriever/LLMFilterCompressionRetriever.ts b/packages/components/nodes/retrievers/LLMFilterRetriever/LLMFilterCompressionRetriever.ts index 6b710cf3..9bace712 100644 --- a/packages/components/nodes/retrievers/LLMFilterRetriever/LLMFilterCompressionRetriever.ts +++ b/packages/components/nodes/retrievers/LLMFilterRetriever/LLMFilterCompressionRetriever.ts @@ -58,11 +58,13 @@ class LLMFilterCompressionRetriever_Retrievers implements INode { { label: 'Document', name: 'document', - baseClasses: ['Document'] + description: 'Array of document objects containing metadata and pageContent', + baseClasses: ['Document', 'json'] }, { label: 'Text', name: 'text', + description: 'Concatenated string from pageContent of documents', baseClasses: ['string', 'json'] } ] diff --git a/packages/components/nodes/retrievers/RRFRetriever/RRFRetriever.ts b/packages/components/nodes/retrievers/RRFRetriever/RRFRetriever.ts index ed15ed24..9788f095 100644 --- a/packages/components/nodes/retrievers/RRFRetriever/RRFRetriever.ts +++ b/packages/components/nodes/retrievers/RRFRetriever/RRFRetriever.ts @@ -89,11 +89,13 @@ class RRFRetriever_Retrievers implements INode { { label: 'Document', name: 'document', - baseClasses: ['Document'] + description: 'Array of document objects containing metadata and pageContent', + baseClasses: ['Document', 'json'] }, { label: 'Text', name: 'text', + description: 'Concatenated string from pageContent of documents', baseClasses: ['string', 'json'] } ] diff --git a/packages/components/nodes/retrievers/SimilarityThresholdRetriever/SimilarityThresholdRetriever.ts b/packages/components/nodes/retrievers/SimilarityThresholdRetriever/SimilarityThresholdRetriever.ts index 5f5a9ed0..6a6976a5 100644 --- a/packages/components/nodes/retrievers/SimilarityThresholdRetriever/SimilarityThresholdRetriever.ts +++ b/packages/components/nodes/retrievers/SimilarityThresholdRetriever/SimilarityThresholdRetriever.ts @@ -74,11 
+74,13 @@ class SimilarityThresholdRetriever_Retrievers implements INode { { label: 'Document', name: 'document', - baseClasses: ['Document'] + description: 'Array of document objects containing metadata and pageContent', + baseClasses: ['Document', 'json'] }, { label: 'Text', name: 'text', + description: 'Concatenated string from pageContent of documents', baseClasses: ['string', 'json'] } ] diff --git a/packages/server/marketplaces/chatflows/Claude LLM.json b/packages/server/marketplaces/chatflows/Claude LLM.json index a0fd4e1a..48be286d 100644 --- a/packages/server/marketplaces/chatflows/Claude LLM.json +++ b/packages/server/marketplaces/chatflows/Claude LLM.json @@ -441,10 +441,10 @@ "type": "options", "options": [ { - "id": "plainText_0-output-document-Document", + "id": "plainText_0-output-document-Document|json", "name": "document", "label": "Document", - "type": "Document" + "type": "Document | json" }, { "id": "plainText_0-output-text-string|json", diff --git a/packages/server/marketplaces/chatflows/Context Chat Engine.json b/packages/server/marketplaces/chatflows/Context Chat Engine.json index 15d3dade..3f1152f2 100644 --- a/packages/server/marketplaces/chatflows/Context Chat Engine.json +++ b/packages/server/marketplaces/chatflows/Context Chat Engine.json @@ -59,10 +59,10 @@ "type": "options", "options": [ { - "id": "textFile_0-output-document-Document", + "id": "textFile_0-output-document-Document|json", "name": "document", "label": "Document", - "type": "Document" + "type": "Document | json" }, { "id": "textFile_0-output-text-string|json", @@ -851,11 +851,11 @@ }, { "source": "textFile_0", - "sourceHandle": "textFile_0-output-document-Document", + "sourceHandle": "textFile_0-output-document-Document|json", "target": "pineconeLlamaIndex_0", "targetHandle": "pineconeLlamaIndex_0-input-document-Document", "type": "buttonedge", - "id": "textFile_0-textFile_0-output-document-Document-pineconeLlamaIndex_0-pineconeLlamaIndex_0-input-document-Document", + 
"id": "textFile_0-textFile_0-output-document-Document|json-pineconeLlamaIndex_0-pineconeLlamaIndex_0-input-document-Document", "data": { "label": "" } diff --git a/packages/server/marketplaces/chatflows/Conversational Retrieval QA Chain.json b/packages/server/marketplaces/chatflows/Conversational Retrieval QA Chain.json index e360141d..f76e89e6 100644 --- a/packages/server/marketplaces/chatflows/Conversational Retrieval QA Chain.json +++ b/packages/server/marketplaces/chatflows/Conversational Retrieval QA Chain.json @@ -235,10 +235,10 @@ "type": "options", "options": [ { - "id": "textFile_0-output-document-Document", + "id": "textFile_0-output-document-Document|json", "name": "document", "label": "Document", - "type": "Document" + "type": "Document | json" }, { "id": "textFile_0-output-text-string|json", @@ -732,11 +732,11 @@ }, { "source": "textFile_0", - "sourceHandle": "textFile_0-output-document-Document", + "sourceHandle": "textFile_0-output-document-Document|json", "target": "pinecone_0", "targetHandle": "pinecone_0-input-document-Document", "type": "buttonedge", - "id": "textFile_0-textFile_0-output-document-Document-pinecone_0-pinecone_0-input-document-Document", + "id": "textFile_0-textFile_0-output-document-Document|json-pinecone_0-pinecone_0-input-document-Document", "data": { "label": "" } diff --git a/packages/server/marketplaces/chatflows/Local QnA.json b/packages/server/marketplaces/chatflows/Local QnA.json index 2637f259..3e8b93f6 100644 --- a/packages/server/marketplaces/chatflows/Local QnA.json +++ b/packages/server/marketplaces/chatflows/Local QnA.json @@ -226,10 +226,10 @@ "type": "options", "options": [ { - "id": "textFile_0-output-document-Document", + "id": "textFile_0-output-document-Document|json", "name": "document", "label": "Document", - "type": "Document" + "type": "Document | json" }, { "id": "textFile_0-output-text-string|json", @@ -651,11 +651,11 @@ }, { "source": "textFile_0", - "sourceHandle": "textFile_0-output-document-Document", 
+ "sourceHandle": "textFile_0-output-document-Document|json", "target": "faiss_0", "targetHandle": "faiss_0-input-document-Document", "type": "buttonedge", - "id": "textFile_0-textFile_0-output-document-Document-faiss_0-faiss_0-input-document-Document", + "id": "textFile_0-textFile_0-output-document-Document|json-faiss_0-faiss_0-input-document-Document", "data": { "label": "" } diff --git a/packages/server/marketplaces/chatflows/Metadata Filter.json b/packages/server/marketplaces/chatflows/Metadata Filter.json index 38ad9211..ed2efb9f 100644 --- a/packages/server/marketplaces/chatflows/Metadata Filter.json +++ b/packages/server/marketplaces/chatflows/Metadata Filter.json @@ -128,10 +128,10 @@ "type": "options", "options": [ { - "id": "textFile_0-output-document-Document", + "id": "textFile_0-output-document-Document|json", "name": "document", "label": "Document", - "type": "Document" + "type": "Document | json" }, { "id": "textFile_0-output-text-string|json", @@ -838,11 +838,11 @@ }, { "source": "textFile_0", - "sourceHandle": "textFile_0-output-document-Document", + "sourceHandle": "textFile_0-output-document-Document|json", "target": "pinecone_0", "targetHandle": "pinecone_0-input-document-Document", "type": "buttonedge", - "id": "textFile_0-textFile_0-output-document-Document-pinecone_0-pinecone_0-input-document-Document", + "id": "textFile_0-textFile_0-output-document-Document|json-pinecone_0-pinecone_0-input-document-Document", "data": { "label": "" } diff --git a/packages/server/marketplaces/chatflows/Multiple VectorDB.json b/packages/server/marketplaces/chatflows/Multiple VectorDB.json index e8141ad7..b76270e7 100644 --- a/packages/server/marketplaces/chatflows/Multiple VectorDB.json +++ b/packages/server/marketplaces/chatflows/Multiple VectorDB.json @@ -966,10 +966,10 @@ "type": "options", "options": [ { - "id": "plainText_0-output-document-Document", + "id": "plainText_0-output-document-Document|json", "name": "document", "label": "Document", - "type": 
"Document" + "type": "Document | json" }, { "id": "plainText_0-output-text-string|json", @@ -1503,10 +1503,10 @@ "type": "options", "options": [ { - "id": "plainText_1-output-document-Document", + "id": "plainText_1-output-document-Document|json", "name": "document", "label": "Document", - "type": "Document" + "type": "Document | json" }, { "id": "plainText_1-output-text-string|json", @@ -1723,11 +1723,11 @@ }, { "source": "plainText_0", - "sourceHandle": "plainText_0-output-document-Document", + "sourceHandle": "plainText_0-output-document-Document|json", "target": "redis_0", "targetHandle": "redis_0-input-document-Document", "type": "buttonedge", - "id": "plainText_0-plainText_0-output-document-Document-redis_0-redis_0-input-document-Document", + "id": "plainText_0-plainText_0-output-document-Document|json-redis_0-redis_0-input-document-Document", "data": { "label": "" } @@ -1778,11 +1778,11 @@ }, { "source": "plainText_1", - "sourceHandle": "plainText_1-output-document-Document", + "sourceHandle": "plainText_1-output-document-Document|json", "target": "faiss_0", "targetHandle": "faiss_0-input-document-Document", "type": "buttonedge", - "id": "plainText_1-plainText_1-output-document-Document-faiss_0-faiss_0-input-document-Document", + "id": "plainText_1-plainText_1-output-document-Document|json-faiss_0-faiss_0-input-document-Document", "data": { "label": "" } diff --git a/packages/ui/src/utils/genericHelper.js b/packages/ui/src/utils/genericHelper.js index eadbdb88..74dc9578 100644 --- a/packages/ui/src/utils/genericHelper.js +++ b/packages/ui/src/utils/genericHelper.js @@ -99,6 +99,7 @@ export const initNode = (nodeData, newNodeId) => { id: `${newNodeId}-output-${nodeData.outputs[j].name}-${baseClasses}`, name: nodeData.outputs[j].name, label: nodeData.outputs[j].label, + description: nodeData.outputs[j].description ?? 
'', type } options.push(newOutputOption) @@ -107,6 +108,7 @@ export const initNode = (nodeData, newNodeId) => { name: 'output', label: 'Output', type: 'options', + description: nodeData.outputs[0].description ?? '', options, default: nodeData.outputs[0].name } @@ -116,6 +118,7 @@ export const initNode = (nodeData, newNodeId) => { id: `${newNodeId}-output-${nodeData.name}-${nodeData.baseClasses.join('|')}`, name: nodeData.name, label: nodeData.type, + description: nodeData.description ?? '', type: nodeData.baseClasses.join(' | ') } outputAnchors.push(newOutput) From e3c899230c036bbaa817952c07e8960e8fbdf5e6 Mon Sep 17 00:00:00 2001 From: chungyau97 Date: Sat, 10 Feb 2024 12:02:21 +0800 Subject: [PATCH 41/45] add FLOWISE_FILE_SIZE_LIMIT variable --- packages/server/.env.example | 1 + packages/server/src/index.ts | 5 +++-- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/packages/server/.env.example b/packages/server/.env.example index ebc59cf3..a7a93345 100644 --- a/packages/server/.env.example +++ b/packages/server/.env.example @@ -20,6 +20,7 @@ PORT=3000 # FLOWISE_USERNAME=user # FLOWISE_PASSWORD=1234 # FLOWISE_SECRETKEY_OVERWRITE=myencryptionkey +# FLOWISE_FILE_SIZE_LIMIT=50mb # DEBUG=true # LOG_LEVEL=debug (error | warn | info | verbose | debug) # TOOL_FUNCTION_BUILTIN_DEP=crypto,fs diff --git a/packages/server/src/index.ts b/packages/server/src/index.ts index 48f93249..973ce1ea 100644 --- a/packages/server/src/index.ts +++ b/packages/server/src/index.ts @@ -120,8 +120,9 @@ export class App { async config(socketIO?: Server) { // Limit is needed to allow sending/receiving base64 encoded string - this.app.use(express.json({ limit: '50mb' })) - this.app.use(express.urlencoded({ limit: '50mb', extended: true })) + const flowise_file_size_limit = process.env.FLOWISE_FILE_SIZE_LIMIT ?? 
'50mb' + this.app.use(express.json({ limit: flowise_file_size_limit })) + this.app.use(express.urlencoded({ limit: flowise_file_size_limit, extended: true })) if (process.env.NUMBER_OF_PROXIES && parseInt(process.env.NUMBER_OF_PROXIES) > 0) this.app.set('trust proxy', parseInt(process.env.NUMBER_OF_PROXIES)) From 702b8c1aab8474e09373874657efb3c061b71ada Mon Sep 17 00:00:00 2001 From: chungyau97 Date: Sat, 10 Feb 2024 12:12:32 +0800 Subject: [PATCH 42/45] add FLOWISE_FILE_SIZE_LIMIT into docker env --- docker/.env.example | 1 + 1 file changed, 1 insertion(+) diff --git a/docker/.env.example b/docker/.env.example index a4beaf8a..84019299 100644 --- a/docker/.env.example +++ b/docker/.env.example @@ -21,6 +21,7 @@ LOG_PATH=/root/.flowise/logs # FLOWISE_USERNAME=user # FLOWISE_PASSWORD=1234 # FLOWISE_SECRETKEY_OVERWRITE=myencryptionkey +# FLOWISE_FILE_SIZE_LIMIT=50mb # DEBUG=true # LOG_LEVEL=debug (error | warn | info | verbose | debug) # TOOL_FUNCTION_BUILTIN_DEP=crypto,fs From a132f51727559c82fa157bcb37047cb727599b92 Mon Sep 17 00:00:00 2001 From: chungyau97 Date: Sun, 11 Feb 2024 17:37:48 +0800 Subject: [PATCH 43/45] add more FLOWISE_FILE_SIZE_LIMIT --- CONTRIBUTING-ZH.md | 1 + CONTRIBUTING.md | 1 + docker/docker-compose.yml | 1 + packages/server/src/commands/start.ts | 4 ++++ 4 files changed, 7 insertions(+) diff --git a/CONTRIBUTING-ZH.md b/CONTRIBUTING-ZH.md index 7e35d194..e000da4f 100644 --- a/CONTRIBUTING-ZH.md +++ b/CONTRIBUTING-ZH.md @@ -123,6 +123,7 @@ Flowise 支持不同的环境变量来配置您的实例。您可以在 `package | PORT | Flowise 运行的 HTTP 端口 | 数字 | 3000 | | FLOWISE_USERNAME | 登录用户名 | 字符串 | | | FLOWISE_PASSWORD | 登录密码 | 字符串 | | +| FLOWISE_FILE_SIZE_LIMIT | 上传文件大小限制 | 字符串 | 50mb | | DEBUG | 打印组件的日志 | 布尔值 | | | LOG_PATH | 存储日志文件的位置 | 字符串 | `your-path/Flowise/logs` | | LOG_LEVEL | 日志的不同级别 | 枚举字符串: `error`, `info`, `verbose`, `debug` | `info` | diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 25a27e84..fdeb848b 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -127,6 +127,7 @@ 
Flowise support different environment variables to configure your instance. You | IFRAME_ORIGINS | The allowed origins for iframe src embedding | String | | | FLOWISE_USERNAME | Username to login | String | | | FLOWISE_PASSWORD | Password to login | String | | +| FLOWISE_FILE_SIZE_LIMIT | Upload File Size Limit | String | 50mb | | DEBUG | Print logs from components | Boolean | | | LOG_PATH | Location where log files are stored | String | `your-path/Flowise/logs` | | LOG_LEVEL | Different levels of logs | Enum String: `error`, `info`, `verbose`, `debug` | `info` | diff --git a/docker/docker-compose.yml b/docker/docker-compose.yml index 71bcfcfb..4da945fe 100644 --- a/docker/docker-compose.yml +++ b/docker/docker-compose.yml @@ -10,6 +10,7 @@ services: - IFRAME_ORIGINS=${IFRAME_ORIGINS} - FLOWISE_USERNAME=${FLOWISE_USERNAME} - FLOWISE_PASSWORD=${FLOWISE_PASSWORD} + - FLOWISE_FILE_SIZE_LIMIT=${FLOWISE_FILE_SIZE_LIMIT} - DEBUG=${DEBUG} - DATABASE_PATH=${DATABASE_PATH} - DATABASE_TYPE=${DATABASE_TYPE} diff --git a/packages/server/src/commands/start.ts b/packages/server/src/commands/start.ts index dfb20766..a649dba6 100644 --- a/packages/server/src/commands/start.ts +++ b/packages/server/src/commands/start.ts @@ -18,6 +18,7 @@ export default class Start extends Command { static flags = { FLOWISE_USERNAME: Flags.string(), FLOWISE_PASSWORD: Flags.string(), + FLOWISE_FILE_SIZE_LIMIT: Flags.string(), PORT: Flags.string(), CORS_ORIGINS: Flags.string(), IFRAME_ORIGINS: Flags.string(), @@ -91,6 +92,9 @@ export default class Start extends Command { if (flags.FLOWISE_PASSWORD) process.env.FLOWISE_PASSWORD = flags.FLOWISE_PASSWORD if (flags.APIKEY_PATH) process.env.APIKEY_PATH = flags.APIKEY_PATH + //API Configuration + if (flags.FLOWISE_FILE_SIZE_LIMIT) process.env.FLOWISE_FILE_SIZE_LIMIT = flags.FLOWISE_FILE_SIZE_LIMIT + // Credentials if (flags.SECRETKEY_PATH) process.env.SECRETKEY_PATH = flags.SECRETKEY_PATH if (flags.FLOWISE_SECRETKEY_OVERWRITE) 
process.env.FLOWISE_SECRETKEY_OVERWRITE = flags.FLOWISE_SECRETKEY_OVERWRITE From 5471a4c9aa48068a7aecf6433dc7e7da6bd8d973 Mon Sep 17 00:00:00 2001 From: Ilango Date: Mon, 12 Feb 2024 12:01:19 +0530 Subject: [PATCH 44/45] Show error when relative links method is not set and allow 0 as limit value --- .../nodes/documentloaders/Cheerio/Cheerio.ts | 4 +- .../documentloaders/Playwright/Playwright.ts | 4 +- .../documentloaders/Puppeteer/Puppeteer.ts | 4 +- packages/server/src/index.ts | 4 ++ .../dialog/ManageScrapedLinksDialog.js | 50 +++++++++++++++++-- 5 files changed, 58 insertions(+), 8 deletions(-) diff --git a/packages/components/nodes/documentloaders/Cheerio/Cheerio.ts b/packages/components/nodes/documentloaders/Cheerio/Cheerio.ts index 6af1f9a9..48ae85bc 100644 --- a/packages/components/nodes/documentloaders/Cheerio/Cheerio.ts +++ b/packages/components/nodes/documentloaders/Cheerio/Cheerio.ts @@ -126,7 +126,9 @@ class Cheerio_DocumentLoaders implements INode { let docs = [] if (relativeLinksMethod) { if (process.env.DEBUG === 'true') options.logger.info(`Start ${relativeLinksMethod}`) - if (!limit) limit = 10 + // if limit is 0 we don't want it to default to 10 so we check explicitly for null or undefined + // so when limit is 0 we can fetch all the links + if (limit === null || limit === undefined) limit = 10 else if (limit < 0) throw new Error('Limit cannot be less than 0') const pages: string[] = selectedLinks && selectedLinks.length > 0 diff --git a/packages/components/nodes/documentloaders/Playwright/Playwright.ts b/packages/components/nodes/documentloaders/Playwright/Playwright.ts index 2ba60d0f..55fa9608 100644 --- a/packages/components/nodes/documentloaders/Playwright/Playwright.ts +++ b/packages/components/nodes/documentloaders/Playwright/Playwright.ts @@ -167,7 +167,9 @@ class Playwright_DocumentLoaders implements INode { let docs = [] if (relativeLinksMethod) { if (process.env.DEBUG === 'true') options.logger.info(`Start ${relativeLinksMethod}`) - if 
(!limit) limit = 10 + // if limit is 0 we don't want it to default to 10 so we check explicitly for null or undefined + // so when limit is 0 we can fetch all the links + if (limit === null || limit === undefined) limit = 10 else if (limit < 0) throw new Error('Limit cannot be less than 0') const pages: string[] = selectedLinks && selectedLinks.length > 0 diff --git a/packages/components/nodes/documentloaders/Puppeteer/Puppeteer.ts b/packages/components/nodes/documentloaders/Puppeteer/Puppeteer.ts index 1f8c8f3f..90b5a277 100644 --- a/packages/components/nodes/documentloaders/Puppeteer/Puppeteer.ts +++ b/packages/components/nodes/documentloaders/Puppeteer/Puppeteer.ts @@ -168,7 +168,9 @@ class Puppeteer_DocumentLoaders implements INode { let docs = [] if (relativeLinksMethod) { if (process.env.DEBUG === 'true') options.logger.info(`Start ${relativeLinksMethod}`) - if (!limit) limit = 10 + // if limit is 0 we don't want it to default to 10 so we check explicitly for null or undefined + // so when limit is 0 we can fetch all the links + if (limit === null || limit === undefined) limit = 10 else if (limit < 0) throw new Error('Limit cannot be less than 0') const pages: string[] = selectedLinks && selectedLinks.length > 0 diff --git a/packages/server/src/index.ts b/packages/server/src/index.ts index 7ceba556..b994ba62 100644 --- a/packages/server/src/index.ts +++ b/packages/server/src/index.ts @@ -1148,6 +1148,10 @@ export class App { this.app.get('/api/v1/fetch-links', async (req: Request, res: Response) => { const url = decodeURIComponent(req.query.url as string) const relativeLinksMethod = req.query.relativeLinksMethod as string + if (!relativeLinksMethod) { + return res.status(500).send('Please choose a Relative Links Method in Additional Parameters.') + } + const limit = parseInt(req.query.limit as string) if (process.env.DEBUG === 'true') console.info(`Start ${relativeLinksMethod}`) const links: string[] = relativeLinksMethod === 'webCrawl' ? 
await webCrawl(url, limit) : await xmlScrape(url, limit) diff --git a/packages/ui/src/ui-component/dialog/ManageScrapedLinksDialog.js b/packages/ui/src/ui-component/dialog/ManageScrapedLinksDialog.js index 9a846ce9..a4199504 100644 --- a/packages/ui/src/ui-component/dialog/ManageScrapedLinksDialog.js +++ b/packages/ui/src/ui-component/dialog/ManageScrapedLinksDialog.js @@ -16,7 +16,7 @@ import { Stack, Typography } from '@mui/material' -import { IconTrash } from '@tabler/icons' +import { IconTrash, IconX } from '@tabler/icons' import PerfectScrollbar from 'react-perfect-scrollbar' import { BackdropLoader } from 'ui-component/loading/BackdropLoader' @@ -24,12 +24,23 @@ import { StyledButton } from 'ui-component/button/StyledButton' import scraperApi from 'api/scraper' -import { HIDE_CANVAS_DIALOG, SHOW_CANVAS_DIALOG } from 'store/actions' +import useNotifier from 'utils/useNotifier' + +import { + HIDE_CANVAS_DIALOG, + SHOW_CANVAS_DIALOG, + enqueueSnackbar as enqueueSnackbarAction, + closeSnackbar as closeSnackbarAction +} from 'store/actions' const ManageScrapedLinksDialog = ({ show, dialogProps, onCancel, onSave }) => { const portalElement = document.getElementById('portal') const dispatch = useDispatch() + useNotifier() + const enqueueSnackbar = (...args) => dispatch(enqueueSnackbarAction(...args)) + const closeSnackbar = (...args) => dispatch(closeSnackbarAction(...args)) + const [loading, setLoading] = useState(false) const [selectedLinks, setSelectedLinks] = useState([]) const [url, setUrl] = useState('') @@ -53,9 +64,38 @@ const ManageScrapedLinksDialog = ({ show, dialogProps, onCancel, onSave }) => { const handleFetchLinks = async () => { setLoading(true) - const fetchLinksResp = await scraperApi.fetchLinks(url, dialogProps.relativeLinksMethod, dialogProps.limit) - if (fetchLinksResp.data) { - setSelectedLinks(fetchLinksResp.data.links) + try { + const fetchLinksResp = await scraperApi.fetchLinks(url, dialogProps.relativeLinksMethod, dialogProps.limit) + if 
(fetchLinksResp.data) { + setSelectedLinks(fetchLinksResp.data.links) + enqueueSnackbar({ + message: 'Successfully fetched links', + options: { + key: new Date().getTime() + Math.random(), + variant: 'success', + action: (key) => ( + + ) + } + }) + } + } catch (error) { + const errorData = error.response.data || `${error.response.status}: ${error.response.statusText}` + enqueueSnackbar({ + message: errorData, + options: { + key: new Date().getTime() + Math.random(), + variant: 'error', + persist: true, + action: (key) => ( + + ) + } + }) } setLoading(false) } From a6abd593a62c743f1c2949e9b012b7017aff6e37 Mon Sep 17 00:00:00 2001 From: Jared Tracy Date: Mon, 12 Feb 2024 20:53:40 -0600 Subject: [PATCH 45/45] Allows query chatmessage API endpoint by messageId This will be useful when the exact message is required by another system. --- packages/server/src/index.ts | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/packages/server/src/index.ts b/packages/server/src/index.ts index 07797f32..284c88d6 100644 --- a/packages/server/src/index.ts +++ b/packages/server/src/index.ts @@ -511,6 +511,7 @@ export class App { const chatId = req.query?.chatId as string | undefined const memoryType = req.query?.memoryType as string | undefined const sessionId = req.query?.sessionId as string | undefined + const messageId = req.query?.messageId as string | undefined const startDate = req.query?.startDate as string | undefined const endDate = req.query?.endDate as string | undefined let chatTypeFilter = req.query?.chatType as chatType | undefined @@ -538,7 +539,8 @@ export class App { memoryType, sessionId, startDate, - endDate + endDate, + messageId ) return res.json(chatmessages) }) @@ -1440,7 +1442,8 @@ export class App { memoryType?: string, sessionId?: string, startDate?: string, - endDate?: string + endDate?: string, + messageId?: string ): Promise { let fromDate if (startDate) fromDate = new Date(startDate) @@ -1455,7 +1458,8 @@ export class App { 
chatId, memoryType: memoryType ?? (chatId ? IsNull() : undefined), sessionId: sessionId ?? undefined, - createdDate: toDate && fromDate ? Between(fromDate, toDate) : undefined + createdDate: toDate && fromDate ? Between(fromDate, toDate) : undefined, + id: messageId ?? undefined }, order: { createdDate: sortOrder === 'DESC' ? 'DESC' : 'ASC'