add llamaindex

pull/1611/head
Henry 2023-12-04 20:04:09 +00:00
parent 423c23aaf0
commit 40f8371de9
56 changed files with 4509 additions and 536 deletions

View File

@ -2,7 +2,7 @@ import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Inter
import { initializeAgentExecutorWithOptions, AgentExecutor, InitializeAgentExecutorOptions } from 'langchain/agents'
import { Tool } from 'langchain/tools'
import { BaseChatMemory } from 'langchain/memory'
import { getBaseClasses, mapChatHistory } from '../../../src/utils'
import { getBaseClasses } from '../../../src/utils'
import { BaseChatModel } from 'langchain/chat_models/base'
import { flatten } from 'lodash'
import { additionalCallbacks } from '../../../src/handler'
@ -90,18 +90,17 @@ class ConversationalAgent_Agents implements INode {
async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string> {
const executor = nodeData.instance as AgentExecutor
const memory = nodeData.inputs?.memory as BaseChatMemory
const memory = nodeData.inputs?.memory
memory.returnMessages = true // Return true for BaseChatModel
if (options && options.chatHistory) {
const chatHistoryClassName = memory.chatHistory.constructor.name
// Only replace when its In-Memory
if (chatHistoryClassName && chatHistoryClassName === 'ChatMessageHistory') {
memory.chatHistory = mapChatHistory(options)
executor.memory = memory
}
/* When incomingInput.history is provided, only force-replace chatHistory if it's ShortTermMemory.
 * LongTermMemory will automatically retrieve chatHistory from sessionId.
 */
if (options && options.chatHistory && memory.isShortTermMemory) {
await memory.resumeMessages(options.chatHistory)
}
;(executor.memory as any).returnMessages = true // Return true for BaseChatModel
executor.memory = memory
const callbacks = await additionalCallbacks(nodeData, options)

View File

@ -1,6 +1,6 @@
import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
import { initializeAgentExecutorWithOptions, AgentExecutor } from 'langchain/agents'
import { getBaseClasses, mapChatHistory } from '../../../src/utils'
import { getBaseClasses } from '../../../src/utils'
import { flatten } from 'lodash'
import { BaseChatMemory } from 'langchain/memory'
import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler'
@ -58,8 +58,8 @@ class ConversationalRetrievalAgent_Agents implements INode {
async init(nodeData: INodeData): Promise<any> {
const model = nodeData.inputs?.model
const memory = nodeData.inputs?.memory as BaseChatMemory
const systemMessage = nodeData.inputs?.systemMessage as string
const memory = nodeData.inputs?.memory as BaseChatMemory
let tools = nodeData.inputs?.tools
tools = flatten(tools)
@ -78,19 +78,21 @@ class ConversationalRetrievalAgent_Agents implements INode {
async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string> {
const executor = nodeData.instance as AgentExecutor
const memory = nodeData.inputs?.memory
if (executor.memory) {
;(executor.memory as any).memoryKey = 'chat_history'
;(executor.memory as any).outputKey = 'output'
;(executor.memory as any).returnMessages = true
memory.memoryKey = 'chat_history'
memory.outputKey = 'output'
memory.returnMessages = true
const chatHistoryClassName = (executor.memory as any).chatHistory.constructor.name
// Only replace when its In-Memory
if (chatHistoryClassName && chatHistoryClassName === 'ChatMessageHistory') {
;(executor.memory as any).chatHistory = mapChatHistory(options)
}
/* When incomingInput.history is provided, only force-replace chatHistory if it's ShortTermMemory.
 * LongTermMemory will automatically retrieve chatHistory from sessionId.
 */
if (options && options.chatHistory && memory.isShortTermMemory) {
await memory.resumeMessages(options.chatHistory)
}
executor.memory = memory
const loggerHandler = new ConsoleCallbackHandler(options.logger)
const callbacks = await additionalCallbacks(nodeData, options)

View File

@ -81,50 +81,8 @@ class OpenAIAssistant_Agents implements INode {
}
}
async init(): Promise<any> {
return null
}
//@ts-ignore
memoryMethods = {
async clearSessionMemory(nodeData: INodeData, options: ICommonObject): Promise<void> {
const selectedAssistantId = nodeData.inputs?.selectedAssistant as string
const appDataSource = options.appDataSource as DataSource
const databaseEntities = options.databaseEntities as IDatabaseEntity
let sessionId = nodeData.inputs?.sessionId as string
const assistant = await appDataSource.getRepository(databaseEntities['Assistant']).findOneBy({
id: selectedAssistantId
})
if (!assistant) {
options.logger.error(`Assistant ${selectedAssistantId} not found`)
return
}
if (!sessionId && options.chatId) {
const chatmsg = await appDataSource.getRepository(databaseEntities['ChatMessage']).findOneBy({
chatId: options.chatId
})
if (!chatmsg) {
options.logger.error(`Chat Message with Chat Id: ${options.chatId} not found`)
return
}
sessionId = chatmsg.sessionId
}
const credentialData = await getCredentialData(assistant.credential ?? '', options)
const openAIApiKey = getCredentialParam('openAIApiKey', credentialData, nodeData)
if (!openAIApiKey) {
options.logger.error(`OpenAI ApiKey not found`)
return
}
const openai = new OpenAI({ apiKey: openAIApiKey })
options.logger.info(`Clearing OpenAI Thread ${sessionId}`)
if (sessionId) await openai.beta.threads.del(sessionId)
options.logger.info(`Successfully cleared OpenAI Thread ${sessionId}`)
}
async init(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
return new OpenAIAssistant({ nodeData, options })
}
async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string | object> {
@ -459,4 +417,58 @@ const formatToOpenAIAssistantTool = (tool: any): OpenAI.Beta.AssistantCreatePara
}
}
/**
 * Constructor arguments for the OpenAIAssistant helper: the node's data and
 * the per-request options (data source, logger, chat id, ...).
 */
interface OpenAIAssistantInput {
    nodeData: INodeData
    options: ICommonObject
}
/**
 * Lightweight wrapper holding the node data/options needed to manage an
 * OpenAI Assistant thread outside of a run (e.g. clearing its messages).
 */
class OpenAIAssistant {
    nodeData: INodeData
    options: ICommonObject = {}

    constructor(fields: OpenAIAssistantInput) {
        this.nodeData = fields.nodeData
        this.options = fields.options
    }

    /**
     * Deletes the OpenAI thread backing the current session, clearing the chat
     * history for this assistant.
     *
     * The session id is taken from the node inputs; when absent, it is derived
     * from the ChatMessage row matching options.chatId. On any lookup failure
     * (assistant, chat message, or API key missing) the method logs an error
     * and returns without throwing.
     */
    async clearChatMessages(): Promise<void> {
        const selectedAssistantId = this.nodeData.inputs?.selectedAssistant as string
        const appDataSource = this.options.appDataSource as DataSource
        const databaseEntities = this.options.databaseEntities as IDatabaseEntity
        let sessionId = this.nodeData.inputs?.sessionId as string

        const assistant = await appDataSource.getRepository(databaseEntities['Assistant']).findOneBy({
            id: selectedAssistantId
        })
        if (!assistant) {
            this.options.logger.error(`Assistant ${selectedAssistantId} not found`)
            return
        }

        // No explicit session id: fall back to the session of the chat message for this chat id
        if (!sessionId && this.options.chatId) {
            const chatmsg = await appDataSource.getRepository(databaseEntities['ChatMessage']).findOneBy({
                chatId: this.options.chatId
            })
            if (!chatmsg) {
                this.options.logger.error(`Chat Message with Chat Id: ${this.options.chatId} not found`)
                return
            }
            sessionId = chatmsg.sessionId
        }

        const credentialData = await getCredentialData(assistant.credential ?? '', this.options)
        const openAIApiKey = getCredentialParam('openAIApiKey', credentialData, this.nodeData)
        if (!openAIApiKey) {
            this.options.logger.error(`OpenAI ApiKey not found`)
            return
        }

        // sessionId is used directly as the OpenAI thread id here
        const openai = new OpenAI({ apiKey: openAIApiKey })
        this.options.logger.info(`Clearing OpenAI Thread ${sessionId}`)
        if (sessionId) await openai.beta.threads.del(sessionId)
        this.options.logger.info(`Successfully cleared OpenAI Thread ${sessionId}`)
    }
}
module.exports = { nodeClass: OpenAIAssistant_Agents }

View File

@ -1,6 +1,6 @@
import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
import { initializeAgentExecutorWithOptions, AgentExecutor } from 'langchain/agents'
import { getBaseClasses, mapChatHistory } from '../../../src/utils'
import { getBaseClasses } from '../../../src/utils'
import { BaseLanguageModel } from 'langchain/base_language'
import { flatten } from 'lodash'
import { BaseChatMemory } from 'langchain/memory'
@ -56,8 +56,8 @@ class OpenAIFunctionAgent_Agents implements INode {
async init(nodeData: INodeData): Promise<any> {
const model = nodeData.inputs?.model as BaseLanguageModel
const memory = nodeData.inputs?.memory as BaseChatMemory
const systemMessage = nodeData.inputs?.systemMessage as string
const memory = nodeData.inputs?.memory as BaseChatMemory
let tools = nodeData.inputs?.tools
tools = flatten(tools)
@ -69,25 +69,23 @@ class OpenAIFunctionAgent_Agents implements INode {
prefix: systemMessage ?? `You are a helpful AI assistant.`
}
})
if (memory) executor.memory = memory
executor.memory = memory
return executor
}
async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string> {
const executor = nodeData.instance as AgentExecutor
const memory = nodeData.inputs?.memory as BaseChatMemory
const memory = nodeData.inputs?.memory
memory.returnMessages = true // Return true for BaseChatModel
if (options && options.chatHistory) {
const chatHistoryClassName = memory.chatHistory.constructor.name
// Only replace when its In-Memory
if (chatHistoryClassName && chatHistoryClassName === 'ChatMessageHistory') {
memory.chatHistory = mapChatHistory(options)
executor.memory = memory
}
/* When incomingInput.history is provided, only force-replace chatHistory if it's ShortTermMemory.
 * LongTermMemory will automatically retrieve chatHistory from sessionId.
 */
if (options && options.chatHistory && memory.isShortTermMemory) {
await memory.resumeMessages(options.chatHistory)
}
;(executor.memory as any).returnMessages = true // Return true for BaseChatModel
executor.memory = memory
const loggerHandler = new ConsoleCallbackHandler(options.logger)
const callbacks = await additionalCallbacks(nodeData, options)

View File

@ -1,6 +1,6 @@
import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
import { ConversationChain } from 'langchain/chains'
import { getBaseClasses, mapChatHistory } from '../../../src/utils'
import { getBaseClasses } from '../../../src/utils'
import { ChatPromptTemplate, HumanMessagePromptTemplate, MessagesPlaceholder, SystemMessagePromptTemplate } from 'langchain/prompts'
import { BufferMemory } from 'langchain/memory'
import { BaseChatModel } from 'langchain/chat_models/base'
@ -105,15 +105,14 @@ class ConversationChain_Chains implements INode {
async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string> {
const chain = nodeData.instance as ConversationChain
const memory = nodeData.inputs?.memory as BufferMemory
const memory = nodeData.inputs?.memory
memory.returnMessages = true // Return true for BaseChatModel
if (options && options.chatHistory) {
const chatHistoryClassName = memory.chatHistory.constructor.name
// Only replace when its In-Memory
if (chatHistoryClassName && chatHistoryClassName === 'ChatMessageHistory') {
memory.chatHistory = mapChatHistory(options)
}
/* When incomingInput.history is provided, only force-replace chatHistory if it's ShortTermMemory.
 * LongTermMemory will automatically retrieve chatHistory from sessionId.
 */
if (options && options.chatHistory && memory.isShortTermMemory) {
await memory.resumeMessages(options.chatHistory)
}
chain.memory = memory

View File

@ -1,9 +1,9 @@
import { BaseLanguageModel } from 'langchain/base_language'
import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
import { getBaseClasses, mapChatHistory } from '../../../src/utils'
import { ICommonObject, IMessage, INode, INodeData, INodeParams } from '../../../src/Interface'
import { getBaseClasses } from '../../../src/utils'
import { ConversationalRetrievalQAChain, QAChainParams } from 'langchain/chains'
import { BaseRetriever } from 'langchain/schema/retriever'
import { BufferMemory, BufferMemoryInput } from 'langchain/memory'
import { BufferMemoryInput, BufferMemory } from 'langchain/memory'
import { PromptTemplate } from 'langchain/prompts'
import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler'
import {
@ -158,7 +158,7 @@ class ConversationalRetrievalQAChain_Chains implements INode {
returnMessages: true
}
if (chainOption === 'refine') fields.outputKey = 'output_text'
obj.memory = new BufferMemory(fields)
obj.memory = new BufferMemoryExtended(fields)
}
const chain = ConversationalRetrievalQAChain.fromLLM(model, vectorStoreRetriever, obj)
@ -178,12 +178,11 @@ class ConversationalRetrievalQAChain_Chains implements INode {
const obj = { question: input }
if (options && options.chatHistory && chain.memory) {
const chatHistoryClassName = (chain.memory as any).chatHistory.constructor.name
// Only replace when its In-Memory
if (chatHistoryClassName && chatHistoryClassName === 'ChatMessageHistory') {
;(chain.memory as any).chatHistory = mapChatHistory(options)
}
/* When incomingInput.history is provided, only force-replace chatHistory if it's ShortTermMemory.
 * LongTermMemory will automatically retrieve chatHistory from sessionId.
 */
if (options && options.chatHistory && chain.memory && (chain.memory as any).isShortTermMemory) {
await (chain.memory as any).resumeMessages(options.chatHistory)
}
const loggerHandler = new ConsoleCallbackHandler(options.logger)
@ -216,4 +215,27 @@ class ConversationalRetrievalQAChain_Chains implements INode {
}
}
/**
 * In-memory BufferMemory variant flagged as short-term, so callers know its
 * history may be force-replaced from the incoming request's chat history.
 */
class BufferMemoryExtended extends BufferMemory {
    // Distinguishes in-memory (short-term) memory from persistent long-term memory nodes
    isShortTermMemory = true

    constructor(fields: BufferMemoryInput) {
        super(fields)
    }

    /** Removes all stored chat messages. */
    async clearChatMessages(): Promise<void> {
        await this.clear()
    }

    /**
     * Replaces the stored history with the provided messages.
     * @param messages user/api messages to replay into chatHistory, in order
     */
    async resumeMessages(messages: IMessage[]): Promise<void> {
        // Clear existing chatHistory to avoid duplication
        if (messages.length) await this.clear()
        // Insert into chatHistory
        for (const msg of messages) {
            if (msg.type === 'userMessage') await this.chatHistory.addUserMessage(msg.message)
            else if (msg.type === 'apiMessage') await this.chatHistory.addAIChatMessage(msg.message)
        }
    }
}
module.exports = { nodeClass: ConversationalRetrievalQAChain_Chains }

View File

@ -0,0 +1,135 @@
import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
import { OpenAI, ALL_AVAILABLE_OPENAI_MODELS } from 'llamaindex'
/** Azure-specific connection settings forwarded to the llamaindex OpenAI client. */
interface AzureOpenAIConfig {
    apiKey?: string
    endpoint?: string
    apiVersion?: string
    deploymentName?: string
}
/**
 * Flowise node wrapping the llamaindex OpenAI chat LLM configured against an
 * Azure OpenAI deployment. Exposes model/temperature/maxTokens/topP/timeout
 * inputs and builds the client from the stored `azureOpenAIApi` credential.
 */
class AzureChatOpenAI_LlamaIndex_ChatModels implements INode {
    label: string
    name: string
    version: number
    type: string
    icon: string
    category: string
    description: string
    baseClasses: string[]
    tags: string[]
    credential: INodeParams
    inputs: INodeParams[]

    constructor() {
        this.label = 'AzureChatOpenAI'
        this.name = 'azureChatOpenAI_LlamaIndex'
        this.version = 1.0
        this.type = 'AzureChatOpenAI'
        this.icon = 'Azure.svg'
        this.category = 'Chat Models'
        this.description = 'Wrapper around Azure OpenAI Chat LLM with LlamaIndex implementation'
        this.baseClasses = [this.type, 'BaseChatModel_LlamaIndex', ...getBaseClasses(OpenAI)]
        this.tags = ['LlamaIndex']
        this.credential = {
            label: 'Connect Credential',
            name: 'credential',
            type: 'credential',
            credentialNames: ['azureOpenAIApi']
        }
        this.inputs = [
            {
                label: 'Model Name',
                name: 'modelName',
                type: 'options',
                options: [
                    {
                        label: 'gpt-4',
                        name: 'gpt-4'
                    },
                    {
                        label: 'gpt-4-32k',
                        name: 'gpt-4-32k'
                    },
                    {
                        label: 'gpt-3.5-turbo',
                        name: 'gpt-3.5-turbo'
                    },
                    {
                        label: 'gpt-3.5-turbo-16k',
                        name: 'gpt-3.5-turbo-16k'
                    }
                ],
                default: 'gpt-3.5-turbo-16k',
                optional: true
            },
            {
                label: 'Temperature',
                name: 'temperature',
                type: 'number',
                step: 0.1,
                default: 0.9,
                optional: true
            },
            {
                label: 'Max Tokens',
                name: 'maxTokens',
                type: 'number',
                step: 1,
                optional: true,
                additionalParams: true
            },
            {
                label: 'Top Probability',
                name: 'topP',
                type: 'number',
                step: 0.1,
                optional: true,
                additionalParams: true
            },
            {
                label: 'Timeout',
                name: 'timeout',
                type: 'number',
                step: 1,
                optional: true,
                additionalParams: true
            }
        ]
    }

    /**
     * Builds the llamaindex OpenAI client from node inputs and the stored
     * Azure credential.
     * @returns a configured `OpenAI` chat model instance
     */
    async init(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
        const modelName = nodeData.inputs?.modelName as keyof typeof ALL_AVAILABLE_OPENAI_MODELS
        const temperature = nodeData.inputs?.temperature as string
        const maxTokens = nodeData.inputs?.maxTokens as string
        const topP = nodeData.inputs?.topP as string
        const timeout = nodeData.inputs?.timeout as string

        const credentialData = await getCredentialData(nodeData.credential ?? '', options)
        const azureOpenAIApiKey = getCredentialParam('azureOpenAIApiKey', credentialData, nodeData)
        const azureOpenAIApiInstanceName = getCredentialParam('azureOpenAIApiInstanceName', credentialData, nodeData)
        const azureOpenAIApiDeploymentName = getCredentialParam('azureOpenAIApiDeploymentName', credentialData, nodeData)
        const azureOpenAIApiVersion = getCredentialParam('azureOpenAIApiVersion', credentialData, nodeData)

        const obj: Partial<OpenAI> & { azure?: AzureOpenAIConfig } = {
            model: modelName,
            azure: {
                apiKey: azureOpenAIApiKey,
                endpoint: `https://${azureOpenAIApiInstanceName}.openai.azure.com`,
                apiVersion: azureOpenAIApiVersion,
                deploymentName: azureOpenAIApiDeploymentName
            }
        }
        // Guard all optional numeric inputs: parseFloat/parseInt on a cleared
        // input would otherwise inject NaN into the client config. Previously
        // temperature was parsed unconditionally.
        if (temperature) obj.temperature = parseFloat(temperature)
        if (maxTokens) obj.maxTokens = parseInt(maxTokens, 10)
        if (topP) obj.topP = parseFloat(topP)
        if (timeout) obj.timeout = parseInt(timeout, 10)

        const model = new OpenAI(obj)
        return model
    }
}
module.exports = { nodeClass: AzureChatOpenAI_LlamaIndex_ChatModels }

View File

@ -3,6 +3,7 @@ import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../
import { AnthropicInput, ChatAnthropic } from 'langchain/chat_models/anthropic'
import { BaseCache } from 'langchain/schema'
import { BaseLLMParams } from 'langchain/llms/base'
import { availableModels } from './utils'
class ChatAnthropic_ChatModels implements INode {
label: string
@ -42,67 +43,7 @@ class ChatAnthropic_ChatModels implements INode {
label: 'Model Name',
name: 'modelName',
type: 'options',
options: [
{
label: 'claude-2',
name: 'claude-2',
description: 'Claude 2 latest major version, automatically get updates to the model as they are released'
},
{
label: 'claude-2.1',
name: 'claude-2.1',
description: 'Claude 2 latest full version'
},
{
label: 'claude-instant-1',
name: 'claude-instant-1',
description: 'Claude Instant latest major version, automatically get updates to the model as they are released'
},
{
label: 'claude-v1',
name: 'claude-v1'
},
{
label: 'claude-v1-100k',
name: 'claude-v1-100k'
},
{
label: 'claude-v1.0',
name: 'claude-v1.0'
},
{
label: 'claude-v1.2',
name: 'claude-v1.2'
},
{
label: 'claude-v1.3',
name: 'claude-v1.3'
},
{
label: 'claude-v1.3-100k',
name: 'claude-v1.3-100k'
},
{
label: 'claude-instant-v1',
name: 'claude-instant-v1'
},
{
label: 'claude-instant-v1-100k',
name: 'claude-instant-v1-100k'
},
{
label: 'claude-instant-v1.0',
name: 'claude-instant-v1.0'
},
{
label: 'claude-instant-v1.1',
name: 'claude-instant-v1.1'
},
{
label: 'claude-instant-v1.1-100k',
name: 'claude-instant-v1.1-100k'
}
],
options: [...availableModels],
default: 'claude-2',
optional: true
},

View File

@ -0,0 +1,94 @@
import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
import { Anthropic } from 'llamaindex'
import { availableModels } from './utils'
/**
 * Flowise node wrapping the llamaindex Anthropic chat LLM. Exposes
 * model/temperature/maxTokens/topP inputs and builds the client from the
 * stored `anthropicApi` credential.
 */
class ChatAnthropic_LlamaIndex_ChatModels implements INode {
    label: string
    name: string
    version: number
    type: string
    icon: string
    category: string
    description: string
    tags: string[]
    baseClasses: string[]
    credential: INodeParams
    inputs: INodeParams[]

    constructor() {
        this.label = 'ChatAnthropic'
        this.name = 'chatAnthropic_LlamaIndex'
        this.version = 1.0
        this.type = 'ChatAnthropic'
        this.icon = 'chatAnthropic.png'
        this.category = 'Chat Models'
        this.description = 'Wrapper around ChatAnthropic LLM with LlamaIndex implementation'
        this.baseClasses = [this.type, 'BaseChatModel_LlamaIndex', ...getBaseClasses(Anthropic)]
        this.tags = ['LlamaIndex']
        this.credential = {
            label: 'Connect Credential',
            name: 'credential',
            type: 'credential',
            credentialNames: ['anthropicApi']
        }
        this.inputs = [
            {
                label: 'Model Name',
                name: 'modelName',
                type: 'options',
                options: [...availableModels],
                default: 'claude-2',
                optional: true
            },
            {
                label: 'Temperature',
                name: 'temperature',
                type: 'number',
                step: 0.1,
                default: 0.9,
                optional: true
            },
            {
                label: 'Max Tokens',
                name: 'maxTokensToSample',
                type: 'number',
                step: 1,
                optional: true,
                additionalParams: true
            },
            {
                label: 'Top P',
                name: 'topP',
                type: 'number',
                step: 0.1,
                optional: true,
                additionalParams: true
            }
        ]
    }

    /**
     * Builds the llamaindex Anthropic client from node inputs and the stored
     * credential.
     * @returns a configured `Anthropic` chat model instance
     */
    async init(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
        const temperature = nodeData.inputs?.temperature as string
        const modelName = nodeData.inputs?.modelName as string
        const maxTokensToSample = nodeData.inputs?.maxTokensToSample as string
        const topP = nodeData.inputs?.topP as string

        const credentialData = await getCredentialData(nodeData.credential ?? '', options)
        const anthropicApiKey = getCredentialParam('anthropicApiKey', credentialData, nodeData)

        const obj: Partial<Anthropic> = {
            model: modelName,
            apiKey: anthropicApiKey
        }
        // Guard all optional numeric inputs: parseFloat/parseInt on a cleared
        // input would otherwise inject NaN into the client config. Previously
        // temperature was parsed unconditionally.
        if (temperature) obj.temperature = parseFloat(temperature)
        if (maxTokensToSample) obj.maxTokens = parseInt(maxTokensToSample, 10)
        if (topP) obj.topP = parseFloat(topP)

        const model = new Anthropic(obj)
        return model
    }
}
module.exports = { nodeClass: ChatAnthropic_LlamaIndex_ChatModels }

View File

@ -0,0 +1,61 @@
/**
 * Anthropic Claude model options shared by the ChatAnthropic nodes
 * (LangChain and LlamaIndex variants). `label` is shown in the UI,
 * `name` is the value sent to the API.
 */
export const availableModels = [
    {
        label: 'claude-2',
        name: 'claude-2',
        description: 'Claude 2 latest major version, automatically get updates to the model as they are released'
    },
    {
        label: 'claude-2.1',
        name: 'claude-2.1',
        description: 'Claude 2 latest full version'
    },
    {
        label: 'claude-instant-1',
        name: 'claude-instant-1',
        description: 'Claude Instant latest major version, automatically get updates to the model as they are released'
    },
    {
        label: 'claude-v1',
        name: 'claude-v1'
    },
    {
        label: 'claude-v1-100k',
        name: 'claude-v1-100k'
    },
    {
        label: 'claude-v1.0',
        name: 'claude-v1.0'
    },
    {
        label: 'claude-v1.2',
        name: 'claude-v1.2'
    },
    {
        label: 'claude-v1.3',
        name: 'claude-v1.3'
    },
    {
        label: 'claude-v1.3-100k',
        name: 'claude-v1.3-100k'
    },
    {
        label: 'claude-instant-v1',
        name: 'claude-instant-v1'
    },
    {
        label: 'claude-instant-v1-100k',
        name: 'claude-instant-v1-100k'
    },
    {
        label: 'claude-instant-v1.0',
        name: 'claude-instant-v1.0'
    },
    {
        label: 'claude-instant-v1.1',
        name: 'claude-instant-v1.1'
    },
    {
        label: 'claude-instant-v1.1-100k',
        name: 'claude-instant-v1.1-100k'
    }
]

View File

@ -0,0 +1,148 @@
import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
import { OpenAI, ALL_AVAILABLE_OPENAI_MODELS } from 'llamaindex'
/**
 * Flowise node wrapping the llamaindex OpenAI chat LLM. Exposes
 * model/temperature/maxTokens/topP/timeout inputs and builds the client from
 * the stored `openAIApi` credential.
 */
class ChatOpenAI_LlamaIndex_LLMs implements INode {
    label: string
    name: string
    version: number
    type: string
    icon: string
    category: string
    description: string
    baseClasses: string[]
    tags: string[]
    credential: INodeParams
    inputs: INodeParams[]

    constructor() {
        this.label = 'ChatOpenAI'
        this.name = 'chatOpenAI_LlamaIndex'
        this.version = 1.0
        this.type = 'ChatOpenAI'
        this.icon = 'openai.png'
        this.category = 'Chat Models'
        this.description = 'Wrapper around OpenAI Chat LLM with LlamaIndex implementation'
        this.baseClasses = [this.type, 'BaseChatModel_LlamaIndex', ...getBaseClasses(OpenAI)]
        this.tags = ['LlamaIndex']
        this.credential = {
            label: 'Connect Credential',
            name: 'credential',
            type: 'credential',
            credentialNames: ['openAIApi']
        }
        this.inputs = [
            {
                label: 'Model Name',
                name: 'modelName',
                type: 'options',
                options: [
                    {
                        label: 'gpt-4',
                        name: 'gpt-4'
                    },
                    {
                        label: 'gpt-4-1106-preview',
                        name: 'gpt-4-1106-preview'
                    },
                    {
                        label: 'gpt-4-vision-preview',
                        name: 'gpt-4-vision-preview'
                    },
                    {
                        label: 'gpt-4-0613',
                        name: 'gpt-4-0613'
                    },
                    {
                        label: 'gpt-4-32k',
                        name: 'gpt-4-32k'
                    },
                    {
                        label: 'gpt-4-32k-0613',
                        name: 'gpt-4-32k-0613'
                    },
                    {
                        label: 'gpt-3.5-turbo',
                        name: 'gpt-3.5-turbo'
                    },
                    {
                        label: 'gpt-3.5-turbo-1106',
                        name: 'gpt-3.5-turbo-1106'
                    },
                    {
                        label: 'gpt-3.5-turbo-0613',
                        name: 'gpt-3.5-turbo-0613'
                    },
                    {
                        label: 'gpt-3.5-turbo-16k',
                        name: 'gpt-3.5-turbo-16k'
                    },
                    {
                        label: 'gpt-3.5-turbo-16k-0613',
                        name: 'gpt-3.5-turbo-16k-0613'
                    }
                ],
                default: 'gpt-3.5-turbo',
                optional: true
            },
            {
                label: 'Temperature',
                name: 'temperature',
                type: 'number',
                step: 0.1,
                default: 0.9,
                optional: true
            },
            {
                label: 'Max Tokens',
                name: 'maxTokens',
                type: 'number',
                step: 1,
                optional: true,
                additionalParams: true
            },
            {
                label: 'Top Probability',
                name: 'topP',
                type: 'number',
                step: 0.1,
                optional: true,
                additionalParams: true
            },
            {
                label: 'Timeout',
                name: 'timeout',
                type: 'number',
                step: 1,
                optional: true,
                additionalParams: true
            }
        ]
    }

    /**
     * Builds the llamaindex OpenAI client from node inputs and the stored
     * credential.
     * @returns a configured `OpenAI` chat model instance
     */
    async init(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
        const temperature = nodeData.inputs?.temperature as string
        const modelName = nodeData.inputs?.modelName as keyof typeof ALL_AVAILABLE_OPENAI_MODELS
        const maxTokens = nodeData.inputs?.maxTokens as string
        const topP = nodeData.inputs?.topP as string
        const timeout = nodeData.inputs?.timeout as string

        const credentialData = await getCredentialData(nodeData.credential ?? '', options)
        const openAIApiKey = getCredentialParam('openAIApiKey', credentialData, nodeData)

        const obj: Partial<OpenAI> = {
            model: modelName,
            apiKey: openAIApiKey
        }
        // Guard all optional numeric inputs: parseFloat/parseInt on a cleared
        // input would otherwise inject NaN into the client config. Previously
        // temperature was parsed unconditionally.
        if (temperature) obj.temperature = parseFloat(temperature)
        if (maxTokens) obj.maxTokens = parseInt(maxTokens, 10)
        if (topP) obj.topP = parseFloat(topP)
        if (timeout) obj.timeout = parseInt(timeout, 10)

        const model = new OpenAI(obj)
        return model
    }
}
module.exports = { nodeClass: ChatOpenAI_LlamaIndex_LLMs }

View File

@ -0,0 +1,77 @@
import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
import { OpenAIEmbedding } from 'llamaindex'
/** Azure-specific connection settings forwarded to the llamaindex embedding client. */
interface AzureOpenAIConfig {
    apiKey?: string
    endpoint?: string
    apiVersion?: string
    deploymentName?: string
}
/**
 * Flowise node exposing llamaindex's OpenAIEmbedding against an Azure OpenAI
 * deployment, configured from the stored `azureOpenAIApi` credential.
 */
class AzureOpenAIEmbedding_LlamaIndex_Embeddings implements INode {
    label: string
    name: string
    version: number
    type: string
    icon: string
    category: string
    description: string
    baseClasses: string[]
    credential: INodeParams
    tags: string[]
    inputs: INodeParams[]

    constructor() {
        this.label = 'Azure OpenAI Embeddings'
        this.name = 'azureOpenAIEmbeddingsLlamaIndex'
        this.version = 1.0
        this.type = 'AzureOpenAIEmbeddings'
        this.icon = 'Azure.svg'
        this.category = 'Embeddings'
        this.description = 'Azure OpenAI API embeddings with LlamaIndex implementation'
        this.baseClasses = [this.type, 'BaseEmbedding_LlamaIndex', ...getBaseClasses(OpenAIEmbedding)]
        this.tags = ['LlamaIndex']
        this.credential = {
            label: 'Connect Credential',
            name: 'credential',
            type: 'credential',
            credentialNames: ['azureOpenAIApi']
        }
        this.inputs = [
            {
                label: 'Timeout',
                name: 'timeout',
                type: 'number',
                optional: true,
                additionalParams: true
            }
        ]
    }

    /**
     * Builds the llamaindex OpenAIEmbedding client from the stored Azure
     * credential and the optional timeout input.
     * @returns a configured `OpenAIEmbedding` instance
     */
    async init(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
        const credentialData = await getCredentialData(nodeData.credential ?? '', options)

        // Resolve the Azure connection block from the stored credential
        const instanceName = getCredentialParam('azureOpenAIApiInstanceName', credentialData, nodeData)
        const azure: AzureOpenAIConfig = {
            apiKey: getCredentialParam('azureOpenAIApiKey', credentialData, nodeData),
            endpoint: `https://${instanceName}.openai.azure.com`,
            apiVersion: getCredentialParam('azureOpenAIApiVersion', credentialData, nodeData),
            deploymentName: getCredentialParam('azureOpenAIApiDeploymentName', credentialData, nodeData)
        }

        const embeddingConfig: Partial<OpenAIEmbedding> & { azure?: AzureOpenAIConfig } = { azure }

        // Timeout is optional; only set it when the input is non-empty
        const timeout = nodeData.inputs?.timeout as string
        if (timeout) embeddingConfig.timeout = parseInt(timeout, 10)

        return new OpenAIEmbedding(embeddingConfig)
    }
}
module.exports = { nodeClass: AzureOpenAIEmbedding_LlamaIndex_Embeddings }

View File

@ -0,0 +1,68 @@
import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
import { OpenAIEmbedding } from 'llamaindex'
/**
 * Flowise node exposing llamaindex's OpenAIEmbedding, configured from the
 * stored `openAIApi` credential.
 */
class OpenAIEmbedding_LlamaIndex_Embeddings implements INode {
    label: string
    name: string
    version: number
    type: string
    icon: string
    category: string
    description: string
    baseClasses: string[]
    tags: string[]
    credential: INodeParams
    inputs: INodeParams[]

    constructor() {
        this.label = 'OpenAI Embedding'
        this.name = 'openAIEmbedding_LlamaIndex'
        this.version = 1.0
        this.type = 'OpenAIEmbedding'
        this.icon = 'openai.png'
        this.category = 'Embeddings'
        this.description = 'OpenAI Embedding with LlamaIndex implementation'
        this.baseClasses = [this.type, 'BaseEmbedding_LlamaIndex', ...getBaseClasses(OpenAIEmbedding)]
        this.tags = ['LlamaIndex']
        this.credential = {
            label: 'Connect Credential',
            name: 'credential',
            type: 'credential',
            credentialNames: ['openAIApi']
        }
        this.inputs = [
            {
                label: 'Timeout',
                name: 'timeout',
                type: 'number',
                optional: true,
                additionalParams: true
            },
            {
                label: 'BasePath',
                name: 'basepath',
                type: 'string',
                optional: true,
                additionalParams: true
            }
        ]
    }

    /**
     * Builds the llamaindex OpenAIEmbedding client from the stored credential
     * and the optional timeout input.
     * @returns a configured `OpenAIEmbedding` instance
     */
    async init(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
        const credentialData = await getCredentialData(nodeData.credential ?? '', options)

        const embeddingConfig: Partial<OpenAIEmbedding> = {
            apiKey: getCredentialParam('openAIApiKey', credentialData, nodeData)
        }

        // Timeout is optional; only set it when the input is non-empty
        const timeout = nodeData.inputs?.timeout as string
        if (timeout) embeddingConfig.timeout = parseInt(timeout, 10)

        return new OpenAIEmbedding(embeddingConfig)
    }
}
module.exports = { nodeClass: OpenAIEmbedding_LlamaIndex_Embeddings }

View File

@ -0,0 +1,178 @@
import { ICommonObject, IMessage, INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface'
import { ContextChatEngine, ChatMessage } from 'llamaindex'
/**
 * Flowise node wrapping llamaindex's ContextChatEngine: answers questions over
 * retrieved documents while replaying prior conversation turns from the
 * attached memory node.
 */
class ContextChatEngine_LlamaIndex implements INode {
    label: string
    name: string
    version: number
    description: string
    type: string
    icon: string
    category: string
    baseClasses: string[]
    tags: string[]
    inputs: INodeParams[]
    outputs: INodeOutputsValue[]

    constructor() {
        this.label = 'Context Chat Engine'
        this.name = 'contextChatEngine'
        this.version = 1.0
        this.type = 'ContextChatEngine'
        this.icon = 'context-chat-engine.png'
        this.category = 'Engine'
        this.description = 'Answer question based on retrieved documents (context) with built-in memory to remember conversation'
        this.baseClasses = [this.type]
        this.tags = ['LlamaIndex']
        this.inputs = [
            {
                label: 'Chat Model',
                name: 'model',
                type: 'BaseChatModel_LlamaIndex'
            },
            {
                label: 'Vector Store Retriever',
                name: 'vectorStoreRetriever',
                type: 'VectorIndexRetriever'
            },
            {
                label: 'Memory',
                name: 'memory',
                type: 'BaseChatMemory'
            },
            {
                label: 'System Message',
                name: 'systemMessagePrompt',
                type: 'string',
                rows: 4,
                optional: true,
                placeholder:
                    'I want you to act as a document that I am having a conversation with. Your name is "AI Assistant". You will provide me with answers from the given info. If the answer is not included, say exactly "Hmm, I am not sure." and stop after that. Refuse to answer any question not about the info. Never break character.'
            }
        ]
    }

    /**
     * Creates the ContextChatEngine from the chat model and retriever inputs.
     * NOTE(review): the memory node is attached to the engine instance here,
     * but run() re-reads memory from nodeData.inputs — confirm whether this
     * attachment is still needed.
     */
    async init(nodeData: INodeData): Promise<any> {
        const model = nodeData.inputs?.model
        const vectorStoreRetriever = nodeData.inputs?.vectorStoreRetriever
        const memory = nodeData.inputs?.memory
        const chatEngine = new ContextChatEngine({ chatModel: model, retriever: vectorStoreRetriever })
        ;(chatEngine as any).memory = memory
        return chatEngine
    }

    /**
     * Executes one chat turn: rebuilds the llamaindex chat history from memory,
     * calls the engine (streaming tokens over socket.IO when a client is
     * connected, otherwise a single blocking call), and persists the new
     * user/assistant turn pair back into memory.
     */
    async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string | object> {
        const chatEngine = nodeData.instance as ContextChatEngine
        const systemMessagePrompt = nodeData.inputs?.systemMessagePrompt as string
        const memory = nodeData.inputs?.memory

        const chatHistory = [] as ChatMessage[]
        let sessionId = ''
        if (memory) {
            // Some memory nodes derive the session from the chat id instead of an explicit sessionId input
            if (memory.isSessionIdUsingChatMessageId) sessionId = options.chatId
            else sessionId = nodeData.inputs?.sessionId
        }

        // The system prompt is injected as the first history entry (user role)
        if (systemMessagePrompt) {
            chatHistory.push({
                content: systemMessagePrompt,
                role: 'user'
            })
        }

        /* When incomingInput.history is provided, only force-replace chatHistory if it's ShortTermMemory.
         * LongTermMemory will automatically retrieve chatHistory from sessionId.
         */
        if (options && options.chatHistory && memory.isShortTermMemory) {
            await memory.resumeMessages(options.chatHistory)
        }

        // Convert stored messages into llamaindex ChatMessage entries
        const msgs: IMessage[] = await memory.getChatMessages(sessionId)
        for (const message of msgs) {
            if (message.type === 'apiMessage') {
                chatHistory.push({
                    content: message.message,
                    role: 'assistant'
                })
            } else if (message.type === 'userMessage') {
                chatHistory.push({
                    content: message.message,
                    role: 'user'
                })
            }
        }

        if (options.socketIO && options.socketIOClientId) {
            // Streaming path: pump the async iterator manually, forwarding each
            // token to the connected socket.IO client
            let response = ''
            const stream = await chatEngine.chat(input, chatHistory, true)
            let isStart = true
            const onNextPromise = () => {
                return new Promise((resolve, reject) => {
                    const onNext = async () => {
                        try {
                            const { value, done } = await stream.next()
                            if (!done) {
                                if (isStart) {
                                    // Emit 'start' exactly once, on the first token
                                    options.socketIO.to(options.socketIOClientId).emit('start')
                                    isStart = false
                                }
                                options.socketIO.to(options.socketIOClientId).emit('token', value)
                                response += value
                                onNext()
                            } else {
                                // Stream exhausted: resolve with the accumulated text
                                resolve(response)
                            }
                        } catch (error) {
                            reject(error)
                        }
                    }
                    onNext()
                })
            }
            try {
                const result = await onNextPromise()
                // Persist this turn (user input + assistant reply) into memory
                if (memory) {
                    await memory.addChatMessages(
                        [
                            {
                                text: input,
                                type: 'userMessage'
                            },
                            {
                                text: result,
                                type: 'apiMessage'
                            }
                        ],
                        sessionId
                    )
                }
                return result as string
            } catch (error) {
                throw new Error(error)
            }
        } else {
            // Non-streaming path: single blocking chat call
            const response = await chatEngine.chat(input, chatHistory)
            // Persist this turn (user input + assistant reply) into memory
            if (memory) {
                await memory.addChatMessages(
                    [
                        {
                            text: input,
                            type: 'userMessage'
                        },
                        {
                            text: response?.response,
                            type: 'apiMessage'
                        }
                    ],
                    sessionId
                )
            }
            return response?.response
        }
    }
}
module.exports = { nodeClass: ContextChatEngine_LlamaIndex }

View File

@ -0,0 +1,171 @@
import { ICommonObject, IMessage, INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface'
import { ChatMessage, SimpleChatEngine } from 'llamaindex'
// Plain back-and-forth chat engine node (no retrieval): answers using only the
// chat model plus conversation history pulled from the attached memory node.
class SimpleChatEngine_LlamaIndex implements INode {
label: string
name: string
version: number
description: string
type: string
icon: string
category: string
baseClasses: string[]
tags: string[]
inputs: INodeParams[]
outputs: INodeOutputsValue[]
constructor() {
this.label = 'Simple Chat Engine'
this.name = 'simpleChatEngine'
this.version = 1.0
this.type = 'SimpleChatEngine'
this.icon = 'chat-engine.png'
this.category = 'Engine'
this.description = 'Simple engine to handle back and forth conversations'
this.baseClasses = [this.type]
this.tags = ['LlamaIndex']
this.inputs = [
{
label: 'Chat Model',
name: 'model',
type: 'BaseChatModel_LlamaIndex'
},
{
label: 'Memory',
name: 'memory',
type: 'BaseChatMemory'
},
{
label: 'System Message',
name: 'systemMessagePrompt',
type: 'string',
rows: 4,
optional: true,
placeholder: 'You are a helpful assistant'
}
]
}
// Creates the SimpleChatEngine and stashes the memory node on it for run().
async init(nodeData: INodeData): Promise<any> {
const model = nodeData.inputs?.model
const memory = nodeData.inputs?.memory
const chatEngine = new SimpleChatEngine({ llm: model })
// SimpleChatEngine has no memory field of its own; attach it for later use
;(chatEngine as any).memory = memory
return chatEngine
}
// Runs one conversational turn. Streams tokens over socket.io when a socket
// client is attached; otherwise resolves with the complete reply.
async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string> {
const chatEngine = nodeData.instance as SimpleChatEngine
const systemMessagePrompt = nodeData.inputs?.systemMessagePrompt as string
const memory = nodeData.inputs?.memory
const chatHistory = [] as ChatMessage[]
let sessionId = ''
if (memory) {
// Long-term memory falls back to the chatId when no sessionId was configured
if (memory.isSessionIdUsingChatMessageId) sessionId = options.chatId
else sessionId = nodeData.inputs?.sessionId
}
if (systemMessagePrompt) {
// NOTE(review): the system prompt is pushed with role 'user', not 'system' — confirm intended
chatHistory.push({
content: systemMessagePrompt,
role: 'user'
})
}
/* When incomingInput.history is provided, only force replace chatHistory if its ShortTermMemory
* LongTermMemory will automatically retrieved chatHistory from sessionId
*/
if (options && options.chatHistory && memory.isShortTermMemory) {
await memory.resumeMessages(options.chatHistory)
}
// Replay stored turns into the llamaindex ChatMessage format
const msgs: IMessage[] = await memory.getChatMessages(sessionId)
for (const message of msgs) {
if (message.type === 'apiMessage') {
chatHistory.push({
content: message.message,
role: 'assistant'
})
} else if (message.type === 'userMessage') {
chatHistory.push({
content: message.message,
role: 'user'
})
}
}
if (options.socketIO && options.socketIOClientId) {
// Streaming path: emit 'start' once before the first chunk, then 'token' per chunk
let response = ''
const stream = await chatEngine.chat(input, chatHistory, true)
let isStart = true
// Drains the async token stream, accumulating the full response text
const onNextPromise = () => {
return new Promise((resolve, reject) => {
const onNext = async () => {
try {
const { value, done } = await stream.next()
if (!done) {
if (isStart) {
options.socketIO.to(options.socketIOClientId).emit('start')
isStart = false
}
options.socketIO.to(options.socketIOClientId).emit('token', value)
response += value
onNext()
} else {
resolve(response)
}
} catch (error) {
reject(error)
}
}
onNext()
})
}
try {
const result = await onNextPromise()
// Persist this exchange so the next turn sees it in history
if (memory) {
await memory.addChatMessages(
[
{
text: input,
type: 'userMessage'
},
{
text: result,
type: 'apiMessage'
}
],
sessionId
)
}
return result as string
} catch (error) {
throw new Error(error)
}
} else {
// Non-streaming path: wait for the complete response
const response = await chatEngine.chat(input, chatHistory)
if (memory) {
await memory.addChatMessages(
[
{
text: input,
type: 'userMessage'
},
{
text: response?.response,
type: 'apiMessage'
}
],
sessionId
)
}
return response?.response
}
}
}
module.exports = { nodeClass: SimpleChatEngine_LlamaIndex }

Binary file not shown.

After

Width:  |  Height:  |  Size: 9.8 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 9.5 KiB

View File

@ -0,0 +1,126 @@
import { INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface'
import {
RetrieverQueryEngine,
BaseNode,
Metadata,
ResponseSynthesizer,
CompactAndRefine,
TreeSummarize,
Refine,
SimpleResponseBuilder
} from 'llamaindex'
class QueryEngine_LlamaIndex implements INode {
label: string
name: string
version: number
description: string
type: string
icon: string
category: string
baseClasses: string[]
tags: string[]
inputs: INodeParams[]
outputs: INodeOutputsValue[]
constructor() {
this.label = 'Query Engine'
this.name = 'queryEngine'
this.version = 1.0
this.type = 'QueryEngine'
this.icon = 'query-engine.png'
this.category = 'Engine'
this.description = 'Simple query engine built to answer question over your data, without memory'
this.baseClasses = [this.type]
this.tags = ['LlamaIndex']
this.inputs = [
{
label: 'Vector Store Retriever',
name: 'vectorStoreRetriever',
type: 'VectorIndexRetriever'
},
{
label: 'Response Synthesizer',
name: 'responseSynthesizer',
type: 'ResponseSynthesizer',
description:
'ResponseSynthesizer is responsible for sending the query, nodes, and prompt templates to the LLM to generate a response. See <a target="_blank" href="https://ts.llamaindex.ai/modules/low_level/response_synthesizer">more</a>',
optional: true
},
{
label: 'Return Source Documents',
name: 'returnSourceDocuments',
type: 'boolean',
optional: true
}
]
}
async init(nodeData: INodeData): Promise<any> {
const vectorStoreRetriever = nodeData.inputs?.vectorStoreRetriever
const responseSynthesizerObj = nodeData.inputs?.responseSynthesizer
if (responseSynthesizerObj) {
if (responseSynthesizerObj.type === 'TreeSummarize') {
const responseSynthesizer = new ResponseSynthesizer({
responseBuilder: new TreeSummarize(vectorStoreRetriever.serviceContext, responseSynthesizerObj.textQAPromptTemplate),
serviceContext: vectorStoreRetriever.serviceContext
})
return new RetrieverQueryEngine(vectorStoreRetriever, responseSynthesizer)
} else if (responseSynthesizerObj.type === 'CompactAndRefine') {
const responseSynthesizer = new ResponseSynthesizer({
responseBuilder: new CompactAndRefine(
vectorStoreRetriever.serviceContext,
responseSynthesizerObj.textQAPromptTemplate,
responseSynthesizerObj.refinePromptTemplate
),
serviceContext: vectorStoreRetriever.serviceContext
})
return new RetrieverQueryEngine(vectorStoreRetriever, responseSynthesizer)
} else if (responseSynthesizerObj.type === 'Refine') {
const responseSynthesizer = new ResponseSynthesizer({
responseBuilder: new Refine(
vectorStoreRetriever.serviceContext,
responseSynthesizerObj.textQAPromptTemplate,
responseSynthesizerObj.refinePromptTemplate
),
serviceContext: vectorStoreRetriever.serviceContext
})
return new RetrieverQueryEngine(vectorStoreRetriever, responseSynthesizer)
} else if (responseSynthesizerObj.type === 'SimpleResponseBuilder') {
const responseSynthesizer = new ResponseSynthesizer({
responseBuilder: new SimpleResponseBuilder(vectorStoreRetriever.serviceContext),
serviceContext: vectorStoreRetriever.serviceContext
})
return new RetrieverQueryEngine(vectorStoreRetriever, responseSynthesizer)
}
}
const queryEngine = new RetrieverQueryEngine(vectorStoreRetriever)
return queryEngine
}
async run(nodeData: INodeData, input: string): Promise<string | object> {
const queryEngine = nodeData.instance as RetrieverQueryEngine
const returnSourceDocuments = nodeData.inputs?.returnSourceDocuments as boolean
const response = await queryEngine.query(input)
if (returnSourceDocuments && response.sourceNodes?.length)
return { text: response?.response, sourceDocuments: reformatSourceDocuments(response.sourceNodes) }
return response?.response
}
}
/**
 * Converts llamaindex source nodes into the { pageContent, metadata }
 * document shape used across Flowise.
 */
const reformatSourceDocuments = (sourceNodes: BaseNode<Metadata>[]) =>
    sourceNodes.map((node) => ({
        pageContent: (node as any).text,
        metadata: node.metadata
    }))
module.exports = { nodeClass: QueryEngine_LlamaIndex }

Binary file not shown.

After

Width:  |  Height:  |  Size: 12 KiB

View File

@ -1,6 +1,6 @@
import { INode, INodeData, INodeParams } from '../../../src/Interface'
import { getBaseClasses } from '../../../src/utils'
import { BufferMemory } from 'langchain/memory'
import { IMessage, INode, INodeData, INodeParams, MessageType } from '../../../src/Interface'
import { convertBaseMessagetoIMessage, getBaseClasses } from '../../../src/utils'
import { BufferMemory, BufferMemoryInput } from 'langchain/memory'
class BufferMemory_Memory implements INode {
label: string
@ -41,7 +41,7 @@ class BufferMemory_Memory implements INode {
async init(nodeData: INodeData): Promise<any> {
const memoryKey = nodeData.inputs?.memoryKey as string
const inputKey = nodeData.inputs?.inputKey as string
return new BufferMemory({
return new BufferMemoryExtended({
returnMessages: true,
memoryKey,
inputKey
@ -49,4 +49,43 @@ class BufferMemory_Memory implements INode {
}
}
/**
 * BufferMemory extended with the Flowise short-term-memory contract:
 * getChatMessages / addChatMessages / clearChatMessages / resumeMessages.
 */
class BufferMemoryExtended extends BufferMemory {
    // In-process memory, so incoming request history may force-replace it
    isShortTermMemory = true

    constructor(fields: BufferMemoryInput) {
        super(fields)
    }

    /** Returns the buffered conversation as Flowise IMessage objects. */
    async getChatMessages(): Promise<IMessage[]> {
        const variables = await this.loadMemoryVariables({})
        return convertBaseMessagetoIMessage(variables[this.memoryKey ?? 'chat_history'])
    }

    /** Persists one user/api message pair through saveContext. */
    async addChatMessages(msgArray: { text: string; type: MessageType }[]): Promise<void> {
        const userMsg = msgArray.find((m) => m.type === 'userMessage')
        const apiMsg = msgArray.find((m) => m.type === 'apiMessage')
        await this.saveContext({ [this.inputKey ?? 'input']: userMsg?.text }, { output: apiMsg?.text })
    }

    /** Delegates to clear(). */
    async clearChatMessages(): Promise<void> {
        await this.clear()
    }

    /** Replaces the buffered history with an externally supplied one. */
    async resumeMessages(messages: IMessage[]): Promise<void> {
        // Drop whatever is buffered first so nothing is duplicated
        if (messages.length) await this.clear()
        for (const { type, message } of messages) {
            if (type === 'userMessage') await this.chatHistory.addUserMessage(message)
            else if (type === 'apiMessage') await this.chatHistory.addAIChatMessage(message)
        }
    }
}
module.exports = { nodeClass: BufferMemory_Memory }

View File

@ -1,5 +1,5 @@
import { INode, INodeData, INodeParams } from '../../../src/Interface'
import { getBaseClasses } from '../../../src/utils'
import { IMessage, INode, INodeData, INodeParams, MessageType } from '../../../src/Interface'
import { convertBaseMessagetoIMessage, getBaseClasses } from '../../../src/utils'
import { BufferWindowMemory, BufferWindowMemoryInput } from 'langchain/memory'
class BufferWindowMemory_Memory implements INode {
@ -57,7 +57,46 @@ class BufferWindowMemory_Memory implements INode {
k: parseInt(k, 10)
}
return new BufferWindowMemory(obj)
return new BufferWindowMemoryExtended(obj)
}
}
/**
 * BufferWindowMemory extended with the Flowise short-term-memory contract:
 * getChatMessages / addChatMessages / clearChatMessages / resumeMessages.
 */
class BufferWindowMemoryExtended extends BufferWindowMemory {
    // In-process memory, so incoming request history may force-replace it
    isShortTermMemory = true

    constructor(fields: BufferWindowMemoryInput) {
        super(fields)
    }

    /** Returns the buffered conversation window as Flowise IMessage objects. */
    async getChatMessages(): Promise<IMessage[]> {
        const variables = await this.loadMemoryVariables({})
        return convertBaseMessagetoIMessage(variables[this.memoryKey ?? 'chat_history'])
    }

    /** Persists one user/api message pair through saveContext. */
    async addChatMessages(msgArray: { text: string; type: MessageType }[]): Promise<void> {
        const userMsg = msgArray.find((m) => m.type === 'userMessage')
        const apiMsg = msgArray.find((m) => m.type === 'apiMessage')
        await this.saveContext({ [this.inputKey ?? 'input']: userMsg?.text }, { output: apiMsg?.text })
    }

    /** Delegates to clear(). */
    async clearChatMessages(): Promise<void> {
        await this.clear()
    }

    /** Replaces the buffered history with an externally supplied one. */
    async resumeMessages(messages: IMessage[]): Promise<void> {
        // Drop whatever is buffered first so nothing is duplicated
        if (messages.length) await this.clear()
        for (const { type, message } of messages) {
            if (type === 'userMessage') await this.chatHistory.addUserMessage(message)
            else if (type === 'apiMessage') await this.chatHistory.addAIChatMessage(message)
        }
    }
}

View File

@ -1,5 +1,5 @@
import { INode, INodeData, INodeParams } from '../../../src/Interface'
import { getBaseClasses } from '../../../src/utils'
import { IMessage, INode, INodeData, INodeParams, MessageType } from '../../../src/Interface'
import { convertBaseMessagetoIMessage, getBaseClasses } from '../../../src/utils'
import { ConversationSummaryMemory, ConversationSummaryMemoryInput } from 'langchain/memory'
import { BaseLanguageModel } from 'langchain/base_language'
@ -56,7 +56,50 @@ class ConversationSummaryMemory_Memory implements INode {
inputKey
}
return new ConversationSummaryMemory(obj)
return new ConversationSummaryMemoryExtended(obj)
}
}
// ConversationSummaryMemory extended with the Flowise short-term-memory contract
// (getChatMessages / addChatMessages / clearChatMessages / resumeMessages).
class ConversationSummaryMemoryExtended extends ConversationSummaryMemory {
// In-process memory, so incoming request history may force-replace it
isShortTermMemory = true
constructor(fields: ConversationSummaryMemoryInput) {
super(fields)
}
// Returns the loaded memory variables as Flowise IMessage objects.
async getChatMessages(): Promise<IMessage[]> {
const memoryResult = await this.loadMemoryVariables({})
const baseMessages = memoryResult[this.memoryKey ?? 'chat_history']
return convertBaseMessagetoIMessage(baseMessages)
}
// Persists one user/api message pair through saveContext.
async addChatMessages(msgArray: { text: string; type: MessageType }[]): Promise<void> {
const input = msgArray.find((msg) => msg.type === 'userMessage')
const output = msgArray.find((msg) => msg.type === 'apiMessage')
const inputValues = { [this.inputKey ?? 'input']: input?.text }
const outputValues = { output: output?.text }
await this.saveContext(inputValues, outputValues)
}
// Delegates to clear().
async clearChatMessages(): Promise<void> {
await this.clear()
}
// Replaces the stored history with an externally supplied one, then rebuilds
// the running summary buffer.
async resumeMessages(messages: IMessage[]): Promise<void> {
// Clear existing chatHistory to avoid duplication
if (messages.length) await this.clear()
// Insert into chatHistory
for (const msg of messages) {
if (msg.type === 'userMessage') await this.chatHistory.addUserMessage(msg.message)
else if (msg.type === 'apiMessage') await this.chatHistory.addAIChatMessage(msg.message)
}
// Replace buffer — NOTE(review): only the last two resumed messages are folded
// into the new summary; earlier resumed turns are not summarized — confirm intended
const chatMessages = await this.chatHistory.getMessages()
this.buffer = await this.predictNewSummary(chatMessages.slice(-2), this.buffer)
}
}

View File

@ -1,15 +1,19 @@
import {
ICommonObject,
INode,
INodeData,
INodeParams,
getBaseClasses,
getCredentialData,
getCredentialParam,
serializeChatHistory
} from '../../../src'
DynamoDBClient,
DynamoDBClientConfig,
GetItemCommand,
GetItemCommandInput,
UpdateItemCommand,
UpdateItemCommandInput,
DeleteItemCommand,
DeleteItemCommandInput,
AttributeValue
} from '@aws-sdk/client-dynamodb'
import { DynamoDBChatMessageHistory } from 'langchain/stores/message/dynamodb'
import { BufferMemory, BufferMemoryInput } from 'langchain/memory'
import { mapStoredMessageToChatMessage, AIMessage, HumanMessage, StoredMessage } from 'langchain/schema'
import { convertBaseMessagetoIMessage, getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
import { ICommonObject, IMessage, INode, INodeData, INodeParams, MessageType } from '../../../src/Interface'
class DynamoDb_Memory implements INode {
label: string
@ -60,7 +64,8 @@ class DynamoDb_Memory implements INode {
label: 'Session ID',
name: 'sessionId',
type: 'string',
description: 'If not specified, the first CHAT_MESSAGE_ID will be used as sessionId',
description:
'If not specified, a random id will be used. Learn <a target="_blank" href="https://docs.flowiseai.com/memory/long-term-memory#ui-and-embedded-chat">more</a>',
default: '',
additionalParams: true,
optional: true
@ -78,73 +83,205 @@ class DynamoDb_Memory implements INode {
async init(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
return initalizeDynamoDB(nodeData, options)
}
//@ts-ignore
memoryMethods = {
async clearSessionMemory(nodeData: INodeData, options: ICommonObject): Promise<void> {
const dynamodbMemory = await initalizeDynamoDB(nodeData, options)
const sessionId = nodeData.inputs?.sessionId as string
const chatId = options?.chatId as string
options.logger.info(`Clearing DynamoDb memory session ${sessionId ? sessionId : chatId}`)
await dynamodbMemory.clear()
options.logger.info(`Successfully cleared DynamoDb memory session ${sessionId ? sessionId : chatId}`)
},
async getChatMessages(nodeData: INodeData, options: ICommonObject): Promise<string> {
const memoryKey = nodeData.inputs?.memoryKey as string
const dynamodbMemory = await initalizeDynamoDB(nodeData, options)
const key = memoryKey ?? 'chat_history'
const memoryResult = await dynamodbMemory.loadMemoryVariables({})
return serializeChatHistory(memoryResult[key])
}
}
}
const initalizeDynamoDB = async (nodeData: INodeData, options: ICommonObject): Promise<BufferMemory> => {
const tableName = nodeData.inputs?.tableName as string
const partitionKey = nodeData.inputs?.partitionKey as string
const sessionId = nodeData.inputs?.sessionId as string
const region = nodeData.inputs?.region as string
const memoryKey = nodeData.inputs?.memoryKey as string
const chatId = options.chatId
let isSessionIdUsingChatMessageId = false
if (!sessionId && chatId) isSessionIdUsingChatMessageId = true
let sessionId = ''
if (!nodeData.inputs?.sessionId && chatId) {
isSessionIdUsingChatMessageId = true
sessionId = chatId
} else {
sessionId = nodeData.inputs?.sessionId
}
const credentialData = await getCredentialData(nodeData.credential ?? '', options)
const accessKeyId = getCredentialParam('accessKey', credentialData, nodeData)
const secretAccessKey = getCredentialParam('secretAccessKey', credentialData, nodeData)
const config: DynamoDBClientConfig = {
region,
credentials: {
accessKeyId,
secretAccessKey
}
}
const client = new DynamoDBClient(config ?? {})
const dynamoDb = new DynamoDBChatMessageHistory({
tableName,
partitionKey,
sessionId: sessionId ? sessionId : chatId,
config: {
region,
credentials: {
accessKeyId,
secretAccessKey
}
}
config
})
const memory = new BufferMemoryExtended({
memoryKey: memoryKey ?? 'chat_history',
chatHistory: dynamoDb,
isSessionIdUsingChatMessageId
isSessionIdUsingChatMessageId,
sessionId,
dynamodbClient: client
})
return memory
}
interface BufferMemoryExtendedInput {
isSessionIdUsingChatMessageId: boolean
dynamodbClient: DynamoDBClient
sessionId: string
}
// Attribute-value shape of one chat message inside the DynamoDB item's
// message list (see addNewMessage, which serializes StoredMessage into this).
interface DynamoDBSerializedChatMessage {
M: {
// StoredMessage.type of the original langchain message
type: {
S: string
}
// Message text (StoredMessage.data.content)
text: {
S: string
}
// Only present when the source message carried a role
role?: {
S: string
}
}
}
class BufferMemoryExtended extends BufferMemory {
isSessionIdUsingChatMessageId? = false
isSessionIdUsingChatMessageId = false
sessionId = ''
dynamodbClient: DynamoDBClient
constructor(fields: BufferMemoryInput & Partial<BufferMemoryExtendedInput>) {
constructor(fields: BufferMemoryInput & BufferMemoryExtendedInput) {
super(fields)
this.isSessionIdUsingChatMessageId = fields.isSessionIdUsingChatMessageId
this.sessionId = fields.sessionId
this.dynamodbClient = fields.dynamodbClient
}
/**
 * Builds a DynamoDB item key pointing at overrideSessionId instead of the
 * session this memory was constructed with. Falls back to the existing key
 * when it does not contain the partition key.
 */
overrideDynamoKey(overrideSessionId = '') {
    const currentKey = (this as any).dynamoKey
    const partitionKey = (this as any).partitionKey
    const replacementKey: Record<string, AttributeValue> = {}
    if (Object.keys(currentKey).includes(partitionKey)) {
        replacementKey[partitionKey] = { S: overrideSessionId }
    }
    return Object.keys(replacementKey).length ? replacementKey : currentKey
}
// Appends serialized messages to the session item's message list attribute via
// UpdateItem; if_not_exists(:empty_list) creates the list on the first write.
async addNewMessage(
messages: StoredMessage[],
client: DynamoDBClient,
tableName = '',
dynamoKey: Record<string, AttributeValue> = {},
messageAttributeName = 'messages'
) {
const params: UpdateItemCommandInput = {
TableName: tableName,
Key: dynamoKey,
ExpressionAttributeNames: {
'#m': messageAttributeName
},
ExpressionAttributeValues: {
':empty_list': {
L: []
},
':m': {
L: messages.map((message) => {
// Serialize each StoredMessage into the DynamoDB attribute-value shape
const dynamoSerializedMessage: DynamoDBSerializedChatMessage = {
M: {
type: {
S: message.type
},
text: {
S: message.data.content
}
}
}
// role is only stored when the source message carried one
if (message.data.role) {
dynamoSerializedMessage.M.role = { S: message.data.role }
}
return dynamoSerializedMessage
})
}
},
UpdateExpression: 'SET #m = list_append(if_not_exists(#m, :empty_list), :m)'
}
await client.send(new UpdateItemCommand(params))
}
// Reads the session's messages from DynamoDB and converts them to IMessage[].
// Returns [] when no DynamoDB client is configured.
async getChatMessages(overrideSessionId = ''): Promise<IMessage[]> {
if (!this.dynamodbClient) return []
// Re-key the lookup when an override session id is supplied
const dynamoKey = overrideSessionId ? this.overrideDynamoKey(overrideSessionId) : (this as any).dynamoKey
const tableName = (this as any).tableName
const messageAttributeName = (this as any).messageAttributeName
const params: GetItemCommandInput = {
TableName: tableName,
Key: dynamoKey
}
const response = await this.dynamodbClient.send(new GetItemCommand(params))
const items = response.Item ? response.Item[messageAttributeName]?.L ?? [] : []
// Rebuild StoredMessage objects, dropping malformed entries that lack
// a type or content
const messages = items
.map((item) => ({
type: item.M?.type.S,
data: {
role: item.M?.role?.S,
content: item.M?.text.S
}
}))
.filter((x): x is StoredMessage => x.type !== undefined && x.data.content !== undefined)
const baseMessages = messages.map(mapStoredMessageToChatMessage)
return convertBaseMessagetoIMessage(baseMessages)
}
// Persists the user/api message pair for one exchange to DynamoDB.
// The user message is written before the assistant reply to preserve ordering.
async addChatMessages(msgArray: { text: string; type: MessageType }[], overrideSessionId = ''): Promise<void> {
if (!this.dynamodbClient) return
// Re-key the write when an override session id is supplied
const dynamoKey = overrideSessionId ? this.overrideDynamoKey(overrideSessionId) : (this as any).dynamoKey
const tableName = (this as any).tableName
const messageAttributeName = (this as any).messageAttributeName
const input = msgArray.find((msg) => msg.type === 'userMessage')
const output = msgArray.find((msg) => msg.type === 'apiMessage')
if (input) {
const newInputMessage = new HumanMessage(input.text)
const messageToAdd = [newInputMessage].map((msg) => msg.toDict())
await this.addNewMessage(messageToAdd, this.dynamodbClient, tableName, dynamoKey, messageAttributeName)
}
if (output) {
const newOutputMessage = new AIMessage(output.text)
const messageToAdd = [newOutputMessage].map((msg) => msg.toDict())
await this.addNewMessage(messageToAdd, this.dynamodbClient, tableName, dynamoKey, messageAttributeName)
}
}
/** Deletes the session's item from DynamoDB, then resets the local buffer. */
async clearChatMessages(overrideSessionId = ''): Promise<void> {
    if (!this.dynamodbClient) return
    const key = overrideSessionId ? this.overrideDynamoKey(overrideSessionId) : (this as any).dynamoKey
    const deleteParams: DeleteItemCommandInput = {
        TableName: (this as any).tableName,
        Key: key
    }
    await this.dynamodbClient.send(new DeleteItemCommand(deleteParams))
    await this.clear()
}
}

View File

@ -1,17 +1,9 @@
import {
getBaseClasses,
getCredentialData,
getCredentialParam,
ICommonObject,
INode,
INodeData,
INodeParams,
serializeChatHistory
} from '../../../src'
import { MongoClient, Collection, Document } from 'mongodb'
import { MongoDBChatMessageHistory } from 'langchain/stores/message/mongodb'
import { BufferMemory, BufferMemoryInput } from 'langchain/memory'
import { BaseMessage, mapStoredMessageToChatMessage } from 'langchain/schema'
import { MongoClient } from 'mongodb'
import { BaseMessage, mapStoredMessageToChatMessage, AIMessage, HumanMessage } from 'langchain/schema'
import { convertBaseMessagetoIMessage, getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
import { ICommonObject, IMessage, INode, INodeData, INodeParams, MessageType } from '../../../src/Interface'
class MongoDB_Memory implements INode {
label: string
@ -57,7 +49,8 @@ class MongoDB_Memory implements INode {
label: 'Session Id',
name: 'sessionId',
type: 'string',
description: 'If not specified, the first CHAT_MESSAGE_ID will be used as sessionId',
description:
'If not specified, a random id will be used. Learn <a target="_blank" href="https://docs.flowiseai.com/memory/long-term-memory#ui-and-embedded-chat">more</a>',
default: '',
additionalParams: true,
optional: true
@ -75,44 +68,33 @@ class MongoDB_Memory implements INode {
async init(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
return initializeMongoDB(nodeData, options)
}
//@ts-ignore
memoryMethods = {
async clearSessionMemory(nodeData: INodeData, options: ICommonObject): Promise<void> {
const mongodbMemory = await initializeMongoDB(nodeData, options)
const sessionId = nodeData.inputs?.sessionId as string
const chatId = options?.chatId as string
options.logger.info(`Clearing MongoDB memory session ${sessionId ? sessionId : chatId}`)
await mongodbMemory.clear()
options.logger.info(`Successfully cleared MongoDB memory session ${sessionId ? sessionId : chatId}`)
},
async getChatMessages(nodeData: INodeData, options: ICommonObject): Promise<string> {
const memoryKey = nodeData.inputs?.memoryKey as string
const mongodbMemory = await initializeMongoDB(nodeData, options)
const key = memoryKey ?? 'chat_history'
const memoryResult = await mongodbMemory.loadMemoryVariables({})
return serializeChatHistory(memoryResult[key])
}
}
}
const initializeMongoDB = async (nodeData: INodeData, options: ICommonObject): Promise<BufferMemory> => {
const databaseName = nodeData.inputs?.databaseName as string
const collectionName = nodeData.inputs?.collectionName as string
const sessionId = nodeData.inputs?.sessionId as string
const memoryKey = nodeData.inputs?.memoryKey as string
const chatId = options?.chatId as string
let isSessionIdUsingChatMessageId = false
if (!sessionId && chatId) isSessionIdUsingChatMessageId = true
let sessionId = ''
if (!nodeData.inputs?.sessionId && chatId) {
isSessionIdUsingChatMessageId = true
sessionId = chatId
} else {
sessionId = nodeData.inputs?.sessionId
}
const credentialData = await getCredentialData(nodeData.credential ?? '', options)
let mongoDBConnectUrl = getCredentialParam('mongoDBConnectUrl', credentialData, nodeData)
const client = new MongoClient(mongoDBConnectUrl)
await client.connect()
const collection = client.db(databaseName).collection(collectionName)
/**** Methods below are needed to override the original implementations ****/
const mongoDBChatMessageHistory = new MongoDBChatMessageHistory({
collection,
sessionId: sessionId ? sessionId : chatId
@ -140,24 +122,83 @@ const initializeMongoDB = async (nodeData: INodeData, options: ICommonObject): P
mongoDBChatMessageHistory.clear = async (): Promise<void> => {
await collection.deleteOne({ sessionId: (mongoDBChatMessageHistory as any).sessionId })
}
/**** End of override functions ****/
return new BufferMemoryExtended({
memoryKey: memoryKey ?? 'chat_history',
chatHistory: mongoDBChatMessageHistory,
isSessionIdUsingChatMessageId
isSessionIdUsingChatMessageId,
sessionId,
collection
})
}
interface BufferMemoryExtendedInput {
isSessionIdUsingChatMessageId: boolean
collection: Collection<Document>
sessionId: string
}
class BufferMemoryExtended extends BufferMemory {
isSessionIdUsingChatMessageId? = false
isSessionIdUsingChatMessageId = false
sessionId = ''
collection: Collection<Document>
constructor(fields: BufferMemoryInput & Partial<BufferMemoryExtendedInput>) {
constructor(fields: BufferMemoryInput & BufferMemoryExtendedInput) {
super(fields)
this.isSessionIdUsingChatMessageId = fields.isSessionIdUsingChatMessageId
this.sessionId = fields.sessionId
this.collection = fields.collection
}
/**
 * Loads this session's stored messages from MongoDB as Flowise IMessage[].
 * @param overrideSessionId optional session id to read instead of this.sessionId
 */
async getChatMessages(overrideSessionId = ''): Promise<IMessage[]> {
    if (!this.collection) return []
    // BUG FIX: '' ?? x evaluates to '' (?? only skips null/undefined), so the
    // default empty override used to shadow the real session id on every call
    const id = overrideSessionId ? overrideSessionId : this.sessionId
    const document = await this.collection.findOne({ sessionId: id })
    const messages = document?.messages || []
    const baseMessages = messages.map(mapStoredMessageToChatMessage)
    return convertBaseMessagetoIMessage(baseMessages)
}
async addChatMessages(msgArray: { text: string; type: MessageType }[], overrideSessionId = ''): Promise<void> {
if (!this.collection) return
const id = overrideSessionId ?? this.sessionId
const input = msgArray.find((msg) => msg.type === 'userMessage')
const output = msgArray.find((msg) => msg.type === 'apiMessage')
if (input) {
const newInputMessage = new HumanMessage(input.text)
const messageToAdd = [newInputMessage].map((msg) => msg.toDict())
await this.collection.updateOne(
{ sessionId: id },
{
$push: { messages: { $each: messageToAdd } }
},
{ upsert: true }
)
}
if (output) {
const newOutputMessage = new AIMessage(output.text)
const messageToAdd = [newOutputMessage].map((msg) => msg.toDict())
await this.collection.updateOne(
{ sessionId: id },
{
$push: { messages: { $each: messageToAdd } }
},
{ upsert: true }
)
}
}
/**
 * Deletes this session's document from MongoDB, then resets the local buffer.
 * @param overrideSessionId optional session id to clear instead of this.sessionId
 */
async clearChatMessages(overrideSessionId = ''): Promise<void> {
    if (!this.collection) return
    // BUG FIX: '' ?? x evaluates to '', so the stored session id was never
    // used; a truthy check makes the default argument fall through correctly
    const id = overrideSessionId ? overrideSessionId : this.sessionId
    await this.collection.deleteOne({ sessionId: id })
    await this.clear()
}
}

View File

@ -1,9 +1,9 @@
import { INode, INodeData, INodeParams } from '../../../src/Interface'
import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
import { IMessage, INode, INodeData, INodeParams, MessageType } from '../../../src/Interface'
import { convertBaseMessagetoIMessage, getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
import { ICommonObject } from '../../../src'
import { MotorheadMemory, MotorheadMemoryInput } from 'langchain/memory'
import fetch from 'node-fetch'
import { getBufferString } from 'langchain/memory'
import { InputValues, MemoryVariables, OutputValues } from 'langchain/memory'
class MotorMemory_Memory implements INode {
label: string
@ -46,7 +46,8 @@ class MotorMemory_Memory implements INode {
label: 'Session Id',
name: 'sessionId',
type: 'string',
description: 'If not specified, the first CHAT_MESSAGE_ID will be used as sessionId',
description:
'If not specified, a random id will be used. Learn <a target="_blank" href="https://docs.flowiseai.com/memory/long-term-memory#ui-and-embedded-chat">more</a>',
default: '',
additionalParams: true,
optional: true
@ -64,35 +65,22 @@ class MotorMemory_Memory implements INode {
async init(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
return initalizeMotorhead(nodeData, options)
}
//@ts-ignore
memoryMethods = {
async clearSessionMemory(nodeData: INodeData, options: ICommonObject): Promise<void> {
const motorhead = await initalizeMotorhead(nodeData, options)
const sessionId = nodeData.inputs?.sessionId as string
const chatId = options?.chatId as string
options.logger.info(`Clearing Motorhead memory session ${sessionId ? sessionId : chatId}`)
await motorhead.clear()
options.logger.info(`Successfully cleared Motorhead memory session ${sessionId ? sessionId : chatId}`)
},
async getChatMessages(nodeData: INodeData, options: ICommonObject): Promise<string> {
const memoryKey = nodeData.inputs?.memoryKey as string
const motorhead = await initalizeMotorhead(nodeData, options)
const key = memoryKey ?? 'chat_history'
const memoryResult = await motorhead.loadMemoryVariables({})
return getBufferString(memoryResult[key])
}
}
}
const initalizeMotorhead = async (nodeData: INodeData, options: ICommonObject): Promise<MotorheadMemory> => {
const memoryKey = nodeData.inputs?.memoryKey as string
const baseURL = nodeData.inputs?.baseURL as string
const sessionId = nodeData.inputs?.sessionId as string
const chatId = options?.chatId as string
let isSessionIdUsingChatMessageId = false
if (!sessionId && chatId) isSessionIdUsingChatMessageId = true
let sessionId = ''
if (!nodeData.inputs?.sessionId && chatId) {
isSessionIdUsingChatMessageId = true
sessionId = chatId
} else {
sessionId = nodeData.inputs?.sessionId
}
const credentialData = await getCredentialData(nodeData.credential ?? '', options)
const apiKey = getCredentialParam('apiKey', credentialData, nodeData)
@ -100,8 +88,9 @@ const initalizeMotorhead = async (nodeData: INodeData, options: ICommonObject):
let obj: MotorheadMemoryInput & Partial<MotorheadMemoryExtendedInput> = {
returnMessages: true,
sessionId: sessionId ? sessionId : chatId,
memoryKey
sessionId,
memoryKey,
isSessionIdUsingChatMessageId
}
if (baseURL) {
@ -117,8 +106,6 @@ const initalizeMotorhead = async (nodeData: INodeData, options: ICommonObject):
}
}
if (isSessionIdUsingChatMessageId) obj.isSessionIdUsingChatMessageId = true
const motorheadMemory = new MotorheadMemoryExtended(obj)
// Get messages from sessionId
@ -139,7 +126,24 @@ class MotorheadMemoryExtended extends MotorheadMemory {
this.isSessionIdUsingChatMessageId = fields.isSessionIdUsingChatMessageId
}
async clear(): Promise<void> {
async loadMemoryVariables(values: InputValues, overrideSessionId = ''): Promise<MemoryVariables> {
if (overrideSessionId) {
super.sessionId = overrideSessionId
}
return super.loadMemoryVariables({ values })
}
async saveContext(inputValues: InputValues, outputValues: OutputValues, overrideSessionId = ''): Promise<void> {
if (overrideSessionId) {
super.sessionId = overrideSessionId
}
return super.saveContext(inputValues, outputValues)
}
async clear(overrideSessionId = ''): Promise<void> {
if (overrideSessionId) {
super.sessionId = overrideSessionId
}
try {
await this.caller.call(fetch, `${this.url}/sessions/${this.sessionId}/memory`, {
//@ts-ignore
@ -155,6 +159,28 @@ class MotorheadMemoryExtended extends MotorheadMemory {
await this.chatHistory.clear()
await super.clear()
}
/**
 * Fetch the stored chat messages for a session and convert them to Flowise IMessage shape.
 * @param overrideSessionId optional session id to read instead of the configured one
 */
async getChatMessages(overrideSessionId = ''): Promise<IMessage[]> {
    // BUG FIX: `overrideSessionId ?? this.sessionId` never fell back — the default
    // value '' is not nullish, so `??` always kept ''. Use a truthiness check instead.
    const id = overrideSessionId ? overrideSessionId : this.sessionId
    const memoryVariables = await this.loadMemoryVariables({}, id)
    const baseMessages = memoryVariables[this.memoryKey]
    return convertBaseMessagetoIMessage(baseMessages)
}

/**
 * Persist one user/api message pair into Motorhead memory via saveContext.
 * @param msgArray messages to store; only the first 'userMessage' and 'apiMessage' are used
 * @param overrideSessionId optional session id to write to instead of the configured one
 */
async addChatMessages(msgArray: { text: string; type: MessageType }[], overrideSessionId = ''): Promise<void> {
    // Same '' vs nullish fix as getChatMessages
    const id = overrideSessionId ? overrideSessionId : this.sessionId
    const input = msgArray.find((msg) => msg.type === 'userMessage')
    const output = msgArray.find((msg) => msg.type === 'apiMessage')
    const inputValues = { [this.inputKey ?? 'input']: input?.text }
    const outputValues = { output: output?.text }
    await this.saveContext(inputValues, outputValues, id)
}

/**
 * Delete all messages for a session.
 * @param overrideSessionId optional session id to clear instead of the configured one
 */
async clearChatMessages(overrideSessionId = ''): Promise<void> {
    // Same '' vs nullish fix as getChatMessages
    const id = overrideSessionId ? overrideSessionId : this.sessionId
    await this.clear(id)
}
}
module.exports = { nodeClass: MotorMemory_Memory }

View File

@ -1,9 +1,9 @@
import { INode, INodeData, INodeParams, ICommonObject } from '../../../src/Interface'
import { getBaseClasses, getCredentialData, getCredentialParam, serializeChatHistory } from '../../../src/utils'
import { Redis } from 'ioredis'
import { BufferMemory, BufferMemoryInput } from 'langchain/memory'
import { RedisChatMessageHistory, RedisChatMessageHistoryInput } from 'langchain/stores/message/ioredis'
import { mapStoredMessageToChatMessage, BaseMessage } from 'langchain/schema'
import { Redis } from 'ioredis'
import { mapStoredMessageToChatMessage, BaseMessage, AIMessage, HumanMessage } from 'langchain/schema'
import { INode, INodeData, INodeParams, ICommonObject, MessageType, IMessage } from '../../../src/Interface'
import { convertBaseMessagetoIMessage, getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
class RedisBackedChatMemory_Memory implements INode {
label: string
@ -38,7 +38,8 @@ class RedisBackedChatMemory_Memory implements INode {
label: 'Session Id',
name: 'sessionId',
type: 'string',
description: 'If not specified, the first CHAT_MESSAGE_ID will be used as sessionId',
description:
'If not specified, a random id will be used. Learn <a target="_blank" href="https://docs.flowiseai.com/memory/long-term-memory#ui-and-embedded-chat">more</a>',
default: '',
additionalParams: true,
optional: true
@ -64,40 +65,28 @@ class RedisBackedChatMemory_Memory implements INode {
async init(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
return await initalizeRedis(nodeData, options)
}
//@ts-ignore
memoryMethods = {
async clearSessionMemory(nodeData: INodeData, options: ICommonObject): Promise<void> {
const redis = await initalizeRedis(nodeData, options)
const sessionId = nodeData.inputs?.sessionId as string
const chatId = options?.chatId as string
options.logger.info(`Clearing Redis memory session ${sessionId ? sessionId : chatId}`)
await redis.clear()
options.logger.info(`Successfully cleared Redis memory session ${sessionId ? sessionId : chatId}`)
},
async getChatMessages(nodeData: INodeData, options: ICommonObject): Promise<string> {
const memoryKey = nodeData.inputs?.memoryKey as string
const redis = await initalizeRedis(nodeData, options)
const key = memoryKey ?? 'chat_history'
const memoryResult = await redis.loadMemoryVariables({})
return serializeChatHistory(memoryResult[key])
}
}
}
const initalizeRedis = async (nodeData: INodeData, options: ICommonObject): Promise<BufferMemory> => {
const sessionId = nodeData.inputs?.sessionId as string
const sessionTTL = nodeData.inputs?.sessionTTL as number
const memoryKey = nodeData.inputs?.memoryKey as string
const chatId = options?.chatId as string
let isSessionIdUsingChatMessageId = false
if (!sessionId && chatId) isSessionIdUsingChatMessageId = true
let sessionId = ''
if (!nodeData.inputs?.sessionId && chatId) {
isSessionIdUsingChatMessageId = true
sessionId = chatId
} else {
sessionId = nodeData.inputs?.sessionId
}
const credentialData = await getCredentialData(nodeData.credential ?? '', options)
const redisUrl = getCredentialParam('redisUrl', credentialData, nodeData)
let client: Redis
if (!redisUrl || redisUrl === '') {
const username = getCredentialParam('redisCacheUser', credentialData, nodeData)
const password = getCredentialParam('redisCachePwd', credentialData, nodeData)
@ -115,7 +104,7 @@ const initalizeRedis = async (nodeData: INodeData, options: ICommonObject): Prom
}
let obj: RedisChatMessageHistoryInput = {
sessionId: sessionId ? sessionId : chatId,
sessionId,
client
}
@ -128,6 +117,7 @@ const initalizeRedis = async (nodeData: INodeData, options: ICommonObject): Prom
const redisChatMessageHistory = new RedisChatMessageHistory(obj)
/**** Methods below are needed to override the original implementations ****/
redisChatMessageHistory.getMessages = async (): Promise<BaseMessage[]> => {
const rawStoredMessages = await client.lrange((redisChatMessageHistory as any).sessionId, 0, -1)
const orderedMessages = rawStoredMessages.reverse().map((message) => JSON.parse(message))
@ -145,25 +135,73 @@ const initalizeRedis = async (nodeData: INodeData, options: ICommonObject): Prom
redisChatMessageHistory.clear = async (): Promise<void> => {
await client.del((redisChatMessageHistory as any).sessionId)
}
/**** End of override functions ****/
const memory = new BufferMemoryExtended({
memoryKey: memoryKey ?? 'chat_history',
chatHistory: redisChatMessageHistory,
isSessionIdUsingChatMessageId
isSessionIdUsingChatMessageId,
sessionId,
redisClient: client
})
return memory
}
interface BufferMemoryExtendedInput {
isSessionIdUsingChatMessageId: boolean
redisClient: Redis
sessionId: string
}
class BufferMemoryExtended extends BufferMemory {
isSessionIdUsingChatMessageId? = false
isSessionIdUsingChatMessageId = false
sessionId = ''
redisClient: Redis
constructor(fields: BufferMemoryInput & Partial<BufferMemoryExtendedInput>) {
constructor(fields: BufferMemoryInput & BufferMemoryExtendedInput) {
super(fields)
this.isSessionIdUsingChatMessageId = fields.isSessionIdUsingChatMessageId
this.sessionId = fields.sessionId
this.redisClient = fields.redisClient
}
/**
 * Read the full message list for a session from Redis and convert to IMessage[].
 * Messages are stored newest-first via lpush, so the lrange result is reversed
 * to restore chronological order before mapping.
 * @param overrideSessionId optional session id to read instead of the configured one
 */
async getChatMessages(overrideSessionId = ''): Promise<IMessage[]> {
    if (!this.redisClient) return []

    // BUG FIX: `overrideSessionId ?? this.sessionId` always evaluated to '' when no
    // override was given (empty string is not nullish), so Redis was queried with an
    // empty key. A truthiness check restores the intended fallback.
    const id = overrideSessionId ? overrideSessionId : this.sessionId
    const rawStoredMessages = await this.redisClient.lrange(id, 0, -1)
    const orderedMessages = rawStoredMessages.reverse().map((message) => JSON.parse(message))
    const baseMessages = orderedMessages.map(mapStoredMessageToChatMessage)
    return convertBaseMessagetoIMessage(baseMessages)
}

/**
 * Store one user/api message pair into Redis (each serialized via toDict and lpush-ed,
 * matching the newest-first layout getChatMessages expects).
 * @param msgArray messages to store; only the first 'userMessage' and 'apiMessage' are used
 * @param overrideSessionId optional session id to write to instead of the configured one
 */
async addChatMessages(msgArray: { text: string; type: MessageType }[], overrideSessionId = ''): Promise<void> {
    if (!this.redisClient) return

    // Same '' vs nullish fix as getChatMessages
    const id = overrideSessionId ? overrideSessionId : this.sessionId
    const input = msgArray.find((msg) => msg.type === 'userMessage')
    const output = msgArray.find((msg) => msg.type === 'apiMessage')
    if (input) {
        const newInputMessage = new HumanMessage(input.text)
        const messageToAdd = [newInputMessage].map((msg) => msg.toDict())
        await this.redisClient.lpush(id, JSON.stringify(messageToAdd[0]))
    }
    if (output) {
        const newOutputMessage = new AIMessage(output.text)
        const messageToAdd = [newOutputMessage].map((msg) => msg.toDict())
        await this.redisClient.lpush(id, JSON.stringify(messageToAdd[0]))
    }
}

/**
 * Delete the Redis key for a session, then clear the in-memory chat history.
 * @param overrideSessionId optional session id to clear instead of the configured one
 */
async clearChatMessages(overrideSessionId = ''): Promise<void> {
    if (!this.redisClient) return

    // Same '' vs nullish fix as getChatMessages
    const id = overrideSessionId ? overrideSessionId : this.sessionId
    await this.redisClient.del(id)
    await this.clear()
}
}

View File

@ -1,8 +1,10 @@
import { INode, INodeData, INodeParams } from '../../../src/Interface'
import { getBaseClasses, getCredentialData, getCredentialParam, serializeChatHistory } from '../../../src/utils'
import { ICommonObject } from '../../../src'
import { Redis } from '@upstash/redis'
import { BufferMemory, BufferMemoryInput } from 'langchain/memory'
import { UpstashRedisChatMessageHistory } from 'langchain/stores/message/upstash_redis'
import { mapStoredMessageToChatMessage, AIMessage, HumanMessage, StoredMessage } from 'langchain/schema'
import { IMessage, INode, INodeData, INodeParams, MessageType } from '../../../src/Interface'
import { convertBaseMessagetoIMessage, getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
import { ICommonObject } from '../../../src/Interface'
class UpstashRedisBackedChatMemory_Memory implements INode {
label: string
@ -43,7 +45,8 @@ class UpstashRedisBackedChatMemory_Memory implements INode {
label: 'Session Id',
name: 'sessionId',
type: 'string',
description: 'If not specified, the first CHAT_MESSAGE_ID will be used as sessionId',
description:
'If not specified, a random id will be used. Learn <a target="_blank" href="https://docs.flowiseai.com/memory/long-term-memory#ui-and-embedded-chat">more</a>',
default: '',
additionalParams: true,
optional: true
@ -62,51 +65,43 @@ class UpstashRedisBackedChatMemory_Memory implements INode {
async init(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
return initalizeUpstashRedis(nodeData, options)
}
//@ts-ignore
memoryMethods = {
async clearSessionMemory(nodeData: INodeData, options: ICommonObject): Promise<void> {
const redis = await initalizeUpstashRedis(nodeData, options)
const sessionId = nodeData.inputs?.sessionId as string
const chatId = options?.chatId as string
options.logger.info(`Clearing Upstash Redis memory session ${sessionId ? sessionId : chatId}`)
await redis.clear()
options.logger.info(`Successfully cleared Upstash Redis memory session ${sessionId ? sessionId : chatId}`)
},
async getChatMessages(nodeData: INodeData, options: ICommonObject): Promise<string> {
const redis = await initalizeUpstashRedis(nodeData, options)
const key = 'chat_history'
const memoryResult = await redis.loadMemoryVariables({})
return serializeChatHistory(memoryResult[key])
}
}
}
const initalizeUpstashRedis = async (nodeData: INodeData, options: ICommonObject): Promise<BufferMemory> => {
const baseURL = nodeData.inputs?.baseURL as string
const sessionId = nodeData.inputs?.sessionId as string
const sessionTTL = nodeData.inputs?.sessionTTL as string
const chatId = options?.chatId as string
let isSessionIdUsingChatMessageId = false
if (!sessionId && chatId) isSessionIdUsingChatMessageId = true
let sessionId = ''
if (!nodeData.inputs?.sessionId && chatId) {
isSessionIdUsingChatMessageId = true
sessionId = chatId
} else {
sessionId = nodeData.inputs?.sessionId
}
const credentialData = await getCredentialData(nodeData.credential ?? '', options)
const upstashRestToken = getCredentialParam('upstashRestToken', credentialData, nodeData)
const client = new Redis({
url: baseURL,
token: upstashRestToken
})
const redisChatMessageHistory = new UpstashRedisChatMessageHistory({
sessionId: sessionId ? sessionId : chatId,
sessionTTL: sessionTTL ? parseInt(sessionTTL, 10) : undefined,
config: {
url: baseURL,
token: upstashRestToken
}
client
})
const memory = new BufferMemoryExtended({
memoryKey: 'chat_history',
chatHistory: redisChatMessageHistory,
isSessionIdUsingChatMessageId
isSessionIdUsingChatMessageId,
sessionId,
redisClient: client
})
return memory
@ -114,14 +109,59 @@ const initalizeUpstashRedis = async (nodeData: INodeData, options: ICommonObject
interface BufferMemoryExtendedInput {
isSessionIdUsingChatMessageId: boolean
redisClient: Redis
sessionId: string
}
class BufferMemoryExtended extends BufferMemory {
isSessionIdUsingChatMessageId? = false
isSessionIdUsingChatMessageId = false
sessionId = ''
redisClient: Redis
constructor(fields: BufferMemoryInput & Partial<BufferMemoryExtendedInput>) {
constructor(fields: BufferMemoryInput & BufferMemoryExtendedInput) {
super(fields)
this.isSessionIdUsingChatMessageId = fields.isSessionIdUsingChatMessageId
this.sessionId = fields.sessionId
this.redisClient = fields.redisClient
}
/**
 * Read the full message list for a session from Upstash Redis and convert to IMessage[].
 * Upstash's client deserializes list entries, so lrange is typed as StoredMessage[];
 * malformed entries (missing type/content) are filtered out before mapping.
 * @param overrideSessionId optional session id to read instead of the configured one
 */
async getChatMessages(overrideSessionId = ''): Promise<IMessage[]> {
    if (!this.redisClient) return []

    // BUG FIX: `overrideSessionId ?? this.sessionId` always evaluated to '' when no
    // override was given (empty string is not nullish), so Upstash was queried with an
    // empty key. A truthiness check restores the intended fallback.
    const id = overrideSessionId ? overrideSessionId : this.sessionId
    const rawStoredMessages: StoredMessage[] = await this.redisClient.lrange<StoredMessage>(id, 0, -1)
    const orderedMessages = rawStoredMessages.reverse()
    const previousMessages = orderedMessages.filter((x): x is StoredMessage => x.type !== undefined && x.data.content !== undefined)
    const baseMessages = previousMessages.map(mapStoredMessageToChatMessage)
    return convertBaseMessagetoIMessage(baseMessages)
}

/**
 * Store one user/api message pair into Upstash Redis (each serialized via toDict and
 * lpush-ed, matching the newest-first layout getChatMessages expects).
 * @param msgArray messages to store; only the first 'userMessage' and 'apiMessage' are used
 * @param overrideSessionId optional session id to write to instead of the configured one
 */
async addChatMessages(msgArray: { text: string; type: MessageType }[], overrideSessionId = ''): Promise<void> {
    if (!this.redisClient) return

    // Same '' vs nullish fix as getChatMessages
    const id = overrideSessionId ? overrideSessionId : this.sessionId
    const input = msgArray.find((msg) => msg.type === 'userMessage')
    const output = msgArray.find((msg) => msg.type === 'apiMessage')
    if (input) {
        const newInputMessage = new HumanMessage(input.text)
        const messageToAdd = [newInputMessage].map((msg) => msg.toDict())
        await this.redisClient.lpush(id, JSON.stringify(messageToAdd[0]))
    }
    if (output) {
        const newOutputMessage = new AIMessage(output.text)
        const messageToAdd = [newOutputMessage].map((msg) => msg.toDict())
        await this.redisClient.lpush(id, JSON.stringify(messageToAdd[0]))
    }
}

/**
 * Delete the Upstash Redis key for a session, then clear the in-memory chat history.
 * @param overrideSessionId optional session id to clear instead of the configured one
 */
async clearChatMessages(overrideSessionId = ''): Promise<void> {
    if (!this.redisClient) return

    // Same '' vs nullish fix as getChatMessages
    const id = overrideSessionId ? overrideSessionId : this.sessionId
    await this.redisClient.del(id)
    await this.clear()
}
}

View File

@ -1,9 +1,8 @@
import { SystemMessage } from 'langchain/schema'
import { INode, INodeData, INodeParams } from '../../../src/Interface'
import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
import { IMessage, INode, INodeData, INodeParams, MessageType } from '../../../src/Interface'
import { convertBaseMessagetoIMessage, getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
import { ZepMemory, ZepMemoryInput } from 'langchain/memory/zep'
import { ICommonObject } from '../../../src'
import { getBufferString } from 'langchain/memory'
import { InputValues, MemoryVariables, OutputValues } from 'langchain/memory'
class ZepMemory_Memory implements INode {
label: string
@ -20,7 +19,7 @@ class ZepMemory_Memory implements INode {
constructor() {
this.label = 'Zep Memory'
this.name = 'ZepMemory'
this.version = 1.0
this.version = 2.0
this.type = 'ZepMemory'
this.icon = 'zep.png'
this.category = 'Memory'
@ -41,17 +40,12 @@ class ZepMemory_Memory implements INode {
type: 'string',
default: 'http://127.0.0.1:8000'
},
{
label: 'Auto Summary',
name: 'autoSummary',
type: 'boolean',
default: true
},
{
label: 'Session Id',
name: 'sessionId',
type: 'string',
description: 'If not specified, the first CHAT_MESSAGE_ID will be used as sessionId',
description:
'If not specified, a random id will be used. Learn <a target="_blank" href="https://docs.flowiseai.com/memory/long-term-memory#ui-and-embedded-chat">more</a>',
default: '',
additionalParams: true,
optional: true
@ -61,13 +55,7 @@ class ZepMemory_Memory implements INode {
name: 'k',
type: 'number',
default: '10',
description: 'Window of size k to surface the last k back-and-forth to use as memory.'
},
{
label: 'Auto Summary Template',
name: 'autoSummaryTemplate',
type: 'string',
default: 'This is the summary of the following conversation:\n{summary}',
description: 'Window of size k to surface the last k back-and-forth to use as memory.',
additionalParams: true
},
{
@ -109,57 +97,7 @@ class ZepMemory_Memory implements INode {
}
async init(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
const autoSummaryTemplate = nodeData.inputs?.autoSummaryTemplate as string
const autoSummary = nodeData.inputs?.autoSummary as boolean
const k = nodeData.inputs?.k as string
let zep = await initalizeZep(nodeData, options)
// hack to support summary
let tmpFunc = zep.loadMemoryVariables
zep.loadMemoryVariables = async (values) => {
let data = await tmpFunc.bind(zep, values)()
if (autoSummary && zep.returnMessages && data[zep.memoryKey] && data[zep.memoryKey].length) {
const zepClient = await zep.zepClientPromise
const memory = await zepClient.memory.getMemory(zep.sessionId, parseInt(k, 10) ?? 10)
if (memory?.summary) {
let summary = autoSummaryTemplate.replace(/{summary}/g, memory.summary.content)
// eslint-disable-next-line no-console
console.log('[ZepMemory] auto summary:', summary)
data[zep.memoryKey].unshift(new SystemMessage(summary))
}
}
// for langchain zep memory compatibility, or we will get "Missing value for input variable chat_history"
if (data instanceof Array) {
data = {
[zep.memoryKey]: data
}
}
return data
}
return zep
}
//@ts-ignore
memoryMethods = {
async clearSessionMemory(nodeData: INodeData, options: ICommonObject): Promise<void> {
const zep = await initalizeZep(nodeData, options)
const sessionId = nodeData.inputs?.sessionId as string
const chatId = options?.chatId as string
options.logger.info(`Clearing Zep memory session ${sessionId ? sessionId : chatId}`)
await zep.clear()
options.logger.info(`Successfully cleared Zep memory session ${sessionId ? sessionId : chatId}`)
},
async getChatMessages(nodeData: INodeData, options: ICommonObject): Promise<string> {
const memoryKey = nodeData.inputs?.memoryKey as string
const aiPrefix = nodeData.inputs?.aiPrefix as string
const humanPrefix = nodeData.inputs?.humanPrefix as string
const zep = await initalizeZep(nodeData, options)
const key = memoryKey ?? 'chat_history'
const memoryResult = await zep.loadMemoryVariables({})
return getBufferString(memoryResult[key], humanPrefix, aiPrefix)
}
return await initalizeZep(nodeData, options)
}
}
@ -169,40 +107,94 @@ const initalizeZep = async (nodeData: INodeData, options: ICommonObject): Promis
const humanPrefix = nodeData.inputs?.humanPrefix as string
const memoryKey = nodeData.inputs?.memoryKey as string
const inputKey = nodeData.inputs?.inputKey as string
const sessionId = nodeData.inputs?.sessionId as string
const k = nodeData.inputs?.k as string
const chatId = options?.chatId as string
let isSessionIdUsingChatMessageId = false
if (!sessionId && chatId) isSessionIdUsingChatMessageId = true
let sessionId = ''
if (!nodeData.inputs?.sessionId && chatId) {
isSessionIdUsingChatMessageId = true
sessionId = chatId
} else {
sessionId = nodeData.inputs?.sessionId
}
const credentialData = await getCredentialData(nodeData.credential ?? '', options)
const apiKey = getCredentialParam('apiKey', credentialData, nodeData)
const obj: ZepMemoryInput & Partial<ZepMemoryExtendedInput> = {
const obj: ZepMemoryInput & ZepMemoryExtendedInput = {
baseURL,
sessionId: sessionId ? sessionId : chatId,
aiPrefix,
humanPrefix,
returnMessages: true,
memoryKey,
inputKey
inputKey,
sessionId,
isSessionIdUsingChatMessageId,
k: k ? parseInt(k, 10) : undefined
}
if (apiKey) obj.apiKey = apiKey
if (isSessionIdUsingChatMessageId) obj.isSessionIdUsingChatMessageId = true
return new ZepMemoryExtended(obj)
}
interface ZepMemoryExtendedInput {
isSessionIdUsingChatMessageId: boolean
k?: number
}
class ZepMemoryExtended extends ZepMemory {
isSessionIdUsingChatMessageId? = false
isSessionIdUsingChatMessageId = false
lastN?: number
constructor(fields: ZepMemoryInput & Partial<ZepMemoryExtendedInput>) {
constructor(fields: ZepMemoryInput & ZepMemoryExtendedInput) {
super(fields)
this.isSessionIdUsingChatMessageId = fields.isSessionIdUsingChatMessageId
this.lastN = fields.k
}
async loadMemoryVariables(values: InputValues, overrideSessionId = ''): Promise<MemoryVariables> {
if (overrideSessionId) {
super.sessionId = overrideSessionId
}
return super.loadMemoryVariables({ ...values, lastN: this.lastN })
}
async saveContext(inputValues: InputValues, outputValues: OutputValues, overrideSessionId = ''): Promise<void> {
if (overrideSessionId) {
super.sessionId = overrideSessionId
}
return super.saveContext(inputValues, outputValues)
}
async clear(overrideSessionId = ''): Promise<void> {
if (overrideSessionId) {
super.sessionId = overrideSessionId
}
return super.clear()
}
/**
 * Fetch the stored chat messages for a session from Zep and convert to IMessage[].
 * @param overrideSessionId optional session id to read instead of the configured one
 */
async getChatMessages(overrideSessionId = ''): Promise<IMessage[]> {
    // `overrideSessionId ?? this.sessionId` never fell back — '' is not nullish, so
    // `??` always kept the default ''. Downstream happened to treat '' as "no override",
    // but the intent is a truthiness fallback; make it explicit.
    const id = overrideSessionId ? overrideSessionId : this.sessionId
    const memoryVariables = await this.loadMemoryVariables({}, id)
    const baseMessages = memoryVariables[this.memoryKey]
    return convertBaseMessagetoIMessage(baseMessages)
}

/**
 * Persist one user/api message pair into Zep memory via saveContext.
 * @param msgArray messages to store; only the first 'userMessage' and 'apiMessage' are used
 * @param overrideSessionId optional session id to write to instead of the configured one
 */
async addChatMessages(msgArray: { text: string; type: MessageType }[], overrideSessionId = ''): Promise<void> {
    // Same '' vs nullish fix as getChatMessages
    const id = overrideSessionId ? overrideSessionId : this.sessionId
    const input = msgArray.find((msg) => msg.type === 'userMessage')
    const output = msgArray.find((msg) => msg.type === 'apiMessage')
    const inputValues = { [this.inputKey ?? 'input']: input?.text }
    const outputValues = { output: output?.text }
    await this.saveContext(inputValues, outputValues, id)
}

/**
 * Delete all messages for a session.
 * @param overrideSessionId optional session id to clear instead of the configured one
 */
async clearChatMessages(overrideSessionId = ''): Promise<void> {
    // Same '' vs nullish fix as getChatMessages
    const id = overrideSessionId ? overrideSessionId : this.sessionId
    await this.clear(id)
}
}

View File

@ -0,0 +1,75 @@
import { INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface'
import { ResponseSynthesizerClass } from '../base'
/**
 * Flowise node wrapping the LlamaIndex "Compact and Refine" response synthesizer.
 * Exposes editable refine / text-QA prompt templates and returns a serializable
 * descriptor (ResponseSynthesizerClass) consumed by downstream LlamaIndex nodes.
 */
class CompactRefine_LlamaIndex implements INode {
    label: string
    name: string
    version: number
    description: string
    type: string
    icon: string
    category: string
    baseClasses: string[]
    tags: string[]
    inputs: INodeParams[]
    outputs: INodeOutputsValue[]

    constructor() {
        this.label = 'Compact and Refine'
        this.name = 'compactrefineLlamaIndex'
        this.version = 1.0
        this.type = 'CompactRefine'
        this.icon = 'compactrefine.svg'
        this.category = 'Response Synthesizer'
        this.description =
            'CompactRefine is a slight variation of Refine that first compacts the text chunks into the smallest possible number of chunks.'
        this.baseClasses = [this.type, 'ResponseSynthesizer']
        this.tags = ['LlamaIndex']
        this.inputs = [
            {
                label: 'Refine Prompt',
                name: 'refinePrompt',
                type: 'string',
                rows: 4,
                default: `The original query is as follows: {query}
We have provided an existing answer: {existingAnswer}
We have the opportunity to refine the existing answer (only if needed) with some more context below.
------------
{context}
------------
Given the new context, refine the original answer to better answer the query. If the context isn't useful, return the original answer.
Refined Answer:`,
                // fixed grammar: "can contains" -> "can contain"
                warning: `Prompt can contain no variables, or up to 3 variables. Variables must be {existingAnswer}, {context} and {query}`,
                optional: true
            },
            {
                label: 'Text QA Prompt',
                name: 'textQAPrompt',
                type: 'string',
                rows: 4,
                default: `Context information is below.
---------------------
{context}
---------------------
Given the context information and not prior knowledge, answer the query.
Query: {query}
Answer:`,
                // fixed grammar: "can contains" -> "can contain"
                warning: `Prompt can contain no variables, or up to 2 variables. Variables must be {context} and {query}`,
                optional: true
            }
        ]
    }

    /**
     * Build the synthesizer descriptor from the configured prompts.
     * @param nodeData node configuration holding refinePrompt / textQAPrompt strings
     * @returns a ResponseSynthesizerClass tagged 'CompactAndRefine'
     */
    async init(nodeData: INodeData): Promise<any> {
        const refinePrompt = nodeData.inputs?.refinePrompt as string
        const textQAPrompt = nodeData.inputs?.textQAPrompt as string

        // Turn the raw prompt strings into template functions by substituting the
        // {existingAnswer} / {context} / {query} placeholders (first occurrence each).
        const refinePromptTemplate = ({ context = '', existingAnswer = '', query = '' }) =>
            refinePrompt.replace('{existingAnswer}', existingAnswer).replace('{context}', context).replace('{query}', query)
        const textQAPromptTemplate = ({ context = '', query = '' }) => textQAPrompt.replace('{context}', context).replace('{query}', query)

        return new ResponseSynthesizerClass({ textQAPromptTemplate, refinePromptTemplate, type: 'CompactAndRefine' })
    }
}

module.exports = { nodeClass: CompactRefine_LlamaIndex }

View File

@ -0,0 +1 @@
<svg xmlns="http://www.w3.org/2000/svg" class="icon icon-tabler icon-tabler-layers-difference" width="24" height="24" viewBox="0 0 24 24" stroke-width="2" stroke="currentColor" fill="none" stroke-linecap="round" stroke-linejoin="round"><path stroke="none" d="M0 0h24v24H0z" fill="none"/><path d="M16 16v2a2 2 0 0 1 -2 2h-8a2 2 0 0 1 -2 -2v-8a2 2 0 0 1 2 -2h2v-2a2 2 0 0 1 2 -2h8a2 2 0 0 1 2 2v8a2 2 0 0 1 -2 2h-2" /><path d="M10 8l-2 0l0 2" /><path d="M8 14l0 2l2 0" /><path d="M14 8l2 0l0 2" /><path d="M16 14l0 2l-2 0" /></svg>

After

Width:  |  Height:  |  Size: 529 B

View File

@ -0,0 +1,75 @@
import { INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface'
import { ResponseSynthesizerClass } from '../base'
/**
 * Flowise node wrapping the LlamaIndex "Refine" response synthesizer.
 * Exposes editable refine / text-QA prompt templates and returns a serializable
 * descriptor (ResponseSynthesizerClass) consumed by downstream LlamaIndex nodes.
 */
class Refine_LlamaIndex implements INode {
    label: string
    name: string
    version: number
    description: string
    type: string
    icon: string
    category: string
    baseClasses: string[]
    tags: string[]
    inputs: INodeParams[]
    outputs: INodeOutputsValue[]

    constructor() {
        this.label = 'Refine'
        this.name = 'refineLlamaIndex'
        this.version = 1.0
        this.type = 'Refine'
        this.icon = 'refine.svg'
        this.category = 'Response Synthesizer'
        this.description =
            'Create and refine an answer by sequentially going through each retrieved text chunk. This makes a separate LLM call per Node. Good for more detailed answers.'
        this.baseClasses = [this.type, 'ResponseSynthesizer']
        this.tags = ['LlamaIndex']
        this.inputs = [
            {
                label: 'Refine Prompt',
                name: 'refinePrompt',
                type: 'string',
                rows: 4,
                default: `The original query is as follows: {query}
We have provided an existing answer: {existingAnswer}
We have the opportunity to refine the existing answer (only if needed) with some more context below.
------------
{context}
------------
Given the new context, refine the original answer to better answer the query. If the context isn't useful, return the original answer.
Refined Answer:`,
                // fixed grammar: "can contains" -> "can contain"
                warning: `Prompt can contain no variables, or up to 3 variables. Variables must be {existingAnswer}, {context} and {query}`,
                optional: true
            },
            {
                label: 'Text QA Prompt',
                name: 'textQAPrompt',
                type: 'string',
                rows: 4,
                default: `Context information is below.
---------------------
{context}
---------------------
Given the context information and not prior knowledge, answer the query.
Query: {query}
Answer:`,
                // fixed grammar: "can contains" -> "can contain"
                warning: `Prompt can contain no variables, or up to 2 variables. Variables must be {context} and {query}`,
                optional: true
            }
        ]
    }

    /**
     * Build the synthesizer descriptor from the configured prompts.
     * @param nodeData node configuration holding refinePrompt / textQAPrompt strings
     * @returns a ResponseSynthesizerClass tagged 'Refine'
     */
    async init(nodeData: INodeData): Promise<any> {
        const refinePrompt = nodeData.inputs?.refinePrompt as string
        const textQAPrompt = nodeData.inputs?.textQAPrompt as string

        // Turn the raw prompt strings into template functions by substituting the
        // {existingAnswer} / {context} / {query} placeholders (first occurrence each).
        const refinePromptTemplate = ({ context = '', existingAnswer = '', query = '' }) =>
            refinePrompt.replace('{existingAnswer}', existingAnswer).replace('{context}', context).replace('{query}', query)
        const textQAPromptTemplate = ({ context = '', query = '' }) => textQAPrompt.replace('{context}', context).replace('{query}', query)

        return new ResponseSynthesizerClass({ textQAPromptTemplate, refinePromptTemplate, type: 'Refine' })
    }
}

module.exports = { nodeClass: Refine_LlamaIndex }

View File

@ -0,0 +1 @@
<svg xmlns="http://www.w3.org/2000/svg" class="icon icon-tabler icon-tabler-filter-search" width="24" height="24" viewBox="0 0 24 24" stroke-width="2" stroke="currentColor" fill="none" stroke-linecap="round" stroke-linejoin="round"><path stroke="none" d="M0 0h24v24H0z" fill="none"/><path d="M11.36 20.213l-2.36 .787v-8.5l-4.48 -4.928a2 2 0 0 1 -.52 -1.345v-2.227h16v2.172a2 2 0 0 1 -.586 1.414l-4.414 4.414" /><path d="M18 18m-3 0a3 3 0 1 0 6 0a3 3 0 1 0 -6 0" /><path d="M20.2 20.2l1.8 1.8" /></svg>

After

Width:  |  Height:  |  Size: 501 B

View File

@ -0,0 +1,35 @@
import { INode, INodeOutputsValue, INodeParams } from '../../../src/Interface'
import { ResponseSynthesizerClass } from '../base'
/**
 * Flowise node wrapping the LlamaIndex "Simple Response Builder" synthesizer.
 * Takes no inputs; init() simply returns a descriptor tagged 'SimpleResponseBuilder'.
 */
class SimpleResponseBuilder_LlamaIndex implements INode {
    label: string
    name: string
    version: number
    description: string
    type: string
    icon: string
    category: string
    baseClasses: string[]
    tags: string[]
    inputs: INodeParams[]
    outputs: INodeOutputsValue[]

    constructor() {
        // Node identity
        this.name = 'simpleResponseBuilderLlamaIndex'
        this.label = 'Simple Response Builder'
        this.type = 'SimpleResponseBuilder'
        this.version = 1.0
        // Presentation metadata
        this.icon = 'simplerb.svg'
        this.category = 'Response Synthesizer'
        this.tags = ['LlamaIndex']
        this.description = `Apply a query to a collection of text chunks, gathering the responses in an array, and return a combined string of all responses. Useful for individual queries on each text chunk.`
        this.baseClasses = [this.type, 'ResponseSynthesizer']
        // This synthesizer is not configurable
        this.inputs = []
    }

    /** Return the synthesizer descriptor; no configuration is read. */
    async init(): Promise<any> {
        const synthesizer = new ResponseSynthesizerClass({ type: 'SimpleResponseBuilder' })
        return synthesizer
    }
}

module.exports = { nodeClass: SimpleResponseBuilder_LlamaIndex }

View File

@ -0,0 +1 @@
<svg xmlns="http://www.w3.org/2000/svg" class="icon icon-tabler icon-tabler-quote" width="24" height="24" viewBox="0 0 24 24" stroke-width="2" stroke="currentColor" fill="none" stroke-linecap="round" stroke-linejoin="round"><path stroke="none" d="M0 0h24v24H0z" fill="none"/><path d="M10 11h-4a1 1 0 0 1 -1 -1v-3a1 1 0 0 1 1 -1h3a1 1 0 0 1 1 1v6c0 2.667 -1.333 4.333 -4 5" /><path d="M19 11h-4a1 1 0 0 1 -1 -1v-3a1 1 0 0 1 1 -1h3a1 1 0 0 1 1 1v6c0 2.667 -1.333 4.333 -4 5" /></svg>

After

Width:  |  Height:  |  Size: 481 B

View File

@ -0,0 +1,56 @@
import { INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface'
import { ResponseSynthesizerClass } from '../base'
/**
 * Flowise node wrapping the LlamaIndex "TreeSummarize" response synthesizer.
 * Exposes a single editable summarization prompt and returns a serializable
 * descriptor (ResponseSynthesizerClass) consumed by downstream LlamaIndex nodes.
 */
class TreeSummarize_LlamaIndex implements INode {
    label: string
    name: string
    version: number
    description: string
    type: string
    icon: string
    category: string
    baseClasses: string[]
    tags: string[]
    inputs: INodeParams[]
    outputs: INodeOutputsValue[]

    constructor() {
        this.label = 'TreeSummarize'
        this.name = 'treeSummarizeLlamaIndex'
        this.version = 1.0
        this.type = 'TreeSummarize'
        this.icon = 'treesummarize.svg'
        this.category = 'Response Synthesizer'
        this.description =
            'Given a set of text chunks and the query, recursively construct a tree and return the root node as the response. Good for summarization purposes.'
        this.baseClasses = [this.type, 'ResponseSynthesizer']
        this.tags = ['LlamaIndex']
        this.inputs = [
            {
                label: 'Prompt',
                name: 'prompt',
                type: 'string',
                rows: 4,
                default: `Context information from multiple sources is below.
---------------------
{context}
---------------------
Given the information from multiple sources and not prior knowledge, answer the query.
Query: {query}
Answer:`,
                // fixed grammar: "can contains" -> "can contain"
                warning: `Prompt can contain no variables, or up to 2 variables. Variables must be {context} and {query}`,
                optional: true
            }
        ]
    }

    /**
     * Build the synthesizer descriptor from the configured prompt.
     * @param nodeData node configuration holding the prompt string
     * @returns a ResponseSynthesizerClass tagged 'TreeSummarize'
     */
    async init(nodeData: INodeData): Promise<any> {
        const prompt = nodeData.inputs?.prompt as string

        // Substitute the {context} / {query} placeholders (first occurrence each).
        const textQAPromptTemplate = ({ context = '', query = '' }) => prompt.replace('{context}', context).replace('{query}', query)

        return new ResponseSynthesizerClass({ textQAPromptTemplate, type: 'TreeSummarize' })
    }
}

module.exports = { nodeClass: TreeSummarize_LlamaIndex }

View File

@ -0,0 +1 @@
<svg xmlns="http://www.w3.org/2000/svg" class="icon icon-tabler icon-tabler-tree" width="24" height="24" viewBox="0 0 24 24" stroke-width="2" stroke="currentColor" fill="none" stroke-linecap="round" stroke-linejoin="round"><path stroke="none" d="M0 0h24v24H0z" fill="none"/><path d="M12 13l-2 -2" /><path d="M12 12l2 -2" /><path d="M12 21v-13" /><path d="M9.824 16a3 3 0 0 1 -2.743 -3.69a3 3 0 0 1 .304 -4.833a3 3 0 0 1 4.615 -3.707a3 3 0 0 1 4.614 3.707a3 3 0 0 1 .305 4.833a3 3 0 0 1 -2.919 3.695h-4z" /></svg>

After

Width:  |  Height:  |  Size: 512 B

View File

@ -0,0 +1,11 @@
/**
 * Lightweight carrier describing which LlamaIndex response synthesizer to
 * build downstream, together with its optional prompt templates.
 */
export class ResponseSynthesizerClass {
    // Synthesizer variant, e.g. 'TreeSummarize', 'Refine', 'CompactAndRefine'
    type: string
    // Optional template used to answer from the initial context
    textQAPromptTemplate?: any
    // Optional template used to refine an existing answer with more context
    refinePromptTemplate?: any

    constructor({
        type,
        textQAPromptTemplate,
        refinePromptTemplate
    }: {
        type: string
        textQAPromptTemplate?: any
        refinePromptTemplate?: any
    }) {
        this.type = type
        this.textQAPromptTemplate = textQAPromptTemplate
        this.refinePromptTemplate = refinePromptTemplate
    }
}

View File

@ -0,0 +1,366 @@
import {
BaseNode,
Document,
Metadata,
VectorStore,
VectorStoreQuery,
VectorStoreQueryResult,
serviceContextFromDefaults,
storageContextFromDefaults,
VectorStoreIndex,
BaseEmbedding
} from 'llamaindex'
import { FetchResponse, Index, Pinecone, ScoredPineconeRecord } from '@pinecone-database/pinecone'
import { flatten } from 'lodash'
import { Document as LCDocument } from 'langchain/document'
import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
import { flattenObject, getCredentialData, getCredentialParam } from '../../../src/utils'
/**
 * Flowise node wiring a Pinecone index into LlamaIndex as a vector store.
 * Two entry points:
 *  - vectorStoreMethods.upsert: embed incoming documents and upsert them into Pinecone
 *  - init: build a VectorIndexRetriever over the existing index for query-time search
 */
class PineconeLlamaIndex_VectorStores implements INode {
    label: string
    name: string
    version: number
    description: string
    type: string
    icon: string
    category: string
    tags: string[]
    baseClasses: string[]
    inputs: INodeParams[]
    credential: INodeParams

    constructor() {
        this.label = 'Pinecone'
        this.name = 'pineconeLlamaIndex'
        this.version = 1.0
        this.type = 'Pinecone'
        this.icon = 'pinecone.png'
        this.category = 'Vector Stores'
        this.description = `Upsert embedded data and perform similarity search upon query using Pinecone, a leading fully managed hosted vector database`
        this.baseClasses = [this.type, 'VectorIndexRetriever']
        // Marks this node as part of the LlamaIndex (vs LangChain) node family in the UI
        this.tags = ['LlamaIndex']
        this.credential = {
            label: 'Connect Credential',
            name: 'credential',
            type: 'credential',
            credentialNames: ['pineconeApi']
        }
        this.inputs = [
            {
                label: 'Document',
                name: 'document',
                type: 'Document',
                list: true,
                optional: true
            },
            {
                label: 'Chat Model',
                name: 'model',
                type: 'BaseChatModel_LlamaIndex'
            },
            {
                label: 'Embeddings',
                name: 'embeddings',
                type: 'BaseEmbedding_LlamaIndex'
            },
            {
                label: 'Pinecone Index',
                name: 'pineconeIndex',
                type: 'string'
            },
            {
                label: 'Pinecone Namespace',
                name: 'pineconeNamespace',
                type: 'string',
                placeholder: 'my-first-namespace',
                additionalParams: true,
                optional: true
            },
            {
                label: 'Pinecone Metadata Filter',
                name: 'pineconeMetadataFilter',
                type: 'json',
                optional: true,
                additionalParams: true
            },
            {
                label: 'Top K',
                name: 'topK',
                description: 'Number of top results to fetch. Default to 4',
                placeholder: '4',
                type: 'number',
                additionalParams: true,
                optional: true
            }
        ]
    }

    //@ts-ignore
    vectorStoreMethods = {
        /**
         * Embed the connected documents and upsert them into the Pinecone index.
         * @param nodeData - resolved node inputs (documents, model, embeddings, index config)
         * @param options - runtime options used to resolve the stored credential
         */
        async upsert(nodeData: INodeData, options: ICommonObject): Promise<void> {
            const indexName = nodeData.inputs?.pineconeIndex as string
            const pineconeNamespace = nodeData.inputs?.pineconeNamespace as string
            const docs = nodeData.inputs?.document as LCDocument[]
            const embeddings = nodeData.inputs?.embeddings as BaseEmbedding
            const model = nodeData.inputs?.model

            // API key / environment come from the stored credential, not node inputs
            const credentialData = await getCredentialData(nodeData.credential ?? '', options)
            const pineconeApiKey = getCredentialParam('pineconeApiKey', credentialData, nodeData)
            const pineconeEnv = getCredentialParam('pineconeEnv', credentialData, nodeData)

            const pcvs = new PineconeVectorStore({
                indexName,
                apiKey: pineconeApiKey,
                environment: pineconeEnv,
                namespace: pineconeNamespace
            })

            // Documents may arrive as nested lists from multiple upstream loaders;
            // flatten and drop entries without page content
            const flattenDocs = docs && docs.length ? flatten(docs) : []
            const finalDocs = []
            for (let i = 0; i < flattenDocs.length; i += 1) {
                if (flattenDocs[i] && flattenDocs[i].pageContent) {
                    finalDocs.push(new LCDocument(flattenDocs[i]))
                }
            }

            // Convert LangChain documents into LlamaIndex Document objects
            const llamadocs: Document[] = []
            for (const doc of finalDocs) {
                llamadocs.push(new Document({ text: doc.pageContent, metadata: doc.metadata }))
            }

            // serviceContext supplies the LLM + embedder; storageContext points
            // indexing at our Pinecone-backed vector store
            const serviceContext = serviceContextFromDefaults({ llm: model, embedModel: embeddings })
            const storageContext = await storageContextFromDefaults({ vectorStore: pcvs })

            try {
                // Embeds each document and upserts the vectors via PineconeVectorStore.add
                await VectorStoreIndex.fromDocuments(llamadocs, { serviceContext, storageContext })
            } catch (e) {
                throw new Error(e)
            }
        }
    }

    /**
     * Build a VectorIndexRetriever over the existing Pinecone index for query-time
     * similarity search (no documents are upserted here).
     * @param nodeData - resolved node inputs
     * @param _ - unused input string
     * @param options - runtime options used to resolve the stored credential
     * @returns a LlamaIndex retriever with similarityTopK configured
     */
    async init(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
        const indexName = nodeData.inputs?.pineconeIndex as string
        const pineconeNamespace = nodeData.inputs?.pineconeNamespace as string
        const pineconeMetadataFilter = nodeData.inputs?.pineconeMetadataFilter
        const embeddings = nodeData.inputs?.embeddings as BaseEmbedding
        const model = nodeData.inputs?.model
        const topK = nodeData.inputs?.topK as string
        // topK arrives as a string from the UI; default to 4 when unset
        const k = topK ? parseFloat(topK) : 4

        const credentialData = await getCredentialData(nodeData.credential ?? '', options)
        const pineconeApiKey = getCredentialParam('pineconeApiKey', credentialData, nodeData)
        const pineconeEnv = getCredentialParam('pineconeEnv', credentialData, nodeData)

        const obj: PineconeParams = {
            indexName,
            apiKey: pineconeApiKey,
            environment: pineconeEnv
        }

        if (pineconeNamespace) obj.namespace = pineconeNamespace
        if (pineconeMetadataFilter) {
            // Filter may be a pre-parsed object (flow variable) or a JSON string from the UI
            const metadatafilter = typeof pineconeMetadataFilter === 'object' ? pineconeMetadataFilter : JSON.parse(pineconeMetadataFilter)
            obj.queryFilter = metadatafilter
        }

        const pcvs = new PineconeVectorStore(obj)

        const serviceContext = serviceContextFromDefaults({ llm: model, embedModel: embeddings })
        const storageContext = await storageContextFromDefaults({ vectorStore: pcvs })

        // nodes: [] — the index shell is initialized over the remote store only
        const index = await VectorStoreIndex.init({
            nodes: [],
            storageContext,
            serviceContext
        })

        const retriever = index.asRetriever()
        retriever.similarityTopK = k
        // Attach serviceContext so downstream engines can reuse the same LLM/embedder
        ;(retriever as any).serviceContext = serviceContext

        return retriever
    }
}
// Configuration for PineconeVectorStore
type PineconeParams = {
    indexName: string
    apiKey: string
    environment: string
    namespace?: string
    chunkSize?: number
    queryFilter?: object
}

/**
 * Minimal LlamaIndex VectorStore implementation backed by a Pinecone index.
 * Vectors are upserted in chunks; queries run against a single namespace and
 * hydrate full records via fetch so node text/metadata can be reconstructed.
 */
class PineconeVectorStore implements VectorStore {
    // Node text is stored inline in Pinecone metadata (under the 'text' key)
    storesText: boolean = true
    db?: Pinecone
    indexName: string
    apiKey: string
    environment: string
    chunkSize: number
    namespace?: string
    queryFilter?: object

    constructor(params: PineconeParams) {
        this.indexName = params?.indexName
        this.apiKey = params?.apiKey
        this.environment = params?.environment
        this.namespace = params?.namespace ?? ''
        // Upsert batch size; env override, then 100
        this.chunkSize = params?.chunkSize ?? Number.parseInt(process.env.PINECONE_CHUNK_SIZE ?? '100')
        this.queryFilter = params?.queryFilter ?? {}
    }

    /** Lazily create and cache the Pinecone client. */
    private async getDb(): Promise<Pinecone> {
        if (!this.db) {
            this.db = new Pinecone({
                apiKey: this.apiKey,
                environment: this.environment
            })
        }
        return this.db
    }

    client() {
        return this.getDb()
    }

    /** Handle to the configured Pinecone index. */
    async index() {
        const db: Pinecone = await this.getDb()
        // Consistency fix: use the lowercase `index` accessor everywhere
        // (the original mixed `db.Index` here with `db.index` in clearIndex)
        return db.index(this.indexName)
    }

    /** Delete every vector in the index (all namespaces). */
    async clearIndex() {
        const idx = await this.index()
        return await idx.deleteAll()
    }

    /**
     * Upsert embedded nodes into the configured namespace in chunks.
     * @returns an empty id list (ids are not surfaced to callers)
     * @throws Error when any chunk fails to upsert
     */
    async add(embeddingResults: BaseNode<Metadata>[]): Promise<string[]> {
        if (embeddingResults.length === 0) {
            return []
        }

        const idx: Index = await this.index()
        const records = embeddingResults.map(this.nodeToRecord)

        for (let i = 0; i < records.length; i += this.chunkSize) {
            const chunk = records.slice(i, i + this.chunkSize)
            const saved = await this.saveChunk(idx, chunk)
            if (!saved) {
                // Fix: the original did `Promise.reject()` with no reason,
                // surfacing an undiagnosable `undefined` rejection to callers
                throw new Error(`Failed to upsert chunk starting at record ${i} into Pinecone index '${this.indexName}'`)
            }
        }
        return []
    }

    /** Best-effort upsert of one chunk; returns false on failure instead of throwing. */
    protected async saveChunk(idx: Index, chunk: any) {
        try {
            const namespace = idx.namespace(this.namespace ?? '')
            await namespace.upsert(chunk)
            return true
        } catch (err) {
            return false
        }
    }

    /** Delete a single record by its document id. */
    async delete(refDocId: string): Promise<void> {
        const idx = await this.index()
        const namespace = idx.namespace(this.namespace ?? '')
        return namespace.deleteOne(refDocId)
    }

    /**
     * Similarity search: query by embedding, then fetch the matched records to
     * rebuild full Documents (query matches alone don't carry values/metadata).
     */
    async query(query: VectorStoreQuery): Promise<VectorStoreQueryResult> {
        const queryOptions: any = {
            vector: query.queryEmbedding,
            topK: query.similarityTopK,
            filter: this.queryFilter
        }

        const idx = await this.index()
        const namespace = idx.namespace(this.namespace ?? '')
        const results = await namespace.query(queryOptions)

        const idList = results.matches.map((row) => row.id)
        const records: FetchResponse<any> = await namespace.fetch(idList)
        const rows = Object.values(records.records)

        const nodes = rows.map((row) => {
            return new Document({
                id_: row.id,
                text: this.textFromResultRow(row),
                metadata: this.metaWithoutText(row.metadata),
                embedding: row.values
            })
        })

        const result = {
            nodes: nodes,
            // NOTE(review): 999 stands in for a missing score — presumably a
            // sentinel; confirm downstream consumers tolerate it
            similarities: results.matches.map((row) => row.score || 999),
            ids: results.matches.map((row) => row.id)
        }
        return result
    }

    /**
     * Required by VectorStore interface. Currently ignored.
     */
    persist(): Promise<void> {
        return Promise.resolve()
    }

    /** Node text is stored under the 'text' metadata key. */
    textFromResultRow(row: ScoredPineconeRecord<Metadata>): string {
        return row.metadata?.text ?? ''
    }

    /** Copy of the metadata with the inline 'text' payload removed. */
    metaWithoutText(meta: Metadata): any {
        return Object.keys(meta)
            .filter((key) => key !== 'text')
            .reduce((acc: any, key: string) => {
                acc[key] = meta[key]
                return acc
            }, {})
    }

    /** Convert a LlamaIndex node into a Pinecone upsert record. */
    nodeToRecord(node: BaseNode<Metadata>) {
        const id: any = node.id_.length ? node.id_ : null
        return {
            id: id,
            values: node.getEmbedding(),
            metadata: {
                ...cleanupMetadata(node.metadata),
                // Inline the node text so queries can reconstruct Documents
                text: (node as any).text
            }
        }
    }
}
/**
 * Normalize node metadata into the flat shape Pinecone accepts: scalars plus
 * string arrays. Nested objects are flattened into dot-keys; nulls and empty
 * objects are dropped.
 */
const cleanupMetadata = (nodeMetadata: ICommonObject) => {
    // Work on a shallow copy so the caller's metadata is untouched
    const remaining: any = { ...nodeMetadata }

    // Pinecone allows arrays of strings as-is — set them aside before flattening
    const stringArrays: Record<string, string[]> = {}
    for (const [key, value] of Object.entries(remaining)) {
        if (Array.isArray(value) && value.every((el: any) => typeof el === 'string')) {
            stringArrays[key] = value as string[]
            delete remaining[key]
        }
    }

    // Pinecone doesn't support nested objects, so we flatten everything else
    const metadata: {
        [key: string]: string | number | boolean | string[] | null
    } = {
        ...flattenObject(remaining),
        ...stringArrays
    }

    // Pinecone doesn't support null values; empty objects are dropped as well
    for (const key of Object.keys(metadata)) {
        const value = metadata[key]
        if (value == null) {
            delete metadata[key]
        } else if (typeof value === 'object' && Object.keys(value as unknown as object).length === 0) {
            delete metadata[key]
        }
    }

    return metadata
}

module.exports = { nodeClass: PineconeLlamaIndex_VectorStores }

View File

@ -0,0 +1,124 @@
import path from 'path'
import { flatten } from 'lodash'
import { storageContextFromDefaults, serviceContextFromDefaults, VectorStoreIndex, Document } from 'llamaindex'
import { Document as LCDocument } from 'langchain/document'
import { INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface'
import { getUserHome } from '../../../src'
/**
 * Flowise node wrapping LlamaIndex's on-disk SimpleVectorStore.
 * Two entry points:
 *  - vectorStoreMethods.upsert: embed documents and persist the index to a local path
 *  - init: reload the persisted index and return a retriever for similarity search
 */
class SimpleStoreUpsert_LlamaIndex_VectorStores implements INode {
    label: string
    name: string
    version: number
    description: string
    type: string
    icon: string
    category: string
    baseClasses: string[]
    tags: string[]
    inputs: INodeParams[]
    outputs: INodeOutputsValue[]

    constructor() {
        this.label = 'SimpleStore'
        this.name = 'simpleStoreLlamaIndex'
        this.version = 1.0
        this.type = 'SimpleVectorStore'
        this.icon = 'simplevs.svg'
        this.category = 'Vector Stores'
        this.description = 'Upsert embedded data to local path and perform similarity search'
        this.baseClasses = [this.type, 'VectorIndexRetriever']
        // Marks this node as part of the LlamaIndex (vs LangChain) node family in the UI
        this.tags = ['LlamaIndex']
        this.inputs = [
            {
                label: 'Document',
                name: 'document',
                type: 'Document',
                list: true,
                optional: true
            },
            {
                label: 'Chat Model',
                name: 'model',
                type: 'BaseChatModel_LlamaIndex'
            },
            {
                label: 'Embeddings',
                name: 'embeddings',
                type: 'BaseEmbedding_LlamaIndex'
            },
            {
                label: 'Base Path to store',
                name: 'basePath',
                description:
                    'Path to store persist embeddings indexes with persistence. If not specified, default to same path where database is stored',
                type: 'string',
                optional: true
            },
            {
                label: 'Top K',
                name: 'topK',
                description: 'Number of top results to fetch. Default to 4',
                placeholder: '4',
                type: 'number',
                optional: true
            }
        ]
    }

    //@ts-ignore
    vectorStoreMethods = {
        /**
         * Embed the connected documents and persist the resulting index to disk.
         * @param nodeData - resolved node inputs (documents, model, embeddings, basePath)
         */
        async upsert(nodeData: INodeData): Promise<void> {
            const basePath = nodeData.inputs?.basePath as string
            const docs = nodeData.inputs?.document as LCDocument[]
            const embeddings = nodeData.inputs?.embeddings
            const model = nodeData.inputs?.model

            // Default persistence directory: ~/.flowise/llamaindex
            let filePath = ''
            if (!basePath) filePath = path.join(getUserHome(), '.flowise', 'llamaindex')
            else filePath = basePath

            // Documents may arrive as nested lists from multiple upstream loaders
            const flattenDocs = docs && docs.length ? flatten(docs) : []
            const finalDocs = []
            for (let i = 0; i < flattenDocs.length; i += 1) {
                finalDocs.push(new LCDocument(flattenDocs[i]))
            }

            // Convert LangChain documents into LlamaIndex Document objects
            const llamadocs: Document[] = []
            for (const doc of finalDocs) {
                llamadocs.push(new Document({ text: doc.pageContent, metadata: doc.metadata }))
            }

            // serviceContext supplies the LLM + embedder; storageContext persists to filePath
            const serviceContext = serviceContextFromDefaults({ llm: model, embedModel: embeddings })
            const storageContext = await storageContextFromDefaults({ persistDir: filePath })

            try {
                // Embeds the documents and writes the index files under filePath
                await VectorStoreIndex.fromDocuments(llamadocs, { serviceContext, storageContext })
            } catch (e) {
                throw new Error(e)
            }
        }
    }

    /**
     * Reload the persisted index from disk and return a retriever for
     * query-time similarity search (no documents are upserted here).
     * @param nodeData - resolved node inputs
     * @returns a LlamaIndex retriever with similarityTopK configured
     */
    async init(nodeData: INodeData): Promise<any> {
        const basePath = nodeData.inputs?.basePath as string
        const embeddings = nodeData.inputs?.embeddings
        const model = nodeData.inputs?.model
        const topK = nodeData.inputs?.topK as string
        // topK arrives as a string from the UI; default to 4 when unset
        const k = topK ? parseFloat(topK) : 4

        // Must match the directory used at upsert time
        let filePath = ''
        if (!basePath) filePath = path.join(getUserHome(), '.flowise', 'llamaindex')
        else filePath = basePath

        const serviceContext = serviceContextFromDefaults({ llm: model, embedModel: embeddings })
        const storageContext = await storageContextFromDefaults({ persistDir: filePath })

        const index = await VectorStoreIndex.init({ storageContext, serviceContext })
        const retriever = index.asRetriever()
        retriever.similarityTopK = k
        return retriever
    }
}

module.exports = { nodeClass: SimpleStoreUpsert_LlamaIndex_VectorStores }

View File

@ -0,0 +1,6 @@
<svg xmlns="http://www.w3.org/2000/svg" class="icon icon-tabler icon-tabler-database" width="24" height="24" viewBox="0 0 24 24" stroke-width="2" stroke="currentColor" fill="none" stroke-linecap="round" stroke-linejoin="round">
<path stroke="none" d="M0 0h24v24H0z" fill="none"></path>
<path d="M12 6m-8 0a8 3 0 1 0 16 0a8 3 0 1 0 -16 0"></path>
<path d="M4 6v6a8 3 0 0 0 16 0v-6"></path>
<path d="M4 12v6a8 3 0 0 0 16 0v-6"></path>
</svg>

After

Width:  |  Height:  |  Size: 451 B

View File

@ -21,7 +21,7 @@
"@aws-sdk/client-s3": "^3.427.0",
"@dqbd/tiktoken": "^1.0.7",
"@elastic/elasticsearch": "^8.9.0",
"@getzep/zep-js": "^0.6.3",
"@getzep/zep-js": "^0.9.0",
"@gomomento/sdk": "^1.51.1",
"@gomomento/sdk-core": "^1.51.1",
"@google-ai/generativelanguage": "^0.2.1",
@ -54,6 +54,7 @@
"langfuse-langchain": "^1.0.31",
"langsmith": "^0.0.32",
"linkifyjs": "^4.1.1",
"llamaindex": "^0.0.30",
"llmonitor": "^0.5.5",
"mammoth": "^1.5.1",
"moment": "^2.29.3",

View File

@ -91,6 +91,7 @@ export interface INodeProperties {
version: number
category: string
baseClasses: string[]
tags?: string[]
description?: string
filePath?: string
badge?: string
@ -107,10 +108,6 @@ export interface INode extends INodeProperties {
search: (nodeData: INodeData, options?: ICommonObject) => Promise<any>
delete: (nodeData: INodeData, options?: ICommonObject) => Promise<void>
}
memoryMethods?: {
clearSessionMemory: (nodeData: INodeData, options?: ICommonObject) => Promise<void>
getChatMessages: (nodeData: INodeData, options?: ICommonObject) => Promise<string>
}
init?(nodeData: INodeData, input: string, options?: ICommonObject): Promise<any>
run?(nodeData: INodeData, input: string, options?: ICommonObject): Promise<string | ICommonObject>
}

View File

@ -8,7 +8,7 @@ import { DataSource } from 'typeorm'
import { ICommonObject, IDatabaseEntity, IMessage, INodeData } from './Interface'
import { AES, enc } from 'crypto-js'
import { ChatMessageHistory } from 'langchain/memory'
import { AIMessage, HumanMessage } from 'langchain/schema'
import { AIMessage, HumanMessage, BaseMessage } from 'langchain/schema'
export const numberOrExpressionRegex = '^(\\d+\\.?\\d*|{{.*}})$' //return true if string consists only numbers OR expression {{}}
export const notEmptyRegex = '(.|\\s)*\\S(.|\\s)*' //return true if string is not empty or blank
@ -587,3 +587,54 @@ export const convertSchemaToZod = (schema: string | object): ICommonObject => {
throw new Error(e)
}
}
/**
 * Flatten a nested object into a single level using dot-separated keys,
 * e.g. { a: { b: 1 } } -> { 'a.b': 1 }. Arrays are flattened by index
 * (e.g. 'a.0'); null values are kept as leaves.
 * @param {ICommonObject} obj
 * @param {string} parentKey
 * @returns {ICommonObject}
 */
export const flattenObject = (obj: ICommonObject, parentKey?: string) => {
    let result: any = {}
    Object.keys(obj).forEach((key) => {
        const value = obj[key]
        const _key = parentKey ? parentKey + '.' + key : key
        // Fix: `typeof null === 'object'`, so the original recursed into null
        // and crashed on Object.keys(null). Treat null as a leaf value.
        if (value !== null && typeof value === 'object') {
            result = { ...result, ...flattenObject(value, _key) }
        } else {
            result[_key] = value
        }
    })
    return result
}
/**
 * Convert LangChain BaseMessages into Flowise IMessages.
 * Human messages map to 'userMessage'; AI and system messages both map to
 * 'apiMessage'. Any other message type (e.g. function/tool) is skipped.
 * (Fix: the original JSDoc was copy-pasted from flattenObject with the wrong
 * parameter names; the ai/system branches were also duplicated.)
 * @param {BaseMessage[]} messages
 * @returns {IMessage[]}
 */
export const convertBaseMessagetoIMessage = (messages: BaseMessage[]): IMessage[] => {
    const formatmessages: IMessage[] = []
    for (const m of messages) {
        const msgType = m._getType()
        if (msgType === 'human') {
            formatmessages.push({
                message: m.content as string,
                type: 'userMessage'
            })
        } else if (msgType === 'ai' || msgType === 'system') {
            // Both AI and system messages surface as apiMessage in the chat UI
            formatmessages.push({
                message: m.content as string,
                type: 'apiMessage'
            })
        }
        // Other roles (function/tool/generic) are intentionally dropped
    }
    return formatmessages
}

View File

@ -0,0 +1,855 @@
{
"description": "Answer question based on retrieved documents (context) with built-in memory to remember conversation using LlamaIndex",
"badge": "NEW",
"nodes": [
{
"width": 300,
"height": 438,
"id": "textFile_0",
"position": {
"x": 221.215421786192,
"y": 94.91489477412404
},
"type": "customNode",
"data": {
"id": "textFile_0",
"label": "Text File",
"version": 3,
"name": "textFile",
"type": "Document",
"baseClasses": ["Document"],
"category": "Document Loaders",
"description": "Load data from text files",
"inputParams": [
{
"label": "Txt File",
"name": "txtFile",
"type": "file",
"fileType": ".txt, .html, .aspx, .asp, .cpp, .c, .cs, .css, .go, .h, .java, .js, .less, .ts, .php, .proto, .python, .py, .rst, .ruby, .rb, .rs, .scala, .sc, .scss, .sol, .sql, .swift, .markdown, .md, .tex, .ltx, .vb, .xml",
"id": "textFile_0-input-txtFile-file"
},
{
"label": "Metadata",
"name": "metadata",
"type": "json",
"optional": true,
"additionalParams": true,
"id": "textFile_0-input-metadata-json"
}
],
"inputAnchors": [
{
"label": "Text Splitter",
"name": "textSplitter",
"type": "TextSplitter",
"optional": true,
"id": "textFile_0-input-textSplitter-TextSplitter"
}
],
"inputs": {
"textSplitter": "{{recursiveCharacterTextSplitter_0.data.instance}}",
"metadata": ""
},
"outputAnchors": [
{
"name": "output",
"label": "Output",
"type": "options",
"options": [
{
"id": "textFile_0-output-document-Document",
"name": "document",
"label": "Document",
"type": "Document"
},
{
"id": "textFile_0-output-text-string|json",
"name": "text",
"label": "Text",
"type": "string | json"
}
],
"default": "document"
}
],
"outputs": {
"output": "document"
},
"selected": false
},
"selected": false,
"positionAbsolute": {
"x": 221.215421786192,
"y": 94.91489477412404
},
"dragging": false
},
{
"width": 300,
"height": 429,
"id": "recursiveCharacterTextSplitter_0",
"position": {
"x": -203.4868320229876,
"y": 101.32475976329766
},
"type": "customNode",
"data": {
"id": "recursiveCharacterTextSplitter_0",
"label": "Recursive Character Text Splitter",
"version": 2,
"name": "recursiveCharacterTextSplitter",
"type": "RecursiveCharacterTextSplitter",
"baseClasses": ["RecursiveCharacterTextSplitter", "TextSplitter", "BaseDocumentTransformer", "Runnable"],
"category": "Text Splitters",
"description": "Split documents recursively by different characters - starting with \"\\n\\n\", then \"\\n\", then \" \"",
"inputParams": [
{
"label": "Chunk Size",
"name": "chunkSize",
"type": "number",
"default": 1000,
"optional": true,
"id": "recursiveCharacterTextSplitter_0-input-chunkSize-number"
},
{
"label": "Chunk Overlap",
"name": "chunkOverlap",
"type": "number",
"optional": true,
"id": "recursiveCharacterTextSplitter_0-input-chunkOverlap-number"
},
{
"label": "Custom Separators",
"name": "separators",
"type": "string",
"rows": 4,
"description": "Array of custom separators to determine when to split the text, will override the default separators",
"placeholder": "[\"|\", \"##\", \">\", \"-\"]",
"additionalParams": true,
"optional": true,
"id": "recursiveCharacterTextSplitter_0-input-separators-string"
}
],
"inputAnchors": [],
"inputs": {
"chunkSize": 1000,
"chunkOverlap": "",
"separators": ""
},
"outputAnchors": [
{
"id": "recursiveCharacterTextSplitter_0-output-recursiveCharacterTextSplitter-RecursiveCharacterTextSplitter|TextSplitter|BaseDocumentTransformer|Runnable",
"name": "recursiveCharacterTextSplitter",
"label": "RecursiveCharacterTextSplitter",
"type": "RecursiveCharacterTextSplitter | TextSplitter | BaseDocumentTransformer | Runnable"
}
],
"outputs": {},
"selected": false
},
"selected": false,
"positionAbsolute": {
"x": -203.4868320229876,
"y": 101.32475976329766
},
"dragging": false
},
{
"width": 300,
"height": 334,
"id": "openAIEmbedding_LlamaIndex_0",
"position": {
"x": 176.27434578083106,
"y": 953.3664298122493
},
"type": "customNode",
"data": {
"id": "openAIEmbedding_LlamaIndex_0",
"label": "OpenAI Embedding",
"version": 1,
"name": "openAIEmbedding_LlamaIndex",
"type": "OpenAIEmbedding",
"baseClasses": ["OpenAIEmbedding", "BaseEmbedding_LlamaIndex", "BaseEmbedding"],
"tags": ["LlamaIndex"],
"category": "Embeddings",
"description": "OpenAI Embedding with LlamaIndex implementation",
"inputParams": [
{
"label": "Connect Credential",
"name": "credential",
"type": "credential",
"credentialNames": ["openAIApi"],
"id": "openAIEmbedding_LlamaIndex_0-input-credential-credential"
},
{
"label": "Timeout",
"name": "timeout",
"type": "number",
"optional": true,
"additionalParams": true,
"id": "openAIEmbedding_LlamaIndex_0-input-timeout-number"
},
{
"label": "BasePath",
"name": "basepath",
"type": "string",
"optional": true,
"additionalParams": true,
"id": "openAIEmbedding_LlamaIndex_0-input-basepath-string"
}
],
"inputAnchors": [],
"inputs": {
"timeout": "",
"basepath": ""
},
"outputAnchors": [
{
"id": "openAIEmbedding_LlamaIndex_0-output-openAIEmbedding_LlamaIndex-OpenAIEmbedding|BaseEmbedding_LlamaIndex|BaseEmbedding",
"name": "openAIEmbedding_LlamaIndex",
"label": "OpenAIEmbedding",
"type": "OpenAIEmbedding | BaseEmbedding_LlamaIndex | BaseEmbedding"
}
],
"outputs": {},
"selected": false
},
"selected": false,
"positionAbsolute": {
"x": 176.27434578083106,
"y": 953.3664298122493
},
"dragging": false
},
{
"width": 300,
"height": 585,
"id": "pineconeLlamaIndex_0",
"position": {
"x": 609.3087433345761,
"y": 488.2141798951578
},
"type": "customNode",
"data": {
"id": "pineconeLlamaIndex_0",
"label": "Pinecone",
"version": 1,
"name": "pineconeLlamaIndex",
"type": "Pinecone",
"baseClasses": ["Pinecone", "VectorIndexRetriever"],
"tags": ["LlamaIndex"],
"category": "Vector Stores",
"description": "Upsert embedded data and perform similarity search upon query using Pinecone, a leading fully managed hosted vector database",
"inputParams": [
{
"label": "Connect Credential",
"name": "credential",
"type": "credential",
"credentialNames": ["pineconeApi"],
"id": "pineconeLlamaIndex_0-input-credential-credential"
},
{
"label": "Pinecone Index",
"name": "pineconeIndex",
"type": "string",
"id": "pineconeLlamaIndex_0-input-pineconeIndex-string"
},
{
"label": "Pinecone Namespace",
"name": "pineconeNamespace",
"type": "string",
"placeholder": "my-first-namespace",
"additionalParams": true,
"optional": true,
"id": "pineconeLlamaIndex_0-input-pineconeNamespace-string"
},
{
"label": "Pinecone Metadata Filter",
"name": "pineconeMetadataFilter",
"type": "json",
"optional": true,
"additionalParams": true,
"id": "pineconeLlamaIndex_0-input-pineconeMetadataFilter-json"
},
{
"label": "Top K",
"name": "topK",
"description": "Number of top results to fetch. Default to 4",
"placeholder": "4",
"type": "number",
"additionalParams": true,
"optional": true,
"id": "pineconeLlamaIndex_0-input-topK-number"
}
],
"inputAnchors": [
{
"label": "Document",
"name": "document",
"type": "Document",
"list": true,
"optional": true,
"id": "pineconeLlamaIndex_0-input-document-Document"
},
{
"label": "Chat Model",
"name": "model",
"type": "BaseChatModel_LlamaIndex",
"id": "pineconeLlamaIndex_0-input-model-BaseChatModel_LlamaIndex"
},
{
"label": "Embeddings",
"name": "embeddings",
"type": "BaseEmbedding_LlamaIndex",
"id": "pineconeLlamaIndex_0-input-embeddings-BaseEmbedding_LlamaIndex"
}
],
"inputs": {
"document": ["{{textFile_0.data.instance}}"],
"model": "{{chatOpenAI_LlamaIndex_1.data.instance}}",
"embeddings": "{{openAIEmbedding_LlamaIndex_0.data.instance}}",
"pineconeIndex": "",
"pineconeNamespace": "",
"pineconeMetadataFilter": "",
"topK": ""
},
"outputAnchors": [
{
"id": "pineconeLlamaIndex_0-output-pineconeLlamaIndex-Pinecone|VectorIndexRetriever",
"name": "pineconeLlamaIndex",
"label": "Pinecone",
"type": "Pinecone | VectorIndexRetriever"
}
],
"outputs": {},
"selected": false
},
"selected": false,
"positionAbsolute": {
"x": 609.3087433345761,
"y": 488.2141798951578
},
"dragging": false
},
{
"width": 300,
"height": 529,
"id": "chatOpenAI_LlamaIndex_1",
"position": {
"x": -195.15244974578656,
"y": 584.9467028201428
},
"type": "customNode",
"data": {
"id": "chatOpenAI_LlamaIndex_1",
"label": "ChatOpenAI",
"version": 1,
"name": "chatOpenAI_LlamaIndex",
"type": "ChatOpenAI",
"baseClasses": ["ChatOpenAI", "BaseChatModel_LlamaIndex"],
"tags": ["LlamaIndex"],
"category": "Chat Models",
"description": "Wrapper around OpenAI Chat LLM with LlamaIndex implementation",
"inputParams": [
{
"label": "Connect Credential",
"name": "credential",
"type": "credential",
"credentialNames": ["openAIApi"],
"id": "chatOpenAI_LlamaIndex_1-input-credential-credential"
},
{
"label": "Model Name",
"name": "modelName",
"type": "options",
"options": [
{
"label": "gpt-4",
"name": "gpt-4"
},
{
"label": "gpt-4-1106-preview",
"name": "gpt-4-1106-preview"
},
{
"label": "gpt-4-vision-preview",
"name": "gpt-4-vision-preview"
},
{
"label": "gpt-4-0613",
"name": "gpt-4-0613"
},
{
"label": "gpt-4-32k",
"name": "gpt-4-32k"
},
{
"label": "gpt-4-32k-0613",
"name": "gpt-4-32k-0613"
},
{
"label": "gpt-3.5-turbo",
"name": "gpt-3.5-turbo"
},
{
"label": "gpt-3.5-turbo-1106",
"name": "gpt-3.5-turbo-1106"
},
{
"label": "gpt-3.5-turbo-0613",
"name": "gpt-3.5-turbo-0613"
},
{
"label": "gpt-3.5-turbo-16k",
"name": "gpt-3.5-turbo-16k"
},
{
"label": "gpt-3.5-turbo-16k-0613",
"name": "gpt-3.5-turbo-16k-0613"
}
],
"default": "gpt-3.5-turbo",
"optional": true,
"id": "chatOpenAI_LlamaIndex_1-input-modelName-options"
},
{
"label": "Temperature",
"name": "temperature",
"type": "number",
"step": 0.1,
"default": 0.9,
"optional": true,
"id": "chatOpenAI_LlamaIndex_1-input-temperature-number"
},
{
"label": "Max Tokens",
"name": "maxTokens",
"type": "number",
"step": 1,
"optional": true,
"additionalParams": true,
"id": "chatOpenAI_LlamaIndex_1-input-maxTokens-number"
},
{
"label": "Top Probability",
"name": "topP",
"type": "number",
"step": 0.1,
"optional": true,
"additionalParams": true,
"id": "chatOpenAI_LlamaIndex_1-input-topP-number"
},
{
"label": "Timeout",
"name": "timeout",
"type": "number",
"step": 1,
"optional": true,
"additionalParams": true,
"id": "chatOpenAI_LlamaIndex_1-input-timeout-number"
}
],
"inputAnchors": [],
"inputs": {
"modelName": "gpt-3.5-turbo-16k",
"temperature": 0.9,
"maxTokens": "",
"topP": "",
"timeout": ""
},
"outputAnchors": [
{
"id": "chatOpenAI_LlamaIndex_1-output-chatOpenAI_LlamaIndex-ChatOpenAI|BaseChatModel_LlamaIndex",
"name": "chatOpenAI_LlamaIndex",
"label": "ChatOpenAI",
"type": "ChatOpenAI | BaseChatModel_LlamaIndex"
}
],
"outputs": {},
"selected": false
},
"selected": false,
"positionAbsolute": {
"x": -195.15244974578656,
"y": 584.9467028201428
},
"dragging": false
},
{
"width": 300,
"height": 513,
"id": "contextChatEngine_0",
"position": {
"x": 1550.2553933740128,
"y": 270.7914631777829
},
"type": "customNode",
"data": {
"id": "contextChatEngine_0",
"label": "Context Chat Engine",
"version": 1,
"name": "contextChatEngine",
"type": "ContextChatEngine",
"baseClasses": ["ContextChatEngine"],
"tags": ["LlamaIndex"],
"category": "Engine",
"description": "Answer question based on retrieved documents (context) with built-in memory to remember conversation",
"inputParams": [
{
"label": "System Message",
"name": "systemMessagePrompt",
"type": "string",
"rows": 4,
"optional": true,
"placeholder": "I want you to act as a document that I am having a conversation with. Your name is \"AI Assistant\". You will provide me with answers from the given info. If the answer is not included, say exactly \"Hmm, I am not sure.\" and stop after that. Refuse to answer any question not about the info. Never break character.",
"id": "contextChatEngine_0-input-systemMessagePrompt-string"
}
],
"inputAnchors": [
{
"label": "Chat Model",
"name": "model",
"type": "BaseChatModel_LlamaIndex",
"id": "contextChatEngine_0-input-model-BaseChatModel_LlamaIndex"
},
{
"label": "Vector Store Retriever",
"name": "vectorStoreRetriever",
"type": "VectorIndexRetriever",
"id": "contextChatEngine_0-input-vectorStoreRetriever-VectorIndexRetriever"
},
{
"label": "Memory",
"name": "memory",
"type": "BaseChatMemory",
"id": "contextChatEngine_0-input-memory-BaseChatMemory"
}
],
"inputs": {
"model": "{{chatOpenAI_LlamaIndex_2.data.instance}}",
"vectorStoreRetriever": "{{pineconeLlamaIndex_0.data.instance}}",
"memory": "{{RedisBackedChatMemory_0.data.instance}}",
"systemMessagePrompt": ""
},
"outputAnchors": [
{
"id": "contextChatEngine_0-output-contextChatEngine-ContextChatEngine",
"name": "contextChatEngine",
"label": "ContextChatEngine",
"type": "ContextChatEngine"
}
],
"outputs": {},
"selected": false
},
"selected": false,
"positionAbsolute": {
"x": 1550.2553933740128,
"y": 270.7914631777829
},
"dragging": false
},
{
"width": 300,
"height": 329,
"id": "RedisBackedChatMemory_0",
"position": {
"x": 1081.252815805786,
"y": 990.1701092562037
},
"type": "customNode",
"data": {
"id": "RedisBackedChatMemory_0",
"label": "Redis-Backed Chat Memory",
"version": 2,
"name": "RedisBackedChatMemory",
"type": "RedisBackedChatMemory",
"baseClasses": ["RedisBackedChatMemory", "BaseChatMemory", "BaseMemory"],
"category": "Memory",
"description": "Summarizes the conversation and stores the memory in Redis server",
"inputParams": [
{
"label": "Connect Credential",
"name": "credential",
"type": "credential",
"optional": true,
"credentialNames": ["redisCacheApi", "redisCacheUrlApi"],
"id": "RedisBackedChatMemory_0-input-credential-credential"
},
{
"label": "Session Id",
"name": "sessionId",
"type": "string",
"description": "If not specified, a random id will be used. Learn <a target=\"_blank\" href=\"https://docs.flowiseai.com/memory/long-term-memory#ui-and-embedded-chat\">more</a>",
"default": "",
"additionalParams": true,
"optional": true,
"id": "RedisBackedChatMemory_0-input-sessionId-string"
},
{
"label": "Session Timeouts",
"name": "sessionTTL",
"type": "number",
"description": "Omit this parameter to make sessions never expire",
"additionalParams": true,
"optional": true,
"id": "RedisBackedChatMemory_0-input-sessionTTL-number"
},
{
"label": "Memory Key",
"name": "memoryKey",
"type": "string",
"default": "chat_history",
"additionalParams": true,
"id": "RedisBackedChatMemory_0-input-memoryKey-string"
}
],
"inputAnchors": [],
"inputs": {
"sessionId": "",
"sessionTTL": "",
"memoryKey": "chat_history"
},
"outputAnchors": [
{
"id": "RedisBackedChatMemory_0-output-RedisBackedChatMemory-RedisBackedChatMemory|BaseChatMemory|BaseMemory",
"name": "RedisBackedChatMemory",
"label": "RedisBackedChatMemory",
"type": "RedisBackedChatMemory | BaseChatMemory | BaseMemory"
}
],
"outputs": {},
"selected": false
},
"selected": false,
"dragging": false,
"positionAbsolute": {
"x": 1081.252815805786,
"y": 990.1701092562037
}
},
{
"width": 300,
"height": 529,
"id": "chatOpenAI_LlamaIndex_2",
"position": {
"x": 1015.1605888108386,
"y": -38.31143117572401
},
"type": "customNode",
"data": {
"id": "chatOpenAI_LlamaIndex_2",
"label": "ChatOpenAI",
"version": 1,
"name": "chatOpenAI_LlamaIndex",
"type": "ChatOpenAI",
"baseClasses": ["ChatOpenAI", "BaseChatModel_LlamaIndex"],
"tags": ["LlamaIndex"],
"category": "Chat Models",
"description": "Wrapper around OpenAI Chat LLM with LlamaIndex implementation",
"inputParams": [
{
"label": "Connect Credential",
"name": "credential",
"type": "credential",
"credentialNames": ["openAIApi"],
"id": "chatOpenAI_LlamaIndex_2-input-credential-credential"
},
{
"label": "Model Name",
"name": "modelName",
"type": "options",
"options": [
{
"label": "gpt-4",
"name": "gpt-4"
},
{
"label": "gpt-4-1106-preview",
"name": "gpt-4-1106-preview"
},
{
"label": "gpt-4-vision-preview",
"name": "gpt-4-vision-preview"
},
{
"label": "gpt-4-0613",
"name": "gpt-4-0613"
},
{
"label": "gpt-4-32k",
"name": "gpt-4-32k"
},
{
"label": "gpt-4-32k-0613",
"name": "gpt-4-32k-0613"
},
{
"label": "gpt-3.5-turbo",
"name": "gpt-3.5-turbo"
},
{
"label": "gpt-3.5-turbo-1106",
"name": "gpt-3.5-turbo-1106"
},
{
"label": "gpt-3.5-turbo-0613",
"name": "gpt-3.5-turbo-0613"
},
{
"label": "gpt-3.5-turbo-16k",
"name": "gpt-3.5-turbo-16k"
},
{
"label": "gpt-3.5-turbo-16k-0613",
"name": "gpt-3.5-turbo-16k-0613"
}
],
"default": "gpt-3.5-turbo",
"optional": true,
"id": "chatOpenAI_LlamaIndex_2-input-modelName-options"
},
{
"label": "Temperature",
"name": "temperature",
"type": "number",
"step": 0.1,
"default": 0.9,
"optional": true,
"id": "chatOpenAI_LlamaIndex_2-input-temperature-number"
},
{
"label": "Max Tokens",
"name": "maxTokens",
"type": "number",
"step": 1,
"optional": true,
"additionalParams": true,
"id": "chatOpenAI_LlamaIndex_2-input-maxTokens-number"
},
{
"label": "Top Probability",
"name": "topP",
"type": "number",
"step": 0.1,
"optional": true,
"additionalParams": true,
"id": "chatOpenAI_LlamaIndex_2-input-topP-number"
},
{
"label": "Timeout",
"name": "timeout",
"type": "number",
"step": 1,
"optional": true,
"additionalParams": true,
"id": "chatOpenAI_LlamaIndex_2-input-timeout-number"
}
],
"inputAnchors": [],
"inputs": {
"modelName": "gpt-3.5-turbo",
"temperature": 0.9,
"maxTokens": "",
"topP": "",
"timeout": ""
},
"outputAnchors": [
{
"id": "chatOpenAI_LlamaIndex_2-output-chatOpenAI_LlamaIndex-ChatOpenAI|BaseChatModel_LlamaIndex",
"name": "chatOpenAI_LlamaIndex",
"label": "ChatOpenAI",
"type": "ChatOpenAI | BaseChatModel_LlamaIndex"
}
],
"outputs": {},
"selected": false
},
"selected": false,
"positionAbsolute": {
"x": 1015.1605888108386,
"y": -38.31143117572401
},
"dragging": false
}
],
"edges": [
{
"source": "recursiveCharacterTextSplitter_0",
"sourceHandle": "recursiveCharacterTextSplitter_0-output-recursiveCharacterTextSplitter-RecursiveCharacterTextSplitter|TextSplitter|BaseDocumentTransformer|Runnable",
"target": "textFile_0",
"targetHandle": "textFile_0-input-textSplitter-TextSplitter",
"type": "buttonedge",
"id": "recursiveCharacterTextSplitter_0-recursiveCharacterTextSplitter_0-output-recursiveCharacterTextSplitter-RecursiveCharacterTextSplitter|TextSplitter|BaseDocumentTransformer|Runnable-textFile_0-textFile_0-input-textSplitter-TextSplitter",
"data": {
"label": ""
}
},
{
"source": "textFile_0",
"sourceHandle": "textFile_0-output-document-Document",
"target": "pineconeLlamaIndex_0",
"targetHandle": "pineconeLlamaIndex_0-input-document-Document",
"type": "buttonedge",
"id": "textFile_0-textFile_0-output-document-Document-pineconeLlamaIndex_0-pineconeLlamaIndex_0-input-document-Document",
"data": {
"label": ""
}
},
{
"source": "chatOpenAI_LlamaIndex_1",
"sourceHandle": "chatOpenAI_LlamaIndex_1-output-chatOpenAI_LlamaIndex-ChatOpenAI|BaseChatModel_LlamaIndex",
"target": "pineconeLlamaIndex_0",
"targetHandle": "pineconeLlamaIndex_0-input-model-BaseChatModel_LlamaIndex",
"type": "buttonedge",
"id": "chatOpenAI_LlamaIndex_1-chatOpenAI_LlamaIndex_1-output-chatOpenAI_LlamaIndex-ChatOpenAI|BaseChatModel_LlamaIndex-pineconeLlamaIndex_0-pineconeLlamaIndex_0-input-model-BaseChatModel_LlamaIndex",
"data": {
"label": ""
}
},
{
"source": "openAIEmbedding_LlamaIndex_0",
"sourceHandle": "openAIEmbedding_LlamaIndex_0-output-openAIEmbedding_LlamaIndex-OpenAIEmbedding|BaseEmbedding_LlamaIndex|BaseEmbedding",
"target": "pineconeLlamaIndex_0",
"targetHandle": "pineconeLlamaIndex_0-input-embeddings-BaseEmbedding_LlamaIndex",
"type": "buttonedge",
"id": "openAIEmbedding_LlamaIndex_0-openAIEmbedding_LlamaIndex_0-output-openAIEmbedding_LlamaIndex-OpenAIEmbedding|BaseEmbedding_LlamaIndex|BaseEmbedding-pineconeLlamaIndex_0-pineconeLlamaIndex_0-input-embeddings-BaseEmbedding_LlamaIndex",
"data": {
"label": ""
}
},
{
"source": "pineconeLlamaIndex_0",
"sourceHandle": "pineconeLlamaIndex_0-output-pineconeLlamaIndex-Pinecone|VectorIndexRetriever",
"target": "contextChatEngine_0",
"targetHandle": "contextChatEngine_0-input-vectorStoreRetriever-VectorIndexRetriever",
"type": "buttonedge",
"id": "pineconeLlamaIndex_0-pineconeLlamaIndex_0-output-pineconeLlamaIndex-Pinecone|VectorIndexRetriever-contextChatEngine_0-contextChatEngine_0-input-vectorStoreRetriever-VectorIndexRetriever",
"data": {
"label": ""
}
},
{
"source": "RedisBackedChatMemory_0",
"sourceHandle": "RedisBackedChatMemory_0-output-RedisBackedChatMemory-RedisBackedChatMemory|BaseChatMemory|BaseMemory",
"target": "contextChatEngine_0",
"targetHandle": "contextChatEngine_0-input-memory-BaseChatMemory",
"type": "buttonedge",
"id": "RedisBackedChatMemory_0-RedisBackedChatMemory_0-output-RedisBackedChatMemory-RedisBackedChatMemory|BaseChatMemory|BaseMemory-contextChatEngine_0-contextChatEngine_0-input-memory-BaseChatMemory",
"data": {
"label": ""
}
},
{
"source": "chatOpenAI_LlamaIndex_2",
"sourceHandle": "chatOpenAI_LlamaIndex_2-output-chatOpenAI_LlamaIndex-ChatOpenAI|BaseChatModel_LlamaIndex",
"target": "contextChatEngine_0",
"targetHandle": "contextChatEngine_0-input-model-BaseChatModel_LlamaIndex",
"type": "buttonedge",
"id": "chatOpenAI_LlamaIndex_2-chatOpenAI_LlamaIndex_2-output-chatOpenAI_LlamaIndex-ChatOpenAI|BaseChatModel_LlamaIndex-contextChatEngine_0-contextChatEngine_0-input-model-BaseChatModel_LlamaIndex",
"data": {
"label": ""
}
}
]
}

View File

@ -205,7 +205,7 @@
"data": {
"id": "ZepMemory_0",
"label": "Zep Memory",
"version": 1,
"version": 2,
"name": "ZepMemory",
"type": "ZepMemory",
"baseClasses": ["ZepMemory", "BaseChatMemory", "BaseMemory"],
@ -228,13 +228,6 @@
"default": "http://127.0.0.1:8000",
"id": "ZepMemory_0-input-baseURL-string"
},
{
"label": "Auto Summary",
"name": "autoSummary",
"type": "boolean",
"default": true,
"id": "ZepMemory_0-input-autoSummary-boolean"
},
{
"label": "Session Id",
"name": "sessionId",
@ -252,15 +245,8 @@
"default": "10",
"step": 1,
"description": "Window of size k to surface the last k back-and-forths to use as memory.",
"id": "ZepMemory_0-input-k-number"
},
{
"label": "Auto Summary Template",
"name": "autoSummaryTemplate",
"type": "string",
"default": "This is the summary of the following conversation:\n{summary}",
"additionalParams": true,
"id": "ZepMemory_0-input-autoSummaryTemplate-string"
"id": "ZepMemory_0-input-k-number"
},
{
"label": "AI Prefix",
@ -306,10 +292,8 @@
"inputAnchors": [],
"inputs": {
"baseURL": "http://127.0.0.1:8000",
"autoSummary": true,
"sessionId": "",
"k": "10",
"autoSummaryTemplate": "This is the summary of the following conversation:\n{summary}",
"aiPrefix": "ai",
"humanPrefix": "human",
"memoryKey": "chat_history",

View File

@ -0,0 +1,509 @@
{
"description": "Stateless query engine designed to answer questions over your data using LlamaIndex",
"badge": "NEW",
"nodes": [
{
"width": 300,
"height": 382,
"id": "queryEngine_0",
"position": {
"x": 1407.9610494306783,
"y": 241.12144405808692
},
"type": "customNode",
"data": {
"id": "queryEngine_0",
"label": "Query Engine",
"version": 1,
"name": "queryEngine",
"type": "QueryEngine",
"baseClasses": ["QueryEngine"],
"tags": ["LlamaIndex"],
"category": "Engine",
"description": "Simple query engine built to answer questions over your data, without memory",
"inputParams": [
{
"label": "Return Source Documents",
"name": "returnSourceDocuments",
"type": "boolean",
"optional": true,
"id": "queryEngine_0-input-returnSourceDocuments-boolean"
}
],
"inputAnchors": [
{
"label": "Vector Store Retriever",
"name": "vectorStoreRetriever",
"type": "VectorIndexRetriever",
"id": "queryEngine_0-input-vectorStoreRetriever-VectorIndexRetriever"
},
{
"label": "Response Synthesizer",
"name": "responseSynthesizer",
"type": "ResponseSynthesizer",
"description": "ResponseSynthesizer is responsible for sending the query, nodes, and prompt templates to the LLM to generate a response. See <a target=\"_blank\" href=\"https://ts.llamaindex.ai/modules/low_level/response_synthesizer\">more</a>",
"optional": true,
"id": "queryEngine_0-input-responseSynthesizer-ResponseSynthesizer"
}
],
"inputs": {
"vectorStoreRetriever": "{{pineconeLlamaIndex_0.data.instance}}",
"responseSynthesizer": "{{compactrefineLlamaIndex_0.data.instance}}",
"returnSourceDocuments": true
},
"outputAnchors": [
{
"id": "queryEngine_0-output-queryEngine-QueryEngine",
"name": "queryEngine",
"label": "QueryEngine",
"type": "QueryEngine"
}
],
"outputs": {},
"selected": false
},
"selected": false,
"positionAbsolute": {
"x": 1407.9610494306783,
"y": 241.12144405808692
},
"dragging": false
},
{
"width": 300,
"height": 585,
"id": "pineconeLlamaIndex_0",
"position": {
"x": 977.3886641397302,
"y": -261.2253031641797
},
"type": "customNode",
"data": {
"id": "pineconeLlamaIndex_0",
"label": "Pinecone",
"version": 1,
"name": "pineconeLlamaIndex",
"type": "Pinecone",
"baseClasses": ["Pinecone", "VectorIndexRetriever"],
"tags": ["LlamaIndex"],
"category": "Vector Stores",
"description": "Upsert embedded data and perform similarity search upon query using Pinecone, a leading fully managed hosted vector database",
"inputParams": [
{
"label": "Connect Credential",
"name": "credential",
"type": "credential",
"credentialNames": ["pineconeApi"],
"id": "pineconeLlamaIndex_0-input-credential-credential"
},
{
"label": "Pinecone Index",
"name": "pineconeIndex",
"type": "string",
"id": "pineconeLlamaIndex_0-input-pineconeIndex-string"
},
{
"label": "Pinecone Namespace",
"name": "pineconeNamespace",
"type": "string",
"placeholder": "my-first-namespace",
"additionalParams": true,
"optional": true,
"id": "pineconeLlamaIndex_0-input-pineconeNamespace-string"
},
{
"label": "Pinecone Metadata Filter",
"name": "pineconeMetadataFilter",
"type": "json",
"optional": true,
"additionalParams": true,
"id": "pineconeLlamaIndex_0-input-pineconeMetadataFilter-json"
},
{
"label": "Top K",
"name": "topK",
"description": "Number of top results to fetch. Default to 4",
"placeholder": "4",
"type": "number",
"additionalParams": true,
"optional": true,
"id": "pineconeLlamaIndex_0-input-topK-number"
}
],
"inputAnchors": [
{
"label": "Document",
"name": "document",
"type": "Document",
"list": true,
"optional": true,
"id": "pineconeLlamaIndex_0-input-document-Document"
},
{
"label": "Chat Model",
"name": "model",
"type": "BaseChatModel_LlamaIndex",
"id": "pineconeLlamaIndex_0-input-model-BaseChatModel_LlamaIndex"
},
{
"label": "Embeddings",
"name": "embeddings",
"type": "BaseEmbedding_LlamaIndex",
"id": "pineconeLlamaIndex_0-input-embeddings-BaseEmbedding_LlamaIndex"
}
],
"inputs": {
"document": "",
"model": "{{chatAnthropic_LlamaIndex_0.data.instance}}",
"embeddings": "{{openAIEmbedding_LlamaIndex_0.data.instance}}",
"pineconeIndex": "",
"pineconeNamespace": "",
"pineconeMetadataFilter": "",
"topK": ""
},
"outputAnchors": [
{
"id": "pineconeLlamaIndex_0-output-pineconeLlamaIndex-Pinecone|VectorIndexRetriever",
"name": "pineconeLlamaIndex",
"label": "Pinecone",
"type": "Pinecone | VectorIndexRetriever"
}
],
"outputs": {},
"selected": false
},
"selected": false,
"positionAbsolute": {
"x": 977.3886641397302,
"y": -261.2253031641797
},
"dragging": false
},
{
"width": 300,
"height": 334,
"id": "openAIEmbedding_LlamaIndex_0",
"position": {
"x": 529.8690713844503,
"y": -18.955726653613254
},
"type": "customNode",
"data": {
"id": "openAIEmbedding_LlamaIndex_0",
"label": "OpenAI Embedding",
"version": 1,
"name": "openAIEmbedding_LlamaIndex",
"type": "OpenAIEmbedding",
"baseClasses": ["OpenAIEmbedding", "BaseEmbedding_LlamaIndex", "BaseEmbedding"],
"tags": ["LlamaIndex"],
"category": "Embeddings",
"description": "OpenAI Embedding with LlamaIndex implementation",
"inputParams": [
{
"label": "Connect Credential",
"name": "credential",
"type": "credential",
"credentialNames": ["openAIApi"],
"id": "openAIEmbedding_LlamaIndex_0-input-credential-credential"
},
{
"label": "Timeout",
"name": "timeout",
"type": "number",
"optional": true,
"additionalParams": true,
"id": "openAIEmbedding_LlamaIndex_0-input-timeout-number"
},
{
"label": "BasePath",
"name": "basepath",
"type": "string",
"optional": true,
"additionalParams": true,
"id": "openAIEmbedding_LlamaIndex_0-input-basepath-string"
}
],
"inputAnchors": [],
"inputs": {
"timeout": "",
"basepath": ""
},
"outputAnchors": [
{
"id": "openAIEmbedding_LlamaIndex_0-output-openAIEmbedding_LlamaIndex-OpenAIEmbedding|BaseEmbedding_LlamaIndex|BaseEmbedding",
"name": "openAIEmbedding_LlamaIndex",
"label": "OpenAIEmbedding",
"type": "OpenAIEmbedding | BaseEmbedding_LlamaIndex | BaseEmbedding"
}
],
"outputs": {},
"selected": false
},
"selected": false,
"positionAbsolute": {
"x": 529.8690713844503,
"y": -18.955726653613254
},
"dragging": false
},
{
"width": 300,
"height": 749,
"id": "compactrefineLlamaIndex_0",
"position": {
"x": 170.71031618977543,
"y": -33.83233752386292
},
"type": "customNode",
"data": {
"id": "compactrefineLlamaIndex_0",
"label": "Compact and Refine",
"version": 1,
"name": "compactrefineLlamaIndex",
"type": "CompactRefine",
"baseClasses": ["CompactRefine", "ResponseSynthesizer"],
"tags": ["LlamaIndex"],
"category": "Response Synthesizer",
"description": "CompactRefine is a slight variation of Refine that first compacts the text chunks into the smallest possible number of chunks.",
"inputParams": [
{
"label": "Refine Prompt",
"name": "refinePrompt",
"type": "string",
"rows": 4,
"default": "The original query is as follows: {query}\nWe have provided an existing answer: {existingAnswer}\nWe have the opportunity to refine the existing answer (only if needed) with some more context below.\n------------\n{context}\n------------\nGiven the new context, refine the original answer to better answer the query. If the context isn't useful, return the original answer.\nRefined Answer:",
"warning": "Prompt can contain no variables, or up to 3 variables. Variables must be {existingAnswer}, {context} and {query}",
"optional": true,
"id": "compactrefineLlamaIndex_0-input-refinePrompt-string"
},
{
"label": "Text QA Prompt",
"name": "textQAPrompt",
"type": "string",
"rows": 4,
"default": "Context information is below.\n---------------------\n{context}\n---------------------\nGiven the context information and not prior knowledge, answer the query.\nQuery: {query}\nAnswer:",
"warning": "Prompt can contain no variables, or up to 2 variables. Variables must be {context} and {query}",
"optional": true,
"id": "compactrefineLlamaIndex_0-input-textQAPrompt-string"
}
],
"inputAnchors": [],
"inputs": {
"refinePrompt": "The original query is as follows: {query}\nWe have provided an existing answer: {existingAnswer}\nWe have the opportunity to refine the existing answer (only if needed) with some more context below.\n------------\n{context}\n------------\nGiven the new context, refine the original answer to better answer the query. If the context isn't useful, return the original answer.\nRefined Answer:",
"textQAPrompt": "Context information:\n<context>\n{context}\n</context>\nGiven the context information and not prior knowledge, answer the query.\nQuery: {query}"
},
"outputAnchors": [
{
"id": "compactrefineLlamaIndex_0-output-compactrefineLlamaIndex-CompactRefine|ResponseSynthesizer",
"name": "compactrefineLlamaIndex",
"label": "CompactRefine",
"type": "CompactRefine | ResponseSynthesizer"
}
],
"outputs": {},
"selected": false
},
"selected": false,
"positionAbsolute": {
"x": 170.71031618977543,
"y": -33.83233752386292
},
"dragging": false
},
{
"width": 300,
"height": 529,
"id": "chatAnthropic_LlamaIndex_0",
"position": {
"x": 521.3530883359147,
"y": -584.8241219614786
},
"type": "customNode",
"data": {
"id": "chatAnthropic_LlamaIndex_0",
"label": "ChatAnthropic",
"version": 1,
"name": "chatAnthropic_LlamaIndex",
"type": "ChatAnthropic",
"baseClasses": ["ChatAnthropic", "BaseChatModel_LlamaIndex"],
"tags": ["LlamaIndex"],
"category": "Chat Models",
"description": "Wrapper around ChatAnthropic LLM with LlamaIndex implementation",
"inputParams": [
{
"label": "Connect Credential",
"name": "credential",
"type": "credential",
"credentialNames": ["anthropicApi"],
"id": "chatAnthropic_LlamaIndex_0-input-credential-credential"
},
{
"label": "Model Name",
"name": "modelName",
"type": "options",
"options": [
{
"label": "claude-2",
"name": "claude-2",
"description": "Claude 2 latest major version, automatically get updates to the model as they are released"
},
{
"label": "claude-2.1",
"name": "claude-2.1",
"description": "Claude 2 latest full version"
},
{
"label": "claude-instant-1",
"name": "claude-instant-1",
"description": "Claude Instant latest major version, automatically get updates to the model as they are released"
},
{
"label": "claude-v1",
"name": "claude-v1"
},
{
"label": "claude-v1-100k",
"name": "claude-v1-100k"
},
{
"label": "claude-v1.0",
"name": "claude-v1.0"
},
{
"label": "claude-v1.2",
"name": "claude-v1.2"
},
{
"label": "claude-v1.3",
"name": "claude-v1.3"
},
{
"label": "claude-v1.3-100k",
"name": "claude-v1.3-100k"
},
{
"label": "claude-instant-v1",
"name": "claude-instant-v1"
},
{
"label": "claude-instant-v1-100k",
"name": "claude-instant-v1-100k"
},
{
"label": "claude-instant-v1.0",
"name": "claude-instant-v1.0"
},
{
"label": "claude-instant-v1.1",
"name": "claude-instant-v1.1"
},
{
"label": "claude-instant-v1.1-100k",
"name": "claude-instant-v1.1-100k"
}
],
"default": "claude-2",
"optional": true,
"id": "chatAnthropic_LlamaIndex_0-input-modelName-options"
},
{
"label": "Temperature",
"name": "temperature",
"type": "number",
"step": 0.1,
"default": 0.9,
"optional": true,
"id": "chatAnthropic_LlamaIndex_0-input-temperature-number"
},
{
"label": "Max Tokens",
"name": "maxTokensToSample",
"type": "number",
"step": 1,
"optional": true,
"additionalParams": true,
"id": "chatAnthropic_LlamaIndex_0-input-maxTokensToSample-number"
},
{
"label": "Top P",
"name": "topP",
"type": "number",
"step": 0.1,
"optional": true,
"additionalParams": true,
"id": "chatAnthropic_LlamaIndex_0-input-topP-number"
}
],
"inputAnchors": [],
"inputs": {
"modelName": "claude-2",
"temperature": 0.9,
"maxTokensToSample": "",
"topP": ""
},
"outputAnchors": [
{
"id": "chatAnthropic_LlamaIndex_0-output-chatAnthropic_LlamaIndex-ChatAnthropic|BaseChatModel_LlamaIndex",
"name": "chatAnthropic_LlamaIndex",
"label": "ChatAnthropic",
"type": "ChatAnthropic | BaseChatModel_LlamaIndex"
}
],
"outputs": {},
"selected": false
},
"selected": false,
"positionAbsolute": {
"x": 521.3530883359147,
"y": -584.8241219614786
},
"dragging": false
}
],
"edges": [
{
"source": "pineconeLlamaIndex_0",
"sourceHandle": "pineconeLlamaIndex_0-output-pineconeLlamaIndex-Pinecone|VectorIndexRetriever",
"target": "queryEngine_0",
"targetHandle": "queryEngine_0-input-vectorStoreRetriever-VectorIndexRetriever",
"type": "buttonedge",
"id": "pineconeLlamaIndex_0-pineconeLlamaIndex_0-output-pineconeLlamaIndex-Pinecone|VectorIndexRetriever-queryEngine_0-queryEngine_0-input-vectorStoreRetriever-VectorIndexRetriever",
"data": {
"label": ""
}
},
{
"source": "openAIEmbedding_LlamaIndex_0",
"sourceHandle": "openAIEmbedding_LlamaIndex_0-output-openAIEmbedding_LlamaIndex-OpenAIEmbedding|BaseEmbedding_LlamaIndex|BaseEmbedding",
"target": "pineconeLlamaIndex_0",
"targetHandle": "pineconeLlamaIndex_0-input-embeddings-BaseEmbedding_LlamaIndex",
"type": "buttonedge",
"id": "openAIEmbedding_LlamaIndex_0-openAIEmbedding_LlamaIndex_0-output-openAIEmbedding_LlamaIndex-OpenAIEmbedding|BaseEmbedding_LlamaIndex|BaseEmbedding-pineconeLlamaIndex_0-pineconeLlamaIndex_0-input-embeddings-BaseEmbedding_LlamaIndex",
"data": {
"label": ""
}
},
{
"source": "compactrefineLlamaIndex_0",
"sourceHandle": "compactrefineLlamaIndex_0-output-compactrefineLlamaIndex-CompactRefine|ResponseSynthesizer",
"target": "queryEngine_0",
"targetHandle": "queryEngine_0-input-responseSynthesizer-ResponseSynthesizer",
"type": "buttonedge",
"id": "compactrefineLlamaIndex_0-compactrefineLlamaIndex_0-output-compactrefineLlamaIndex-CompactRefine|ResponseSynthesizer-queryEngine_0-queryEngine_0-input-responseSynthesizer-ResponseSynthesizer",
"data": {
"label": ""
}
},
{
"source": "chatAnthropic_LlamaIndex_0",
"sourceHandle": "chatAnthropic_LlamaIndex_0-output-chatAnthropic_LlamaIndex-ChatAnthropic|BaseChatModel_LlamaIndex",
"target": "pineconeLlamaIndex_0",
"targetHandle": "pineconeLlamaIndex_0-input-model-BaseChatModel_LlamaIndex",
"type": "buttonedge",
"id": "chatAnthropic_LlamaIndex_0-chatAnthropic_LlamaIndex_0-output-chatAnthropic_LlamaIndex-ChatAnthropic|BaseChatModel_LlamaIndex-pineconeLlamaIndex_0-pineconeLlamaIndex_0-input-model-BaseChatModel_LlamaIndex",
"data": {
"label": ""
}
}
]
}

View File

@ -0,0 +1,270 @@
{
"description": "Simple chat engine to handle back and forth conversations using LlamaIndex",
"badge": "NEW",
"nodes": [
{
"width": 300,
"height": 462,
"id": "simpleChatEngine_0",
"position": {
"x": 1210.127368000538,
"y": 324.98110560103896
},
"type": "customNode",
"data": {
"id": "simpleChatEngine_0",
"label": "Simple Chat Engine",
"version": 1,
"name": "simpleChatEngine",
"type": "SimpleChatEngine",
"baseClasses": ["SimpleChatEngine"],
"tags": ["LlamaIndex"],
"category": "Engine",
"description": "Simple engine to handle back and forth conversations",
"inputParams": [
{
"label": "System Message",
"name": "systemMessagePrompt",
"type": "string",
"rows": 4,
"optional": true,
"placeholder": "You are a helpful assistant",
"id": "simpleChatEngine_0-input-systemMessagePrompt-string"
}
],
"inputAnchors": [
{
"label": "Chat Model",
"name": "model",
"type": "BaseChatModel_LlamaIndex",
"id": "simpleChatEngine_0-input-model-BaseChatModel_LlamaIndex"
},
{
"label": "Memory",
"name": "memory",
"type": "BaseChatMemory",
"id": "simpleChatEngine_0-input-memory-BaseChatMemory"
}
],
"inputs": {
"model": "{{azureChatOpenAI_LlamaIndex_0.data.instance}}",
"memory": "{{bufferMemory_0.data.instance}}",
"systemMessagePrompt": "You are a helpful assistant."
},
"outputAnchors": [
{
"id": "simpleChatEngine_0-output-simpleChatEngine-SimpleChatEngine",
"name": "simpleChatEngine",
"label": "SimpleChatEngine",
"type": "SimpleChatEngine"
}
],
"outputs": {},
"selected": false
},
"selected": false,
"dragging": false,
"positionAbsolute": {
"x": 1210.127368000538,
"y": 324.98110560103896
}
},
{
"width": 300,
"height": 376,
"id": "bufferMemory_0",
"position": {
"x": 393.9823478014782,
"y": 415.7414943210391
},
"type": "customNode",
"data": {
"id": "bufferMemory_0",
"label": "Buffer Memory",
"version": 1,
"name": "bufferMemory",
"type": "BufferMemory",
"baseClasses": ["BufferMemory", "BaseChatMemory", "BaseMemory"],
"category": "Memory",
"description": "Remembers previous conversational back and forths directly",
"inputParams": [
{
"label": "Memory Key",
"name": "memoryKey",
"type": "string",
"default": "chat_history",
"id": "bufferMemory_0-input-memoryKey-string"
},
{
"label": "Input Key",
"name": "inputKey",
"type": "string",
"default": "input",
"id": "bufferMemory_0-input-inputKey-string"
}
],
"inputAnchors": [],
"inputs": {
"memoryKey": "chat_history",
"inputKey": "input"
},
"outputAnchors": [
{
"id": "bufferMemory_0-output-bufferMemory-BufferMemory|BaseChatMemory|BaseMemory",
"name": "bufferMemory",
"label": "BufferMemory",
"type": "BufferMemory | BaseChatMemory | BaseMemory"
}
],
"outputs": {},
"selected": false
},
"selected": false,
"positionAbsolute": {
"x": 393.9823478014782,
"y": 415.7414943210391
},
"dragging": false
},
{
"width": 300,
"height": 529,
"id": "azureChatOpenAI_LlamaIndex_0",
"position": {
"x": 746.5530862509605,
"y": -54.107978373323306
},
"type": "customNode",
"data": {
"id": "azureChatOpenAI_LlamaIndex_0",
"label": "AzureChatOpenAI",
"version": 1,
"name": "azureChatOpenAI_LlamaIndex",
"type": "AzureChatOpenAI",
"baseClasses": ["AzureChatOpenAI", "BaseChatModel_LlamaIndex"],
"tags": ["LlamaIndex"],
"category": "Chat Models",
"description": "Wrapper around Azure OpenAI Chat LLM with LlamaIndex implementation",
"inputParams": [
{
"label": "Connect Credential",
"name": "credential",
"type": "credential",
"credentialNames": ["azureOpenAIApi"],
"id": "azureChatOpenAI_LlamaIndex_0-input-credential-credential"
},
{
"label": "Model Name",
"name": "modelName",
"type": "options",
"options": [
{
"label": "gpt-4",
"name": "gpt-4"
},
{
"label": "gpt-4-32k",
"name": "gpt-4-32k"
},
{
"label": "gpt-3.5-turbo",
"name": "gpt-3.5-turbo"
},
{
"label": "gpt-3.5-turbo-16k",
"name": "gpt-3.5-turbo-16k"
}
],
"default": "gpt-3.5-turbo-16k",
"optional": true,
"id": "azureChatOpenAI_LlamaIndex_0-input-modelName-options"
},
{
"label": "Temperature",
"name": "temperature",
"type": "number",
"step": 0.1,
"default": 0.9,
"optional": true,
"id": "azureChatOpenAI_LlamaIndex_0-input-temperature-number"
},
{
"label": "Max Tokens",
"name": "maxTokens",
"type": "number",
"step": 1,
"optional": true,
"additionalParams": true,
"id": "azureChatOpenAI_LlamaIndex_0-input-maxTokens-number"
},
{
"label": "Top Probability",
"name": "topP",
"type": "number",
"step": 0.1,
"optional": true,
"additionalParams": true,
"id": "azureChatOpenAI_LlamaIndex_0-input-topP-number"
},
{
"label": "Timeout",
"name": "timeout",
"type": "number",
"step": 1,
"optional": true,
"additionalParams": true,
"id": "azureChatOpenAI_LlamaIndex_0-input-timeout-number"
}
],
"inputAnchors": [],
"inputs": {
"modelName": "gpt-3.5-turbo-16k",
"temperature": 0.9,
"maxTokens": "",
"topP": "",
"timeout": ""
},
"outputAnchors": [
{
"id": "azureChatOpenAI_LlamaIndex_0-output-azureChatOpenAI_LlamaIndex-AzureChatOpenAI|BaseChatModel_LlamaIndex",
"name": "azureChatOpenAI_LlamaIndex",
"label": "AzureChatOpenAI",
"type": "AzureChatOpenAI | BaseChatModel_LlamaIndex"
}
],
"outputs": {},
"selected": false
},
"selected": false,
"positionAbsolute": {
"x": 746.5530862509605,
"y": -54.107978373323306
},
"dragging": false
}
],
"edges": [
{
"source": "bufferMemory_0",
"sourceHandle": "bufferMemory_0-output-bufferMemory-BufferMemory|BaseChatMemory|BaseMemory",
"target": "simpleChatEngine_0",
"targetHandle": "simpleChatEngine_0-input-memory-BaseChatMemory",
"type": "buttonedge",
"id": "bufferMemory_0-bufferMemory_0-output-bufferMemory-BufferMemory|BaseChatMemory|BaseMemory-simpleChatEngine_0-simpleChatEngine_0-input-memory-BaseChatMemory",
"data": {
"label": ""
}
},
{
"source": "azureChatOpenAI_LlamaIndex_0",
"sourceHandle": "azureChatOpenAI_LlamaIndex_0-output-azureChatOpenAI_LlamaIndex-AzureChatOpenAI|BaseChatModel_LlamaIndex",
"target": "simpleChatEngine_0",
"targetHandle": "simpleChatEngine_0-input-model-BaseChatModel_LlamaIndex",
"type": "buttonedge",
"id": "azureChatOpenAI_LlamaIndex_0-azureChatOpenAI_LlamaIndex_0-output-azureChatOpenAI_LlamaIndex-AzureChatOpenAI|BaseChatModel_LlamaIndex-simpleChatEngine_0-simpleChatEngine_0-input-model-BaseChatModel_LlamaIndex",
"data": {
"label": ""
}
}
]
}

View File

@ -589,7 +589,7 @@
"label": "Session Id",
"name": "sessionId",
"type": "string",
"description": "If not specified, the first CHAT_MESSAGE_ID will be used as sessionId",
"description": "If not specified, a random id will be used. Learn <a target=\"_blank\" href=\"https://docs.flowiseai.com/memory/long-term-memory#ui-and-embedded-chat\">more</a>",
"default": "",
"additionalParams": true,
"optional": true,

View File

@ -37,13 +37,12 @@ import {
databaseEntities,
transformToCredentialEntity,
decryptCredentialData,
clearAllSessionMemory,
replaceInputsWithConfig,
getEncryptionKey,
checkMemorySessionId,
clearSessionMemoryFromViewMessageDialog,
replaceMemorySessionId,
getUserHome,
replaceChatHistory
replaceChatHistory,
clearSessionMemory
} from './utils'
import { cloneDeep, omit, uniqWith, isEqual } from 'lodash'
import { getDataSource } from './DataSource'
@ -387,7 +386,12 @@ export class App {
const endingNodeData = nodes.find((nd) => nd.id === endingNodeId)?.data
if (!endingNodeData) return res.status(500).send(`Ending node ${endingNodeId} data not found`)
if (endingNodeData && endingNodeData.category !== 'Chains' && endingNodeData.category !== 'Agents') {
if (
endingNodeData &&
endingNodeData.category !== 'Chains' &&
endingNodeData.category !== 'Agents' &&
endingNodeData.category !== 'Engine'
) {
return res.status(500).send(`Ending node must be either a Chain or Agent`)
}
@ -472,18 +476,15 @@ export class App {
const parsedFlowData: IReactFlowObject = JSON.parse(flowData)
const nodes = parsedFlowData.nodes
if (isClearFromViewMessageDialog) {
await clearSessionMemoryFromViewMessageDialog(
nodes,
this.nodesPool.componentNodes,
chatId,
this.AppDataSource,
sessionId,
memoryType
)
} else {
await clearAllSessionMemory(nodes, this.nodesPool.componentNodes, chatId, this.AppDataSource, sessionId)
}
await clearSessionMemory(
nodes,
this.nodesPool.componentNodes,
chatId,
this.AppDataSource,
sessionId,
memoryType,
isClearFromViewMessageDialog
)
const deleteOptions: FindOptionsWhere<ChatMessage> = { chatflowid, chatId }
if (memoryType) deleteOptions.memoryType = memoryType
@ -1377,7 +1378,13 @@ export class App {
const endingNodeData = nodes.find((nd) => nd.id === endingNodeId)?.data
if (!endingNodeData) return res.status(500).send(`Ending node ${endingNodeId} data not found`)
if (endingNodeData && endingNodeData.category !== 'Chains' && endingNodeData.category !== 'Agents' && !isUpsert) {
if (
endingNodeData &&
endingNodeData.category !== 'Chains' &&
endingNodeData.category !== 'Agents' &&
endingNodeData.category !== 'Engine' &&
!isUpsert
) {
return res.status(500).send(`Ending node must be either a Chain or Agent`)
}
@ -1396,7 +1403,9 @@ export class App {
isStreamValid = isFlowValidForStream(nodes, endingNodeData)
let chatHistory: IMessage[] | string = incomingInput.history
let chatHistory: IMessage[] = incomingInput.history
// If chatHistory is empty, and sessionId/chatId is present, replace it
if (
endingNodeData.inputs?.memory &&
!incomingInput.history &&
@ -1437,8 +1446,10 @@ export class App {
const nodeToExecute = reactFlowNodes.find((node: IReactFlowNode) => node.id === endingNodeId)
if (!nodeToExecute) return res.status(404).send(`Node ${endingNodeId} not found`)
if (incomingInput.overrideConfig)
if (incomingInput.overrideConfig) {
nodeToExecute.data = replaceInputsWithConfig(nodeToExecute.data, incomingInput.overrideConfig)
}
const reactFlowNodeData: INodeData = resolveVariables(
nodeToExecute.data,
reactFlowNodes,
@ -1458,19 +1469,11 @@ export class App {
logger.debug(`[server]: Running ${nodeToExecuteData.label} (${nodeToExecuteData.id})`)
let sessionId = undefined
if (nodeToExecuteData.instance) sessionId = checkMemorySessionId(nodeToExecuteData.instance, chatId)
const memoryNode = this.findMemoryLabel(nodes, edges)
const memoryType = memoryNode?.data.label
let chatHistory: IMessage[] | string = incomingInput.history
if (memoryNode && !incomingInput.history && (incomingInput.chatId || incomingInput.overrideConfig?.sessionId)) {
chatHistory = await replaceChatHistory(memoryNode, incomingInput, this.AppDataSource, databaseEntities, logger)
}
if (nodeToExecuteData.instance) sessionId = replaceMemorySessionId(nodeToExecuteData.instance, chatId)
let result = isStreamValid
? await nodeInstance.run(nodeToExecuteData, incomingInput.question, {
chatHistory,
chatHistory: incomingInput.history,
socketIO,
socketIOClientId: incomingInput.socketIOClientId,
logger,
@ -1480,7 +1483,7 @@ export class App {
chatId
})
: await nodeInstance.run(nodeToExecuteData, incomingInput.question, {
chatHistory,
chatHistory: incomingInput.history,
logger,
appDataSource: this.AppDataSource,
databaseEntities,
@ -1495,6 +1498,9 @@ export class App {
sessionId = result.assistant.threadId
}
const memoryNode = this.findMemoryLabel(nodes, edges)
const memoryType = memoryNode?.data.label
const userMessage: Omit<IChatMessage, 'id'> = {
role: 'userMessage',
content: incomingInput.question,

View File

@ -217,7 +217,7 @@ export const buildLangchain = async (
depthQueue: IDepthQueue,
componentNodes: IComponentNodes,
question: string,
chatHistory: IMessage[] | string,
chatHistory: IMessage[],
chatId: string,
chatflowid: string,
appDataSource: DataSource,
@ -324,22 +324,30 @@ export const buildLangchain = async (
}
/**
* Clear all session memories on the canvas
* Clear session memories
* @param {IReactFlowNode[]} reactFlowNodes
* @param {IComponentNodes} componentNodes
* @param {string} chatId
* @param {DataSource} appDataSource
* @param {string} sessionId
* @param {string} memoryType
* @param {string} isClearFromViewMessageDialog
*/
export const clearAllSessionMemory = async (
export const clearSessionMemory = async (
reactFlowNodes: IReactFlowNode[],
componentNodes: IComponentNodes,
chatId: string,
appDataSource: DataSource,
sessionId?: string
sessionId?: string,
memoryType?: string,
isClearFromViewMessageDialog?: string
) => {
for (const node of reactFlowNodes) {
if (node.data.category !== 'Memory' && node.data.type !== 'OpenAIAssistant') continue
// Only clear specific session memory from View Message Dialog UI
if (isClearFromViewMessageDialog && memoryType && node.data.label !== memoryType) continue
const nodeInstanceFilePath = componentNodes[node.data.name].filePath as string
const nodeModule = await import(nodeInstanceFilePath)
const newNodeInstance = new nodeModule.nodeClass()
@ -348,42 +356,10 @@ export const clearAllSessionMemory = async (
node.data.inputs.sessionId = sessionId
}
if (newNodeInstance.memoryMethods && newNodeInstance.memoryMethods.clearSessionMemory) {
await newNodeInstance.memoryMethods.clearSessionMemory(node.data, { chatId, appDataSource, databaseEntities, logger })
}
}
}
const initializedInstance = await newNodeInstance.init(node.data, '', { chatId, appDataSource, databaseEntities, logger })
/**
* Clear specific session memory from View Message Dialog UI
* @param {IReactFlowNode[]} reactFlowNodes
* @param {IComponentNodes} componentNodes
* @param {string} chatId
* @param {DataSource} appDataSource
* @param {string} sessionId
* @param {string} memoryType
*/
export const clearSessionMemoryFromViewMessageDialog = async (
reactFlowNodes: IReactFlowNode[],
componentNodes: IComponentNodes,
chatId: string,
appDataSource: DataSource,
sessionId?: string,
memoryType?: string
) => {
if (!sessionId) return
for (const node of reactFlowNodes) {
if (node.data.category !== 'Memory' && node.data.type !== 'OpenAIAssistant') continue
if (memoryType && node.data.label !== memoryType) continue
const nodeInstanceFilePath = componentNodes[node.data.name].filePath as string
const nodeModule = await import(nodeInstanceFilePath)
const newNodeInstance = new nodeModule.nodeClass()
if (sessionId && node.data.inputs) node.data.inputs.sessionId = sessionId
if (newNodeInstance.memoryMethods && newNodeInstance.memoryMethods.clearSessionMemory) {
await newNodeInstance.memoryMethods.clearSessionMemory(node.data, { chatId, appDataSource, databaseEntities, logger })
return
if (initializedInstance.clearChatMessages) {
await initializedInstance.clearChatMessages()
}
}
}
@ -400,7 +376,7 @@ export const getVariableValue = (
paramValue: string,
reactFlowNodes: IReactFlowNode[],
question: string,
chatHistory: IMessage[] | string,
chatHistory: IMessage[],
isAcceptVariable = false
) => {
let returnVal = paramValue
@ -433,10 +409,7 @@ export const getVariableValue = (
}
if (isAcceptVariable && variableFullPath === CHAT_HISTORY_VAR_PREFIX) {
variableDict[`{{${variableFullPath}}}`] = handleEscapeCharacters(
typeof chatHistory === 'string' ? chatHistory : convertChatHistoryToText(chatHistory),
false
)
variableDict[`{{${variableFullPath}}}`] = handleEscapeCharacters(convertChatHistoryToText(chatHistory), false)
}
// Split by first occurrence of '.' to get just nodeId
@ -479,7 +452,7 @@ export const resolveVariables = (
reactFlowNodeData: INodeData,
reactFlowNodes: IReactFlowNode[],
question: string,
chatHistory: IMessage[] | string
chatHistory: IMessage[]
): INodeData => {
let flowNodeData = cloneDeep(reactFlowNodeData)
const types = 'inputs'
@ -558,7 +531,7 @@ export const isStartNodeDependOnInput = (startingNodes: IReactFlowNode[], nodes:
if (inputVariables.length > 0) return true
}
}
const whitelistNodeNames = ['vectorStoreToDocument', 'autoGPT']
const whitelistNodeNames = ['vectorStoreToDocument', 'autoGPT', 'chatPromptTemplate', 'promptTemplate'] //If these nodes are found, chatflow cannot be reused
for (const node of nodes) {
if (whitelistNodeNames.includes(node.data.name)) return true
}
@ -706,7 +679,15 @@ export const findAvailableConfigs = (reactFlowNodes: IReactFlowNode[], component
*/
export const isFlowValidForStream = (reactFlowNodes: IReactFlowNode[], endingNodeData: INodeData) => {
const streamAvailableLLMs = {
'Chat Models': ['azureChatOpenAI', 'chatOpenAI', 'chatAnthropic', 'chatOllama', 'awsChatBedrock'],
'Chat Models': [
'azureChatOpenAI',
'chatOpenAI',
'chatOpenAI_LlamaIndex',
'chatAnthropic',
'chatAnthropic_LlamaIndex',
'chatOllama',
'awsChatBedrock'
],
LLMs: ['azureOpenAI', 'openAI', 'ollama']
}
@ -729,6 +710,9 @@ export const isFlowValidForStream = (reactFlowNodes: IReactFlowNode[], endingNod
// Agent that are available to stream
const whitelistAgents = ['openAIFunctionAgent', 'csvAgent', 'airtableAgent', 'conversationalRetrievalAgent']
isValidChainOrAgent = whitelistAgents.includes(endingNodeData.name)
} else if (endingNodeData.category === 'Engine') {
const whitelistEngine = ['contextChatEngine', 'simpleChatEngine']
isValidChainOrAgent = whitelistEngine.includes(endingNodeData.name)
}
// If no output parser, flow is available to stream
@ -866,7 +850,7 @@ export const redactCredentialWithPasswordType = (
* @param {any} instance
* @param {string} chatId
*/
export const checkMemorySessionId = (instance: any, chatId: string): string | undefined => {
export const replaceMemorySessionId = (instance: any, chatId: string): string | undefined => {
if (instance.memory && instance.memory.isSessionIdUsingChatMessageId && chatId) {
instance.memory.sessionId = chatId
instance.memory.chatHistory.sessionId = chatId
@ -893,7 +877,7 @@ export const replaceChatHistory = async (
appDataSource: DataSource,
databaseEntities: IDatabaseEntity,
logger: any
): Promise<string> => {
): Promise<IMessage[]> => {
const nodeInstanceFilePath = memoryNode.data.filePath as string
const nodeModule = await import(nodeInstanceFilePath)
const newNodeInstance = new nodeModule.nodeClass()
@ -902,14 +886,12 @@ export const replaceChatHistory = async (
memoryNode.data.inputs.sessionId = incomingInput.overrideConfig.sessionId
}
if (newNodeInstance.memoryMethods && newNodeInstance.memoryMethods.getChatMessages) {
return await newNodeInstance.memoryMethods.getChatMessages(memoryNode.data, {
chatId: incomingInput.chatId,
appDataSource,
databaseEntities,
logger
})
}
const initializedInstance = await newNodeInstance.init(memoryNode.data, '', {
chatId: incomingInput.chatId,
appDataSource,
databaseEntities,
logger
})
return ''
return await initializedInstance.getChatMessages()
}

Binary file not shown.

After

Width:  |  Height:  |  Size: 28 KiB

View File

@ -132,6 +132,35 @@ const NodeInfoDialog = ({ show, dialogProps, onCancel }) => {
</span>
</div>
)}
{dialogProps.data.tags &&
dialogProps.data.tags.length &&
dialogProps.data.tags.map((tag, index) => (
<div
style={{
display: 'flex',
flexDirection: 'row',
width: 'max-content',
borderRadius: 15,
background: '#cae9ff',
padding: 5,
paddingLeft: 10,
paddingRight: 10,
marginTop: 5,
marginLeft: 10,
marginBottom: 5
}}
key={index}
>
<span
style={{
color: '#023e7d',
fontSize: '0.825rem'
}}
>
{tag.toLowerCase()}
</span>
</div>
))}
</div>
</div>
</div>

View File

@ -286,6 +286,7 @@ export const generateExportFlowData = (flowData) => {
name: node.data.name,
type: node.data.type,
baseClasses: node.data.baseClasses,
tags: node.data.tags,
category: node.data.category,
description: node.data.description,
inputParams: node.data.inputParams,

View File

@ -22,7 +22,9 @@ import {
Popper,
Stack,
Typography,
Chip
Chip,
Tab,
Tabs
} from '@mui/material'
import ExpandMoreIcon from '@mui/icons-material/ExpandMore'
@ -36,12 +38,20 @@ import { StyledFab } from 'ui-component/button/StyledFab'
// icons
import { IconPlus, IconSearch, IconMinus, IconX } from '@tabler/icons'
import LlamaindexPNG from 'assets/images/llamaindex.png'
import LangChainPNG from 'assets/images/langchain.png'
// const
import { baseURL } from 'store/constant'
import { SET_COMPONENT_NODES } from 'store/actions'
// ==============================|| ADD NODES||============================== //
// Accessibility props linking each tab button to its tab panel
// (id / aria-controls pair expected by MUI's <Tab> component).
function a11yProps(index) {
    const tabId = `attachment-tab-${index}`
    const panelId = `attachment-tabpanel-${index}`
    return { id: tabId, 'aria-controls': panelId }
}
const AddNodes = ({ nodesData, node }) => {
const theme = useTheme()
@ -52,6 +62,7 @@ const AddNodes = ({ nodesData, node }) => {
const [nodes, setNodes] = useState({})
const [open, setOpen] = useState(false)
const [categoryExpanded, setCategoryExpanded] = useState({})
const [tabValue, setTabValue] = useState(0)
const anchorRef = useRef(null)
const prevOpen = useRef(open)
@ -86,6 +97,11 @@ const AddNodes = ({ nodesData, node }) => {
}
}
// Handle switching between the LangChain / LlamaIndex tabs: store the new
// tab index and immediately re-filter the node list. The new index is passed
// through to filterSearch explicitly because the tabValue state update is
// not yet visible in this render.
const handleTabChange = (event, newValue) => {
setTabValue(newValue)
filterSearch(searchValue, newValue)
}
const getSearchedNodes = (value) => {
const passed = nodesData.filter((nd) => {
const passesQuery = nd.name.toLowerCase().includes(value.toLowerCase())
@ -95,23 +111,34 @@ const AddNodes = ({ nodesData, node }) => {
return passed
}
const filterSearch = (value) => {
const filterSearch = (value, newTabValue) => {
setSearchValue(value)
setTimeout(() => {
if (value) {
const returnData = getSearchedNodes(value)
groupByCategory(returnData, true)
groupByCategory(returnData, newTabValue ?? tabValue, true)
scrollTop()
} else if (value === '') {
groupByCategory(nodesData)
groupByCategory(nodesData, newTabValue ?? tabValue)
scrollTop()
}
}, 500)
}
const groupByCategory = (nodes, isFilter) => {
// Partition nodes by framework tab: tab 0 returns untagged (LangChain)
// nodes, any other tab returns nodes tagged with 'LlamaIndex'.
// NOTE(review): nodes carrying tags other than 'LlamaIndex' appear in
// neither tab — presumably intentional, confirm against node definitions.
const groupByTags = (nodes, newTabValue = 0) => {
    if (newTabValue === 0) {
        return nodes.filter((node) => !node.tags)
    }
    return nodes.filter((node) => node.tags && node.tags.includes('LlamaIndex'))
}
const groupByCategory = (nodes, newTabValue, isFilter) => {
const taggedNodes = groupByTags(nodes, newTabValue)
const accordianCategories = {}
const result = nodes.reduce(function (r, a) {
const result = taggedNodes.reduce(function (r, a) {
r[a.category] = r[a.category] || []
r[a.category].push(a)
accordianCategories[a.category] = isFilter ? true : false
@ -244,13 +271,61 @@ const AddNodes = ({ nodesData, node }) => {
'aria-label': 'weight'
}}
/>
<Tabs variant='fullWidth' value={tabValue} onChange={handleTabChange} aria-label='tabs'>
{['LangChain', 'LlamaIndex'].map((item, index) => (
<Tab
icon={
<div
style={{
borderRadius: '50%'
}}
>
<img
style={{
width: '25px',
height: '25px',
borderRadius: '50%',
objectFit: 'contain'
}}
src={index === 0 ? LangChainPNG : LlamaindexPNG}
alt={item}
/>
</div>
}
iconPosition='start'
key={index}
label={
item === 'LlamaIndex' ? (
<>
<h4>{item}</h4>
&nbsp;
<Chip
sx={{
width: 'max-content',
fontWeight: 700,
fontSize: '0.65rem',
background: theme.palette.primary.main,
color: 'white'
}}
size='small'
label='BETA'
/>
</>
) : (
<h4>{item}</h4>
)
}
{...a11yProps(index)}
></Tab>
))}
</Tabs>
<Divider />
</Box>
<PerfectScrollbar
containerRef={(el) => {
ps.current = el
}}
style={{ height: '100%', maxHeight: 'calc(100vh - 320px)', overflowX: 'hidden' }}
style={{ height: '100%', maxHeight: 'calc(100vh - 380px)', overflowX: 'hidden' }}
>
<Box sx={{ p: 2 }}>
<List

View File

@ -18,6 +18,7 @@ import NodeInfoDialog from 'ui-component/dialog/NodeInfoDialog'
import { baseURL } from 'store/constant'
import { IconTrash, IconCopy, IconInfoCircle, IconAlertTriangle } from '@tabler/icons'
import { flowContext } from 'store/context/ReactFlowContext'
import LlamaindexPNG from 'assets/images/llamaindex.png'
const CardWrapper = styled(MainCard)(({ theme }) => ({
background: theme.palette.card.main,
@ -179,9 +180,25 @@ const CanvasNode = ({ data }) => {
{data.label}
</Typography>
</Box>
<div style={{ flexGrow: 1 }}></div>
{data.tags && data.tags.includes('LlamaIndex') && (
<>
<div
style={{
borderRadius: '50%',
padding: 15
}}
>
<img
style={{ width: '25px', height: '25px', borderRadius: '50%', objectFit: 'contain' }}
src={LlamaindexPNG}
alt='LlamaIndex'
/>
</div>
</>
)}
{warningMessage && (
<>
<div style={{ flexGrow: 1 }}></div>
<Tooltip title={<span style={{ whiteSpace: 'pre-line' }}>{warningMessage}</span>} placement='top'>
<IconButton sx={{ height: 35, width: 35 }}>
<IconAlertTriangle size={35} color='orange' />

View File

@ -13,6 +13,7 @@ import AdditionalParamsDialog from 'ui-component/dialog/AdditionalParamsDialog'
// const
import { baseURL } from 'store/constant'
import LlamaindexPNG from 'assets/images/llamaindex.png'
const CardWrapper = styled(MainCard)(({ theme }) => ({
background: theme.palette.card.main,
@ -87,6 +88,23 @@ const MarketplaceCanvasNode = ({ data }) => {
{data.label}
</Typography>
</Box>
<div style={{ flexGrow: 1 }}></div>
{data.tags && data.tags.includes('LlamaIndex') && (
<>
<div
style={{
borderRadius: '50%',
padding: 15
}}
>
<img
style={{ width: '25px', height: '25px', borderRadius: '50%', objectFit: 'contain' }}
src={LlamaindexPNG}
alt='LlamaIndex'
/>
</div>
</>
)}
</div>
{(data.inputAnchors.length > 0 || data.inputParams.length > 0) && (
<>