Merge branch 'main' into FEATURE/Vision

# Conflicts:
#	packages/components/nodes/chains/ConversationChain/ConversationChain.ts
#	packages/server/src/index.ts
#	packages/server/src/utils/index.ts
feature/Vision
Henry 2024-02-02 02:54:06 +00:00
commit 9cd0362f24
136 changed files with 5054 additions and 2019 deletions

View File

@ -0,0 +1,33 @@
# Workflow: fires when a pull request targeting `main` is closed via merge,
# then notifies the cloud-hosted repo through a repository_dispatch event.
name: autoSyncMergedPullRequest
on:
  pull_request_target:
    types:
      - closed
    branches: ['main']
jobs:
  autoSyncMergedPullRequest:
    # Only merged PRs — a close-without-merge must not trigger a sync.
    if: github.event.pull_request.merged == true
    runs-on: ubuntu-latest
    permissions:
      contents: write
    steps:
      - uses: actions/checkout@v3
      - name: Show PR info
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        run: |
          # Quoted so the shell does not treat "#<number>" as a comment,
          # which would truncate the log line after "The PR".
          echo "The PR #${{ github.event.pull_request.number }} was merged on main branch!"
      - name: Repository Dispatch
        uses: peter-evans/repository-dispatch@v2
        with:
          token: ${{ secrets.AUTOSYNC_TOKEN }}
          repository: ${{ secrets.AUTOSYNC_CH_URL }}
          event-type: ${{ secrets.AUTOSYNC_PR_EVENT_TYPE }}
          # NOTE(review): title/body are interpolated raw into JSON; a PR title
          # containing a double quote will break the payload — consider toJSON().
          client-payload: >-
            {
              "ref": "${{ github.ref }}",
              "prNumber": "${{ github.event.pull_request.number }}",
              "prTitle": "${{ github.event.pull_request.title }}",
              "prDescription": "${{ github.event.pull_request.body }}",
              "sha": "${{ github.sha }}"
            }

View File

# Workflow: on a direct push to `main`, forward single commits to the cloud
# hosted repo via repository_dispatch. Multi-commit pushes (i.e. merge
# pushes) are ignored here — they are handled by autoSyncMergedPullRequest.
name: autoSyncSingleCommit
on:
  push:
    branches:
      - main
jobs:
  doNotAutoSyncSingleCommit:
    # A second commit in the push means this is a merge push, not a single commit.
    if: github.event.commits[1] != null
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
      - name: IGNORE autoSyncSingleCommit
        run: |
          echo "This push came from a merge commit. We will ignore it. This case is handled in the autoSyncMergedPullRequest workflow for merge commits coming from merged pull requests only! Beware, regular merge commits are not handled by any workflow for the moment."
  autoSyncSingleCommit:
    if: github.event.commits[1] == null
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
      - name: autoSyncSingleCommit
        env:
          GITHUB_CONTEXT: ${{ toJSON(github) }}
        run: |
          echo "Autosync a single commit with id: ${{ github.sha }} from openSource main branch towards cloud hosted version."
      - name: Repository Dispatch
        uses: peter-evans/repository-dispatch@v2
        with:
          token: ${{ secrets.AUTOSYNC_TOKEN }}
          repository: ${{ secrets.AUTOSYNC_CH_URL }}
          event-type: ${{ secrets.AUTOSYNC_SC_EVENT_TYPE }}
          # NOTE(review): commit message is interpolated raw into JSON; a message
          # containing a double quote or newline will break the payload.
          client-payload: >-
            {
              "ref": "${{ github.ref }}",
              "sha": "${{ github.sha }}",
              "commitMessage": "${{ github.event.commits[0].message }}"
            }

View File

@ -138,6 +138,7 @@ Flowise 支持不同的环境变量来配置您的实例。您可以在 `package
| DATABASE_NAME | 数据库名称(当 DATABASE_TYPE 不是 sqlite 时) | 字符串 | |
| SECRETKEY_PATH | 保存加密密钥(用于加密/解密凭据)的位置 | 字符串 | `your-path/Flowise/packages/server` |
| FLOWISE_SECRETKEY_OVERWRITE | 加密密钥用于替代存储在 SECRETKEY_PATH 中的密钥 | 字符串 |
| DISABLE_FLOWISE_TELEMETRY | 关闭遥测 | 字符串 |
您也可以在使用 `npx` 时指定环境变量。例如:

View File

@ -123,6 +123,8 @@ Flowise support different environment variables to configure your instance. You
| Variable | Description | Type | Default |
| --------------------------- | ---------------------------------------------------------------------------- | ------------------------------------------------ | ----------------------------------- |
| PORT | The HTTP port Flowise runs on | Number | 3000 |
| CORS_ORIGINS | The allowed origins for all cross-origin HTTP calls | String | |
| IFRAME_ORIGINS | The allowed origins for iframe src embedding | String | |
| FLOWISE_USERNAME | Username to login | String | |
| FLOWISE_PASSWORD | Password to login | String | |
| DEBUG | Print logs from components | Boolean | |
@ -138,9 +140,11 @@ Flowise support different environment variables to configure your instance. You
| DATABASE_USER | Database username (When DATABASE_TYPE is not sqlite) | String | |
| DATABASE_PASSWORD | Database password (When DATABASE_TYPE is not sqlite) | String | |
| DATABASE_NAME | Database name (When DATABASE_TYPE is not sqlite) | String | |
| DATABASE_SSL_KEY_BASE64 | Database SSL client cert in base64 (takes priority over DATABASE_SSL) | Boolean | false |
| DATABASE_SSL | Database connection over SSL (When DATABASE_TYPE is postgres) | Boolean | false |
| SECRETKEY_PATH | Location where encryption key (used to encrypt/decrypt credentials) is saved | String | `your-path/Flowise/packages/server` |
| FLOWISE_SECRETKEY_OVERWRITE | Encryption key to be used instead of the key stored in SECRETKEY_PATH | String |
| DISABLE_FLOWISE_TELEMETRY | Turn off telemetry | Boolean |
You can also specify the env variables when using `npx`. For example:

View File

@ -33,4 +33,4 @@ scenarios:
# Seconds
# Total Users = 2 + 3 + 3 = 8
# Each making 1 HTTP call
# Over a duration of 3 seconds
# Over a duration of 3 seconds

View File

@ -4,6 +4,9 @@ APIKEY_PATH=/root/.flowise
SECRETKEY_PATH=/root/.flowise
LOG_PATH=/root/.flowise/logs
# CORS_ORIGINS="*"
# IFRAME_ORIGINS="*"
# NUMBER_OF_PROXIES= 1
# DATABASE_TYPE=postgres
@ -13,6 +16,7 @@ LOG_PATH=/root/.flowise/logs
# DATABASE_USER=""
# DATABASE_PASSWORD=""
# DATABASE_SSL=true
# DATABASE_SSL_KEY_BASE64=<Self signed certificate in BASE64>
# FLOWISE_USERNAME=user
# FLOWISE_PASSWORD=1234
@ -25,4 +29,6 @@ LOG_PATH=/root/.flowise/logs
# LANGCHAIN_TRACING_V2=true
# LANGCHAIN_ENDPOINT=https://api.smith.langchain.com
# LANGCHAIN_API_KEY=your_api_key
# LANGCHAIN_PROJECT=your_project
# LANGCHAIN_PROJECT=your_project
# DISABLE_FLOWISE_TELEMETRY=true

View File

@ -6,6 +6,8 @@ services:
restart: always
environment:
- PORT=${PORT}
- CORS_ORIGINS=${CORS_ORIGINS}
- IFRAME_ORIGINS=${IFRAME_ORIGINS}
- FLOWISE_USERNAME=${FLOWISE_USERNAME}
- FLOWISE_PASSWORD=${FLOWISE_PASSWORD}
- DEBUG=${DEBUG}
@ -17,11 +19,13 @@ services:
- DATABASE_USER=${DATABASE_USER}
- DATABASE_PASSWORD=${DATABASE_PASSWORD}
- DATABASE_SSL=${DATABASE_SSL}
- DATABASE_SSL_KEY_BASE64=${DATABASE_SSL_KEY_BASE64}
- APIKEY_PATH=${APIKEY_PATH}
- SECRETKEY_PATH=${SECRETKEY_PATH}
- FLOWISE_SECRETKEY_OVERWRITE=${FLOWISE_SECRETKEY_OVERWRITE}
- LOG_LEVEL=${LOG_LEVEL}
- LOG_PATH=${LOG_PATH}
- DISABLE_FLOWISE_TELEMETRY=${DISABLE_FLOWISE_TELEMETRY}
ports:
- '${PORT}:${PORT}'
volumes:

View File

@ -1,6 +1,6 @@
{
"name": "flowise",
"version": "1.4.9",
"version": "1.4.12",
"private": true,
"homepage": "https://flowiseai.com",
"workspaces": [
@ -48,7 +48,7 @@
"pretty-quick": "^3.1.3",
"rimraf": "^3.0.2",
"run-script-os": "^1.1.6",
"turbo": "1.7.4",
"turbo": "^1.7.4",
"typescript": "^4.8.4"
},
"engines": {

View File

@ -16,11 +16,6 @@ class PineconeApi implements INodeCredential {
label: 'Pinecone Api Key',
name: 'pineconeApiKey',
type: 'password'
},
{
label: 'Pinecone Environment',
name: 'pineconeEnv',
type: 'string'
}
]
}

View File

@ -9,7 +9,7 @@ import { FlowiseMemory, ICommonObject, IMessage, INode, INodeData, INodeParams }
import { AgentExecutor } from '../../../src/agents'
import { ChatConversationalAgent } from 'langchain/agents'
import { renderTemplate } from '@langchain/core/prompts'
import { injectChainNodeData } from '../../../src/MultiModalUtils'
import { injectChainNodeData } from '../../../src/multiModalUtils'
const DEFAULT_PREFIX = `Assistant is a large language model trained by OpenAI.

View File

@ -5,7 +5,7 @@ import { Tool } from 'langchain/tools'
import { BaseLanguageModel } from 'langchain/base_language'
import { flatten } from 'lodash'
import { additionalCallbacks } from '../../../src/handler'
import { injectChainNodeData } from '../../../src/MultiModalUtils'
import { injectChainNodeData } from '../../../src/multiModalUtils'
class MRKLAgentChat_Agents implements INode {
label: string

View File

@ -64,7 +64,7 @@ class OpenAIFunctionAgent_Agents implements INode {
return prepareAgent(nodeData, { sessionId: this.sessionId, chatId: options.chatId, input }, options.chatHistory)
}
async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string> {
async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string | ICommonObject> {
const memory = nodeData.inputs?.memory as FlowiseMemory
const executor = prepareAgent(nodeData, { sessionId: this.sessionId, chatId: options.chatId, input }, options.chatHistory)
@ -72,12 +72,20 @@ class OpenAIFunctionAgent_Agents implements INode {
const callbacks = await additionalCallbacks(nodeData, options)
let res: ChainValues = {}
let sourceDocuments: ICommonObject[] = []
if (options.socketIO && options.socketIOClientId) {
const handler = new CustomChainHandler(options.socketIO, options.socketIOClientId)
res = await executor.invoke({ input }, { callbacks: [loggerHandler, handler, ...callbacks] })
if (res.sourceDocuments) {
options.socketIO.to(options.socketIOClientId).emit('sourceDocuments', flatten(res.sourceDocuments))
sourceDocuments = res.sourceDocuments
}
} else {
res = await executor.invoke({ input }, { callbacks: [loggerHandler, ...callbacks] })
if (res.sourceDocuments) {
sourceDocuments = res.sourceDocuments
}
}
await memory.addChatMessages(
@ -94,7 +102,7 @@ class OpenAIFunctionAgent_Agents implements INode {
this.sessionId
)
return res?.output
return sourceDocuments.length ? { text: res?.output, sourceDocuments: flatten(sourceDocuments) } : res?.output
}
}

View File

@ -1,9 +1,46 @@
import { getBaseClasses, getCredentialData, getCredentialParam, ICommonObject, INode, INodeData, INodeParams } from '../../../src'
import { RedisCache as LangchainRedisCache } from 'langchain/cache/ioredis'
import { Redis } from 'ioredis'
import { Redis, RedisOptions } from 'ioredis'
import { isEqual } from 'lodash'
import { Generation, ChatGeneration, StoredGeneration, mapStoredMessageToChatMessage } from 'langchain/schema'
import hash from 'object-hash'
let redisClientSingleton: Redis
let redisClientOption: RedisOptions
let redisClientUrl: string
const getRedisClientbyOption = (option: RedisOptions) => {
if (!redisClientSingleton) {
// if client doesn't exists
redisClientSingleton = new Redis(option)
redisClientOption = option
return redisClientSingleton
} else if (redisClientSingleton && !isEqual(option, redisClientOption)) {
// if client exists but option changed
redisClientSingleton.quit()
redisClientSingleton = new Redis(option)
redisClientOption = option
return redisClientSingleton
}
return redisClientSingleton
}
const getRedisClientbyUrl = (url: string) => {
if (!redisClientSingleton) {
// if client doesn't exists
redisClientSingleton = new Redis(url)
redisClientUrl = url
return redisClientSingleton
} else if (redisClientSingleton && url !== redisClientUrl) {
// if client exists but option changed
redisClientSingleton.quit()
redisClientSingleton = new Redis(url)
redisClientUrl = url
return redisClientSingleton
}
return redisClientSingleton
}
class RedisCache implements INode {
label: string
name: string
@ -60,7 +97,7 @@ class RedisCache implements INode {
const tlsOptions = sslEnabled === true ? { tls: { rejectUnauthorized: false } } : {}
client = new Redis({
client = getRedisClientbyOption({
port: portStr ? parseInt(portStr) : 6379,
host,
username,
@ -68,7 +105,7 @@ class RedisCache implements INode {
...tlsOptions
})
} else {
client = new Redis(redisUrl)
client = getRedisClientbyUrl(redisUrl)
}
const redisClient = new LangchainRedisCache(client)
@ -94,7 +131,7 @@ class RedisCache implements INode {
for (let i = 0; i < value.length; i += 1) {
const key = getCacheKey(prompt, llmKey, String(i))
if (ttl) {
await client.set(key, JSON.stringify(serializeGeneration(value[i])), 'EX', parseInt(ttl, 10))
await client.set(key, JSON.stringify(serializeGeneration(value[i])), 'PX', parseInt(ttl, 10))
} else {
await client.set(key, JSON.stringify(serializeGeneration(value[i])))
}

View File

@ -1,9 +1,46 @@
import { getBaseClasses, getCredentialData, getCredentialParam, ICommonObject, INode, INodeData, INodeParams } from '../../../src'
import { Redis } from 'ioredis'
import { Redis, RedisOptions } from 'ioredis'
import { isEqual } from 'lodash'
import { CacheBackedEmbeddings } from 'langchain/embeddings/cache_backed'
import { RedisByteStore } from 'langchain/storage/ioredis'
import { Embeddings } from 'langchain/embeddings/base'
let redisClientSingleton: Redis
let redisClientOption: RedisOptions
let redisClientUrl: string
const getRedisClientbyOption = (option: RedisOptions) => {
if (!redisClientSingleton) {
// if client doesn't exists
redisClientSingleton = new Redis(option)
redisClientOption = option
return redisClientSingleton
} else if (redisClientSingleton && !isEqual(option, redisClientOption)) {
// if client exists but option changed
redisClientSingleton.quit()
redisClientSingleton = new Redis(option)
redisClientOption = option
return redisClientSingleton
}
return redisClientSingleton
}
const getRedisClientbyUrl = (url: string) => {
if (!redisClientSingleton) {
// if client doesn't exists
redisClientSingleton = new Redis(url)
redisClientUrl = url
return redisClientSingleton
} else if (redisClientSingleton && url !== redisClientUrl) {
// if client exists but option changed
redisClientSingleton.quit()
redisClientSingleton = new Redis(url)
redisClientUrl = url
return redisClientSingleton
}
return redisClientSingleton
}
class RedisEmbeddingsCache implements INode {
label: string
name: string
@ -75,7 +112,7 @@ class RedisEmbeddingsCache implements INode {
const tlsOptions = sslEnabled === true ? { tls: { rejectUnauthorized: false } } : {}
client = new Redis({
client = getRedisClientbyOption({
port: portStr ? parseInt(portStr) : 6379,
host,
username,
@ -83,7 +120,7 @@ class RedisEmbeddingsCache implements INode {
...tlsOptions
})
} else {
client = new Redis(redisUrl)
client = getRedisClientbyUrl(redisUrl)
}
ttl ??= '3600'

View File

@ -1,14 +1,15 @@
import { FlowiseMemory, ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
import { ConversationChain } from 'langchain/chains'
import { getBaseClasses } from '../../../src/utils'
import { getBaseClasses, handleEscapeCharacters } from '../../../src/utils'
import { ChatPromptTemplate, HumanMessagePromptTemplate, MessagesPlaceholder, SystemMessagePromptTemplate } from 'langchain/prompts'
import { BaseChatModel } from 'langchain/chat_models/base'
import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler'
import { flatten } from 'lodash'
import { Document } from 'langchain/document'
import { RunnableSequence } from 'langchain/schema/runnable'
import { StringOutputParser } from 'langchain/schema/output_parser'
import { injectChainNodeData } from '../../../src/MultiModalUtils'
import { ConsoleCallbackHandler as LCConsoleCallbackHandler } from '@langchain/core/tracers/console'
import { checkInputs, Moderation, streamResponse } from '../../moderation/Moderation'
import { formatResponse } from '../../outputparsers/OutputParserHelpers'
import { injectChainNodeData } from '../../../src/multiModalUtils'
let systemMessage = `The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.`
const inputKey = 'input'
@ -28,7 +29,7 @@ class ConversationChain_Chains implements INode {
constructor(fields?: { sessionId?: string }) {
this.label = 'Conversation Chain'
this.name = 'conversationChain'
this.version = 1.0
this.version = 3.0
this.type = 'ConversationChain'
this.icon = 'conv.svg'
this.category = 'Chains'
@ -45,6 +46,14 @@ class ConversationChain_Chains implements INode {
name: 'memory',
type: 'BaseMemory'
},
{
label: 'Chat Prompt Template',
name: 'chatPromptTemplate',
type: 'ChatPromptTemplate',
description: 'Override existing prompt with Chat Prompt Template. Human Message must includes {input} variable',
optional: true
},
/* Deprecated
{
label: 'Document',
name: 'document',
@ -53,15 +62,25 @@ class ConversationChain_Chains implements INode {
'Include whole document into the context window, if you get maximum context length error, please use model with higher context window like Claude 100k, or gpt4 32k',
optional: true,
list: true
},*/
{
label: 'Input Moderation',
description: 'Detect text that could generate harmful output and prevent it from being sent to the language model',
name: 'inputModeration',
type: 'Moderation',
optional: true,
list: true
},
{
label: 'System Message',
name: 'systemMessagePrompt',
type: 'string',
rows: 4,
description: 'If Chat Prompt Template is provided, this will be ignored',
additionalParams: true,
optional: true,
placeholder: 'You are a helpful assistant that write codes'
default: systemMessage,
placeholder: systemMessage
}
]
this.sessionId = fields?.sessionId
@ -72,22 +91,40 @@ class ConversationChain_Chains implements INode {
return chain
}
async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string> {
async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string | object> {
const memory = nodeData.inputs?.memory
injectChainNodeData(nodeData, options)
const chain = prepareChain(nodeData, options, this.sessionId)
const moderations = nodeData.inputs?.inputModeration as Moderation[]
if (moderations && moderations.length > 0) {
try {
// Use the output of the moderation chain as input for the LLM chain
input = await checkInputs(moderations, input)
} catch (e) {
await new Promise((resolve) => setTimeout(resolve, 500))
streamResponse(options.socketIO && options.socketIOClientId, e.message, options.socketIO, options.socketIOClientId)
return formatResponse(e.message)
}
}
const loggerHandler = new ConsoleCallbackHandler(options.logger)
const callbacks = await additionalCallbacks(nodeData, options)
const additionalCallback = await additionalCallbacks(nodeData, options)
let res = ''
let callbacks = [loggerHandler, ...additionalCallback]
if (process.env.DEBUG === 'true') {
callbacks.push(new LCConsoleCallbackHandler())
}
if (options.socketIO && options.socketIOClientId) {
const handler = new CustomChainHandler(options.socketIO, options.socketIOClientId)
res = await chain.invoke({ input }, { callbacks: [loggerHandler, handler, ...callbacks] })
callbacks.push(handler)
res = await chain.invoke({ input }, { callbacks })
} else {
res = await chain.invoke({ input }, { callbacks: [loggerHandler, ...callbacks] })
res = await chain.invoke({ input }, { callbacks })
}
await memory.addChatMessages(
@ -108,36 +145,33 @@ class ConversationChain_Chains implements INode {
}
}
const prepareChatPrompt = (nodeData: INodeData, options: ICommonObject) => {
const prepareChatPrompt = (nodeData: INodeData) => {
const memory = nodeData.inputs?.memory as FlowiseMemory
const prompt = nodeData.inputs?.systemMessagePrompt as string
const docs = nodeData.inputs?.document as Document[]
const chatPromptTemplate = nodeData.inputs?.chatPromptTemplate as ChatPromptTemplate
const flattenDocs = docs && docs.length ? flatten(docs) : []
const finalDocs = []
for (let i = 0; i < flattenDocs.length; i += 1) {
if (flattenDocs[i] && flattenDocs[i].pageContent) {
finalDocs.push(new Document(flattenDocs[i]))
if (chatPromptTemplate && chatPromptTemplate.promptMessages.length) {
const sysPrompt = chatPromptTemplate.promptMessages[0]
const humanPrompt = chatPromptTemplate.promptMessages[chatPromptTemplate.promptMessages.length - 1]
const chatPrompt = ChatPromptTemplate.fromMessages([
sysPrompt,
new MessagesPlaceholder(memory.memoryKey ?? 'chat_history'),
humanPrompt
])
if ((chatPromptTemplate as any).promptValues) {
// @ts-ignore
chatPrompt.promptValues = (chatPromptTemplate as any).promptValues
}
return chatPrompt
}
let finalText = ''
for (let i = 0; i < finalDocs.length; i += 1) {
finalText += finalDocs[i].pageContent
}
const replaceChar: string[] = ['{', '}']
for (const char of replaceChar) finalText = finalText.replaceAll(char, '')
if (finalText) systemMessage = `${systemMessage}\nThe AI has the following context:\n${finalText}`
//TODO, this should not be any[], what interface should it be?
let promptMessages: any[] = [
SystemMessagePromptTemplate.fromTemplate(prompt ? `${prompt}\n${systemMessage}` : systemMessage),
const chatPrompt = ChatPromptTemplate.fromMessages([
SystemMessagePromptTemplate.fromTemplate(prompt ? prompt : systemMessage),
new MessagesPlaceholder(memory.memoryKey ?? 'chat_history'),
HumanMessagePromptTemplate.fromTemplate(`{${inputKey}}`)
]
const chatPrompt = ChatPromptTemplate.fromMessages(promptMessages)
])
return chatPrompt
}
@ -148,15 +182,31 @@ const prepareChain = (nodeData: INodeData, options: ICommonObject, sessionId?: s
const memory = nodeData.inputs?.memory as FlowiseMemory
const memoryKey = memory.memoryKey ?? 'chat_history'
const chatPrompt = prepareChatPrompt(nodeData)
let promptVariables = {}
const promptValuesRaw = (chatPrompt as any).promptValues
if (promptValuesRaw) {
const promptValues = handleEscapeCharacters(promptValuesRaw, true)
for (const val in promptValues) {
promptVariables = {
...promptVariables,
[val]: () => {
return promptValues[val]
}
}
}
}
const conversationChain = RunnableSequence.from([
{
[inputKey]: (input: { input: string }) => input.input,
[memoryKey]: async () => {
const history = await memory.getChatMessages(sessionId, true, chatHistory)
return history
}
},
...promptVariables
},
prepareChatPrompt(nodeData, options),
prepareChatPrompt(nodeData),
model,
new StringOutputParser()
])

View File

@ -13,6 +13,7 @@ import { applyPatch } from 'fast-json-patch'
import { convertBaseMessagetoIMessage, getBaseClasses } from '../../../src/utils'
import { ConsoleCallbackHandler, additionalCallbacks } from '../../../src/handler'
import { FlowiseMemory, ICommonObject, IMessage, INode, INodeData, INodeParams, MemoryMethods } from '../../../src/Interface'
import { ConsoleCallbackHandler as LCConsoleCallbackHandler } from '@langchain/core/tracers/console'
type RetrievalChainInput = {
chat_history: string
@ -176,11 +177,17 @@ class ConversationalRetrievalQAChain_Chains implements INode {
const history = ((await memory.getChatMessages(this.sessionId, false, options.chatHistory)) as IMessage[]) ?? []
const loggerHandler = new ConsoleCallbackHandler(options.logger)
const callbacks = await additionalCallbacks(nodeData, options)
const additionalCallback = await additionalCallbacks(nodeData, options)
let callbacks = [loggerHandler, ...additionalCallback]
if (process.env.DEBUG === 'true') {
callbacks.push(new LCConsoleCallbackHandler())
}
const stream = answerChain.streamLog(
{ question: input, chat_history: history },
{ callbacks: [loggerHandler, ...callbacks] },
{ callbacks },
{
includeNames: [sourceRunnableName]
}

View File

@ -8,7 +8,7 @@ import { formatResponse, injectOutputParser } from '../../outputparsers/OutputPa
import { BaseLLMOutputParser } from 'langchain/schema/output_parser'
import { OutputFixingParser } from 'langchain/output_parsers'
import { checkInputs, Moderation, streamResponse } from '../../moderation/Moderation'
import { injectChainNodeData } from '../../../src/MultiModalUtils'
import { injectChainNodeData } from '../../../src/multiModalUtils'
class LLMChain_Chains implements INode {
label: string
@ -83,7 +83,7 @@ class LLMChain_Chains implements INode {
const model = nodeData.inputs?.model as BaseLanguageModel
const prompt = nodeData.inputs?.prompt
const output = nodeData.outputs?.output as string
const promptValues = prompt.promptValues as ICommonObject
let promptValues: ICommonObject | undefined = nodeData.inputs?.prompt.promptValues as ICommonObject
const llmOutputParser = nodeData.inputs?.outputParser as BaseOutputParser
this.outputParser = llmOutputParser
if (llmOutputParser) {
@ -108,17 +108,25 @@ class LLMChain_Chains implements INode {
verbose: process.env.DEBUG === 'true'
})
const inputVariables = chain.prompt.inputVariables as string[] // ["product"]
injectChainNodeData(nodeData, options)
promptValues = injectOutputParser(this.outputParser, chain, promptValues)
const res = await runPrediction(inputVariables, chain, input, promptValues, options, nodeData)
// eslint-disable-next-line no-console
console.log('\x1b[92m\x1b[1m\n*****OUTPUT PREDICTION*****\n\x1b[0m\x1b[0m')
// eslint-disable-next-line no-console
console.log(res)
let finalRes = res
if (this.outputParser && typeof res === 'object' && Object.prototype.hasOwnProperty.call(res, 'json')) {
finalRes = (res as ICommonObject).json
}
/**
* Apply string transformation to convert special chars:
* FROM: hello i am ben\n\n\thow are you?
* TO: hello i am benFLOWISE_NEWLINEFLOWISE_NEWLINEFLOWISE_TABhow are you?
*/
return handleEscapeCharacters(res, false)
return handleEscapeCharacters(finalRes, false)
}
}

View File

@ -1,340 +0,0 @@
import { ICommonObject, INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface'
import { getBaseClasses, getCredentialData, getCredentialParam, handleEscapeCharacters } from '../../../src/utils'
import { OpenAIMultiModalChainInput, VLLMChain } from './VLLMChain'
import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler'
import { formatResponse } from '../../outputparsers/OutputParserHelpers'
import { checkInputs, Moderation, streamResponse } from '../../moderation/Moderation'
class OpenAIMultiModalChain_Chains implements INode {
label: string
name: string
version: number
type: string
icon: string
badge: string
category: string
baseClasses: string[]
description: string
inputs: INodeParams[]
outputs: INodeOutputsValue[]
credential: INodeParams
constructor() {
this.label = 'Open AI MultiModal Chain'
this.name = 'openAIMultiModalChain'
this.version = 1.0
this.type = 'OpenAIMultiModalChain'
this.icon = 'chain.svg'
this.category = 'Chains'
this.badge = 'BETA'
this.description = 'Chain to query against Image and Audio Input.'
this.baseClasses = [this.type, ...getBaseClasses(VLLMChain)]
this.credential = {
label: 'Connect Credential',
name: 'credential',
type: 'credential',
credentialNames: ['openAIApi']
}
this.inputs = [
{
label: 'Prompt',
name: 'prompt',
type: 'BasePromptTemplate',
optional: true
},
{
label: 'Input Moderation',
description: 'Detect text that could generate harmful output and prevent it from being sent to the language model',
name: 'inputModeration',
type: 'Moderation',
optional: true,
list: true
},
{
label: 'Model Name',
name: 'modelName',
type: 'options',
options: [
{
label: 'gpt-4-vision-preview',
name: 'gpt-4-vision-preview'
}
],
default: 'gpt-4-vision-preview'
},
{
label: 'Speech to Text',
name: 'speechToText',
type: 'boolean',
optional: true
},
// TODO: only show when speechToText is true
{
label: 'Speech to Text Method',
description: 'How to turn audio into text',
name: 'speechToTextMode',
type: 'options',
options: [
{
label: 'Transcriptions',
name: 'transcriptions',
description:
'Transcribe audio into whatever language the audio is in. Default method when Speech to Text is turned on.'
},
{
label: 'Translations',
name: 'translations',
description: 'Translate and transcribe the audio into english.'
}
],
optional: false,
default: 'transcriptions',
additionalParams: true
},
{
label: 'Image Resolution',
description: 'This parameter controls the resolution in which the model views the image.',
name: 'imageResolution',
type: 'options',
options: [
{
label: 'Low',
name: 'low'
},
{
label: 'High',
name: 'high'
},
{
label: 'Auto',
name: 'auto'
}
],
default: 'low',
optional: false,
additionalParams: true
},
{
label: 'Temperature',
name: 'temperature',
type: 'number',
step: 0.1,
default: 0.9,
optional: true,
additionalParams: true
},
{
label: 'Top Probability',
name: 'topP',
type: 'number',
step: 0.1,
optional: true,
additionalParams: true
},
{
label: 'Max Tokens',
name: 'maxTokens',
type: 'number',
step: 1,
optional: true,
additionalParams: true
},
{
label: 'Accepted Upload Types',
name: 'allowedUploadTypes',
type: 'string',
default: 'image/gif;image/jpeg;image/png;image/webp;audio/mpeg;audio/x-wav;audio/mp4',
hidden: true
},
{
label: 'Maximum Upload Size (MB)',
name: 'maxUploadSize',
type: 'number',
default: '5',
hidden: true
}
]
this.outputs = [
{
label: 'Open AI MultiModal Chain',
name: 'openAIMultiModalChain',
baseClasses: [this.type, ...getBaseClasses(VLLMChain)]
},
{
label: 'Output Prediction',
name: 'outputPrediction',
baseClasses: ['string', 'json']
}
]
}
async init(nodeData: INodeData, input: string, options: ICommonObject): Promise<any> {
const prompt = nodeData.inputs?.prompt
const output = nodeData.outputs?.output as string
const imageResolution = nodeData.inputs?.imageResolution
const promptValues = prompt.promptValues as ICommonObject
const credentialData = await getCredentialData(nodeData.credential ?? '', options)
const openAIApiKey = getCredentialParam('openAIApiKey', credentialData, nodeData)
const temperature = nodeData.inputs?.temperature as string
const modelName = nodeData.inputs?.modelName as string
const maxTokens = nodeData.inputs?.maxTokens as string
const topP = nodeData.inputs?.topP as string
const speechToText = nodeData.inputs?.speechToText as boolean
const fields: OpenAIMultiModalChainInput = {
openAIApiKey: openAIApiKey,
imageResolution: imageResolution,
verbose: process.env.DEBUG === 'true',
uploads: options.uploads,
modelName: modelName
}
if (temperature) fields.temperature = parseFloat(temperature)
if (maxTokens) fields.maxTokens = parseInt(maxTokens, 10)
if (topP) fields.topP = parseFloat(topP)
if (speechToText) {
const speechToTextMode = nodeData.inputs?.speechToTextMode ?? 'transcriptions'
if (speechToTextMode) fields.speechToTextMode = speechToTextMode
}
if (output === this.name) {
const chain = new VLLMChain({
...fields,
prompt: prompt
})
return chain
} else if (output === 'outputPrediction') {
const chain = new VLLMChain({
...fields
})
const inputVariables: string[] = prompt.inputVariables as string[] // ["product"]
const res = await runPrediction(inputVariables, chain, input, promptValues, options, nodeData)
// eslint-disable-next-line no-console
console.log('\x1b[92m\x1b[1m\n*****OUTPUT PREDICTION*****\n\x1b[0m\x1b[0m')
// eslint-disable-next-line no-console
console.log(res)
/**
* Apply string transformation to convert special chars:
* FROM: hello i am ben\n\n\thow are you?
* TO: hello i am benFLOWISE_NEWLINEFLOWISE_NEWLINEFLOWISE_TABhow are you?
*/
return handleEscapeCharacters(res, false)
}
}
async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string | object> {
const prompt = nodeData.inputs?.prompt
const inputVariables: string[] = prompt.inputVariables as string[] // ["product"]
const chain = nodeData.instance as VLLMChain
let promptValues: ICommonObject | undefined = nodeData.inputs?.prompt.promptValues as ICommonObject
const res = await runPrediction(inputVariables, chain, input, promptValues, options, nodeData)
// eslint-disable-next-line no-console
console.log('\x1b[93m\x1b[1m\n*****FINAL RESULT*****\n\x1b[0m\x1b[0m')
// eslint-disable-next-line no-console
console.log(res)
return res
}
}
/**
 * Executes the vision chain once: resolves speech-to-text / image uploads,
 * applies input moderation, maps prompt values onto the chain's input key,
 * and runs the chain with or without streaming callbacks.
 */
const runPrediction = async (
    inputVariables: string[],
    chain: VLLMChain,
    input: string,
    promptValuesRaw: ICommonObject | undefined,
    options: ICommonObject,
    nodeData: INodeData
) => {
    const loggerHandler = new ConsoleCallbackHandler(options.logger)
    const callbacks = await additionalCallbacks(nodeData, options)

    // Streaming is active only when both the socket server and a client id are present
    const isStreaming = options.socketIO && options.socketIOClientId
    const socketIO = isStreaming ? options.socketIO : undefined
    const socketIOClientId = isStreaming ? options.socketIOClientId : ''

    const moderations = nodeData.inputs?.inputModeration as Moderation[]
    const speechToText = nodeData.inputs?.speechToText as boolean

    if (options?.uploads) {
        if (options.uploads.length === 1 && input.length === 0) {
            // Single upload with no typed text: treat it as recorded audio and
            // transcribe it into the input. The audio itself is never forwarded
            // to the model.
            if (speechToText) {
                input = await chain.processAudioWithWisper(options.uploads[0], undefined)
            }
        } else {
            chain.uploads = options.uploads
        }
    }

    if (moderations && moderations.length > 0) {
        try {
            // Moderated/sanitized text replaces the raw user input
            input = await checkInputs(moderations, input)
        } catch (e) {
            // Small delay so the streamed error is not lost by a racing client
            await new Promise((resolve) => setTimeout(resolve, 500))
            streamResponse(isStreaming, e.message, socketIO, socketIOClientId)
            return formatResponse(e.message)
        }
    }

    /**
     * Reverse the special-character placeholders back into real characters,
     * e.g. FLOWISE_NEWLINE -> "\n" and FLOWISE_TAB -> "\t".
     */
    const promptValues = handleEscapeCharacters(promptValuesRaw, true)

    // Runs chain.call with the streaming handler attached when applicable
    const executeCall = async (callValues: ICommonObject) => {
        if (isStreaming) {
            const handler = new CustomChainHandler(socketIO, socketIOClientId)
            const res = await chain.call(callValues, [loggerHandler, handler, ...callbacks])
            return formatResponse(res?.text)
        }
        const res = await chain.call(callValues, [loggerHandler, ...callbacks])
        return formatResponse(res?.text)
    }

    if (promptValues && inputVariables.length > 0) {
        // Variables with a provided value become the chain's input key (last one
        // wins); variables without a value are collected for resolution below.
        const missingVariables: string[] = []
        for (const variable of inputVariables) {
            if (promptValues[variable]) {
                chain.inputKey = variable
            } else {
                missingVariables.push(variable)
            }
        }

        if (missingVariables.length === 0) {
            // Every input variable has a fixed value specified
            return executeCall({ ...promptValues })
        } else if (missingVariables.length === 1) {
            // Exactly one variable is unspecified: fill it with the user's question
            const lastValue = missingVariables.pop()
            if (!lastValue) throw new Error('Please provide Prompt Values')
            chain.inputKey = lastValue as string
            return executeCall({
                ...promptValues,
                [lastValue]: input
            })
        } else {
            throw new Error(`Please provide Prompt Values for: ${missingVariables.join(', ')}`)
        }
    } else {
        // No prompt values configured: run the chain directly on the raw input
        if (isStreaming) {
            const handler = new CustomChainHandler(socketIO, socketIOClientId)
            const res = await chain.run(input, [loggerHandler, handler, ...callbacks])
            return formatResponse(res)
        }
        const res = await chain.run(input, [loggerHandler, ...callbacks])
        return formatResponse(res)
    }
}
// Register the node class under the key Flowise's component loader expects
module.exports = { nodeClass: OpenAIMultiModalChain_Chains }

View File

@ -1,216 +0,0 @@
import { OpenAI as OpenAIClient, ClientOptions, OpenAI } from 'openai'
import { BaseChain, ChainInputs } from 'langchain/chains'
import { ChainValues } from 'langchain/schema'
import { BasePromptTemplate, ChatPromptTemplate, HumanMessagePromptTemplate, SystemMessagePromptTemplate } from 'langchain/prompts'
import path from 'path'
import { getUserHome } from '../../../src/utils'
import fs from 'fs'
import { ChatCompletionContentPart, ChatCompletionMessageParam } from 'openai/src/resources/chat/completions'
import ChatCompletionCreateParamsNonStreaming = OpenAI.ChatCompletionCreateParamsNonStreaming
import { IFileUpload } from '../../../src'
/**
 * Interface for the input parameters of the OpenAIVisionChain class.
 * All fields are optional; `openAIApiKey` is validated at construction time.
 */
export interface OpenAIMultiModalChainInput extends ChainInputs {
    // OpenAI API key; the chain constructor throws if this is missing
    openAIApiKey?: string
    // Optional OpenAI organization id forwarded to the client config
    openAIOrganization?: string
    // NOTE(review): declared here but not read anywhere in the visible chain — confirm it is still needed
    throwError?: boolean
    // Optional prompt; only ChatPromptTemplate system/human messages are consumed by the chain
    prompt?: BasePromptTemplate
    // Extra OpenAI client options merged into the generated ClientOptions
    configuration?: ClientOptions
    // Files (images/audio) attached from the chat UI
    uploads?: IFileUpload[]
    // Detail level sent with each image_url part (defaults to 'low' in the chain)
    imageResolution?: 'auto' | 'low' | 'high'
    temperature?: number
    // NOTE(review): the chain always requests 'gpt-4-vision-preview'; this field is stored but not used for the request model — confirm intent
    modelName?: string
    // Max completion tokens; the chain falls back to 1024 when unset
    maxTokens?: number
    topP?: number
    // Whisper mode: expected values are 'transcriptions' or 'translations'
    // NOTE(review): the implementing class types this field as `any` — align the types
    speechToTextMode?: string
}
/**
 * Class representing a chain for generating text from an image using the OpenAI
 * Vision API. It extends the BaseChain class and implements the
 * OpenAIMultiModalChainInput interface.
 *
 * The chain always calls the 'gpt-4-vision-preview' model, attaching the user
 * input as a text part plus any uploaded images (and Whisper-converted audio)
 * as additional content parts of a single user message.
 */
export class VLLMChain extends BaseChain implements OpenAIMultiModalChainInput {
    static lc_name() {
        return 'VLLMChain'
    }

    // Optional prompt; only ChatPromptTemplate system/human templates are forwarded
    prompt: BasePromptTemplate | undefined
    // Key in the chain values holding the user's text input
    inputKey = 'input'
    // Key under which the model's reply is returned from _call
    outputKey = 'text'
    // Files (images/audio) attached from the chat UI
    uploads?: IFileUpload[]
    // Detail level sent with each image_url part
    imageResolution: 'auto' | 'low' | 'high'
    openAIApiKey?: string
    openAIOrganization?: string
    clientConfig: ClientOptions
    client: OpenAIClient
    // NOTE(review): stored but never read in this class — confirm it is still needed
    throwError: boolean
    temperature?: number
    // NOTE(review): stored but the request model is hardcoded to 'gpt-4-vision-preview'
    modelName?: string
    maxTokens?: number
    topP?: number
    // Whisper mode: compared against 'transcriptions' / 'translations' strings below
    // NOTE(review): interface declares this as string; typed `any` here — align the types
    speechToTextMode?: any

    constructor(fields: OpenAIMultiModalChainInput) {
        super(fields)
        this.throwError = fields?.throwError ?? false
        this.imageResolution = fields?.imageResolution ?? 'low'
        this.openAIApiKey = fields?.openAIApiKey
        this.prompt = fields?.prompt
        this.temperature = fields?.temperature
        this.modelName = fields?.modelName
        this.maxTokens = fields?.maxTokens
        this.topP = fields?.topP
        this.uploads = fields?.uploads ?? []
        // NOTE(review): `?? {}` leaves speechToTextMode truthy even when unset, so the
        // audio branch in _call still runs (and processAudioWithWisper then returns '').
        // Confirm whether undefined was intended here.
        this.speechToTextMode = fields?.speechToTextMode ?? {}
        if (!this.openAIApiKey) {
            throw new Error('OpenAI API key not found')
        }
        this.openAIOrganization = fields?.openAIOrganization
        this.clientConfig = {
            ...fields?.configuration,
            apiKey: this.openAIApiKey,
            organization: this.openAIOrganization
        }
        this.client = new OpenAIClient(this.clientConfig)
    }

    /**
     * Builds a single-turn vision request from the chain values and uploads,
     * sends it to OpenAI, and returns the first choice's text under outputKey.
     */
    async _call(values: ChainValues): Promise<ChainValues> {
        const userInput = values[this.inputKey]
        // The model is fixed to the vision preview regardless of this.modelName
        const vRequest: ChatCompletionCreateParamsNonStreaming = {
            model: 'gpt-4-vision-preview',
            temperature: this.temperature,
            top_p: this.topP,
            messages: []
        }
        // Vision preview defaults to a very low max_tokens, so force a floor of 1024
        if (this.maxTokens) vRequest.max_tokens = this.maxTokens
        else vRequest.max_tokens = 1024

        // All content parts (text, transcribed audio, images) go into one user message
        const chatMessages: ChatCompletionContentPart[] = []
        const userRole: ChatCompletionMessageParam = { role: 'user', content: [] }
        chatMessages.push({
            type: 'text',
            text: userInput
        })
        // Convert any audio uploads to text parts via Whisper before the images
        if (this.speechToTextMode && this.uploads && this.uploads.length > 0) {
            const audioUploads = this.getAudioUploads(this.uploads)
            for (const upload of audioUploads) {
                await this.processAudioWithWisper(upload, chatMessages)
            }
        }
        if (this.uploads && this.uploads.length > 0) {
            const imageUploads = this.getImageUploads(this.uploads)
            for (const upload of imageUploads) {
                let bf = upload.data
                if (upload.type == 'stored-file') {
                    const filePath = path.join(getUserHome(), '.flowise', 'gptvision', upload.data, upload.name)
                    // as the image is stored in the server, read the file and convert it to base64
                    const contents = fs.readFileSync(filePath)
                    bf = 'data:' + upload.mime + ';base64,' + contents.toString('base64')
                }
                chatMessages.push({
                    type: 'image_url',
                    image_url: {
                        url: bf,
                        detail: this.imageResolution
                    }
                })
            }
        }
        userRole.content = chatMessages
        vRequest.messages.push(userRole)

        // Append raw (un-rendered) system/human templates from the prompt, if any.
        // NOTE(review): templates are sent verbatim; any {variables} inside them are
        // NOT substituted here — confirm this is intended.
        if (this.prompt && this.prompt instanceof ChatPromptTemplate) {
            let chatPrompt = this.prompt as ChatPromptTemplate
            chatPrompt.promptMessages.forEach((message: any) => {
                if (message instanceof SystemMessagePromptTemplate) {
                    vRequest.messages.push({
                        role: 'system',
                        content: (message.prompt as any).template
                    })
                } else if (message instanceof HumanMessagePromptTemplate) {
                    vRequest.messages.push({
                        role: 'user',
                        content: (message.prompt as any).template
                    })
                }
            })
        }
        let response
        try {
            response = await this.client.chat.completions.create(vRequest)
        } catch (error) {
            // Re-throw real Errors; coerce anything else into one
            if (error instanceof Error) {
                throw error
            } else {
                throw new Error(error as string)
            }
        }
        const output = response.choices[0]
        return {
            [this.outputKey]: output.message.content
        }
    }

    /**
     * Runs an uploaded audio file through OpenAI Whisper and returns the text.
     * When chatMessages is provided, the text is also appended as a text part.
     * Returns '' if speechToTextMode is neither 'transcriptions' nor 'translations'.
     * NOTE(review): "Wisper" is a misspelling of "Whisper", kept because external
     * callers invoke this method by name.
     */
    public async processAudioWithWisper(upload: IFileUpload, chatMessages: ChatCompletionContentPart[] | undefined): Promise<string> {
        const filePath = path.join(getUserHome(), '.flowise', 'gptvision', upload.data, upload.name)
        // The audio is stored on the server; stream the file to the Whisper API
        const audio_file = fs.createReadStream(filePath)
        if (this.speechToTextMode === 'transcriptions') {
            // Transcribe in the original spoken language
            const transcription = await this.client.audio.transcriptions.create({
                file: audio_file,
                model: 'whisper-1'
            })
            if (chatMessages) {
                chatMessages.push({
                    type: 'text',
                    text: transcription.text
                })
            }
            return transcription.text
        } else if (this.speechToTextMode === 'translations') {
            // Translate the audio into English
            const translation = await this.client.audio.translations.create({
                file: audio_file,
                model: 'whisper-1'
            })
            if (chatMessages) {
                chatMessages.push({
                    type: 'text',
                    text: translation.text
                })
            }
            return translation.text
        }
        // Reached when speechToTextMode holds neither recognized value
        //should never get here
        return ''
    }

    // Filter uploads down to audio files by MIME type
    getAudioUploads = (urls: any[]) => {
        return urls.filter((url: any) => url.mime.startsWith('audio/'))
    }

    // Filter uploads down to image files by MIME type
    getImageUploads = (urls: any[]) => {
        return urls.filter((url: any) => url.mime.startsWith('image/'))
    }

    _chainType() {
        return 'vision_chain'
    }

    // Input keys come from the prompt when present, otherwise the single inputKey
    get inputKeys() {
        return this.prompt?.inputVariables ?? [this.inputKey]
    }

    get outputKeys(): string[] {
        return [this.outputKey]
    }
}

View File

@ -1,6 +0,0 @@
<svg xmlns="http://www.w3.org/2000/svg" class="icon icon-tabler icon-tabler-dna" width="24" height="24" viewBox="0 0 24 24" stroke-width="2" stroke="currentColor" fill="none" stroke-linecap="round" stroke-linejoin="round">
<path stroke="none" d="M0 0h24v24H0z" fill="none"></path>
<path d="M14.828 14.828a4 4 0 1 0 -5.656 -5.656a4 4 0 0 0 5.656 5.656z"></path>
<path d="M9.172 20.485a4 4 0 1 0 -5.657 -5.657"></path>
<path d="M14.828 3.515a4 4 0 0 0 5.657 5.657"></path>
</svg>

Before

Width:  |  Height:  |  Size: 489 B

View File

@ -1,7 +1,9 @@
import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
import { convertMultiOptionsToStringArray, getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
import { BaseCache } from 'langchain/schema'
import { ChatGoogleGenerativeAI } from '@langchain/google-genai'
import { ChatGoogleGenerativeAI, GoogleGenerativeAIChatInput } from '@langchain/google-genai'
import { HarmBlockThreshold, HarmCategory } from '@google/generative-ai'
import type { SafetySetting } from '@google/generative-ai'
class GoogleGenerativeAI_ChatModels implements INode {
label: string
@ -74,6 +76,73 @@ class GoogleGenerativeAI_ChatModels implements INode {
step: 0.1,
optional: true,
additionalParams: true
},
{
label: 'Top Next Highest Probability Tokens',
name: 'topK',
type: 'number',
description: `Decode using top-k sampling: consider the set of top_k most probable tokens. Must be positive`,
step: 1,
optional: true,
additionalParams: true
},
{
label: 'Harm Category',
name: 'harmCategory',
type: 'multiOptions',
description:
'Refer to <a target="_blank" href="https://cloud.google.com/vertex-ai/docs/generative-ai/multimodal/configure-safety-attributes#safety_attribute_definitions">official guide</a> on how to use Harm Category',
options: [
{
label: 'Dangerous',
name: HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT
},
{
label: 'Harassment',
name: HarmCategory.HARM_CATEGORY_HARASSMENT
},
{
label: 'Hate Speech',
name: HarmCategory.HARM_CATEGORY_HATE_SPEECH
},
{
label: 'Sexually Explicit',
name: HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT
}
],
optional: true,
additionalParams: true
},
{
label: 'Harm Block Threshold',
name: 'harmBlockThreshold',
type: 'multiOptions',
description:
'Refer to <a target="_blank" href="https://cloud.google.com/vertex-ai/docs/generative-ai/multimodal/configure-safety-attributes#safety_setting_thresholds">official guide</a> on how to use Harm Block Threshold',
options: [
{
label: 'Low and Above',
name: HarmBlockThreshold.BLOCK_LOW_AND_ABOVE
},
{
label: 'Medium and Above',
name: HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE
},
{
label: 'None',
name: HarmBlockThreshold.BLOCK_NONE
},
{
label: 'Only High',
name: HarmBlockThreshold.BLOCK_ONLY_HIGH
},
{
label: 'Threshold Unspecified',
name: HarmBlockThreshold.HARM_BLOCK_THRESHOLD_UNSPECIFIED
}
],
optional: true,
additionalParams: true
}
]
}
@ -86,9 +155,12 @@ class GoogleGenerativeAI_ChatModels implements INode {
const modelName = nodeData.inputs?.modelName as string
const maxOutputTokens = nodeData.inputs?.maxOutputTokens as string
const topP = nodeData.inputs?.topP as string
const topK = nodeData.inputs?.topK as string
const harmCategory = nodeData.inputs?.harmCategory as string
const harmBlockThreshold = nodeData.inputs?.harmBlockThreshold as string
const cache = nodeData.inputs?.cache as BaseCache
const obj = {
const obj: Partial<GoogleGenerativeAIChatInput> = {
apiKey: apiKey,
modelName: modelName,
maxOutputTokens: 2048
@ -98,8 +170,23 @@ class GoogleGenerativeAI_ChatModels implements INode {
const model = new ChatGoogleGenerativeAI(obj)
if (topP) model.topP = parseFloat(topP)
if (topK) model.topK = parseFloat(topK)
if (cache) model.cache = cache
if (temperature) model.temperature = parseFloat(temperature)
// Safety Settings
let harmCategories: string[] = convertMultiOptionsToStringArray(harmCategory)
let harmBlockThresholds: string[] = convertMultiOptionsToStringArray(harmBlockThreshold)
if (harmCategories.length != harmBlockThresholds.length)
throw new Error(`Harm Category & Harm Block Threshold are not the same length`)
const safetySettings: SafetySetting[] = harmCategories.map((harmCategory, index) => {
return {
category: harmCategory as HarmCategory,
threshold: harmBlockThresholds[index] as HarmBlockThreshold
}
})
if (safetySettings.length > 0) model.safetySettings = safetySettings
return model
}
}

View File

@ -1,9 +1,9 @@
import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
import { ICommonObject, IMultiModalOption, INode, INodeData, INodeParams } from '../../../src/Interface'
import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
import { ChatOpenAI, OpenAIChatInput } from 'langchain/chat_models/openai'
import { ChatOpenAI as LangchainChatOpenAI, OpenAIChatInput } from 'langchain/chat_models/openai'
import { BaseCache } from 'langchain/schema'
import { BaseLLMParams } from 'langchain/llms/base'
import { FlowiseChatOpenAI } from './FlowiseChatOpenAI'
import { ChatOpenAI } from './FlowiseChatOpenAI'
class ChatOpenAI_ChatModels implements INode {
label: string
@ -20,12 +20,12 @@ class ChatOpenAI_ChatModels implements INode {
constructor() {
this.label = 'ChatOpenAI'
this.name = 'chatOpenAI'
this.version = 3.0
this.version = 4.0
this.type = 'ChatOpenAI'
this.icon = 'openai.svg'
this.category = 'Chat Models'
this.description = 'Wrapper around OpenAI large language models that use the Chat endpoint'
this.baseClasses = [this.type, ...getBaseClasses(ChatOpenAI)]
this.baseClasses = [this.type, ...getBaseClasses(LangchainChatOpenAI)]
this.credential = {
label: 'Connect Credential',
name: 'credential',
@ -48,6 +48,14 @@ class ChatOpenAI_ChatModels implements INode {
label: 'gpt-4',
name: 'gpt-4'
},
{
label: 'gpt-4-turbo-preview',
name: 'gpt-4-turbo-preview'
},
{
label: 'gpt-4-0125-preview',
name: 'gpt-4-0125-preview'
},
{
label: 'gpt-4-1106-preview',
name: 'gpt-4-1106-preview'
@ -72,6 +80,10 @@ class ChatOpenAI_ChatModels implements INode {
label: 'gpt-3.5-turbo',
name: 'gpt-3.5-turbo'
},
{
label: 'gpt-3.5-turbo-0125',
name: 'gpt-3.5-turbo-0125'
},
{
label: 'gpt-3.5-turbo-1106',
name: 'gpt-3.5-turbo-1106'
@ -158,7 +170,7 @@ class ChatOpenAI_ChatModels implements INode {
label: 'Allow Image Uploads',
name: 'allowImageUploads',
type: 'boolean',
description: 'Enabling this option, would default the model to gpt-4-vision-preview',
description: 'Automatically uses gpt-4-vision-preview when image is being uploaded from chat',
default: false,
optional: true
},
@ -231,16 +243,19 @@ class ChatOpenAI_ChatModels implements INode {
throw new Error("Invalid JSON in the ChatOpenAI's BaseOptions: " + exception)
}
}
const model = new FlowiseChatOpenAI(obj, {
const model = new ChatOpenAI(obj, {
baseURL: basePath,
baseOptions: parsedBaseOptions
})
const multiModal = {
allowImageUploads: allowImageUploads ?? false,
imageResolution
const multiModalOption: IMultiModalOption = {
image: {
allowImageUploads: allowImageUploads ?? false,
imageResolution
}
}
model.multiModal = multiModal
model.multiModalOption = multiModalOption
return model
}
}

View File

@ -1,4 +1,4 @@
import { ChatOpenAI, OpenAIChatInput } from 'langchain/chat_models/openai'
import { ChatOpenAI as LangchainChatOpenAI, OpenAIChatInput } from 'langchain/chat_models/openai'
import { BaseChatModelParams } from 'langchain/chat_models/base'
import type { ClientOptions } from 'openai'
import type { LegacyOpenAIInput } from '@langchain/openai/dist/types'
@ -6,47 +6,59 @@ import { BaseLanguageModelInput } from 'langchain/base_language'
import { ChatOpenAICallOptions } from '@langchain/openai/dist/chat_models'
import { BaseMessageChunk, BaseMessageLike, HumanMessage, LLMResult } from 'langchain/schema'
import { Callbacks } from '@langchain/core/callbacks/manager'
import { ICommonObject, INodeData } from '../../../src'
import { addImagesToMessages } from '../../../src/MultiModalUtils'
import { ICommonObject, IMultiModalOption, INodeData } from '../../../src'
import { addImagesToMessages } from '../../../src/multiModalUtils'
export class FlowiseChatOpenAI extends ChatOpenAI {
multiModal: {}
export class ChatOpenAI extends LangchainChatOpenAI {
//TODO: Should be class variables and not static
public static chainNodeData: INodeData
public static chainNodeOptions: ICommonObject
configuredModel: string
configuredMaxToken?: number
multiModalOption?: IMultiModalOption
constructor(
fields?: Partial<OpenAIChatInput> & BaseChatModelParams & { openAIApiKey?: string },
fields?: Partial<OpenAIChatInput> & BaseChatModelParams & { openAIApiKey?: string; multiModalOption?: IMultiModalOption },
/** @deprecated */
configuration?: ClientOptions & LegacyOpenAIInput
) {
super(fields)
this.multiModalOption = fields?.multiModalOption
this.configuredModel = fields?.modelName ?? 'gpt-3.5-turbo'
this.configuredMaxToken = fields?.maxTokens
}
async invoke(input: BaseLanguageModelInput, options?: ChatOpenAICallOptions): Promise<BaseMessageChunk> {
//input.messages
return super.invoke(input, options)
}
async generate(messages: BaseMessageLike[][], options?: string[] | ChatOpenAICallOptions, callbacks?: Callbacks): Promise<LLMResult> {
//messages
await this.injectMultiModalMessages(messages)
return super.generate(messages, options, callbacks)
}
private async injectMultiModalMessages(messages: BaseMessageLike[][]) {
const nodeData = FlowiseChatOpenAI.chainNodeData
const optionsData = FlowiseChatOpenAI.chainNodeOptions
const messageContent = addImagesToMessages(nodeData, optionsData)
const nodeData = ChatOpenAI.chainNodeData
const optionsData = ChatOpenAI.chainNodeOptions
const messageContent = addImagesToMessages(nodeData, optionsData, this.multiModalOption)
if (messageContent?.length) {
if (messages[0].length > 0 && messages[0][messages[0].length - 1] instanceof HumanMessage) {
const lastMessage = messages[0].pop()
if (lastMessage instanceof HumanMessage) {
lastMessage.content = messageContent
// Change model to gpt-4-vision
this.modelName = 'gpt-4-vision-preview'
// Change default max token to higher when using gpt-4-vision
this.maxTokens = 1024
}
messages[0].push(lastMessage as HumanMessage)
}
} else {
// revert to previous values if image upload is empty
this.modelName = this.configuredModel
this.maxTokens = this.configuredMaxToken
}
}
}

View File

@ -20,7 +20,7 @@ class Airtable_DocumentLoaders implements INode {
constructor() {
this.label = 'Airtable'
this.name = 'airtable'
this.version = 2.0
this.version = 3.0
this.type = 'Document'
this.icon = 'airtable.svg'
this.category = 'Document Loaders'
@ -64,10 +64,21 @@ class Airtable_DocumentLoaders implements INode {
'If your view URL looks like: https://airtable.com/app11RobdGoX0YNsC/tblJdmvbrgizbYICO/viw9UrP77Id0CE4ee, viw9UrP77Id0CE4ee is the view id',
optional: true
},
{
label: 'Include Only Fields',
name: 'fields',
type: 'string',
placeholder: 'Name, Assignee, fld1u0qUz0SoOQ9Gg, fldew39v6LBN5CjUl',
optional: true,
additionalParams: true,
description:
'Comma-separated list of field names or IDs to include. If empty, then ALL fields are used. Use field IDs if field names contain commas.'
},
{
label: 'Return All',
name: 'returnAll',
type: 'boolean',
optional: true,
default: true,
additionalParams: true,
description: 'If all results should be returned or only up to a given limit'
@ -76,9 +87,10 @@ class Airtable_DocumentLoaders implements INode {
label: 'Limit',
name: 'limit',
type: 'number',
optional: true,
default: 100,
additionalParams: true,
description: 'Number of results to return'
description: 'Number of results to return. Ignored when Return All is enabled.'
},
{
label: 'Metadata',
@ -93,6 +105,8 @@ class Airtable_DocumentLoaders implements INode {
const baseId = nodeData.inputs?.baseId as string
const tableId = nodeData.inputs?.tableId as string
const viewId = nodeData.inputs?.viewId as string
const fieldsInput = nodeData.inputs?.fields as string
const fields = fieldsInput ? fieldsInput.split(',').map((field) => field.trim()) : []
const returnAll = nodeData.inputs?.returnAll as boolean
const limit = nodeData.inputs?.limit as string
const textSplitter = nodeData.inputs?.textSplitter as TextSplitter
@ -105,6 +119,7 @@ class Airtable_DocumentLoaders implements INode {
baseId,
tableId,
viewId,
fields,
returnAll,
accessToken,
limit: limit ? parseInt(limit, 10) : 100
@ -112,6 +127,10 @@ class Airtable_DocumentLoaders implements INode {
const loader = new AirtableLoader(airtableOptions)
if (!baseId || !tableId) {
throw new Error('Base ID and Table ID must be provided.')
}
let docs = []
if (textSplitter) {
@ -145,10 +164,18 @@ interface AirtableLoaderParams {
tableId: string
accessToken: string
viewId?: string
fields?: string[]
limit?: number
returnAll?: boolean
}
interface AirtableLoaderRequest {
maxRecords?: number
view: string | undefined
fields?: string[]
offset?: string
}
interface AirtableLoaderResponse {
records: AirtableLoaderPage[]
offset?: string
@ -167,17 +194,20 @@ class AirtableLoader extends BaseDocumentLoader {
public readonly viewId?: string
public readonly fields: string[]
public readonly accessToken: string
public readonly limit: number
public readonly returnAll: boolean
constructor({ baseId, tableId, viewId, accessToken, limit = 100, returnAll = false }: AirtableLoaderParams) {
constructor({ baseId, tableId, viewId, fields = [], accessToken, limit = 100, returnAll = false }: AirtableLoaderParams) {
super()
this.baseId = baseId
this.tableId = tableId
this.viewId = viewId
this.fields = fields
this.accessToken = accessToken
this.limit = limit
this.returnAll = returnAll
@ -190,17 +220,21 @@ class AirtableLoader extends BaseDocumentLoader {
return this.loadLimit()
}
protected async fetchAirtableData(url: string, params: ICommonObject): Promise<AirtableLoaderResponse> {
protected async fetchAirtableData(url: string, data: AirtableLoaderRequest): Promise<AirtableLoaderResponse> {
try {
const headers = {
Authorization: `Bearer ${this.accessToken}`,
'Content-Type': 'application/json',
Accept: 'application/json'
}
const response = await axios.get(url, { params, headers })
const response = await axios.post(url, data, { headers })
return response.data
} catch (error) {
throw new Error(`Failed to fetch ${url} from Airtable: ${error}`)
if (axios.isAxiosError(error)) {
throw new Error(`Failed to fetch ${url} from Airtable: ${error.message}, status: ${error.response?.status}`)
} else {
throw new Error(`Failed to fetch ${url} from Airtable: ${error}`)
}
}
}
@ -218,24 +252,53 @@ class AirtableLoader extends BaseDocumentLoader {
}
private async loadLimit(): Promise<Document[]> {
const params = { maxRecords: this.limit, view: this.viewId }
const data = await this.fetchAirtableData(`https://api.airtable.com/v0/${this.baseId}/${this.tableId}`, params)
if (data.records.length === 0) {
return []
let data: AirtableLoaderRequest = {
maxRecords: this.limit,
view: this.viewId
}
return data.records.map((page) => this.createDocumentFromPage(page))
if (this.fields.length > 0) {
data.fields = this.fields
}
let response: AirtableLoaderResponse
let returnPages: AirtableLoaderPage[] = []
// Paginate if the user specifies a limit > 100 (like 200) but not return all.
do {
response = await this.fetchAirtableData(`https://api.airtable.com/v0/${this.baseId}/${this.tableId}/listRecords`, data)
returnPages.push(...response.records)
data.offset = response.offset
// Stop if we have fetched enough records
if (returnPages.length >= this.limit) break
} while (response.offset !== undefined)
// Truncate array to the limit if necessary
if (returnPages.length > this.limit) {
returnPages.length = this.limit
}
return returnPages.map((page) => this.createDocumentFromPage(page))
}
private async loadAll(): Promise<Document[]> {
const params: ICommonObject = { pageSize: 100, view: this.viewId }
let data: AirtableLoaderResponse
let data: AirtableLoaderRequest = {
view: this.viewId
}
if (this.fields.length > 0) {
data.fields = this.fields
}
let response: AirtableLoaderResponse
let returnPages: AirtableLoaderPage[] = []
do {
data = await this.fetchAirtableData(`https://api.airtable.com/v0/${this.baseId}/${this.tableId}`, params)
returnPages.push.apply(returnPages, data.records)
params.offset = data.offset
} while (data.offset !== undefined)
response = await this.fetchAirtableData(`https://api.airtable.com/v0/${this.baseId}/${this.tableId}/listRecords`, data)
returnPages.push(...response.records)
data.offset = response.offset
} while (response.offset !== undefined)
return returnPages.map((page) => this.createDocumentFromPage(page))
}
}

View File

@ -1,4 +1,4 @@
import { INode, INodeData, INodeParams } from '../../../src/Interface'
import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
import { TextSplitter } from 'langchain/text_splitter'
import { CheerioWebBaseLoader, WebBaseLoaderParams } from 'langchain/document_loaders/web/cheerio'
import { test } from 'linkifyjs'
@ -63,6 +63,7 @@ class Cheerio_DocumentLoaders implements INode {
name: 'limit',
type: 'number',
optional: true,
default: '10',
additionalParams: true,
description:
'Only used when "Get Relative Links Method" is selected. Set 0 to retrieve all relative links, default limit is 10.',
@ -86,11 +87,12 @@ class Cheerio_DocumentLoaders implements INode {
]
}
async init(nodeData: INodeData): Promise<any> {
async init(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
const textSplitter = nodeData.inputs?.textSplitter as TextSplitter
const metadata = nodeData.inputs?.metadata
const relativeLinksMethod = nodeData.inputs?.relativeLinksMethod as string
let limit = nodeData.inputs?.limit as string
const selectedLinks = nodeData.inputs?.selectedLinks as string[]
let limit = parseInt(nodeData.inputs?.limit as string)
let url = nodeData.inputs?.url as string
url = url.trim()
@ -117,23 +119,33 @@ class Cheerio_DocumentLoaders implements INode {
}
return docs
} catch (err) {
if (process.env.DEBUG === 'true') console.error(`error in CheerioWebBaseLoader: ${err.message}, on page: ${url}`)
if (process.env.DEBUG === 'true') options.logger.error(`error in CheerioWebBaseLoader: ${err.message}, on page: ${url}`)
}
}
let docs = []
if (relativeLinksMethod) {
if (process.env.DEBUG === 'true') console.info(`Start ${relativeLinksMethod}`)
if (!limit) limit = '10'
else if (parseInt(limit) < 0) throw new Error('Limit cannot be less than 0')
if (process.env.DEBUG === 'true') options.logger.info(`Start ${relativeLinksMethod}`)
if (!limit) limit = 10
else if (limit < 0) throw new Error('Limit cannot be less than 0')
const pages: string[] =
relativeLinksMethod === 'webCrawl' ? await webCrawl(url, parseInt(limit)) : await xmlScrape(url, parseInt(limit))
if (process.env.DEBUG === 'true') console.info(`pages: ${JSON.stringify(pages)}, length: ${pages.length}`)
selectedLinks && selectedLinks.length > 0
? selectedLinks.slice(0, limit)
: relativeLinksMethod === 'webCrawl'
? await webCrawl(url, limit)
: await xmlScrape(url, limit)
if (process.env.DEBUG === 'true') options.logger.info(`pages: ${JSON.stringify(pages)}, length: ${pages.length}`)
if (!pages || pages.length === 0) throw new Error('No relative links found')
for (const page of pages) {
docs.push(...(await cheerioLoader(page)))
}
if (process.env.DEBUG === 'true') console.info(`Finish ${relativeLinksMethod}`)
if (process.env.DEBUG === 'true') options.logger.info(`Finish ${relativeLinksMethod}`)
} else if (selectedLinks && selectedLinks.length > 0) {
if (process.env.DEBUG === 'true')
options.logger.info(`pages: ${JSON.stringify(selectedLinks)}, length: ${selectedLinks.length}`)
for (const page of selectedLinks) {
docs.push(...(await cheerioLoader(page)))
}
} else {
docs = await cheerioLoader(url)
}

View File

@ -1,6 +1,7 @@
import { getCredentialData, getCredentialParam } from '../../../src'
import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
import { FigmaFileLoader, FigmaLoaderParams } from 'langchain/document_loaders/web/figma'
import { TextSplitter } from 'langchain/text_splitter'
class Figma_DocumentLoaders implements INode {
label: string
@ -71,6 +72,8 @@ class Figma_DocumentLoaders implements INode {
async init(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
const nodeIds = (nodeData.inputs?.nodeIds as string)?.trim().split(',') || []
const fileKey = nodeData.inputs?.fileKey as string
const textSplitter = nodeData.inputs?.textSplitter as TextSplitter
const metadata = nodeData.inputs?.metadata
const credentialData = await getCredentialData(nodeData.credential ?? '', options)
const accessToken = getCredentialParam('accessToken', credentialData, nodeData)
@ -82,7 +85,21 @@ class Figma_DocumentLoaders implements INode {
}
const loader = new FigmaFileLoader(figmaOptions)
const docs = await loader.load()
const docs = textSplitter ? await loader.loadAndSplit() : await loader.load()
if (metadata) {
const parsedMetadata = typeof metadata === 'object' ? metadata : JSON.parse(metadata)
return docs.map((doc) => {
return {
...doc,
metadata: {
...doc.metadata,
...parsedMetadata
}
}
})
}
return docs
}

View File

@ -1,4 +1,4 @@
import { INode, INodeData, INodeParams } from '../../../src/Interface'
import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
import { TextSplitter } from 'langchain/text_splitter'
import { Browser, Page, PlaywrightWebBaseLoader, PlaywrightWebBaseLoaderOptions } from 'langchain/document_loaders/web/playwright'
import { test } from 'linkifyjs'
@ -61,6 +61,7 @@ class Playwright_DocumentLoaders implements INode {
name: 'limit',
type: 'number',
optional: true,
default: '10',
additionalParams: true,
description:
'Only used when "Get Relative Links Method" is selected. Set 0 to retrieve all relative links, default limit is 10.',
@ -114,11 +115,12 @@ class Playwright_DocumentLoaders implements INode {
]
}
async init(nodeData: INodeData): Promise<any> {
async init(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
const textSplitter = nodeData.inputs?.textSplitter as TextSplitter
const metadata = nodeData.inputs?.metadata
const relativeLinksMethod = nodeData.inputs?.relativeLinksMethod as string
let limit = nodeData.inputs?.limit as string
const selectedLinks = nodeData.inputs?.selectedLinks as string[]
let limit = parseInt(nodeData.inputs?.limit as string)
let waitUntilGoToOption = nodeData.inputs?.waitUntilGoToOption as 'load' | 'domcontentloaded' | 'networkidle' | 'commit' | undefined
let waitForSelector = nodeData.inputs?.waitForSelector as string
@ -158,23 +160,33 @@ class Playwright_DocumentLoaders implements INode {
}
return docs
} catch (err) {
if (process.env.DEBUG === 'true') console.error(`error in PlaywrightWebBaseLoader: ${err.message}, on page: ${url}`)
if (process.env.DEBUG === 'true') options.logger.error(`error in PlaywrightWebBaseLoader: ${err.message}, on page: ${url}`)
}
}
let docs = []
if (relativeLinksMethod) {
if (process.env.DEBUG === 'true') console.info(`Start ${relativeLinksMethod}`)
if (!limit) limit = '10'
else if (parseInt(limit) < 0) throw new Error('Limit cannot be less than 0')
if (process.env.DEBUG === 'true') options.logger.info(`Start ${relativeLinksMethod}`)
if (!limit) limit = 10
else if (limit < 0) throw new Error('Limit cannot be less than 0')
const pages: string[] =
relativeLinksMethod === 'webCrawl' ? await webCrawl(url, parseInt(limit)) : await xmlScrape(url, parseInt(limit))
if (process.env.DEBUG === 'true') console.info(`pages: ${JSON.stringify(pages)}, length: ${pages.length}`)
selectedLinks && selectedLinks.length > 0
? selectedLinks.slice(0, limit)
: relativeLinksMethod === 'webCrawl'
? await webCrawl(url, limit)
: await xmlScrape(url, limit)
if (process.env.DEBUG === 'true') options.logger.info(`pages: ${JSON.stringify(pages)}, length: ${pages.length}`)
if (!pages || pages.length === 0) throw new Error('No relative links found')
for (const page of pages) {
docs.push(...(await playwrightLoader(page)))
}
if (process.env.DEBUG === 'true') console.info(`Finish ${relativeLinksMethod}`)
if (process.env.DEBUG === 'true') options.logger.info(`Finish ${relativeLinksMethod}`)
} else if (selectedLinks && selectedLinks.length > 0) {
if (process.env.DEBUG === 'true')
options.logger.info(`pages: ${JSON.stringify(selectedLinks)}, length: ${selectedLinks.length}`)
for (const page of selectedLinks) {
docs.push(...(await playwrightLoader(page)))
}
} else {
docs = await playwrightLoader(url)
}

View File

@ -1,4 +1,4 @@
import { INode, INodeData, INodeParams } from '../../../src/Interface'
import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
import { TextSplitter } from 'langchain/text_splitter'
import { Browser, Page, PuppeteerWebBaseLoader, PuppeteerWebBaseLoaderOptions } from 'langchain/document_loaders/web/puppeteer'
import { test } from 'linkifyjs'
@ -62,6 +62,7 @@ class Puppeteer_DocumentLoaders implements INode {
name: 'limit',
type: 'number',
optional: true,
default: '10',
additionalParams: true,
description:
'Only used when "Get Relative Links Method" is selected. Set 0 to retrieve all relative links, default limit is 10.',
@ -115,11 +116,12 @@ class Puppeteer_DocumentLoaders implements INode {
]
}
async init(nodeData: INodeData): Promise<any> {
async init(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
const textSplitter = nodeData.inputs?.textSplitter as TextSplitter
const metadata = nodeData.inputs?.metadata
const relativeLinksMethod = nodeData.inputs?.relativeLinksMethod as string
let limit = nodeData.inputs?.limit as string
const selectedLinks = nodeData.inputs?.selectedLinks as string[]
let limit = parseInt(nodeData.inputs?.limit as string)
let waitUntilGoToOption = nodeData.inputs?.waitUntilGoToOption as PuppeteerLifeCycleEvent
let waitForSelector = nodeData.inputs?.waitForSelector as string
@ -159,23 +161,33 @@ class Puppeteer_DocumentLoaders implements INode {
}
return docs
} catch (err) {
if (process.env.DEBUG === 'true') console.error(`error in PuppeteerWebBaseLoader: ${err.message}, on page: ${url}`)
if (process.env.DEBUG === 'true') options.logger.error(`error in PuppeteerWebBaseLoader: ${err.message}, on page: ${url}`)
}
}
let docs = []
if (relativeLinksMethod) {
if (process.env.DEBUG === 'true') console.info(`Start ${relativeLinksMethod}`)
if (!limit) limit = '10'
else if (parseInt(limit) < 0) throw new Error('Limit cannot be less than 0')
if (process.env.DEBUG === 'true') options.logger.info(`Start ${relativeLinksMethod}`)
if (!limit) limit = 10
else if (limit < 0) throw new Error('Limit cannot be less than 0')
const pages: string[] =
relativeLinksMethod === 'webCrawl' ? await webCrawl(url, parseInt(limit)) : await xmlScrape(url, parseInt(limit))
if (process.env.DEBUG === 'true') console.info(`pages: ${JSON.stringify(pages)}, length: ${pages.length}`)
selectedLinks && selectedLinks.length > 0
? selectedLinks.slice(0, limit)
: relativeLinksMethod === 'webCrawl'
? await webCrawl(url, limit)
: await xmlScrape(url, limit)
if (process.env.DEBUG === 'true') options.logger.info(`pages: ${JSON.stringify(pages)}, length: ${pages.length}`)
if (!pages || pages.length === 0) throw new Error('No relative links found')
for (const page of pages) {
docs.push(...(await puppeteerLoader(page)))
}
if (process.env.DEBUG === 'true') console.info(`Finish ${relativeLinksMethod}`)
if (process.env.DEBUG === 'true') options.logger.info(`Finish ${relativeLinksMethod}`)
} else if (selectedLinks && selectedLinks.length > 0) {
if (process.env.DEBUG === 'true')
options.logger.info(`pages: ${JSON.stringify(selectedLinks)}, length: ${selectedLinks.length}`)
for (const page of selectedLinks) {
docs.push(...(await puppeteerLoader(page)))
}
} else {
docs = await puppeteerLoader(url)
}

View File

@ -51,7 +51,7 @@ class VectorStoreToDocument_DocumentLoaders implements INode {
{
label: 'Document',
name: 'document',
baseClasses: this.baseClasses
baseClasses: [...this.baseClasses, 'json']
},
{
label: 'Text',

View File

@ -35,7 +35,7 @@ class AzureOpenAIEmbedding_Embeddings implements INode {
label: 'Batch Size',
name: 'batchSize',
type: 'number',
default: '1',
default: '100',
optional: true,
additionalParams: true
},

View File

@ -17,7 +17,7 @@ class OpenAIEmbedding_Embeddings implements INode {
constructor() {
this.label = 'OpenAI Embeddings'
this.name = 'openAIEmbeddings'
this.version = 1.0
this.version = 2.0
this.type = 'OpenAIEmbeddings'
this.icon = 'openai.svg'
this.category = 'Embeddings'
@ -30,6 +30,27 @@ class OpenAIEmbedding_Embeddings implements INode {
credentialNames: ['openAIApi']
}
this.inputs = [
{
label: 'Model Name',
name: 'modelName',
type: 'options',
options: [
{
label: 'text-embedding-3-large',
name: 'text-embedding-3-large'
},
{
label: 'text-embedding-3-small',
name: 'text-embedding-3-small'
},
{
label: 'text-embedding-ada-002',
name: 'text-embedding-ada-002'
}
],
default: 'text-embedding-ada-002',
optional: true
},
{
label: 'Strip New Lines',
name: 'stripNewLines',
@ -66,12 +87,14 @@ class OpenAIEmbedding_Embeddings implements INode {
const batchSize = nodeData.inputs?.batchSize as string
const timeout = nodeData.inputs?.timeout as string
const basePath = nodeData.inputs?.basepath as string
const modelName = nodeData.inputs?.modelName as string
const credentialData = await getCredentialData(nodeData.credential ?? '', options)
const openAIApiKey = getCredentialParam('openAIApiKey', credentialData, nodeData)
const obj: Partial<OpenAIEmbeddingsParams> & { openAIApiKey?: string } = {
openAIApiKey
openAIApiKey,
modelName
}
if (stripNewLines) obj.stripNewLines = stripNewLines

View File

@ -18,7 +18,7 @@ class AzureOpenAI_LLMs implements INode {
constructor() {
this.label = 'Azure OpenAI'
this.name = 'azureOpenAI'
this.version = 2.0
this.version = 2.1
this.type = 'AzureOpenAI'
this.icon = 'Azure.svg'
this.category = 'LLMs'
@ -89,6 +89,14 @@ class AzureOpenAI_LLMs implements INode {
{
label: 'gpt-35-turbo',
name: 'gpt-35-turbo'
},
{
label: 'gpt-4',
name: 'gpt-4'
},
{
label: 'gpt-4-32k',
name: 'gpt-4-32k'
}
],
default: 'text-davinci-003',

View File

@ -5,6 +5,24 @@ import { mapStoredMessageToChatMessage, AIMessage, HumanMessage, BaseMessage } f
import { convertBaseMessagetoIMessage, getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
import { FlowiseMemory, ICommonObject, IMessage, INode, INodeData, INodeParams, MemoryMethods, MessageType } from '../../../src/Interface'
let mongoClientSingleton: MongoClient
let mongoUrl: string
// Module-level MongoClient cache: reuse a single connection across node
// executions, recreating the client only when the connection URL changes.
const getMongoClient = async (newMongoUrl: string) => {
    if (!mongoClientSingleton) {
        // no client yet — create one and remember its URL
        mongoClientSingleton = new MongoClient(newMongoUrl)
        mongoUrl = newMongoUrl
        return mongoClientSingleton
    } else if (mongoClientSingleton && newMongoUrl !== mongoUrl) {
        // URL changed — close the stale client before replacing it.
        // close() returns a Promise; await it so the old connection is
        // actually released and errors surface here instead of as an
        // unhandled rejection.
        await mongoClientSingleton.close()
        mongoClientSingleton = new MongoClient(newMongoUrl)
        mongoUrl = newMongoUrl
        return mongoClientSingleton
    }
    return mongoClientSingleton
}
class MongoDB_Memory implements INode {
label: string
name: string
@ -79,9 +97,7 @@ const initializeMongoDB = async (nodeData: INodeData, options: ICommonObject): P
const credentialData = await getCredentialData(nodeData.credential ?? '', options)
const mongoDBConnectUrl = getCredentialParam('mongoDBConnectUrl', credentialData, nodeData)
const client = new MongoClient(mongoDBConnectUrl)
await client.connect()
const client = await getMongoClient(mongoDBConnectUrl)
const collection = client.db(databaseName).collection(collectionName)
const mongoDBChatMessageHistory = new MongoDBChatMessageHistory({

View File

@ -1,10 +1,47 @@
import { Redis } from 'ioredis'
import { Redis, RedisOptions } from 'ioredis'
import { isEqual } from 'lodash'
import { BufferMemory, BufferMemoryInput } from 'langchain/memory'
import { RedisChatMessageHistory, RedisChatMessageHistoryInput } from 'langchain/stores/message/ioredis'
import { mapStoredMessageToChatMessage, BaseMessage, AIMessage, HumanMessage } from 'langchain/schema'
import { INode, INodeData, INodeParams, ICommonObject, MessageType, IMessage, MemoryMethods, FlowiseMemory } from '../../../src/Interface'
import { convertBaseMessagetoIMessage, getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
let redisClientSingleton: Redis
let redisClientOption: RedisOptions
let redisClientUrl: string
// Module-level Redis client cache (options-based connections): reuse a
// single client across node executions, recreating it only when the
// connection options change (deep-compared with lodash isEqual).
const getRedisClientbyOption = (option: RedisOptions) => {
    if (!redisClientSingleton) {
        // no client yet — create one and remember its options
        redisClientSingleton = new Redis(option)
        redisClientOption = option
        return redisClientSingleton
    } else if (redisClientSingleton && !isEqual(option, redisClientOption)) {
        // options changed — retire the stale client and create a new one.
        // NOTE(review): quit() returns a Promise that is not awaited here,
        // so the old connection closes in the background — confirm this
        // fire-and-forget shutdown is intended.
        redisClientSingleton.quit()
        redisClientSingleton = new Redis(option)
        redisClientOption = option
        return redisClientSingleton
    }
    return redisClientSingleton
}
// Module-level Redis client cache (URL-based connections): reuse a single
// client across node executions, recreating it only when the URL changes.
const getRedisClientbyUrl = (url: string) => {
    if (!redisClientSingleton) {
        // no client yet — create one and remember its URL
        redisClientSingleton = new Redis(url)
        redisClientUrl = url
        return redisClientSingleton
    } else if (redisClientSingleton && url !== redisClientUrl) {
        // URL changed — retire the stale client and create a new one.
        // NOTE(review): quit() is not awaited; the old connection closes
        // in the background — confirm this fire-and-forget is intended.
        redisClientSingleton.quit()
        redisClientSingleton = new Redis(url)
        redisClientUrl = url
        return redisClientSingleton
    }
    return redisClientSingleton
}
class RedisBackedChatMemory_Memory implements INode {
label: string
name: string
@ -95,7 +132,7 @@ const initalizeRedis = async (nodeData: INodeData, options: ICommonObject): Prom
const tlsOptions = sslEnabled === true ? { tls: { rejectUnauthorized: false } } : {}
client = new Redis({
client = getRedisClientbyOption({
port: portStr ? parseInt(portStr) : 6379,
host,
username,
@ -103,7 +140,7 @@ const initalizeRedis = async (nodeData: INodeData, options: ICommonObject): Prom
...tlsOptions
})
} else {
client = new Redis(redisUrl)
client = getRedisClientbyUrl(redisUrl)
}
let obj: RedisChatMessageHistoryInput = {
@ -120,24 +157,6 @@ const initalizeRedis = async (nodeData: INodeData, options: ICommonObject): Prom
const redisChatMessageHistory = new RedisChatMessageHistory(obj)
/*redisChatMessageHistory.getMessages = async (): Promise<BaseMessage[]> => {
const rawStoredMessages = await client.lrange((redisChatMessageHistory as any).sessionId, windowSize ? -windowSize : 0, -1)
const orderedMessages = rawStoredMessages.reverse().map((message) => JSON.parse(message))
return orderedMessages.map(mapStoredMessageToChatMessage)
}
redisChatMessageHistory.addMessage = async (message: BaseMessage): Promise<void> => {
const messageToAdd = [message].map((msg) => msg.toDict())
await client.lpush((redisChatMessageHistory as any).sessionId, JSON.stringify(messageToAdd[0]))
if (sessionTTL) {
await client.expire((redisChatMessageHistory as any).sessionId, sessionTTL)
}
}
redisChatMessageHistory.clear = async (): Promise<void> => {
await client.del((redisChatMessageHistory as any).sessionId)
}*/
const memory = new BufferMemoryExtended({
memoryKey: memoryKey ?? 'chat_history',
chatHistory: redisChatMessageHistory,

View File

@ -29,16 +29,17 @@ class CustomListOutputParser implements INode {
label: 'Length',
name: 'length',
type: 'number',
default: 5,
step: 1,
description: 'Number of values to return'
description: 'Number of values to return',
optional: true
},
{
label: 'Separator',
name: 'separator',
type: 'string',
description: 'Separator between values',
default: ','
default: ',',
optional: true
},
{
label: 'Autofix',
@ -54,10 +55,11 @@ class CustomListOutputParser implements INode {
const separator = nodeData.inputs?.separator as string
const lengthStr = nodeData.inputs?.length as string
const autoFix = nodeData.inputs?.autofixParser as boolean
let length = 5
if (lengthStr) length = parseInt(lengthStr, 10)
const parser = new LangchainCustomListOutputParser({ length: length, separator: separator })
const parser = new LangchainCustomListOutputParser({
length: lengthStr ? parseInt(lengthStr, 10) : undefined,
separator: separator
})
Object.defineProperty(parser, 'autoFix', {
enumerable: true,
configurable: true,

View File

@ -0,0 +1 @@
<svg width="32" height="32" fill="none" xmlns="http://www.w3.org/2000/svg"><path fill-rule="evenodd" clip-rule="evenodd" d="M11.776 18.304c.64 0 1.92-.032 3.712-.768 2.08-.864 6.176-2.4 9.152-4 2.08-1.12 2.976-2.592 2.976-4.576 0-2.72-2.208-4.96-4.96-4.96h-11.52A7.143 7.143 0 0 0 4 11.136c0 3.936 3.008 7.168 7.776 7.168Z" fill="#39594D"/><path fill-rule="evenodd" clip-rule="evenodd" d="M13.728 23.2c0-1.92 1.152-3.68 2.944-4.416l3.616-1.504C23.968 15.776 28 18.464 28 22.432A5.572 5.572 0 0 1 22.432 28h-3.936c-2.624 0-4.768-2.144-4.768-4.8Z" fill="#D18EE2"/><path d="M8.128 19.232A4.138 4.138 0 0 0 4 23.36v.544C4 26.144 5.856 28 8.128 28a4.138 4.138 0 0 0 4.128-4.128v-.544c-.032-2.24-1.856-4.096-4.128-4.096Z" fill="#FF7759"/></svg>

After

Width:  |  Height:  |  Size: 738 B

View File

@ -0,0 +1,55 @@
import { Callbacks } from 'langchain/callbacks'
import { Document } from 'langchain/document'
import { BaseDocumentCompressor } from 'langchain/retrievers/document_compressors'
import axios from 'axios'
/**
 * Document compressor that re-ranks documents with the Cohere Rerank API,
 * ordering them from most to least semantically relevant to the query.
 * Used as the `baseCompressor` of a ContextualCompressionRetriever.
 */
export class CohereRerank extends BaseDocumentCompressor {
    private cohereAPIKey: string
    private COHERE_API_URL = 'https://api.cohere.ai/v1/rerank'
    private readonly model: string
    private readonly k: number
    private readonly maxChunksPerDoc: number

    constructor(cohereAPIKey: string, model: string, k: number, maxChunksPerDoc: number) {
        super()
        this.cohereAPIKey = cohereAPIKey
        this.model = model
        this.k = k
        this.maxChunksPerDoc = maxChunksPerDoc
    }

    /**
     * Re-rank `documents` against `query` via the Cohere API.
     * Returns at most `k` documents, each annotated with
     * `metadata.relevance_score`. On any API failure the original
     * documents are returned unchanged (best-effort behaviour).
     */
    async compressDocuments(
        documents: Document<Record<string, any>>[],
        query: string,
        _?: Callbacks | undefined
    ): Promise<Document<Record<string, any>>[]> {
        // avoid an empty API call
        if (documents.length === 0) {
            return []
        }
        const config = {
            headers: {
                Authorization: `Bearer ${this.cohereAPIKey}`,
                'Content-Type': 'application/json',
                Accept: 'application/json'
            }
        }
        const data = {
            model: this.model,
            // Cohere's rerank endpoint takes snake_case parameters; the
            // camelCase `topN` is not a recognised field, so use `top_n`
            // to actually cap the number of results server-side
            top_n: this.k,
            max_chunks_per_doc: this.maxChunksPerDoc,
            query: query,
            return_documents: false,
            documents: documents.map((doc) => doc.pageContent)
        }
        try {
            const returnedDocs = await axios.post(this.COHERE_API_URL, data, config)
            const finalResults: Document<Record<string, any>>[] = []
            returnedDocs.data.results.forEach((result: any) => {
                const doc = documents[result.index]
                doc.metadata.relevance_score = result.relevance_score
                finalResults.push(doc)
            })
            // defensive client-side cap in case the API returns more than k
            return finalResults.slice(0, this.k)
        } catch (error) {
            // Best-effort: if the rerank call fails, fall back to the
            // original ordering rather than breaking the whole chain.
            return documents
        }
    }
}

View File

@ -0,0 +1,142 @@
import { ICommonObject, INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface'
import { BaseRetriever } from 'langchain/schema/retriever'
import { ContextualCompressionRetriever } from 'langchain/retrievers/contextual_compression'
import { getCredentialData, getCredentialParam, handleEscapeCharacters } from '../../../src'
import { CohereRerank } from './CohereRerank'
import { VectorStoreRetriever } from 'langchain/vectorstores/base'
/**
 * Flowise node that wraps a vector-store retriever with Cohere Rerank:
 * initially retrieved documents are re-ordered from most to least relevant
 * to the query before being returned.
 */
class CohereRerankRetriever_Retrievers implements INode {
    label: string
    name: string
    version: number
    description: string
    type: string
    icon: string
    category: string
    baseClasses: string[]
    inputs: INodeParams[]
    credential: INodeParams
    badge: string
    outputs: INodeOutputsValue[]

    constructor() {
        this.label = 'Cohere Rerank Retriever'
        this.name = 'cohereRerankRetriever'
        this.version = 1.0
        this.type = 'Cohere Rerank Retriever'
        this.icon = 'Cohere.svg'
        this.category = 'Retrievers'
        this.badge = 'NEW'
        this.description = 'Cohere Rerank indexes the documents from most to least semantically relevant to the query.'
        this.baseClasses = [this.type, 'BaseRetriever']
        this.credential = {
            label: 'Connect Credential',
            name: 'credential',
            type: 'credential',
            credentialNames: ['cohereApi']
        }
        this.inputs = [
            {
                label: 'Vector Store Retriever',
                name: 'baseRetriever',
                type: 'VectorStoreRetriever'
            },
            {
                label: 'Model Name',
                name: 'model',
                type: 'options',
                options: [
                    {
                        label: 'rerank-english-v2.0',
                        name: 'rerank-english-v2.0'
                    },
                    {
                        label: 'rerank-multilingual-v2.0',
                        name: 'rerank-multilingual-v2.0'
                    }
                ],
                default: 'rerank-english-v2.0',
                optional: true
            },
            {
                label: 'Query',
                name: 'query',
                type: 'string',
                description: 'Query to retrieve documents from retriever. If not specified, user question will be used',
                optional: true,
                acceptVariable: true
            },
            {
                label: 'Top K',
                name: 'topK',
                description: 'Number of top results to fetch. Default to the TopK of the Base Retriever',
                placeholder: '4',
                type: 'number',
                additionalParams: true,
                optional: true
            },
            {
                label: 'Max Chunks Per Doc',
                name: 'maxChunksPerDoc',
                description: 'The maximum number of chunks to produce internally from a document. Default to 10',
                placeholder: '10',
                type: 'number',
                additionalParams: true,
                optional: true
            }
        ]
        this.outputs = [
            {
                label: 'Cohere Rerank Retriever',
                name: 'retriever',
                baseClasses: this.baseClasses
            },
            {
                label: 'Document',
                name: 'document',
                baseClasses: ['Document']
            },
            {
                label: 'Text',
                name: 'text',
                baseClasses: ['string', 'json']
            }
        ]
    }

    /**
     * Build the reranking retriever. Depending on the selected output,
     * returns the retriever itself, the reranked documents, or their
     * concatenated text.
     */
    async init(nodeData: INodeData, input: string, options: ICommonObject): Promise<any> {
        const baseRetriever = nodeData.inputs?.baseRetriever as BaseRetriever
        const model = nodeData.inputs?.model as string
        const query = nodeData.inputs?.query as string
        const credentialData = await getCredentialData(nodeData.credential ?? '', options)
        const cohereApiKey = getCredentialParam('cohereApiKey', credentialData, nodeData)
        const topK = nodeData.inputs?.topK as string
        // counts are integers — parse with parseInt so fractional UI input
        // cannot produce a non-integer top_n for the Cohere API
        const k = topK ? parseInt(topK, 10) : (baseRetriever as VectorStoreRetriever).k ?? 4
        const maxChunksPerDoc = nodeData.inputs?.maxChunksPerDoc as string
        const max_chunks_per_doc = maxChunksPerDoc ? parseInt(maxChunksPerDoc, 10) : 10
        const output = nodeData.outputs?.output as string

        const cohereCompressor = new CohereRerank(cohereApiKey, model, k, max_chunks_per_doc)
        const retriever = new ContextualCompressionRetriever({
            baseCompressor: cohereCompressor,
            baseRetriever: baseRetriever
        })

        if (output === 'retriever') return retriever
        else if (output === 'document') return await retriever.getRelevantDocuments(query ? query : input)
        else if (output === 'text') {
            let finaltext = ''

            const docs = await retriever.getRelevantDocuments(query ? query : input)

            for (const doc of docs) finaltext += `${doc.pageContent}\n`

            return handleEscapeCharacters(finaltext, false)
        }

        return retriever
    }
}

module.exports = { nodeClass: CohereRerankRetriever_Retrievers }

View File

@ -0,0 +1,133 @@
import { INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface'
import { BaseRetriever } from 'langchain/schema/retriever'
import { Embeddings } from 'langchain/embeddings/base'
import { ContextualCompressionRetriever } from 'langchain/retrievers/contextual_compression'
import { EmbeddingsFilter } from 'langchain/retrievers/document_compressors/embeddings_filter'
import { handleEscapeCharacters } from '../../../src/utils'
/**
 * Flowise node wrapping a vector-store retriever with an EmbeddingsFilter
 * compressor: documents whose embedding similarity to the query falls below
 * a threshold (or outside the top `k`) are dropped before being returned.
 */
class EmbeddingsFilterRetriever_Retrievers implements INode {
    label: string
    name: string
    version: number
    description: string
    type: string
    icon: string
    category: string
    baseClasses: string[]
    inputs: INodeParams[]
    outputs: INodeOutputsValue[]
    badge: string

    constructor() {
        this.label = 'Embeddings Filter Retriever'
        this.name = 'embeddingsFilterRetriever'
        this.version = 1.0
        this.type = 'EmbeddingsFilterRetriever'
        this.icon = 'compressionRetriever.svg'
        this.category = 'Retrievers'
        this.badge = 'NEW'
        this.description = 'A document compressor that uses embeddings to drop documents unrelated to the query'
        this.baseClasses = [this.type, 'BaseRetriever']
        this.inputs = [
            {
                label: 'Vector Store Retriever',
                name: 'baseRetriever',
                type: 'VectorStoreRetriever'
            },
            {
                label: 'Embeddings',
                name: 'embeddings',
                type: 'Embeddings'
            },
            {
                label: 'Query',
                name: 'query',
                type: 'string',
                description: 'Query to retrieve documents from retriever. If not specified, user question will be used',
                optional: true,
                acceptVariable: true
            },
            {
                label: 'Similarity Threshold',
                name: 'similarityThreshold',
                description:
                    'Threshold for determining when two documents are similar enough to be considered redundant. Must be specified if `k` is not set',
                type: 'number',
                default: 0.8,
                step: 0.1,
                optional: true
            },
            {
                label: 'K',
                name: 'k',
                description:
                    'The number of relevant documents to return. Can be explicitly set to undefined, in which case similarity_threshold must be specified. Defaults to 20',
                type: 'number',
                default: 20,
                step: 1,
                optional: true,
                additionalParams: true
            }
        ]
        this.outputs = [
            {
                label: 'Embeddings Filter Retriever',
                name: 'retriever',
                baseClasses: this.baseClasses
            },
            {
                label: 'Document',
                name: 'document',
                baseClasses: ['Document']
            },
            {
                label: 'Text',
                name: 'text',
                baseClasses: ['string', 'json']
            }
        ]
    }

    /**
     * Build the filtering retriever. Depending on the selected output,
     * returns the retriever itself, the filtered documents, or their
     * concatenated text.
     */
    async init(nodeData: INodeData, input: string): Promise<any> {
        const baseRetriever = nodeData.inputs?.baseRetriever as BaseRetriever
        const embeddings = nodeData.inputs?.embeddings as Embeddings
        const query = nodeData.inputs?.query as string
        const similarityThreshold = nodeData.inputs?.similarityThreshold as string
        const k = nodeData.inputs?.k as string
        const output = nodeData.outputs?.output as string

        // at least one pruning criterion is required by EmbeddingsFilter
        if (k === undefined && similarityThreshold === undefined) {
            throw new Error(`Must specify one of "k" or "similarity_threshold".`)
        }

        // NOTE(review): the threshold falls back to 0.8 even when only `k`
        // was provided, so both criteria are usually active — confirm that
        // applying the 0.8 threshold alongside `k` is intended
        const similarityThresholdNumber = similarityThreshold ? parseFloat(similarityThreshold) : 0.8
        const kNumber = k ? parseFloat(k) : undefined
        const baseCompressor = new EmbeddingsFilter({
            embeddings: embeddings,
            similarityThreshold: similarityThresholdNumber,
            k: kNumber
        })

        const retriever = new ContextualCompressionRetriever({
            baseCompressor,
            baseRetriever: baseRetriever
        })

        if (output === 'retriever') return retriever
        else if (output === 'document') return await retriever.getRelevantDocuments(query ? query : input)
        else if (output === 'text') {
            let finaltext = ''

            const docs = await retriever.getRelevantDocuments(query ? query : input)

            for (const doc of docs) finaltext += `${doc.pageContent}\n`

            return handleEscapeCharacters(finaltext, false)
        }

        return retriever
    }
}

module.exports = { nodeClass: EmbeddingsFilterRetriever_Retrievers }

View File

@ -0,0 +1,7 @@
<svg xmlns="http://www.w3.org/2000/svg" class="icon icon-tabler icon-tabler-chart-bar" width="24" height="24" viewBox="0 0 24 24" stroke-width="2" stroke="currentColor" fill="none" stroke-linecap="round" stroke-linejoin="round">
<path stroke="none" d="M0 0h24v24H0z" fill="none"/>
<path d="M3 12m0 1a1 1 0 0 1 1 -1h4a1 1 0 0 1 1 1v6a1 1 0 0 1 -1 1h-4a1 1 0 0 1 -1 -1z" />
<path d="M9 8m0 1a1 1 0 0 1 1 -1h4a1 1 0 0 1 1 1v10a1 1 0 0 1 -1 1h-4a1 1 0 0 1 -1 -1z" />
<path d="M15 4m0 1a1 1 0 0 1 1 -1h4a1 1 0 0 1 1 1v14a1 1 0 0 1 -1 1h-4a1 1 0 0 1 -1 -1z" />
<path d="M4 20l14 0" />
</svg>

After

Width:  |  Height:  |  Size: 600 B

View File

@ -1,8 +1,9 @@
import { VectorStore } from 'langchain/vectorstores/base'
import { INode, INodeData, INodeParams } from '../../../src/Interface'
import { INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface'
import { HydeRetriever, HydeRetrieverOptions, PromptKey } from 'langchain/retrievers/hyde'
import { BaseLanguageModel } from 'langchain/base_language'
import { PromptTemplate } from 'langchain/prompts'
import { handleEscapeCharacters } from '../../../src/utils'
class HydeRetriever_Retrievers implements INode {
label: string
@ -14,11 +15,12 @@ class HydeRetriever_Retrievers implements INode {
category: string
baseClasses: string[]
inputs: INodeParams[]
outputs: INodeOutputsValue[]
constructor() {
this.label = 'Hyde Retriever'
this.label = 'HyDE Retriever'
this.name = 'HydeRetriever'
this.version = 2.0
this.version = 3.0
this.type = 'HydeRetriever'
this.icon = 'hyderetriever.svg'
this.category = 'Retrievers'
@ -35,6 +37,14 @@ class HydeRetriever_Retrievers implements INode {
name: 'vectorStore',
type: 'VectorStore'
},
{
label: 'Query',
name: 'query',
type: 'string',
description: 'Query to retrieve documents from retriever. If not specified, user question will be used',
optional: true,
acceptVariable: true
},
{
label: 'Select Defined Prompt',
name: 'promptKey',
@ -121,15 +131,34 @@ Passage:`
optional: true
}
]
this.outputs = [
{
label: 'HyDE Retriever',
name: 'retriever',
baseClasses: this.baseClasses
},
{
label: 'Document',
name: 'document',
baseClasses: ['Document']
},
{
label: 'Text',
name: 'text',
baseClasses: ['string', 'json']
}
]
}
async init(nodeData: INodeData): Promise<any> {
async init(nodeData: INodeData, input: string): Promise<any> {
const llm = nodeData.inputs?.model as BaseLanguageModel
const vectorStore = nodeData.inputs?.vectorStore as VectorStore
const promptKey = nodeData.inputs?.promptKey as PromptKey
const customPrompt = nodeData.inputs?.customPrompt as string
const query = nodeData.inputs?.query as string
const topK = nodeData.inputs?.topK as string
const k = topK ? parseFloat(topK) : 4
const output = nodeData.outputs?.output as string
const obj: HydeRetrieverOptions<any> = {
llm,
@ -141,6 +170,19 @@ Passage:`
else if (promptKey) obj.promptTemplate = promptKey
const retriever = new HydeRetriever(obj)
if (output === 'retriever') return retriever
else if (output === 'document') return await retriever.getRelevantDocuments(query ? query : input)
else if (output === 'text') {
let finaltext = ''
const docs = await retriever.getRelevantDocuments(query ? query : input)
for (const doc of docs) finaltext += `${doc.pageContent}\n`
return handleEscapeCharacters(finaltext, false)
}
return retriever
}
}

View File

@ -0,0 +1,100 @@
import { INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface'
import { BaseRetriever } from 'langchain/schema/retriever'
import { ContextualCompressionRetriever } from 'langchain/retrievers/contextual_compression'
import { BaseLanguageModel } from 'langchain/base_language'
import { LLMChainExtractor } from 'langchain/retrievers/document_compressors/chain_extract'
import { handleEscapeCharacters } from '../../../src/utils'
/**
 * Flowise node wrapping a vector-store retriever with an LLMChainExtractor
 * compressor: each initially retrieved document is passed through the LLM,
 * which extracts only the content relevant to the query.
 */
class LLMFilterCompressionRetriever_Retrievers implements INode {
    label: string
    name: string
    version: number
    description: string
    type: string
    icon: string
    category: string
    baseClasses: string[]
    inputs: INodeParams[]
    outputs: INodeOutputsValue[]
    badge: string

    constructor() {
        // node identity and presentation
        this.label = 'LLM Filter Retriever'
        this.name = 'llmFilterRetriever'
        this.version = 1.0
        this.type = 'LLMFilterRetriever'
        this.icon = 'llmFilterRetriever.svg'
        this.category = 'Retrievers'
        this.badge = 'NEW'
        this.description =
            'Iterate over the initially returned documents and extract, from each, only the content that is relevant to the query'
        this.baseClasses = [this.type, 'BaseRetriever']
        // anchors: the retriever to wrap and the LLM doing the extraction
        this.inputs = [
            {
                label: 'Vector Store Retriever',
                name: 'baseRetriever',
                type: 'VectorStoreRetriever'
            },
            {
                label: 'Language Model',
                name: 'model',
                type: 'BaseLanguageModel'
            },
            {
                label: 'Query',
                name: 'query',
                type: 'string',
                description: 'Query to retrieve documents from retriever. If not specified, user question will be used',
                optional: true,
                acceptVariable: true
            }
        ]
        // three output shapes: the retriever, raw documents, or plain text
        this.outputs = [
            {
                label: 'LLM Filter Retriever',
                name: 'retriever',
                baseClasses: this.baseClasses
            },
            {
                label: 'Document',
                name: 'document',
                baseClasses: ['Document']
            },
            {
                label: 'Text',
                name: 'text',
                baseClasses: ['string', 'json']
            }
        ]
    }

    /**
     * Build the filtering retriever. Depending on the selected output,
     * returns the retriever itself, the extracted documents, or their
     * concatenated text.
     */
    async init(nodeData: INodeData, input: string): Promise<any> {
        // the LLM is mandatory — fail fast before wiring anything up
        const model = nodeData.inputs?.model as BaseLanguageModel
        if (!model) throw new Error('There must be a LLM model connected to LLM Filter Retriever')

        const baseRetriever = nodeData.inputs?.baseRetriever as BaseRetriever
        const query = nodeData.inputs?.query as string
        const output = nodeData.outputs?.output as string

        const retriever = new ContextualCompressionRetriever({
            baseCompressor: LLMChainExtractor.fromLLM(model),
            baseRetriever: baseRetriever
        })

        // explicit query input wins; otherwise fall back to the user question
        const effectiveQuery = query ? query : input

        switch (output) {
            case 'document':
                return await retriever.getRelevantDocuments(effectiveQuery)
            case 'text': {
                const docs = await retriever.getRelevantDocuments(effectiveQuery)
                const joined = docs.map((doc) => `${doc.pageContent}\n`).join('')
                return handleEscapeCharacters(joined, false)
            }
            default:
                // covers 'retriever' and any unrecognised output selection
                return retriever
        }
    }
}

module.exports = { nodeClass: LLMFilterCompressionRetriever_Retrievers }

View File

@ -0,0 +1 @@
<svg xmlns="http://www.w3.org/2000/svg" class="icon icon-tabler icon-tabler-filter-check" width="24" height="24" viewBox="0 0 24 24" stroke-width="2" stroke="currentColor" fill="none" stroke-linecap="round" stroke-linejoin="round"><path stroke="none" d="M0 0h24v24H0z" fill="none"/><path d="M11.18 20.274l-2.18 .726v-8.5l-4.48 -4.928a2 2 0 0 1 -.52 -1.345v-2.227h16v2.172a2 2 0 0 1 -.586 1.414l-4.414 4.414v3" /><path d="M15 19l2 2l4 -4" /></svg>

After

Width:  |  Height:  |  Size: 446 B

View File

@ -0,0 +1,136 @@
import { INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface'
import { BaseLanguageModel } from 'langchain/base_language'
import { ContextualCompressionRetriever } from 'langchain/retrievers/contextual_compression'
import { BaseRetriever } from 'langchain/schema/retriever'
import { ReciprocalRankFusion } from './ReciprocalRankFusion'
import { VectorStoreRetriever } from 'langchain/vectorstores/base'
import { handleEscapeCharacters } from '../../../src/utils'
/**
 * Flowise node wrapping a vector-store retriever with Reciprocal Rank
 * Fusion: the LLM generates multiple variants of the query, each variant is
 * searched, and the result lists are fused by reciprocal rank.
 */
class RRFRetriever_Retrievers implements INode {
    label: string
    name: string
    version: number
    description: string
    type: string
    icon: string
    category: string
    baseClasses: string[]
    inputs: INodeParams[]
    badge: string
    outputs: INodeOutputsValue[]

    constructor() {
        this.label = 'Reciprocal Rank Fusion Retriever'
        this.name = 'RRFRetriever'
        this.version = 1.0
        this.type = 'RRFRetriever'
        this.badge = 'NEW'
        this.icon = 'rrfRetriever.svg'
        this.category = 'Retrievers'
        this.description = 'Reciprocal Rank Fusion to re-rank search results by multiple query generation.'
        this.baseClasses = [this.type, 'BaseRetriever']
        this.inputs = [
            {
                label: 'Vector Store Retriever',
                name: 'baseRetriever',
                type: 'VectorStoreRetriever'
            },
            {
                label: 'Language Model',
                name: 'model',
                type: 'BaseLanguageModel'
            },
            {
                label: 'Query',
                name: 'query',
                type: 'string',
                description: 'Query to retrieve documents from retriever. If not specified, user question will be used',
                optional: true,
                acceptVariable: true
            },
            {
                label: 'Query Count',
                name: 'queryCount',
                description: 'Number of synthetic queries to generate. Default to 4',
                placeholder: '4',
                type: 'number',
                default: 4,
                additionalParams: true,
                optional: true
            },
            {
                label: 'Top K',
                name: 'topK',
                description: 'Number of top results to fetch. Default to the TopK of the Base Retriever',
                placeholder: '0',
                type: 'number',
                additionalParams: true,
                optional: true
            },
            {
                label: 'Constant',
                name: 'c',
                description:
                    'A constant added to the rank, controlling the balance between the importance of high-ranked items and the consideration given to lower-ranked items.\n' +
                    'Default is 60',
                placeholder: '60',
                type: 'number',
                default: 60,
                additionalParams: true,
                optional: true
            }
        ]
        this.outputs = [
            {
                label: 'Reciprocal Rank Fusion Retriever',
                name: 'retriever',
                baseClasses: this.baseClasses
            },
            {
                label: 'Document',
                name: 'document',
                baseClasses: ['Document']
            },
            {
                label: 'Text',
                name: 'text',
                baseClasses: ['string', 'json']
            }
        ]
    }

    /**
     * Build the fusion retriever. Depending on the selected output,
     * returns the retriever itself, the fused documents, or their
     * concatenated text.
     */
    async init(nodeData: INodeData, input: string): Promise<any> {
        const llm = nodeData.inputs?.model as BaseLanguageModel
        const baseRetriever = nodeData.inputs?.baseRetriever as BaseRetriever
        const query = nodeData.inputs?.query as string
        const queryCount = nodeData.inputs?.queryCount as string
        // counts are integers — parse with parseInt
        const q = queryCount ? parseInt(queryCount, 10) : 4
        const topK = nodeData.inputs?.topK as string
        const k = topK ? parseInt(topK, 10) : (baseRetriever as VectorStoreRetriever).k ?? 4
        const constantC = nodeData.inputs?.c as string
        // BUGFIX: previously gated on `topK`, so a set topK with an unset
        // constant produced parseFloat(undefined) === NaN for c
        const c = constantC ? parseInt(constantC, 10) : 60
        const output = nodeData.outputs?.output as string

        const ragFusion = new ReciprocalRankFusion(llm, baseRetriever as VectorStoreRetriever, q, k, c)
        const retriever = new ContextualCompressionRetriever({
            baseCompressor: ragFusion,
            baseRetriever: baseRetriever
        })

        if (output === 'retriever') return retriever
        else if (output === 'document') return await retriever.getRelevantDocuments(query ? query : input)
        else if (output === 'text') {
            let finaltext = ''

            const docs = await retriever.getRelevantDocuments(query ? query : input)

            for (const doc of docs) finaltext += `${doc.pageContent}\n`

            return handleEscapeCharacters(finaltext, false)
        }

        return retriever
    }
}

module.exports = { nodeClass: RRFRetriever_Retrievers }

View File

@ -0,0 +1,96 @@
import { BaseDocumentCompressor } from 'langchain/retrievers/document_compressors'
import { Document } from 'langchain/document'
import { Callbacks } from 'langchain/callbacks'
import { BaseLanguageModel } from 'langchain/base_language'
import { ChatPromptTemplate, HumanMessagePromptTemplate, SystemMessagePromptTemplate } from 'langchain/prompts'
import { LLMChain } from 'langchain/chains'
import { VectorStoreRetriever } from 'langchain/vectorstores/base'
export class ReciprocalRankFusion extends BaseDocumentCompressor {
    private readonly llm: BaseLanguageModel
    private readonly queryCount: number
    private readonly topK: number
    private readonly c: number
    private baseRetriever: VectorStoreRetriever

    /**
     * @param llm model used to generate the alternative search queries
     * @param baseRetriever retriever whose vector store is searched once per query
     * @param queryCount number of alternative queries to ask the LLM for
     * @param topK maximum number of fused documents to return
     * @param c smoothing constant of the RRF formula 1 / (rank + c)
     */
    constructor(llm: BaseLanguageModel, baseRetriever: VectorStoreRetriever, queryCount: number, topK: number, c: number) {
        super()
        this.queryCount = queryCount
        this.llm = llm
        this.baseRetriever = baseRetriever
        this.topK = topK
        this.c = c
    }

    /**
     * Generates alternative queries from the incoming query, runs a similarity
     * search for each, and fuses the ranked result lists with Reciprocal Rank
     * Fusion. The incoming `documents` are only used to short-circuit on empty
     * input; retrieval is re-done per generated query.
     */
    async compressDocuments(
        documents: Document<Record<string, any>>[],
        query: string,
        _?: Callbacks | undefined
    ): Promise<Document<Record<string, any>>[]> {
        // avoid empty api call
        if (documents.length === 0) {
            return []
        }
        const chatPrompt = ChatPromptTemplate.fromMessages([
            SystemMessagePromptTemplate.fromTemplate(
                'You are a helpful assistant that generates multiple search queries based on a single input query.'
            ),
            HumanMessagePromptTemplate.fromTemplate(
                'Generate multiple search queries related to: {input}. Provide these alternative questions separated by newlines, do not add any numbers.'
            ),
            HumanMessagePromptTemplate.fromTemplate('OUTPUT (' + this.queryCount + ' queries):')
        ])
        const llmChain = new LLMChain({
            llm: this.llm,
            prompt: chatPrompt
        })
        const multipleQueries = await llmChain.call({ input: query })
        // Original query first, then the generated ones. Blank lines in the LLM
        // output previously leaked through as empty search queries - drop them.
        const queries: string[] = [
            query,
            ...multipleQueries.text
                .split('\n')
                .map((line: string) => line.trim())
                .filter((line: string) => line.length > 0)
        ]
        // Run the per-query searches in parallel instead of sequentially.
        // NOTE(review): the fetch size is hard-coded to 5 per query - presumably
        // it should track topK; confirm before changing.
        const docList: Document<Record<string, any>>[][] = await Promise.all(
            queries.map((q) => this.baseRetriever.vectorStore.similaritySearch(q, 5))
        )
        return this.reciprocalRankFunction(docList, this.c)
    }

    /**
     * Fuses the ranked lists with RRF: score(doc) = sum over lists of 1 / (rank + k).
     * Documents are keyed by pageContent; the previous implementation de-duplicated
     * by score value, which silently dropped distinct documents that happened to
     * share a score (e.g. the rank-1 hit of every query list all score 1/(1+k)).
     */
    reciprocalRankFunction(docList: Document<Record<string, any>>[][], k: number): Document<Record<string, any>>[] {
        const fused = new Map<string, { doc: Document<Record<string, any>>; score: number }>()
        docList.forEach((docs) => {
            docs.forEach((doc, index) => {
                const rank = index + 1
                const entry = fused.get(doc.pageContent)
                if (entry) {
                    entry.score += 1 / (rank + k)
                } else {
                    fused.set(doc.pageContent, { doc, score: 1 / (rank + k) })
                }
            })
        })
        const ranked = [...fused.values()].sort((a, b) => b.score - a.score)
        // keep the score on metadata, as the previous implementation did
        ranked.forEach(({ doc, score }) => {
            doc.metadata.relevancy_score = score
        })
        return ranked.slice(0, this.topK).map(({ doc }) => doc)
    }
}

View File

@ -0,0 +1 @@
<svg xmlns="http://www.w3.org/2000/svg" class="icon icon-tabler icon-tabler-math-x-divide-y-2" width="24" height="24" viewBox="0 0 24 24" stroke-width="2" stroke="currentColor" fill="none" stroke-linecap="round" stroke-linejoin="round"><path stroke="none" d="M0 0h24v24H0z" fill="none"/><path d="M3 21l18 -18" /><path d="M15 14l3 4.5" /><path d="M21 14l-4.5 7" /><path d="M3 4l6 6" /><path d="M3 10l6 -6" /></svg>

After

Width:  |  Height:  |  Size: 413 B

View File

@ -18,7 +18,7 @@ class SimilarityThresholdRetriever_Retrievers implements INode {
constructor() {
this.label = 'Similarity Score Threshold Retriever'
this.name = 'similarityThresholdRetriever'
this.version = 1.0
this.version = 2.0
this.type = 'SimilarityThresholdRetriever'
this.icon = 'similaritythreshold.svg'
this.category = 'Retrievers'
@ -30,6 +30,14 @@ class SimilarityThresholdRetriever_Retrievers implements INode {
name: 'vectorStore',
type: 'VectorStore'
},
{
label: 'Query',
name: 'query',
type: 'string',
description: 'Query to retrieve documents from retriever. If not specified, user question will be used',
optional: true,
acceptVariable: true
},
{
label: 'Minimum Similarity Score (%)',
name: 'minSimilarityScore',
@ -44,7 +52,8 @@ class SimilarityThresholdRetriever_Retrievers implements INode {
description: `The maximum number of results to fetch`,
type: 'number',
default: 20,
step: 1
step: 1,
additionalParams: true
},
{
label: 'K Increment',
@ -52,7 +61,8 @@ class SimilarityThresholdRetriever_Retrievers implements INode {
description: `How much to increase K by each time. It'll fetch N results, then N + kIncrement, then N + kIncrement * 2, etc.`,
type: 'number',
default: 2,
step: 1
step: 1,
additionalParams: true
}
]
this.outputs = [
@ -77,6 +87,7 @@ class SimilarityThresholdRetriever_Retrievers implements INode {
async init(nodeData: INodeData, input: string): Promise<any> {
const vectorStore = nodeData.inputs?.vectorStore as VectorStore
const minSimilarityScore = nodeData.inputs?.minSimilarityScore as number
const query = nodeData.inputs?.query as string
const maxK = nodeData.inputs?.maxK as string
const kIncrement = nodeData.inputs?.kIncrement as string
@ -89,11 +100,11 @@ class SimilarityThresholdRetriever_Retrievers implements INode {
})
if (output === 'retriever') return retriever
else if (output === 'document') return await retriever.getRelevantDocuments(input)
else if (output === 'document') return await retriever.getRelevantDocuments(query ? query : input)
else if (output === 'text') {
let finaltext = ''
const docs = await retriever.getRelevantDocuments(input)
const docs = await retriever.getRelevantDocuments(query ? query : input)
for (const doc of docs) finaltext += `${doc.pageContent}\n`

View File

@ -1,5 +1,5 @@
import { ICommonObject, IDatabaseEntity, INode, INodeData, INodeOptionsValue, INodeParams } from '../../../src/Interface'
import { convertSchemaToZod, getBaseClasses } from '../../../src/utils'
import { convertSchemaToZod, getBaseClasses, getVars } from '../../../src/utils'
import { DynamicStructuredTool } from './core'
import { z } from 'zod'
import { DataSource } from 'typeorm'
@ -81,23 +81,7 @@ class CustomTool_Tools implements INode {
}
if (customToolFunc) obj.code = customToolFunc
const variables = await appDataSource.getRepository(databaseEntities['Variable']).find()
// override variables defined in overrideConfig
// nodeData.inputs.variables is an Object, check each property and override the variable
if (nodeData?.inputs?.vars) {
for (const propertyName of Object.getOwnPropertyNames(nodeData.inputs.vars)) {
const foundVar = variables.find((v) => v.name === propertyName)
if (foundVar) {
// even if the variable was defined as runtime, we override it with static value
foundVar.type = 'static'
foundVar.value = nodeData.inputs.vars[propertyName]
} else {
// add it the variables, if not found locally in the db
variables.push({ name: propertyName, type: 'static', value: nodeData.inputs.vars[propertyName] })
}
}
}
const variables = await getVars(appDataSource, databaseEntities, nodeData)
const flow = { chatflowId: options.chatflowid }

View File

@ -1,6 +1,6 @@
import { z } from 'zod'
import { NodeVM } from 'vm2'
import { availableDependencies } from '../../../src/utils'
import { availableDependencies, defaultAllowBuiltInDep, prepareSandboxVars } from '../../../src/utils'
import { RunnableConfig } from '@langchain/core/runnables'
import { StructuredTool, ToolParams } from '@langchain/core/tools'
import { CallbackManagerForToolRun, Callbacks, CallbackManager, parseCallbackConfigArg } from '@langchain/core/callbacks/manager'
@ -112,48 +112,13 @@ export class DynamicStructuredTool<
}
}
// inject variables
let vars = {}
if (this.variables) {
for (const item of this.variables) {
let value = item.value
// read from .env file
if (item.type === 'runtime') {
value = process.env[item.name]
}
Object.defineProperty(vars, item.name, {
enumerable: true,
configurable: true,
writable: true,
value: value
})
}
}
sandbox['$vars'] = vars
sandbox['$vars'] = prepareSandboxVars(this.variables)
// inject flow properties
if (this.flowObj) {
sandbox['$flow'] = { ...this.flowObj, ...flowConfig }
}
const defaultAllowBuiltInDep = [
'assert',
'buffer',
'crypto',
'events',
'http',
'https',
'net',
'path',
'querystring',
'timers',
'tls',
'url',
'zlib'
]
const builtinDeps = process.env.TOOL_FUNCTION_BUILTIN_DEP
? defaultAllowBuiltInDep.concat(process.env.TOOL_FUNCTION_BUILTIN_DEP.split(','))
: defaultAllowBuiltInDep

View File

@ -1,8 +1,11 @@
import { INode, INodeData, INodeParams } from '../../../src/Interface'
import { getBaseClasses } from '../../../src/utils'
import { DynamicTool } from 'langchain/tools'
import { createRetrieverTool } from 'langchain/agents/toolkits'
import { DynamicStructuredTool } from '@langchain/core/tools'
import { CallbackManagerForToolRun } from '@langchain/core/callbacks/manager'
import { BaseRetriever } from 'langchain/schema/retriever'
import { z } from 'zod'
import { SOURCE_DOCUMENTS_PREFIX } from '../../../src/agents'
class Retriever_Tools implements INode {
label: string
@ -19,7 +22,7 @@ class Retriever_Tools implements INode {
constructor() {
this.label = 'Retriever Tool'
this.name = 'retrieverTool'
this.version = 1.0
this.version = 2.0
this.type = 'RetrieverTool'
this.icon = 'retrievertool.svg'
this.category = 'Tools'
@ -44,6 +47,12 @@ class Retriever_Tools implements INode {
label: 'Retriever',
name: 'retriever',
type: 'BaseRetriever'
},
{
label: 'Return Source Documents',
name: 'returnSourceDocuments',
type: 'boolean',
optional: true
}
]
}
@ -52,12 +61,25 @@ class Retriever_Tools implements INode {
const name = nodeData.inputs?.name as string
const description = nodeData.inputs?.description as string
const retriever = nodeData.inputs?.retriever as BaseRetriever
const returnSourceDocuments = nodeData.inputs?.returnSourceDocuments as boolean
const tool = createRetrieverTool(retriever, {
const input = {
name,
description
}
const func = async ({ input }: { input: string }, runManager?: CallbackManagerForToolRun) => {
const docs = await retriever.getRelevantDocuments(input, runManager?.getChild('retriever'))
const content = docs.map((doc) => doc.pageContent).join('\n\n')
const sourceDocuments = JSON.stringify(docs)
return returnSourceDocuments ? content + SOURCE_DOCUMENTS_PREFIX + sourceDocuments : content
}
const schema = z.object({
input: z.string().describe('query to look up in retriever')
})
const tool = new DynamicStructuredTool({ ...input, func, schema })
return tool
}
}

View File

@ -1,6 +1,7 @@
import { ICommonObject, INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface'
import { ICommonObject, IDatabaseEntity, INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface'
import { NodeVM } from 'vm2'
import { availableDependencies, handleEscapeCharacters } from '../../../src/utils'
import { DataSource } from 'typeorm'
import { availableDependencies, defaultAllowBuiltInDep, getVars, handleEscapeCharacters, prepareSandboxVars } from '../../../src/utils'
class CustomFunction_Utilities implements INode {
label: string
@ -51,13 +52,31 @@ class CustomFunction_Utilities implements INode {
label: 'Output',
name: 'output',
baseClasses: ['string', 'number', 'boolean', 'json', 'array']
},
{
label: 'Ending Node',
name: 'EndingNode',
baseClasses: [this.type]
}
]
}
async init(nodeData: INodeData, input: string): Promise<any> {
async init(nodeData: INodeData, input: string, options: ICommonObject): Promise<any> {
const isEndingNode = nodeData?.outputs?.output === 'EndingNode'
if (isEndingNode && !options.isRun) return // prevent running both init and run twice
const javascriptFunction = nodeData.inputs?.javascriptFunction as string
const functionInputVariablesRaw = nodeData.inputs?.functionInputVariables
const appDataSource = options.appDataSource as DataSource
const databaseEntities = options.databaseEntities as IDatabaseEntity
const variables = await getVars(appDataSource, databaseEntities, nodeData)
const flow = {
chatflowId: options.chatflowid,
sessionId: options.sessionId,
chatId: options.chatId,
input
}
let inputVars: ICommonObject = {}
if (functionInputVariablesRaw) {
@ -69,29 +88,30 @@ class CustomFunction_Utilities implements INode {
}
}
let sandbox: any = { $input: input }
if (Object.keys(inputVars).length) {
for (const item in inputVars) {
sandbox[`$${item}`] = inputVars[item]
// Some values might be a stringified JSON, parse it
for (const key in inputVars) {
if (typeof inputVars[key] === 'string' && inputVars[key].startsWith('{') && inputVars[key].endsWith('}')) {
try {
inputVars[key] = JSON.parse(inputVars[key])
} catch (e) {
continue
}
}
}
const defaultAllowBuiltInDep = [
'assert',
'buffer',
'crypto',
'events',
'http',
'https',
'net',
'path',
'querystring',
'timers',
'tls',
'url',
'zlib'
]
let sandbox: any = { $input: input }
sandbox['$vars'] = prepareSandboxVars(variables)
sandbox['$flow'] = flow
if (Object.keys(inputVars).length) {
for (const item in inputVars) {
let value = inputVars[item]
if (typeof value === 'string') {
value = handleEscapeCharacters(value, true)
}
sandbox[`$${item}`] = value
}
}
const builtinDeps = process.env.TOOL_FUNCTION_BUILTIN_DEP
? defaultAllowBuiltInDep.concat(process.env.TOOL_FUNCTION_BUILTIN_DEP.split(','))
@ -111,7 +131,8 @@ class CustomFunction_Utilities implements INode {
const vm = new NodeVM(nodeVMOptions)
try {
const response = await vm.run(`module.exports = async function() {${javascriptFunction}}()`, __dirname)
if (typeof response === 'string') {
if (typeof response === 'string' && !isEndingNode) {
return handleEscapeCharacters(response, false)
}
return response
@ -119,6 +140,10 @@ class CustomFunction_Utilities implements INode {
throw new Error(e)
}
}
async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string> {
return await this.init(nodeData, input, { ...options, isRun: true })
}
}
module.exports = { nodeClass: CustomFunction_Utilities }

View File

@ -1,6 +1,7 @@
import { ICommonObject, INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface'
import { ICommonObject, IDatabaseEntity, INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface'
import { NodeVM } from 'vm2'
import { availableDependencies } from '../../../src/utils'
import { DataSource } from 'typeorm'
import { availableDependencies, defaultAllowBuiltInDep, getVars, handleEscapeCharacters, prepareSandboxVars } from '../../../src/utils'
class IfElseFunction_Utilities implements INode {
label: string
@ -73,10 +74,20 @@ class IfElseFunction_Utilities implements INode {
]
}
async init(nodeData: INodeData, input: string): Promise<any> {
async init(nodeData: INodeData, input: string, options: ICommonObject): Promise<any> {
const ifFunction = nodeData.inputs?.ifFunction as string
const elseFunction = nodeData.inputs?.elseFunction as string
const functionInputVariablesRaw = nodeData.inputs?.functionInputVariables
const appDataSource = options.appDataSource as DataSource
const databaseEntities = options.databaseEntities as IDatabaseEntity
const variables = await getVars(appDataSource, databaseEntities, nodeData)
const flow = {
chatflowId: options.chatflowid,
sessionId: options.sessionId,
chatId: options.chatId,
input
}
let inputVars: ICommonObject = {}
if (functionInputVariablesRaw) {
@ -84,34 +95,35 @@ class IfElseFunction_Utilities implements INode {
inputVars =
typeof functionInputVariablesRaw === 'object' ? functionInputVariablesRaw : JSON.parse(functionInputVariablesRaw)
} catch (exception) {
throw new Error("Invalid JSON in the PromptTemplate's promptValues: " + exception)
throw new Error("Invalid JSON in the IfElse's Input Variables: " + exception)
}
}
// Some values might be a stringified JSON, parse it
for (const key in inputVars) {
if (typeof inputVars[key] === 'string' && inputVars[key].startsWith('{') && inputVars[key].endsWith('}')) {
try {
inputVars[key] = JSON.parse(inputVars[key])
} catch (e) {
continue
}
}
}
let sandbox: any = { $input: input }
sandbox['$vars'] = prepareSandboxVars(variables)
sandbox['$flow'] = flow
if (Object.keys(inputVars).length) {
for (const item in inputVars) {
sandbox[`$${item}`] = inputVars[item]
let value = inputVars[item]
if (typeof value === 'string') {
value = handleEscapeCharacters(value, true)
}
sandbox[`$${item}`] = value
}
}
const defaultAllowBuiltInDep = [
'assert',
'buffer',
'crypto',
'events',
'http',
'https',
'net',
'path',
'querystring',
'timers',
'tls',
'url',
'zlib'
]
const builtinDeps = process.env.TOOL_FUNCTION_BUILTIN_DEP
? defaultAllowBuiltInDep.concat(process.env.TOOL_FUNCTION_BUILTIN_DEP.split(','))
: defaultAllowBuiltInDep

View File

@ -0,0 +1,40 @@
import { INode, INodeParams } from '../../../src/Interface'
class StickyNote implements INode {
    label: string
    name: string
    version: number
    description: string
    type: string
    icon: string
    category: string
    baseClasses: string[]
    inputs: INodeParams[]

    constructor() {
        // Identity of the node as shown in the canvas palette
        this.label = 'Sticky Note'
        this.name = 'stickyNote'
        this.version = 1.0
        this.type = 'StickyNote'
        this.icon = 'stickyNote.svg'
        this.category = 'Utilities'
        this.description = 'Add a sticky note'
        // Single free-form text field holding the note body
        const noteField: INodeParams = {
            label: '',
            name: 'note',
            type: 'string',
            rows: 1,
            placeholder: 'Type something here',
            optional: true
        }
        this.inputs = [noteField]
        this.baseClasses = [this.type]
    }

    // Sticky notes carry no runtime behaviour; init simply hands back a fresh instance
    async init(): Promise<any> {
        return new StickyNote()
    }
}
module.exports = { nodeClass: StickyNote }

View File

@ -0,0 +1,5 @@
<svg xmlns="http://www.w3.org/2000/svg" width="24" height="24" viewBox="0 0 24 24" fill="none" stroke="currentColor"
stroke-width="2" stroke-linecap="round" stroke-linejoin="round">
<path d="M15.5 3H5a2 2 0 0 0-2 2v14c0 1.1.9 2 2 2h14a2 2 0 0 0 2-2V8.5L15.5 3Z"/>
<path d="M15 3v6h6"/>
</svg>

After

Width:  |  Height:  |  Size: 305 B

View File

@ -4,6 +4,7 @@ import { Document } from 'langchain/document'
import { ICommonObject, INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface'
import { getBaseClasses, getCredentialData } from '../../../src/utils'
import { AstraDBVectorStore, AstraLibArgs } from '@langchain/community/vectorstores/astradb'
import { addMMRInputParams, resolveVectorStoreOrRetriever } from '../VectorStoreUtils'
class Astra_VectorStores implements INode {
label: string
@ -26,7 +27,7 @@ class Astra_VectorStores implements INode {
this.type = 'Astra'
this.icon = 'astra.svg'
this.category = 'Vector Stores'
this.description = `Upsert embedded data and perform similarity search upon query using DataStax Astra DB, a serverless vector database thats perfect for managing mission-critical AI workloads`
this.description = `Upsert embedded data and perform similarity or mmr search upon query using DataStax Astra DB, a serverless vector database thats perfect for managing mission-critical AI workloads`
this.baseClasses = [this.type, 'VectorStoreRetriever', 'BaseRetriever']
this.badge = 'NEW'
this.credential = {
@ -74,6 +75,7 @@ class Astra_VectorStores implements INode {
optional: true
}
]
addMMRInputParams(this.inputs)
this.outputs = [
{
label: 'Astra Retriever',
@ -139,9 +141,6 @@ class Astra_VectorStores implements INode {
const embeddings = nodeData.inputs?.embeddings as Embeddings
const vectorDimension = nodeData.inputs?.vectorDimension as number
const similarityMetric = nodeData.inputs?.similarityMetric as 'cosine' | 'euclidean' | 'dot_product' | undefined
const output = nodeData.outputs?.output as string
const topK = nodeData.inputs?.topK as string
const k = topK ? parseFloat(topK) : 4
const credentialData = await getCredentialData(nodeData.credential ?? '', options)
@ -176,14 +175,7 @@ class Astra_VectorStores implements INode {
const vectorStore = await AstraDBVectorStore.fromExistingIndex(embeddings, astraConfig)
if (output === 'retriever') {
const retriever = vectorStore.asRetriever(k)
return retriever
} else if (output === 'vectorStore') {
;(vectorStore as any).k = k
return vectorStore
}
return vectorStore
return resolveVectorStoreOrRetriever(nodeData, vectorStore)
}
}

View File

@ -5,6 +5,7 @@ import { Embeddings } from 'langchain/embeddings/base'
import { Document } from 'langchain/document'
import { ICommonObject, INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface'
import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
import { addMMRInputParams, resolveVectorStoreOrRetriever } from '../VectorStoreUtils'
class MongoDBAtlas_VectorStores implements INode {
label: string
@ -24,7 +25,7 @@ class MongoDBAtlas_VectorStores implements INode {
this.label = 'MongoDB Atlas'
this.name = 'mongoDBAtlas'
this.version = 1.0
this.description = `Upsert embedded data and perform similarity search upon query using MongoDB Atlas, a managed cloud mongodb database`
this.description = `Upsert embedded data and perform similarity or mmr search upon query using MongoDB Atlas, a managed cloud mongodb database`
this.type = 'MongoDB Atlas'
this.icon = 'mongodb.svg'
this.category = 'Vector Stores'
@ -95,6 +96,7 @@ class MongoDBAtlas_VectorStores implements INode {
optional: true
}
]
addMMRInputParams(this.inputs)
this.outputs = [
{
label: 'MongoDB Retriever',
@ -162,9 +164,6 @@ class MongoDBAtlas_VectorStores implements INode {
let textKey = nodeData.inputs?.textKey as string
let embeddingKey = nodeData.inputs?.embeddingKey as string
const embeddings = nodeData.inputs?.embeddings as Embeddings
const topK = nodeData.inputs?.topK as string
const k = topK ? parseFloat(topK) : 4
const output = nodeData.outputs?.output as string
let mongoDBConnectUrl = getCredentialParam('mongoDBConnectUrl', credentialData, nodeData)
@ -181,13 +180,7 @@ class MongoDBAtlas_VectorStores implements INode {
embeddingKey
})
if (output === 'retriever') {
return vectorStore.asRetriever(k)
} else if (output === 'vectorStore') {
;(vectorStore as any).k = k
return vectorStore
}
return vectorStore
return resolveVectorStoreOrRetriever(nodeData, vectorStore)
}
}

View File

@ -5,6 +5,7 @@ import { Embeddings } from 'langchain/embeddings/base'
import { Document } from 'langchain/document'
import { ICommonObject, INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface'
import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
import { addMMRInputParams, resolveVectorStoreOrRetriever } from '../VectorStoreUtils'
class Pinecone_VectorStores implements INode {
label: string
@ -23,11 +24,11 @@ class Pinecone_VectorStores implements INode {
constructor() {
this.label = 'Pinecone'
this.name = 'pinecone'
this.version = 1.0
this.version = 2.0
this.type = 'Pinecone'
this.icon = 'pinecone.svg'
this.category = 'Vector Stores'
this.description = `Upsert embedded data and perform similarity search upon query using Pinecone, a leading fully managed hosted vector database`
this.description = `Upsert embedded data and perform similarity or mmr search using Pinecone, a leading fully managed hosted vector database`
this.baseClasses = [this.type, 'VectorStoreRetriever', 'BaseRetriever']
this.badge = 'NEW'
this.credential = {
@ -79,6 +80,7 @@ class Pinecone_VectorStores implements INode {
optional: true
}
]
addMMRInputParams(this.inputs)
this.outputs = [
{
label: 'Pinecone Retriever',
@ -103,11 +105,9 @@ class Pinecone_VectorStores implements INode {
const credentialData = await getCredentialData(nodeData.credential ?? '', options)
const pineconeApiKey = getCredentialParam('pineconeApiKey', credentialData, nodeData)
const pineconeEnv = getCredentialParam('pineconeEnv', credentialData, nodeData)
const client = new Pinecone({
apiKey: pineconeApiKey,
environment: pineconeEnv
apiKey: pineconeApiKey
})
const pineconeIndex = client.Index(index)
@ -140,17 +140,12 @@ class Pinecone_VectorStores implements INode {
const pineconeMetadataFilter = nodeData.inputs?.pineconeMetadataFilter
const docs = nodeData.inputs?.document as Document[]
const embeddings = nodeData.inputs?.embeddings as Embeddings
const output = nodeData.outputs?.output as string
const topK = nodeData.inputs?.topK as string
const k = topK ? parseFloat(topK) : 4
const credentialData = await getCredentialData(nodeData.credential ?? '', options)
const pineconeApiKey = getCredentialParam('pineconeApiKey', credentialData, nodeData)
const pineconeEnv = getCredentialParam('pineconeEnv', credentialData, nodeData)
const client = new Pinecone({
apiKey: pineconeApiKey,
environment: pineconeEnv
apiKey: pineconeApiKey
})
const pineconeIndex = client.Index(index)
@ -175,14 +170,7 @@ class Pinecone_VectorStores implements INode {
const vectorStore = await PineconeStore.fromExistingIndex(embeddings, obj)
if (output === 'retriever') {
const retriever = vectorStore.asRetriever(k)
return retriever
} else if (output === 'vectorStore') {
;(vectorStore as any).k = k
return vectorStore
}
return vectorStore
return resolveVectorStoreOrRetriever(nodeData, vectorStore)
}
}

View File

@ -95,11 +95,9 @@ class Pinecone_Existing_VectorStores implements INode {
const credentialData = await getCredentialData(nodeData.credential ?? '', options)
const pineconeApiKey = getCredentialParam('pineconeApiKey', credentialData, nodeData)
const pineconeEnv = getCredentialParam('pineconeEnv', credentialData, nodeData)
const client = new Pinecone({
apiKey: pineconeApiKey,
environment: pineconeEnv
apiKey: pineconeApiKey
})
const pineconeIndex = client.Index(index)

View File

@ -96,11 +96,9 @@ class PineconeUpsert_VectorStores implements INode {
const credentialData = await getCredentialData(nodeData.credential ?? '', options)
const pineconeApiKey = getCredentialParam('pineconeApiKey', credentialData, nodeData)
const pineconeEnv = getCredentialParam('pineconeEnv', credentialData, nodeData)
const client = new Pinecone({
apiKey: pineconeApiKey,
environment: pineconeEnv
apiKey: pineconeApiKey
})
const pineconeIndex = client.Index(index)

View File

@ -194,7 +194,7 @@ class Qdrant_VectorStores implements INode {
const qdrantVectorDimension = nodeData.inputs?.qdrantVectorDimension
const output = nodeData.outputs?.output as string
const topK = nodeData.inputs?.topK as string
let queryFilter = nodeData.inputs?.queryFilter
let queryFilter = nodeData.inputs?.qdrantFilter
const k = topK ? parseFloat(topK) : 4

View File

@ -135,7 +135,7 @@ class Qdrant_Existing_VectorStores implements INode {
const qdrantVectorDimension = nodeData.inputs?.qdrantVectorDimension
const output = nodeData.outputs?.output as string
const topK = nodeData.inputs?.topK as string
let queryFilter = nodeData.inputs?.queryFilter
let queryFilter = nodeData.inputs?.qdrantFilter
const k = topK ? parseFloat(topK) : 4

View File

@ -1,5 +1,5 @@
import { flatten } from 'lodash'
import { createClient, SearchOptions } from 'redis'
import { flatten, isEqual } from 'lodash'
import { createClient, SearchOptions, RedisClientOptions } from 'redis'
import { Embeddings } from 'langchain/embeddings/base'
import { RedisVectorStore, RedisVectorStoreConfig } from 'langchain/vectorstores/redis'
import { Document } from 'langchain/document'
@ -7,6 +7,27 @@ import { ICommonObject, INode, INodeData, INodeOutputsValue, INodeParams } from
import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
import { escapeAllStrings, escapeSpecialChars, unEscapeSpecialChars } from './utils'
let redisClientSingleton: ReturnType<typeof createClient>
let redisClientOption: RedisClientOptions

/**
 * Lazily creates (and caches) a shared Redis client. When the connection
 * options change between calls, the stale client is closed and replaced.
 */
const getRedisClient = async (option: RedisClientOptions) => {
    if (!redisClientSingleton) {
        // no client exists yet - create and connect one
        redisClientSingleton = createClient(option)
        await redisClientSingleton.connect()
        redisClientOption = option
    } else if (!isEqual(option, redisClientOption)) {
        // options changed - close the old client before replacing it
        // (awaited: previously quit() was a floating promise, leaking the old connection)
        await redisClientSingleton.quit()
        redisClientSingleton = createClient(option)
        await redisClientSingleton.connect()
        redisClientOption = option
    }
    return redisClientSingleton
}
class Redis_VectorStores implements INode {
label: string
name: string
@ -149,8 +170,7 @@ class Redis_VectorStores implements INode {
}
try {
const redisClient = createClient({ url: redisUrl })
await redisClient.connect()
const redisClient = await getRedisClient({ url: redisUrl })
const storeConfig: RedisVectorStoreConfig = {
redisClient: redisClient,
@ -210,8 +230,7 @@ class Redis_VectorStores implements INode {
redisUrl = 'redis://' + username + ':' + password + '@' + host + ':' + portStr
}
const redisClient = createClient({ url: redisUrl })
await redisClient.connect()
const redisClient = await getRedisClient({ url: redisUrl })
const storeConfig: RedisVectorStoreConfig = {
redisClient: redisClient,

View File

@ -7,13 +7,34 @@ import {
INodeOutputsValue,
INodeParams
} from '../../../src'
import { Embeddings } from 'langchain/embeddings/base'
import { VectorStore } from 'langchain/vectorstores/base'
import { Document } from 'langchain/document'
import { createClient, SearchOptions } from 'redis'
import { createClient, SearchOptions, RedisClientOptions } from 'redis'
import { RedisVectorStore } from 'langchain/vectorstores/redis'
import { escapeSpecialChars, unEscapeSpecialChars } from './utils'
import { isEqual } from 'lodash'
let redisClientSingleton: ReturnType<typeof createClient>
let redisClientOption: RedisClientOptions

/**
 * Lazily creates (and caches) a shared Redis client. When the connection
 * options change between calls, the stale client is closed and replaced.
 */
const getRedisClient = async (option: RedisClientOptions) => {
    if (!redisClientSingleton) {
        // no client exists yet - create and connect one
        redisClientSingleton = createClient(option)
        await redisClientSingleton.connect()
        redisClientOption = option
    } else if (!isEqual(option, redisClientOption)) {
        // options changed - close the old client before replacing it
        // (awaited: previously quit() was a floating promise, leaking the old connection)
        await redisClientSingleton.quit()
        redisClientSingleton = createClient(option)
        await redisClientSingleton.connect()
        redisClientOption = option
    }
    return redisClientSingleton
}
export abstract class RedisSearchBase {
label: string
@ -141,8 +162,7 @@ export abstract class RedisSearchBase {
redisUrl = 'redis://' + username + ':' + password + '@' + host + ':' + portStr
}
this.redisClient = createClient({ url: redisUrl })
await this.redisClient.connect()
this.redisClient = await getRedisClient({ url: redisUrl })
const vectorStore = await this.constructVectorStore(embeddings, indexName, replaceIndex, docs)
if (!contentKey || contentKey === '') contentKey = 'content'

View File

@ -3,7 +3,6 @@ import { Embeddings } from 'langchain/embeddings/base'
import { VectorStore } from 'langchain/vectorstores/base'
import { RedisVectorStore, RedisVectorStoreConfig } from 'langchain/vectorstores/redis'
import { Document } from 'langchain/document'
import { RedisSearchBase } from './RedisSearchBase'
class RedisExisting_VectorStores extends RedisSearchBase implements INode {

View File

@ -1,7 +1,6 @@
import { ICommonObject, INode, INodeData } from '../../../src/Interface'
import { Embeddings } from 'langchain/embeddings/base'
import { Document } from 'langchain/document'
import { flatten } from 'lodash'
import { RedisSearchBase } from './RedisSearchBase'
import { VectorStore } from 'langchain/vectorstores/base'

View File

@ -5,6 +5,7 @@ import { Embeddings } from 'langchain/embeddings/base'
import { ICommonObject, INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface'
import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
import { SupabaseLibArgs, SupabaseVectorStore } from 'langchain/vectorstores/supabase'
import { addMMRInputParams, resolveVectorStoreOrRetriever } from '../VectorStoreUtils'
class Supabase_VectorStores implements INode {
label: string
@ -23,11 +24,11 @@ class Supabase_VectorStores implements INode {
constructor() {
this.label = 'Supabase'
this.name = 'supabase'
this.version = 1.0
this.version = 2.0
this.type = 'Supabase'
this.icon = 'supabase.svg'
this.category = 'Vector Stores'
this.description = 'Upsert embedded data and perform similarity search upon query using Supabase via pgvector extension'
this.description = 'Upsert embedded data and perform similarity or mmr search upon query using Supabase via pgvector extension'
this.baseClasses = [this.type, 'VectorStoreRetriever', 'BaseRetriever']
this.badge = 'NEW'
this.credential = {
@ -81,6 +82,7 @@ class Supabase_VectorStores implements INode {
optional: true
}
]
addMMRInputParams(this.inputs)
this.outputs = [
{
label: 'Supabase Retriever',
@ -135,9 +137,6 @@ class Supabase_VectorStores implements INode {
const queryName = nodeData.inputs?.queryName as string
const embeddings = nodeData.inputs?.embeddings as Embeddings
const supabaseMetadataFilter = nodeData.inputs?.supabaseMetadataFilter
const output = nodeData.outputs?.output as string
const topK = nodeData.inputs?.topK as string
const k = topK ? parseFloat(topK) : 4
const credentialData = await getCredentialData(nodeData.credential ?? '', options)
const supabaseApiKey = getCredentialParam('supabaseApiKey', credentialData, nodeData)
@ -157,14 +156,7 @@ class Supabase_VectorStores implements INode {
const vectorStore = await SupabaseVectorStore.fromExistingIndex(embeddings, obj)
if (output === 'retriever') {
const retriever = vectorStore.asRetriever(k)
return retriever
} else if (output === 'vectorStore') {
;(vectorStore as any).k = k
return vectorStore
}
return vectorStore
return resolveVectorStoreOrRetriever(nodeData, vectorStore)
}
}

View File

@ -0,0 +1,75 @@
import { INodeData } from '../../src'
/**
 * Resolve what a vector-store node should hand to downstream nodes based on the
 * selected output anchor: either a retriever wrapping the store, or the store itself.
 *
 * @param nodeData node configuration; reads outputs.output plus the
 *                 topK/searchType/fetchK/lambda inputs added by addMMRInputParams
 * @param vectorStore the already-constructed vector store instance
 * @returns a retriever (similarity or MMR) when output === 'retriever',
 *          otherwise the vector store with its `k` set
 */
export const resolveVectorStoreOrRetriever = (nodeData: INodeData, vectorStore: any) => {
    const output = nodeData.outputs?.output as string
    // BUGFIX: searchType is registered by addMMRInputParams as an *input* param,
    // so it must be read from nodeData.inputs (reading outputs always yielded
    // undefined and silently disabled MMR search)
    const searchType = nodeData.inputs?.searchType as string
    const topK = nodeData.inputs?.topK as string
    const k = topK ? parseFloat(topK) : 4

    if (output === 'retriever') {
        if ('mmr' === searchType) {
            const fetchK = nodeData.inputs?.fetchK as string
            const lambda = nodeData.inputs?.lambda as string
            const f = fetchK ? parseInt(fetchK) : 20
            const l = lambda ? parseFloat(lambda) : 0.5
            return vectorStore.asRetriever({
                searchType: 'mmr',
                k: k,
                searchKwargs: {
                    fetchK: f,
                    lambda: l
                }
            })
        } else {
            // "searchType" is "similarity" (or unset — similarity is the default)
            return vectorStore.asRetriever(k)
        }
    } else if (output === 'vectorStore') {
        ;(vectorStore as any).k = k
        return vectorStore
    }
    // defensive fallback: unknown output anchor — return the raw store rather than undefined
    return vectorStore
}
/**
 * Append the shared MMR-related input parameters (search type, fetch K, lambda)
 * to a vector-store node's input list. Mutates `inputs` in place.
 *
 * @param inputs the node's input parameter array to extend
 */
export const addMMRInputParams = (inputs: any[]) => {
    const searchTypeParam = {
        label: 'Search Type',
        name: 'searchType',
        type: 'options',
        default: 'similarity',
        options: [
            {
                label: 'Similarity',
                name: 'similarity'
            },
            {
                label: 'Max Marginal Relevance',
                name: 'mmr'
            }
        ],
        additionalParams: true,
        optional: true
    }
    const fetchKParam = {
        label: 'Fetch K (for MMR Search)',
        name: 'fetchK',
        description: 'Number of initial documents to fetch for MMR reranking. Default to 20. Used only when the search type is MMR',
        placeholder: '20',
        type: 'number',
        additionalParams: true,
        optional: true
    }
    const lambdaParam = {
        label: 'Lambda (for MMR Search)',
        name: 'lambda',
        description:
            'Number between 0 and 1 that determines the degree of diversity among the results, where 0 corresponds to maximum diversity and 1 to minimum diversity. Used only when the search type is MMR',
        placeholder: '0.5',
        type: 'number',
        additionalParams: true,
        optional: true
    }
    inputs.push(searchTypeParam, fetchKParam, lambdaParam)
}

View File

@ -5,6 +5,7 @@ import { Document } from 'langchain/document'
import { Embeddings } from 'langchain/embeddings/base'
import { ICommonObject, INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface'
import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
import { addMMRInputParams, resolveVectorStoreOrRetriever } from '../VectorStoreUtils'
class Weaviate_VectorStores implements INode {
label: string
@ -23,12 +24,12 @@ class Weaviate_VectorStores implements INode {
constructor() {
this.label = 'Weaviate'
this.name = 'weaviate'
this.version = 1.0
this.version = 2.0
this.type = 'Weaviate'
this.icon = 'weaviate.png'
this.category = 'Vector Stores'
this.description =
'Upsert embedded data and perform similarity search upon query using Weaviate, a scalable open-source vector database'
'Upsert embedded data and perform similarity or mmr search using Weaviate, a scalable open-source vector database'
this.baseClasses = [this.type, 'VectorStoreRetriever', 'BaseRetriever']
this.badge = 'NEW'
this.credential = {
@ -107,6 +108,7 @@ class Weaviate_VectorStores implements INode {
optional: true
}
]
addMMRInputParams(this.inputs)
this.outputs = [
{
label: 'Weaviate Retriever',
@ -174,9 +176,6 @@ class Weaviate_VectorStores implements INode {
const weaviateTextKey = nodeData.inputs?.weaviateTextKey as string
const weaviateMetadataKeys = nodeData.inputs?.weaviateMetadataKeys as string
const embeddings = nodeData.inputs?.embeddings as Embeddings
const output = nodeData.outputs?.output as string
const topK = nodeData.inputs?.topK as string
const k = topK ? parseFloat(topK) : 4
const credentialData = await getCredentialData(nodeData.credential ?? '', options)
const weaviateApiKey = getCredentialParam('weaviateApiKey', credentialData, nodeData)
@ -199,14 +198,7 @@ class Weaviate_VectorStores implements INode {
const vectorStore = await WeaviateStore.fromExistingIndex(embeddings, obj)
if (output === 'retriever') {
const retriever = vectorStore.asRetriever(k)
return retriever
} else if (output === 'vectorStore') {
;(vectorStore as any).k = k
return vectorStore
}
return vectorStore
return resolveVectorStoreOrRetriever(nodeData, vectorStore)
}
}

View File

@ -5,6 +5,7 @@ import { Embeddings } from 'langchain/embeddings/base'
import { Document } from 'langchain/document'
import { ICommonObject, INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface'
import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
import { addMMRInputParams, resolveVectorStoreOrRetriever } from '../VectorStoreUtils'
class Zep_VectorStores implements INode {
label: string
@ -23,12 +24,12 @@ class Zep_VectorStores implements INode {
constructor() {
this.label = 'Zep'
this.name = 'zep'
this.version = 1.0
this.version = 2.0
this.type = 'Zep'
this.icon = 'zep.svg'
this.category = 'Vector Stores'
this.description =
'Upsert embedded data and perform similarity search upon query using Zep, a fast and scalable building block for LLM apps'
'Upsert embedded data and perform similarity or mmr search upon query using Zep, a fast and scalable building block for LLM apps'
this.baseClasses = [this.type, 'VectorStoreRetriever', 'BaseRetriever']
this.badge = 'NEW'
this.credential = {
@ -88,6 +89,7 @@ class Zep_VectorStores implements INode {
optional: true
}
]
addMMRInputParams(this.inputs)
this.outputs = [
{
label: 'Zep Retriever',
@ -144,9 +146,6 @@ class Zep_VectorStores implements INode {
const zepMetadataFilter = nodeData.inputs?.zepMetadataFilter
const dimension = nodeData.inputs?.dimension as number
const embeddings = nodeData.inputs?.embeddings as Embeddings
const output = nodeData.outputs?.output as string
const topK = nodeData.inputs?.topK as string
const k = topK ? parseFloat(topK) : 4
const credentialData = await getCredentialData(nodeData.credential ?? '', options)
const apiKey = getCredentialParam('apiKey', credentialData, nodeData)
@ -165,14 +164,7 @@ class Zep_VectorStores implements INode {
const vectorStore = await ZepExistingVS.fromExistingIndex(embeddings, zepConfig)
if (output === 'retriever') {
const retriever = vectorStore.asRetriever(k)
return retriever
} else if (output === 'vectorStore') {
;(vectorStore as any).k = k
return vectorStore
}
return vectorStore
return resolveVectorStoreOrRetriever(nodeData, vectorStore)
}
}
@ -210,7 +202,7 @@ class ZepExistingVS extends ZepVectorStore {
this.args = args
}
async initalizeCollection(args: IZepConfig & Partial<ZepFilter>) {
async initializeCollection(args: IZepConfig & Partial<ZepFilter>) {
this.client = await ZepClient.init(args.apiUrl, args.apiKey)
try {
this.collection = await this.client.document.getCollection(args.collectionName)
@ -259,7 +251,7 @@ class ZepExistingVS extends ZepVectorStore {
const newfilter = {
where: { and: ANDFilters }
}
await this.initalizeCollection(this.args!).catch((err) => {
await this.initializeCollection(this.args!).catch((err) => {
console.error('Error initializing collection:', err)
throw err
})

View File

@ -1,6 +1,6 @@
{
"name": "flowise-components",
"version": "1.5.0",
"version": "1.5.3",
"description": "Flowiseai Components",
"main": "dist/src/index",
"types": "dist/src/index.d.ts",
@ -26,18 +26,19 @@
"@gomomento/sdk": "^1.51.1",
"@gomomento/sdk-core": "^1.51.1",
"@google-ai/generativelanguage": "^0.2.1",
"@google/generative-ai": "^0.1.3",
"@huggingface/inference": "^2.6.1",
"@langchain/community": "^0.0.16",
"@langchain/google-genai": "^0.0.6",
"@langchain/mistralai": "^0.0.6",
"@notionhq/client": "^2.2.8",
"@opensearch-project/opensearch": "^1.2.0",
"@pinecone-database/pinecone": "^1.1.1",
"@pinecone-database/pinecone": "^2.0.1",
"@qdrant/js-client-rest": "^1.2.2",
"@supabase/supabase-js": "^2.29.0",
"@types/js-yaml": "^4.0.5",
"@types/jsdom": "^21.1.1",
"@upstash/redis": "^1.22.1",
"@upstash/redis": "1.22.1",
"@zilliz/milvus2-sdk-node": "^2.2.24",
"apify-client": "^2.7.1",
"assemblyai": "^4.2.2",

View File

@ -29,6 +29,12 @@ export interface ICommonObject {
[key: string]: any | CommonType | ICommonObject | CommonType[] | ICommonObject[]
}
export interface IVariable {
name: string
value: string
type: string
}
export type IDatabaseEntity = {
[key: string]: any
}
@ -90,7 +96,7 @@ export interface INodeProperties {
type: string
icon: string
version: number
category: string
category: string // TODO: use enum instead of string
baseClasses: string[]
description?: string
filePath?: string
@ -139,6 +145,18 @@ export interface IUsedTool {
toolOutput: string | object
}
export interface IFileUpload {
data?: string
type: string
name: string
mime: string
}
export interface IMultiModalOption {
image?: Record<string, any>
audio?: Record<string, any>
}
/**
* Classes
*/
@ -234,10 +252,3 @@ export abstract class FlowiseSummaryMemory extends ConversationSummaryMemory imp
abstract addChatMessages(msgArray: { text: string; type: MessageType }[], overrideSessionId?: string): Promise<void>
abstract clearChatMessages(overrideSessionId?: string): Promise<void>
}
export interface IFileUpload {
data: string
type: string
name: string
mime: string
}

View File

@ -1,54 +1,57 @@
import { ICommonObject, INodeData } from './Interface'
import { ICommonObject, IFileUpload, IMultiModalOption, INodeData } from './Interface'
import { BaseChatModel } from 'langchain/chat_models/base'
import { ChatOpenAI } from 'langchain/chat_models/openai'
import { ChatOpenAI as LangchainChatOpenAI } from 'langchain/chat_models/openai'
import path from 'path'
import { getUserHome } from './utils'
import { getStoragePath } from './utils'
import fs from 'fs'
import { MessageContent } from '@langchain/core/dist/messages'
import { FlowiseChatOpenAI } from '../nodes/chatmodels/ChatOpenAI/FlowiseChatOpenAI'
import { ChatOpenAI } from '../nodes/chatmodels/ChatOpenAI/FlowiseChatOpenAI'
export const injectChainNodeData = (nodeData: INodeData, options: ICommonObject) => {
let model = nodeData.inputs?.model as BaseChatModel
if (model instanceof FlowiseChatOpenAI) {
if (model instanceof ChatOpenAI) {
// TODO: this should not be static, need to figure out how to pass the nodeData and options to the invoke method
FlowiseChatOpenAI.chainNodeOptions = options
FlowiseChatOpenAI.chainNodeData = nodeData
ChatOpenAI.chainNodeOptions = options
ChatOpenAI.chainNodeData = nodeData
}
}
export const addImagesToMessages = (nodeData: INodeData, options: ICommonObject): MessageContent => {
export const addImagesToMessages = (nodeData: INodeData, options: ICommonObject, multiModalOption?: IMultiModalOption): MessageContent => {
const imageContent: MessageContent = []
let model = nodeData.inputs?.model as BaseChatModel
if (model instanceof ChatOpenAI && (model as any).multiModal) {
if (options?.uploads && options?.uploads.length > 0) {
let model = nodeData.inputs?.model
if (model instanceof LangchainChatOpenAI && multiModalOption) {
// Image Uploaded
if (multiModalOption.image && multiModalOption.image.allowImageUploads && options?.uploads && options?.uploads.length > 0) {
const imageUploads = getImageUploads(options.uploads)
for (const upload of imageUploads) {
let bf = upload.data
if (upload.type == 'stored-file') {
const filePath = path.join(getUserHome(), '.flowise', 'gptvision', upload.data, upload.name)
const filePath = path.join(getStoragePath(), options.chatflowid, options.chatId, upload.name)
// as the image is stored in the server, read the file and convert it to base64
const contents = fs.readFileSync(filePath)
bf = 'data:' + upload.mime + ';base64,' + contents.toString('base64')
imageContent.push({
type: 'image_url',
image_url: {
url: bf,
detail: multiModalOption.image.imageResolution ?? 'low'
}
})
}
imageContent.push({
type: 'image_url',
image_url: {
url: bf,
detail: 'low'
}
})
}
}
}
return imageContent
}
export const getAudioUploads = (uploads: any[]) => {
return uploads.filter((url: any) => url.mime.startsWith('audio/'))
export const getAudioUploads = (uploads: IFileUpload[]) => {
return uploads.filter((upload: IFileUpload) => upload.mime.startsWith('audio/'))
}
export const getImageUploads = (uploads: any[]) => {
return uploads.filter((url: any) => url.mime.startsWith('image/'))
export const getImageUploads = (uploads: IFileUpload[]) => {
return uploads.filter((upload: IFileUpload) => upload.mime.startsWith('image/'))
}

View File

@ -1,5 +1,6 @@
import { flatten } from 'lodash'
import { AgentExecutorInput, BaseSingleActionAgent, BaseMultiActionAgent, RunnableAgent, StoppingMethod } from 'langchain/agents'
import { ChainValues, AgentStep, AgentFinish, AgentAction, BaseMessage, FunctionMessage, AIMessage } from 'langchain/schema'
import { ChainValues, AgentStep, AgentAction, BaseMessage, FunctionMessage, AIMessage } from 'langchain/schema'
import { OutputParserException } from 'langchain/schema/output_parser'
import { CallbackManager, CallbackManagerForChainRun, Callbacks } from 'langchain/callbacks'
import { ToolInputParsingException, Tool } from '@langchain/core/tools'
@ -7,6 +8,11 @@ import { Runnable } from 'langchain/schema/runnable'
import { BaseChain, SerializedLLMChain } from 'langchain/chains'
import { Serializable } from '@langchain/core/load/serializable'
export const SOURCE_DOCUMENTS_PREFIX = '\n\n----FLOWISE_SOURCE_DOCUMENTS----\n\n'
type AgentFinish = {
returnValues: Record<string, any>
log: string
}
type AgentExecutorOutput = ChainValues
interface AgentExecutorIteratorInput {
@ -315,10 +321,12 @@ export class AgentExecutor extends BaseChain<ChainValues, AgentExecutorOutput> {
const steps: AgentStep[] = []
let iterations = 0
let sourceDocuments: Array<Document> = []
const getOutput = async (finishStep: AgentFinish): Promise<AgentExecutorOutput> => {
const { returnValues } = finishStep
const additional = await this.agent.prepareForOutput(returnValues, steps)
if (sourceDocuments.length) additional.sourceDocuments = flatten(sourceDocuments)
if (this.returnIntermediateSteps) {
return { ...returnValues, intermediateSteps: steps, ...additional }
@ -406,6 +414,17 @@ export class AgentExecutor extends BaseChain<ChainValues, AgentExecutorOutput> {
return { action, observation: observation ?? '' }
}
}
if (observation?.includes(SOURCE_DOCUMENTS_PREFIX)) {
const observationArray = observation.split(SOURCE_DOCUMENTS_PREFIX)
observation = observationArray[0]
const docs = observationArray[1]
try {
const parsedDocs = JSON.parse(docs)
sourceDocuments.push(parsedDocs)
} catch (e) {
console.error('Error parsing source documents from tool')
}
}
return { action, observation: observation ?? '' }
})
)
@ -500,6 +519,10 @@ export class AgentExecutor extends BaseChain<ChainValues, AgentExecutorOutput> {
chatId: this.chatId,
input: this.input
})
if (observation?.includes(SOURCE_DOCUMENTS_PREFIX)) {
const observationArray = observation.split(SOURCE_DOCUMENTS_PREFIX)
observation = observationArray[0]
}
} catch (e) {
if (e instanceof ToolInputParsingException) {
if (this.handleParsingErrors === true) {

View File

@ -1,17 +1,16 @@
import { ICommonObject } from './Interface'
import { getCredentialData, getUserHome } from './utils'
import { ICommonObject, IFileUpload } from './Interface'
import { getCredentialData, getStoragePath } from './utils'
import { type ClientOptions, OpenAIClient } from '@langchain/openai'
import fs from 'fs'
import path from 'path'
import { AssemblyAI } from 'assemblyai'
export const convertSpeechToText = async (upload: any, speechToTextConfig: any, options: ICommonObject) => {
export const convertSpeechToText = async (upload: IFileUpload, speechToTextConfig: ICommonObject, options: ICommonObject) => {
if (speechToTextConfig) {
const credentialId = speechToTextConfig.credentialId as string
const credentialData = await getCredentialData(credentialId ?? '', options)
const filePath = path.join(getUserHome(), '.flowise', 'gptvision', upload.data, upload.name)
const filePath = path.join(getStoragePath(), options.chatflowid, options.chatId, upload.name)
// the uploaded audio file is stored on the server; stream it from disk for transcription
const audio_file = fs.createReadStream(filePath)
if (speechToTextConfig.name === 'openAIWhisper') {

View File

@ -5,7 +5,7 @@ import * as path from 'path'
import { JSDOM } from 'jsdom'
import { z } from 'zod'
import { DataSource } from 'typeorm'
import { ICommonObject, IDatabaseEntity, IMessage, INodeData } from './Interface'
import { ICommonObject, IDatabaseEntity, IMessage, INodeData, IVariable } from './Interface'
import { AES, enc } from 'crypto-js'
import { ChatMessageHistory } from 'langchain/memory'
import { AIMessage, HumanMessage, BaseMessage } from 'langchain/schema'
@ -70,6 +70,22 @@ export const availableDependencies = [
'weaviate-ts-client'
]
// Node.js built-in modules that sandboxed custom code is allowed to require() by default
export const defaultAllowBuiltInDep = [
    'assert',
    'buffer',
    'crypto',
    'events',
    'http',
    'https',
    'net',
    'path',
    'querystring',
    'timers',
    'tls',
    'url',
    'zlib'
]
/**
* Get base classes of components
*
@ -673,3 +689,79 @@ export const convertBaseMessagetoIMessage = (messages: BaseMessage[]): IMessage[
}
return formatmessages
}
/**
 * Convert a MultiOptions JSON string (e.g. '["a","b"]') to a string array.
 * Returns an empty array when the input is not valid JSON or does not
 * represent an array.
 * @param {string} inputString
 * @returns {string[]}
 */
export const convertMultiOptionsToStringArray = (inputString: string): string[] => {
    try {
        const parsed = JSON.parse(inputString)
        // JSON.parse can yield any JSON value (number, object, string, ...);
        // only a real array satisfies the declared string[] return type
        return Array.isArray(parsed) ? parsed : []
    } catch (e) {
        return []
    }
}
/**
 * Load all variables from the database and apply overrideConfig overrides.
 * Each property of nodeData.inputs.vars either replaces the matching stored
 * variable (forcing it to a static value) or is appended as a new static one.
 * @param {DataSource} appDataSource
 * @param {IDatabaseEntity} databaseEntities
 * @param {INodeData} nodeData
 */
export const getVars = async (appDataSource: DataSource, databaseEntities: IDatabaseEntity, nodeData: INodeData) => {
    const variables = ((await appDataSource.getRepository(databaseEntities['Variable']).find()) as IVariable[]) ?? []

    // nodeData.inputs.vars is an object of overrides from overrideConfig;
    // check each property and override the corresponding variable
    const overrides = nodeData?.inputs?.vars
    if (overrides) {
        for (const key of Object.getOwnPropertyNames(overrides)) {
            const existing = variables.find((variable) => variable.name === key)
            if (existing) {
                // even if the variable was defined as runtime, the static override wins
                existing.type = 'static'
                existing.value = overrides[key]
            } else {
                // not found locally in the db — add it to the variables as static
                variables.push({ name: key, type: 'static', value: overrides[key] })
            }
        }
    }
    return variables
}
/**
 * Build the variables object injected into the code sandbox.
 * Static variables keep their stored value; runtime variables are resolved
 * from process.env at call time (empty string when unset).
 * @param {IVariable[]} variables
 */
export const prepareSandboxVars = (variables: IVariable[]) => {
    const sandboxVars = {}
    if (!variables) return sandboxVars
    for (const variable of variables) {
        // 'runtime' variables read from the .env file / environment
        const resolved = variable.type === 'runtime' ? process.env[variable.name] ?? '' : variable.value
        // defineProperty (rather than plain assignment) always creates an own
        // property, even for names like '__proto__'
        Object.defineProperty(sandboxVars, variable.name, {
            enumerable: true,
            configurable: true,
            writable: true,
            value: resolved
        })
    }
    return sandboxVars
}
/**
 * Resolve the root directory used for blob/file storage.
 * Uses BLOB_STORAGE_PATH when set; otherwise falls back to ~/.flowise/storage.
 */
export const getStoragePath = (): string => {
    const configured = process.env.BLOB_STORAGE_PATH
    if (configured) {
        return path.join(configured)
    }
    return path.join(getUserHome(), '.flowise', 'storage')
}

View File

@ -1,4 +1,6 @@
PORT=3000
# CORS_ORIGINS="*"
# IFRAME_ORIGINS="*"
# DATABASE_PATH=/your_database_path/.flowise
# APIKEY_PATH=/your_api_key_path/.flowise
# SECRETKEY_PATH=/your_api_key_path/.flowise
@ -13,6 +15,7 @@ PORT=3000
# DATABASE_USER=""
# DATABASE_PASSWORD=""
# DATABASE_SSL=true
# DATABASE_SSL_KEY_BASE64=<Self signed certificate in BASE64>
# FLOWISE_USERNAME=user
# FLOWISE_PASSWORD=1234
@ -26,3 +29,5 @@ PORT=3000
# LANGCHAIN_ENDPOINT=https://api.smith.langchain.com
# LANGCHAIN_API_KEY=your_api_key
# LANGCHAIN_PROJECT=your_project
# DISABLE_FLOWISE_TELEMETRY=true

View File

@ -88,7 +88,7 @@
"data": {
"id": "chatOpenAI_1",
"label": "ChatOpenAI",
"version": 2,
"version": 3,
"name": "chatOpenAI",
"type": "ChatOpenAI",
"baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel"],
@ -111,6 +111,22 @@
"label": "gpt-4",
"name": "gpt-4"
},
{
"label": "gpt-4-turbo-preview",
"name": "gpt-4-turbo-preview"
},
{
"label": "gpt-4-0125-preview",
"name": "gpt-4-0125-preview"
},
{
"label": "gpt-4-1106-preview",
"name": "gpt-4-1106-preview"
},
{
"label": "gpt-4-vision-preview",
"name": "gpt-4-vision-preview"
},
{
"label": "gpt-4-0613",
"name": "gpt-4-0613"
@ -127,6 +143,10 @@
"label": "gpt-3.5-turbo",
"name": "gpt-3.5-turbo"
},
{
"label": "gpt-3.5-turbo-1106",
"name": "gpt-3.5-turbo-1106"
},
{
"label": "gpt-3.5-turbo-0613",
"name": "gpt-3.5-turbo-0613"
@ -407,7 +427,7 @@
"data": {
"id": "chatOpenAI_2",
"label": "ChatOpenAI",
"version": 2,
"version": 3,
"name": "chatOpenAI",
"type": "ChatOpenAI",
"baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel"],
@ -430,6 +450,22 @@
"label": "gpt-4",
"name": "gpt-4"
},
{
"label": "gpt-4-turbo-preview",
"name": "gpt-4-turbo-preview"
},
{
"label": "gpt-4-0125-preview",
"name": "gpt-4-0125-preview"
},
{
"label": "gpt-4-1106-preview",
"name": "gpt-4-1106-preview"
},
{
"label": "gpt-4-vision-preview",
"name": "gpt-4-vision-preview"
},
{
"label": "gpt-4-0613",
"name": "gpt-4-0613"
@ -446,6 +482,10 @@
"label": "gpt-3.5-turbo",
"name": "gpt-3.5-turbo"
},
{
"label": "gpt-3.5-turbo-1106",
"name": "gpt-3.5-turbo-1106"
},
{
"label": "gpt-3.5-turbo-0613",
"name": "gpt-3.5-turbo-0613"

View File

@ -396,7 +396,7 @@
"data": {
"id": "chatOpenAI_2",
"label": "ChatOpenAI",
"version": 2,
"version": 3,
"name": "chatOpenAI",
"type": "ChatOpenAI",
"baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel"],
@ -419,6 +419,22 @@
"label": "gpt-4",
"name": "gpt-4"
},
{
"label": "gpt-4-turbo-preview",
"name": "gpt-4-turbo-preview"
},
{
"label": "gpt-4-0125-preview",
"name": "gpt-4-0125-preview"
},
{
"label": "gpt-4-1106-preview",
"name": "gpt-4-1106-preview"
},
{
"label": "gpt-4-vision-preview",
"name": "gpt-4-vision-preview"
},
{
"label": "gpt-4-0613",
"name": "gpt-4-0613"
@ -435,6 +451,10 @@
"label": "gpt-3.5-turbo",
"name": "gpt-3.5-turbo"
},
{
"label": "gpt-3.5-turbo-1106",
"name": "gpt-3.5-turbo-1106"
},
{
"label": "gpt-3.5-turbo-0613",
"name": "gpt-3.5-turbo-0613"
@ -567,7 +587,7 @@
"data": {
"id": "chatOpenAI_1",
"label": "ChatOpenAI",
"version": 2,
"version": 3,
"name": "chatOpenAI",
"type": "ChatOpenAI",
"baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel"],
@ -590,6 +610,22 @@
"label": "gpt-4",
"name": "gpt-4"
},
{
"label": "gpt-4-turbo-preview",
"name": "gpt-4-turbo-preview"
},
{
"label": "gpt-4-0125-preview",
"name": "gpt-4-0125-preview"
},
{
"label": "gpt-4-1106-preview",
"name": "gpt-4-1106-preview"
},
{
"label": "gpt-4-vision-preview",
"name": "gpt-4-vision-preview"
},
{
"label": "gpt-4-0613",
"name": "gpt-4-0613"
@ -606,6 +642,10 @@
"label": "gpt-3.5-turbo",
"name": "gpt-3.5-turbo"
},
{
"label": "gpt-3.5-turbo-1106",
"name": "gpt-3.5-turbo-1106"
},
{
"label": "gpt-3.5-turbo-0613",
"name": "gpt-3.5-turbo-0613"
@ -738,7 +778,7 @@
"data": {
"id": "chatOpenAI_3",
"label": "ChatOpenAI",
"version": 2,
"version": 3,
"name": "chatOpenAI",
"type": "ChatOpenAI",
"baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel"],
@ -761,6 +801,22 @@
"label": "gpt-4",
"name": "gpt-4"
},
{
"label": "gpt-4-turbo-preview",
"name": "gpt-4-turbo-preview"
},
{
"label": "gpt-4-0125-preview",
"name": "gpt-4-0125-preview"
},
{
"label": "gpt-4-1106-preview",
"name": "gpt-4-1106-preview"
},
{
"label": "gpt-4-vision-preview",
"name": "gpt-4-vision-preview"
},
{
"label": "gpt-4-0613",
"name": "gpt-4-0613"
@ -777,6 +833,10 @@
"label": "gpt-3.5-turbo",
"name": "gpt-3.5-turbo"
},
{
"label": "gpt-3.5-turbo-1106",
"name": "gpt-3.5-turbo-1106"
},
{
"label": "gpt-3.5-turbo-0613",
"name": "gpt-3.5-turbo-0613"

View File

@ -175,7 +175,7 @@
"data": {
"id": "chatOpenAI_0",
"label": "ChatOpenAI",
"version": 2,
"version": 3,
"name": "chatOpenAI",
"type": "ChatOpenAI",
"baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel"],
@ -198,6 +198,22 @@
"label": "gpt-4",
"name": "gpt-4"
},
{
"label": "gpt-4-turbo-preview",
"name": "gpt-4-turbo-preview"
},
{
"label": "gpt-4-0125-preview",
"name": "gpt-4-0125-preview"
},
{
"label": "gpt-4-1106-preview",
"name": "gpt-4-1106-preview"
},
{
"label": "gpt-4-vision-preview",
"name": "gpt-4-vision-preview"
},
{
"label": "gpt-4-0613",
"name": "gpt-4-0613"
@ -214,6 +230,10 @@
"label": "gpt-3.5-turbo",
"name": "gpt-3.5-turbo"
},
{
"label": "gpt-3.5-turbo-1106",
"name": "gpt-3.5-turbo-1106"
},
{
"label": "gpt-3.5-turbo-0613",
"name": "gpt-3.5-turbo-0613"
@ -381,13 +401,23 @@
"type": "BaseLLMOutputParser",
"optional": true,
"id": "llmChain_0-input-outputParser-BaseLLMOutputParser"
},
{
"label": "Input Moderation",
"description": "Detect text that could generate harmful output and prevent it from being sent to the language model",
"name": "inputModeration",
"type": "Moderation",
"optional": true,
"list": true,
"id": "llmChain_0-input-inputModeration-Moderation"
}
],
"inputs": {
"model": "{{chatOpenAI_0.data.instance}}",
"prompt": "{{fewShotPromptTemplate_1.data.instance}}",
"outputParser": "",
"chainName": ""
"chainName": "",
"inputModeration": ""
},
"outputAnchors": [
{

View File

@ -251,7 +251,7 @@
"data": {
"id": "chatOpenAI_0",
"label": "ChatOpenAI",
"version": 2,
"version": 3,
"name": "chatOpenAI",
"type": "ChatOpenAI",
"baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel"],
@ -274,6 +274,22 @@
"label": "gpt-4",
"name": "gpt-4"
},
{
"label": "gpt-4-turbo-preview",
"name": "gpt-4-turbo-preview"
},
{
"label": "gpt-4-0125-preview",
"name": "gpt-4-0125-preview"
},
{
"label": "gpt-4-1106-preview",
"name": "gpt-4-1106-preview"
},
{
"label": "gpt-4-vision-preview",
"name": "gpt-4-vision-preview"
},
{
"label": "gpt-4-0613",
"name": "gpt-4-0613"
@ -290,6 +306,10 @@
"label": "gpt-3.5-turbo",
"name": "gpt-3.5-turbo"
},
{
"label": "gpt-3.5-turbo-1106",
"name": "gpt-3.5-turbo-1106"
},
{
"label": "gpt-3.5-turbo-0613",
"name": "gpt-3.5-turbo-0613"
@ -422,7 +442,7 @@
"data": {
"id": "openAIEmbeddings_0",
"label": "OpenAI Embeddings",
"version": 1,
"version": 2,
"name": "openAIEmbeddings",
"type": "OpenAIEmbeddings",
"baseClasses": ["OpenAIEmbeddings", "Embeddings"],
@ -436,6 +456,28 @@
"credentialNames": ["openAIApi"],
"id": "openAIEmbeddings_0-input-credential-credential"
},
{
"label": "Model Name",
"name": "modelName",
"type": "options",
"options": [
{
"label": "text-embedding-3-large",
"name": "text-embedding-3-large"
},
{
"label": "text-embedding-3-small",
"name": "text-embedding-3-small"
},
{
"label": "text-embedding-ada-002",
"name": "text-embedding-ada-002"
}
],
"default": "text-embedding-ada-002",
"optional": true,
"id": "openAIEmbeddings_0-input-modelName-options"
},
{
"label": "Strip New Lines",
"name": "stripNewLines",
@ -474,7 +516,8 @@
"stripNewLines": "",
"batchSize": "",
"timeout": "",
"basepath": ""
"basepath": "",
"modelName": "text-embedding-ada-002"
},
"outputAnchors": [
{
@ -506,12 +549,12 @@
"data": {
"id": "pinecone_0",
"label": "Pinecone",
"version": 1,
"version": 2,
"name": "pinecone",
"type": "Pinecone",
"baseClasses": ["Pinecone", "VectorStoreRetriever", "BaseRetriever"],
"category": "Vector Stores",
"description": "Upsert embedded data and perform similarity search upon query using Pinecone, a leading fully managed hosted vector database",
"description": "Upsert embedded data and perform similarity or mmr search using Pinecone, a leading fully managed hosted vector database",
"inputParams": [
{
"label": "Connect Credential",
@ -552,6 +595,45 @@
"additionalParams": true,
"optional": true,
"id": "pinecone_0-input-topK-number"
},
{
"label": "Search Type",
"name": "searchType",
"type": "options",
"default": "similarity",
"options": [
{
"label": "Similarity",
"name": "similarity"
},
{
"label": "Max Marginal Relevance",
"name": "mmr"
}
],
"additionalParams": true,
"optional": true,
"id": "pinecone_0-input-searchType-options"
},
{
"label": "Fetch K (for MMR Search)",
"name": "fetchK",
"description": "Number of initial documents to fetch for MMR reranking. Default to 20. Used only when the search type is MMR",
"placeholder": "20",
"type": "number",
"additionalParams": true,
"optional": true,
"id": "pinecone_0-input-fetchK-number"
},
{
"label": "Lambda (for MMR Search)",
"name": "lambda",
"description": "Number between 0 and 1 that determines the degree of diversity among the results, where 0 corresponds to maximum diversity and 1 to minimum diversity. Used only when the search type is MMR",
"placeholder": "0.5",
"type": "number",
"additionalParams": true,
"optional": true,
"id": "pinecone_0-input-lambda-number"
}
],
"inputAnchors": [
@ -576,7 +658,10 @@
"pineconeIndex": "",
"pineconeNamespace": "",
"pineconeMetadataFilter": "",
"topK": ""
"topK": "",
"searchType": "similarity",
"fetchK": "",
"lambda": ""
},
"outputAnchors": [
{

View File

@ -77,7 +77,7 @@
"data": {
"id": "openAIEmbeddings_0",
"label": "OpenAI Embeddings",
"version": 1,
"version": 2,
"name": "openAIEmbeddings",
"type": "OpenAIEmbeddings",
"baseClasses": ["OpenAIEmbeddings", "Embeddings"],
@ -91,6 +91,28 @@
"credentialNames": ["openAIApi"],
"id": "openAIEmbeddings_0-input-credential-credential"
},
{
"label": "Model Name",
"name": "modelName",
"type": "options",
"options": [
{
"label": "text-embedding-3-large",
"name": "text-embedding-3-large"
},
{
"label": "text-embedding-3-small",
"name": "text-embedding-3-small"
},
{
"label": "text-embedding-ada-002",
"name": "text-embedding-ada-002"
}
],
"default": "text-embedding-ada-002",
"optional": true,
"id": "openAIEmbeddings_0-input-modelName-options"
},
{
"label": "Strip New Lines",
"name": "stripNewLines",
@ -129,7 +151,8 @@
"stripNewLines": "",
"batchSize": "",
"timeout": "",
"basepath": ""
"basepath": "",
"modelName": "text-embedding-ada-002"
},
"outputAnchors": [
{
@ -161,12 +184,12 @@
"data": {
"id": "pinecone_0",
"label": "Pinecone",
"version": 1,
"version": 2,
"name": "pinecone",
"type": "Pinecone",
"baseClasses": ["Pinecone", "VectorStoreRetriever", "BaseRetriever"],
"category": "Vector Stores",
"description": "Upsert embedded data and perform similarity search upon query using Pinecone, a leading fully managed hosted vector database",
"description": "Upsert embedded data and perform similarity or mmr search using Pinecone, a leading fully managed hosted vector database",
"inputParams": [
{
"label": "Connect Credential",
@ -207,6 +230,45 @@
"additionalParams": true,
"optional": true,
"id": "pinecone_0-input-topK-number"
},
{
"label": "Search Type",
"name": "searchType",
"type": "options",
"default": "similarity",
"options": [
{
"label": "Similarity",
"name": "similarity"
},
{
"label": "Max Marginal Relevance",
"name": "mmr"
}
],
"additionalParams": true,
"optional": true,
"id": "pinecone_0-input-searchType-options"
},
{
"label": "Fetch K (for MMR Search)",
"name": "fetchK",
"description": "Number of initial documents to fetch for MMR reranking. Default to 20. Used only when the search type is MMR",
"placeholder": "20",
"type": "number",
"additionalParams": true,
"optional": true,
"id": "pinecone_0-input-fetchK-number"
},
{
"label": "Lambda (for MMR Search)",
"name": "lambda",
"description": "Number between 0 and 1 that determines the degree of diversity among the results, where 0 corresponds to maximum diversity and 1 to minimum diversity. Used only when the search type is MMR",
"placeholder": "0.5",
"type": "number",
"additionalParams": true,
"optional": true,
"id": "pinecone_0-input-lambda-number"
}
],
"inputAnchors": [
@ -231,7 +293,10 @@
"pineconeIndex": "",
"pineconeNamespace": "",
"pineconeMetadataFilter": "",
"topK": ""
"topK": "",
"searchType": "similarity",
"fetchK": "",
"lambda": ""
},
"outputAnchors": [
{
@ -279,7 +344,7 @@
"data": {
"id": "chatOpenAI_0",
"label": "ChatOpenAI",
"version": 2,
"version": 3,
"name": "chatOpenAI",
"type": "ChatOpenAI",
"baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel", "Runnable"],
@ -302,6 +367,14 @@
"label": "gpt-4",
"name": "gpt-4"
},
{
"label": "gpt-4-turbo-preview",
"name": "gpt-4-turbo-preview"
},
{
"label": "gpt-4-0125-preview",
"name": "gpt-4-0125-preview"
},
{
"label": "gpt-4-1106-preview",
"name": "gpt-4-1106-preview"

View File

@ -70,7 +70,7 @@
"id": "chatOpenAI_0",
"label": "ChatOpenAI",
"name": "chatOpenAI",
"version": 2,
"version": 3,
"type": "ChatOpenAI",
"baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel"],
"category": "Chat Models",
@ -92,6 +92,22 @@
"label": "gpt-4",
"name": "gpt-4"
},
{
"label": "gpt-4-turbo-preview",
"name": "gpt-4-turbo-preview"
},
{
"label": "gpt-4-0125-preview",
"name": "gpt-4-0125-preview"
},
{
"label": "gpt-4-1106-preview",
"name": "gpt-4-1106-preview"
},
{
"label": "gpt-4-vision-preview",
"name": "gpt-4-vision-preview"
},
{
"label": "gpt-4-0613",
"name": "gpt-4-0613"
@ -108,6 +124,10 @@
"label": "gpt-3.5-turbo",
"name": "gpt-3.5-turbo"
},
{
"label": "gpt-3.5-turbo-1106",
"name": "gpt-3.5-turbo-1106"
},
{
"label": "gpt-3.5-turbo-0613",
"name": "gpt-3.5-turbo-0613"

View File

@ -194,7 +194,7 @@
"data": {
"id": "chatOpenAI_0",
"label": "ChatOpenAI",
"version": 2,
"version": 3,
"name": "chatOpenAI",
"type": "ChatOpenAI",
"baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel", "Runnable"],
@ -217,6 +217,22 @@
"label": "gpt-4",
"name": "gpt-4"
},
{
"label": "gpt-4-turbo-preview",
"name": "gpt-4-turbo-preview"
},
{
"label": "gpt-4-0125-preview",
"name": "gpt-4-0125-preview"
},
{
"label": "gpt-4-1106-preview",
"name": "gpt-4-1106-preview"
},
{
"label": "gpt-4-vision-preview",
"name": "gpt-4-vision-preview"
},
{
"label": "gpt-4-0613",
"name": "gpt-4-0613"
@ -233,6 +249,10 @@
"label": "gpt-3.5-turbo",
"name": "gpt-3.5-turbo"
},
{
"label": "gpt-3.5-turbo-1106",
"name": "gpt-3.5-turbo-1106"
},
{
"label": "gpt-3.5-turbo-0613",
"name": "gpt-3.5-turbo-0613"
@ -440,7 +460,7 @@
"data": {
"id": "openAIEmbeddings_0",
"label": "OpenAI Embeddings",
"version": 1,
"version": 2,
"name": "openAIEmbeddings",
"type": "OpenAIEmbeddings",
"baseClasses": ["OpenAIEmbeddings", "Embeddings"],
@ -454,6 +474,28 @@
"credentialNames": ["openAIApi"],
"id": "openAIEmbeddings_0-input-credential-credential"
},
{
"label": "Model Name",
"name": "modelName",
"type": "options",
"options": [
{
"label": "text-embedding-3-large",
"name": "text-embedding-3-large"
},
{
"label": "text-embedding-3-small",
"name": "text-embedding-3-small"
},
{
"label": "text-embedding-ada-002",
"name": "text-embedding-ada-002"
}
],
"default": "text-embedding-ada-002",
"optional": true,
"id": "openAIEmbeddings_0-input-modelName-options"
},
{
"label": "Strip New Lines",
"name": "stripNewLines",
@ -492,7 +534,8 @@
"stripNewLines": "",
"batchSize": "",
"timeout": "",
"basepath": ""
"basepath": "",
"modelName": "text-embedding-ada-002"
},
"outputAnchors": [
{

View File

@ -215,7 +215,7 @@
"id": "chatOpenAI_0",
"label": "ChatOpenAI",
"name": "chatOpenAI",
"version": 2,
"version": 3,
"type": "ChatOpenAI",
"baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel"],
"category": "Chat Models",
@ -237,6 +237,22 @@
"label": "gpt-4",
"name": "gpt-4"
},
{
"label": "gpt-4-turbo-preview",
"name": "gpt-4-turbo-preview"
},
{
"label": "gpt-4-0125-preview",
"name": "gpt-4-0125-preview"
},
{
"label": "gpt-4-1106-preview",
"name": "gpt-4-1106-preview"
},
{
"label": "gpt-4-vision-preview",
"name": "gpt-4-vision-preview"
},
{
"label": "gpt-4-0613",
"name": "gpt-4-0613"
@ -253,6 +269,10 @@
"label": "gpt-3.5-turbo",
"name": "gpt-3.5-turbo"
},
{
"label": "gpt-3.5-turbo-1106",
"name": "gpt-3.5-turbo-1106"
},
{
"label": "gpt-3.5-turbo-0613",
"name": "gpt-3.5-turbo-0613"

View File

@ -6,15 +6,15 @@
"height": 376,
"id": "bufferMemory_0",
"position": {
"x": 451.4449437285705,
"y": 118.30026803362762
"x": 240.5161028076149,
"y": 165.35849026339048
},
"type": "customNode",
"data": {
"id": "bufferMemory_0",
"label": "Buffer Memory",
"name": "bufferMemory",
"version": 1,
"name": "bufferMemory",
"type": "BufferMemory",
"baseClasses": ["BufferMemory", "BaseChatMemory", "BaseMemory"],
"category": "Memory",
@ -53,8 +53,8 @@
},
"selected": false,
"positionAbsolute": {
"x": 451.4449437285705,
"y": 118.30026803362762
"x": 240.5161028076149,
"y": 165.35849026339048
},
"dragging": false
},
@ -63,17 +63,17 @@
"height": 383,
"id": "conversationChain_0",
"position": {
"x": 1176.1569322079652,
"y": 303.56879146735974
"x": 958.9887390513221,
"y": 318.8734467468765
},
"type": "customNode",
"data": {
"id": "conversationChain_0",
"label": "Conversation Chain",
"version": 3,
"name": "conversationChain",
"version": 1,
"type": "ConversationChain",
"baseClasses": ["ConversationChain", "LLMChain", "BaseChain"],
"baseClasses": ["ConversationChain", "LLMChain", "BaseChain", "Runnable"],
"category": "Chains",
"description": "Chat models specific conversational chain with memory",
"inputParams": [
@ -82,9 +82,11 @@
"name": "systemMessagePrompt",
"type": "string",
"rows": 4,
"description": "If Chat Prompt Template is provided, this will be ignored",
"additionalParams": true,
"optional": true,
"placeholder": "You are a helpful assistant that write codes",
"default": "The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.",
"placeholder": "The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.",
"id": "conversationChain_0-input-systemMessagePrompt-string"
}
],
@ -102,27 +104,36 @@
"id": "conversationChain_0-input-memory-BaseMemory"
},
{
"label": "Document",
"name": "document",
"type": "Document",
"description": "Include whole document into the context window",
"label": "Chat Prompt Template",
"name": "chatPromptTemplate",
"type": "ChatPromptTemplate",
"description": "Override existing prompt with Chat Prompt Template. Human Message must includes {input} variable",
"optional": true,
"id": "conversationChain_0-input-chatPromptTemplate-ChatPromptTemplate"
},
{
"label": "Input Moderation",
"description": "Detect text that could generate harmful output and prevent it from being sent to the language model",
"name": "inputModeration",
"type": "Moderation",
"optional": true,
"list": true,
"id": "conversationChain_0-input-document-Document"
"id": "conversationChain_0-input-inputModeration-Moderation"
}
],
"inputs": {
"inputModeration": "",
"model": "{{chatAnthropic_0.data.instance}}",
"memory": "{{bufferMemory_0.data.instance}}",
"document": ["{{pdfFile_0.data.instance}}"],
"systemMessagePrompt": ""
"chatPromptTemplate": "{{chatPromptTemplate_0.data.instance}}",
"systemMessagePrompt": "The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know."
},
"outputAnchors": [
{
"id": "conversationChain_0-output-conversationChain-ConversationChain|LLMChain|BaseChain",
"id": "conversationChain_0-output-conversationChain-ConversationChain|LLMChain|BaseChain|Runnable",
"name": "conversationChain",
"label": "ConversationChain",
"type": "ConversationChain | LLMChain | BaseChain"
"type": "ConversationChain | LLMChain | BaseChain | Runnable"
}
],
"outputs": {},
@ -130,27 +141,27 @@
},
"selected": false,
"positionAbsolute": {
"x": 1176.1569322079652,
"y": 303.56879146735974
"x": 958.9887390513221,
"y": 318.8734467468765
},
"dragging": false
},
{
"width": 300,
"height": 523,
"height": 574,
"id": "chatAnthropic_0",
"position": {
"x": 800.5525382783799,
"y": -130.7988221837009
"x": 585.3308245972187,
"y": -116.32789506560908
},
"type": "customNode",
"data": {
"id": "chatAnthropic_0",
"label": "ChatAnthropic",
"name": "chatAnthropic",
"version": 3,
"name": "chatAnthropic",
"type": "ChatAnthropic",
"baseClasses": ["ChatAnthropic", "BaseChatModel", "BaseLanguageModel"],
"baseClasses": ["ChatAnthropic", "BaseChatModel", "BaseLanguageModel", "Runnable"],
"category": "Chat Models",
"description": "Wrapper around ChatAnthropic large language models that use the Chat endpoint",
"inputParams": [
@ -226,7 +237,7 @@
"name": "claude-instant-v1.1-100k"
}
],
"default": "claude-v1",
"default": "claude-2",
"optional": true,
"id": "chatAnthropic_0-input-modelName-options"
},
@ -234,6 +245,7 @@
"label": "Temperature",
"name": "temperature",
"type": "number",
"step": 0.1,
"default": 0.9,
"optional": true,
"id": "chatAnthropic_0-input-temperature-number"
@ -242,6 +254,7 @@
"label": "Max Tokens",
"name": "maxTokensToSample",
"type": "number",
"step": 1,
"optional": true,
"additionalParams": true,
"id": "chatAnthropic_0-input-maxTokensToSample-number"
@ -250,6 +263,7 @@
"label": "Top P",
"name": "topP",
"type": "number",
"step": 0.1,
"optional": true,
"additionalParams": true,
"id": "chatAnthropic_0-input-topP-number"
@ -258,6 +272,7 @@
"label": "Top K",
"name": "topK",
"type": "number",
"step": 0.1,
"optional": true,
"additionalParams": true,
"id": "chatAnthropic_0-input-topK-number"
@ -273,6 +288,7 @@
}
],
"inputs": {
"cache": "",
"modelName": "claude-2.1",
"temperature": 0.9,
"maxTokensToSample": "",
@ -281,10 +297,10 @@
},
"outputAnchors": [
{
"id": "chatAnthropic_0-output-chatAnthropic-ChatAnthropic|BaseChatModel|BaseLanguageModel",
"id": "chatAnthropic_0-output-chatAnthropic-ChatAnthropic|BaseChatModel|BaseLanguageModel|Runnable",
"name": "chatAnthropic",
"label": "ChatAnthropic",
"type": "ChatAnthropic | BaseChatModel | BaseLanguageModel"
"type": "ChatAnthropic | BaseChatModel | BaseLanguageModel | Runnable"
}
],
"outputs": {},
@ -292,61 +308,106 @@
},
"selected": false,
"positionAbsolute": {
"x": 800.5525382783799,
"y": -130.7988221837009
"x": 585.3308245972187,
"y": -116.32789506560908
},
"dragging": false
},
{
"width": 300,
"height": 507,
"id": "pdfFile_0",
"height": 688,
"id": "chatPromptTemplate_0",
"position": {
"x": 94.16886576108482,
"y": 37.12056504707391
"x": -106.44189698270114,
"y": 20.133956087516538
},
"type": "customNode",
"data": {
"id": "pdfFile_0",
"label": "Pdf File",
"name": "pdfFile",
"id": "chatPromptTemplate_0",
"label": "Chat Prompt Template",
"version": 1,
"name": "chatPromptTemplate",
"type": "ChatPromptTemplate",
"baseClasses": ["ChatPromptTemplate", "BaseChatPromptTemplate", "BasePromptTemplate", "Runnable"],
"category": "Prompts",
"description": "Schema to represent a chat prompt",
"inputParams": [
{
"label": "System Message",
"name": "systemMessagePrompt",
"type": "string",
"rows": 4,
"placeholder": "You are a helpful assistant that translates {input_language} to {output_language}.",
"id": "chatPromptTemplate_0-input-systemMessagePrompt-string"
},
{
"label": "Human Message",
"name": "humanMessagePrompt",
"type": "string",
"rows": 4,
"placeholder": "{text}",
"id": "chatPromptTemplate_0-input-humanMessagePrompt-string"
},
{
"label": "Format Prompt Values",
"name": "promptValues",
"type": "json",
"optional": true,
"acceptVariable": true,
"list": true,
"id": "chatPromptTemplate_0-input-promptValues-json"
}
],
"inputAnchors": [],
"inputs": {
"systemMessagePrompt": "The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.\nThe AI has the following context:\n{context}",
"humanMessagePrompt": "{input}",
"promptValues": "{\"context\":\"{{plainText_0.data.instance}}\",\"input\":\"{{question}}\"}"
},
"outputAnchors": [
{
"id": "chatPromptTemplate_0-output-chatPromptTemplate-ChatPromptTemplate|BaseChatPromptTemplate|BasePromptTemplate|Runnable",
"name": "chatPromptTemplate",
"label": "ChatPromptTemplate",
"type": "ChatPromptTemplate | BaseChatPromptTemplate | BasePromptTemplate | Runnable"
}
],
"outputs": {},
"selected": false
},
"selected": false,
"positionAbsolute": {
"x": -106.44189698270114,
"y": 20.133956087516538
},
"dragging": false
},
{
"width": 300,
"height": 485,
"id": "plainText_0",
"position": {
"x": -487.7511991135089,
"y": 77.83838996645807
},
"type": "customNode",
"data": {
"id": "plainText_0",
"label": "Plain Text",
"version": 2,
"name": "plainText",
"type": "Document",
"baseClasses": ["Document"],
"category": "Document Loaders",
"description": "Load data from PDF files",
"description": "Load data from plain text",
"inputParams": [
{
"label": "Pdf File",
"name": "pdfFile",
"type": "file",
"fileType": ".pdf",
"id": "pdfFile_0-input-pdfFile-file"
},
{
"label": "Usage",
"name": "usage",
"type": "options",
"options": [
{
"label": "One document per page",
"name": "perPage"
},
{
"label": "One document per file",
"name": "perFile"
}
],
"default": "perPage",
"id": "pdfFile_0-input-usage-options"
},
{
"label": "Use Legacy Build",
"name": "legacyBuild",
"type": "boolean",
"optional": true,
"additionalParams": true,
"id": "pdfFile_0-input-legacyBuild-boolean"
"label": "Text",
"name": "text",
"type": "string",
"rows": 4,
"placeholder": "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua...",
"id": "plainText_0-input-text-string"
},
{
"label": "Metadata",
@ -354,7 +415,7 @@
"type": "json",
"optional": true,
"additionalParams": true,
"id": "pdfFile_0-input-metadata-json"
"id": "plainText_0-input-metadata-json"
}
],
"inputAnchors": [
@ -363,30 +424,45 @@
"name": "textSplitter",
"type": "TextSplitter",
"optional": true,
"id": "pdfFile_0-input-textSplitter-TextSplitter"
"id": "plainText_0-input-textSplitter-TextSplitter"
}
],
"inputs": {
"text": "Welcome to Skyworld Hotel, where your dreams take flight and your stay soars to new heights. Nestled amidst breathtaking cityscape views, our upscale establishment offers an unparalleled blend of luxury and comfort. Our rooms are elegantly appointed, featuring modern amenities and plush furnishings to ensure your relaxation.\n\nIndulge in culinary delights at our rooftop restaurant, offering a gastronomic journey with panoramic vistas. Skyworld Hotel boasts state-of-the-art conference facilities, perfect for business travelers, and an inviting spa for relaxation seekers. Our attentive staff is dedicated to ensuring your every need is met, making your stay memorable.\n\nCentrally located, we offer easy access to local attractions, making us an ideal choice for both leisure and business travelers. Experience the world of hospitality like never before at Skyworld Hotel.",
"textSplitter": "",
"usage": "perPage",
"legacyBuild": "",
"metadata": ""
},
"outputAnchors": [
{
"id": "pdfFile_0-output-pdfFile-Document",
"name": "pdfFile",
"label": "Document",
"type": "Document"
"name": "output",
"label": "Output",
"type": "options",
"options": [
{
"id": "plainText_0-output-document-Document",
"name": "document",
"label": "Document",
"type": "Document"
},
{
"id": "plainText_0-output-text-string|json",
"name": "text",
"label": "Text",
"type": "string | json"
}
],
"default": "document"
}
],
"outputs": {},
"outputs": {
"output": "text"
},
"selected": false
},
"selected": false,
"positionAbsolute": {
"x": 94.16886576108482,
"y": 37.12056504707391
"x": -487.7511991135089,
"y": 77.83838996645807
},
"dragging": false
}
@ -398,32 +474,31 @@
"target": "conversationChain_0",
"targetHandle": "conversationChain_0-input-memory-BaseMemory",
"type": "buttonedge",
"id": "bufferMemory_0-bufferMemory_0-output-bufferMemory-BufferMemory|BaseChatMemory|BaseMemory-conversationChain_0-conversationChain_0-input-memory-BaseMemory",
"data": {
"label": ""
}
"id": "bufferMemory_0-bufferMemory_0-output-bufferMemory-BufferMemory|BaseChatMemory|BaseMemory-conversationChain_0-conversationChain_0-input-memory-BaseMemory"
},
{
"source": "chatAnthropic_0",
"sourceHandle": "chatAnthropic_0-output-chatAnthropic-ChatAnthropic|BaseChatModel|BaseLanguageModel",
"sourceHandle": "chatAnthropic_0-output-chatAnthropic-ChatAnthropic|BaseChatModel|BaseLanguageModel|Runnable",
"target": "conversationChain_0",
"targetHandle": "conversationChain_0-input-model-BaseChatModel",
"type": "buttonedge",
"id": "chatAnthropic_0-chatAnthropic_0-output-chatAnthropic-ChatAnthropic|BaseChatModel|BaseLanguageModel-conversationChain_0-conversationChain_0-input-model-BaseChatModel",
"data": {
"label": ""
}
"id": "chatAnthropic_0-chatAnthropic_0-output-chatAnthropic-ChatAnthropic|BaseChatModel|BaseLanguageModel|Runnable-conversationChain_0-conversationChain_0-input-model-BaseChatModel"
},
{
"source": "pdfFile_0",
"sourceHandle": "pdfFile_0-output-pdfFile-Document",
"target": "conversationChain_0",
"targetHandle": "conversationChain_0-input-document-Document",
"source": "plainText_0",
"sourceHandle": "plainText_0-output-text-string|json",
"target": "chatPromptTemplate_0",
"targetHandle": "chatPromptTemplate_0-input-promptValues-json",
"type": "buttonedge",
"id": "pdfFile_0-pdfFile_0-output-pdfFile-Document-conversationChain_0-conversationChain_0-input-document-Document",
"data": {
"label": ""
}
"id": "plainText_0-plainText_0-output-text-string|json-chatPromptTemplate_0-chatPromptTemplate_0-input-promptValues-json"
},
{
"source": "chatPromptTemplate_0",
"sourceHandle": "chatPromptTemplate_0-output-chatPromptTemplate-ChatPromptTemplate|BaseChatPromptTemplate|BasePromptTemplate|Runnable",
"target": "conversationChain_0",
"targetHandle": "conversationChain_0-input-chatPromptTemplate-ChatPromptTemplate",
"type": "buttonedge",
"id": "chatPromptTemplate_0-chatPromptTemplate_0-output-chatPromptTemplate-ChatPromptTemplate|BaseChatPromptTemplate|BasePromptTemplate|Runnable-conversationChain_0-conversationChain_0-input-chatPromptTemplate-ChatPromptTemplate"
}
]
}

View File

@ -156,7 +156,7 @@
"data": {
"id": "chatOpenAI_0",
"label": "ChatOpenAI",
"version": 2,
"version": 3,
"name": "chatOpenAI",
"type": "ChatOpenAI",
"baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel"],
@ -179,6 +179,22 @@
"label": "gpt-4",
"name": "gpt-4"
},
{
"label": "gpt-4-turbo-preview",
"name": "gpt-4-turbo-preview"
},
{
"label": "gpt-4-0125-preview",
"name": "gpt-4-0125-preview"
},
{
"label": "gpt-4-1106-preview",
"name": "gpt-4-1106-preview"
},
{
"label": "gpt-4-vision-preview",
"name": "gpt-4-vision-preview"
},
{
"label": "gpt-4-0613",
"name": "gpt-4-0613"
@ -195,6 +211,10 @@
"label": "gpt-3.5-turbo",
"name": "gpt-3.5-turbo"
},
{
"label": "gpt-3.5-turbo-1106",
"name": "gpt-3.5-turbo-1106"
},
{
"label": "gpt-3.5-turbo-0613",
"name": "gpt-3.5-turbo-0613"

View File

@ -14,7 +14,7 @@
"data": {
"id": "openAIEmbeddings_0",
"label": "OpenAI Embeddings",
"version": 1,
"version": 2,
"name": "openAIEmbeddings",
"type": "OpenAIEmbeddings",
"baseClasses": ["OpenAIEmbeddings", "Embeddings"],
@ -28,6 +28,28 @@
"credentialNames": ["openAIApi"],
"id": "openAIEmbeddings_0-input-credential-credential"
},
{
"label": "Model Name",
"name": "modelName",
"type": "options",
"options": [
{
"label": "text-embedding-3-large",
"name": "text-embedding-3-large"
},
{
"label": "text-embedding-3-small",
"name": "text-embedding-3-small"
},
{
"label": "text-embedding-ada-002",
"name": "text-embedding-ada-002"
}
],
"default": "text-embedding-ada-002",
"optional": true,
"id": "openAIEmbeddings_0-input-modelName-options"
},
{
"label": "Strip New Lines",
"name": "stripNewLines",
@ -66,7 +88,8 @@
"stripNewLines": "",
"batchSize": "",
"timeout": "",
"basepath": ""
"basepath": "",
"modelName": "text-embedding-ada-002"
},
"outputAnchors": [
{
@ -194,6 +217,13 @@
"rows": 3,
"placeholder": "Searches and returns documents regarding the state-of-the-union.",
"id": "retrieverTool_0-input-description-string"
},
{
"label": "Return Source Documents",
"name": "returnSourceDocuments",
"type": "boolean",
"optional": true,
"id": "retrieverTool_0-input-returnSourceDocuments-boolean"
}
],
"inputAnchors": [
@ -207,7 +237,8 @@
"inputs": {
"name": "search_website",
"description": "Searches and return documents regarding Jane - a culinary institution that offers top quality coffee, pastries, breakfast, lunch, and a variety of baked goods. They have multiple locations, including Jane on Fillmore, Jane on Larkin, Jane the Bakery, Toy Boat By Jane, and Little Jane on Grant. They emphasize healthy eating with a focus on flavor and quality ingredients. They bake everything in-house and work with local suppliers to source ingredients directly from farmers. They also offer catering services and delivery options.",
"retriever": "{{pinecone_0.data.instance}}"
"retriever": "{{pinecone_0.data.instance}}",
"returnSourceDocuments": true
},
"outputAnchors": [
{
@ -296,12 +327,12 @@
"data": {
"id": "pinecone_0",
"label": "Pinecone",
"version": 1,
"version": 2,
"name": "pinecone",
"type": "Pinecone",
"baseClasses": ["Pinecone", "VectorStoreRetriever", "BaseRetriever"],
"category": "Vector Stores",
"description": "Upsert embedded data and perform similarity search upon query using Pinecone, a leading fully managed hosted vector database",
"description": "Upsert embedded data and perform similarity or mmr search using Pinecone, a leading fully managed hosted vector database",
"inputParams": [
{
"label": "Connect Credential",
@ -342,6 +373,45 @@
"additionalParams": true,
"optional": true,
"id": "pinecone_0-input-topK-number"
},
{
"label": "Search Type",
"name": "searchType",
"type": "options",
"default": "similarity",
"options": [
{
"label": "Similarity",
"name": "similarity"
},
{
"label": "Max Marginal Relevance",
"name": "mmr"
}
],
"additionalParams": true,
"optional": true,
"id": "pinecone_0-input-searchType-options"
},
{
"label": "Fetch K (for MMR Search)",
"name": "fetchK",
"description": "Number of initial documents to fetch for MMR reranking. Default to 20. Used only when the search type is MMR",
"placeholder": "20",
"type": "number",
"additionalParams": true,
"optional": true,
"id": "pinecone_0-input-fetchK-number"
},
{
"label": "Lambda (for MMR Search)",
"name": "lambda",
"description": "Number between 0 and 1 that determines the degree of diversity among the results, where 0 corresponds to maximum diversity and 1 to minimum diversity. Used only when the search type is MMR",
"placeholder": "0.5",
"type": "number",
"additionalParams": true,
"optional": true,
"id": "pinecone_0-input-lambda-number"
}
],
"inputAnchors": [
@ -366,7 +436,10 @@
"pineconeIndex": "",
"pineconeNamespace": "",
"pineconeMetadataFilter": "",
"topK": ""
"topK": "",
"searchType": "similarity",
"fetchK": "",
"lambda": ""
},
"outputAnchors": [
{
@ -414,7 +487,7 @@
"data": {
"id": "chatOpenAI_0",
"label": "ChatOpenAI",
"version": 2,
"version": 3,
"name": "chatOpenAI",
"type": "ChatOpenAI",
"baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel", "Runnable"],
@ -437,6 +510,14 @@
"label": "gpt-4",
"name": "gpt-4"
},
{
"label": "gpt-4-turbo-preview",
"name": "gpt-4-turbo-preview"
},
{
"label": "gpt-4-0125-preview",
"name": "gpt-4-0125-preview"
},
{
"label": "gpt-4-1106-preview",
"name": "gpt-4-1106-preview"

View File

@ -14,7 +14,7 @@
"data": {
"id": "openAIEmbeddings_0",
"label": "OpenAI Embeddings",
"version": 1,
"version": 2,
"name": "openAIEmbeddings",
"type": "OpenAIEmbeddings",
"baseClasses": ["OpenAIEmbeddings", "Embeddings"],
@ -28,6 +28,28 @@
"credentialNames": ["openAIApi"],
"id": "openAIEmbeddings_0-input-credential-credential"
},
{
"label": "Model Name",
"name": "modelName",
"type": "options",
"options": [
{
"label": "text-embedding-3-large",
"name": "text-embedding-3-large"
},
{
"label": "text-embedding-3-small",
"name": "text-embedding-3-small"
},
{
"label": "text-embedding-ada-002",
"name": "text-embedding-ada-002"
}
],
"default": "text-embedding-ada-002",
"optional": true,
"id": "openAIEmbeddings_0-input-modelName-options"
},
{
"label": "Strip New Lines",
"name": "stripNewLines",
@ -66,7 +88,8 @@
"stripNewLines": "",
"batchSize": "",
"timeout": "",
"basepath": ""
"basepath": "",
"modelName": "text-embedding-ada-002"
},
"outputAnchors": [
{
@ -346,7 +369,7 @@
"data": {
"id": "chatOpenAI_0",
"label": "ChatOpenAI",
"version": 2,
"version": 3,
"name": "chatOpenAI",
"type": "ChatOpenAI",
"baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel", "Runnable"],
@ -369,6 +392,14 @@
"label": "gpt-4",
"name": "gpt-4"
},
{
"label": "gpt-4-turbo-preview",
"name": "gpt-4-turbo-preview"
},
{
"label": "gpt-4-0125-preview",
"name": "gpt-4-0125-preview"
},
{
"label": "gpt-4-1106-preview",
"name": "gpt-4-1106-preview"
@ -536,12 +567,12 @@
"data": {
"id": "pinecone_0",
"label": "Pinecone",
"version": 1,
"version": 2,
"name": "pinecone",
"type": "Pinecone",
"baseClasses": ["Pinecone", "VectorStoreRetriever", "BaseRetriever"],
"category": "Vector Stores",
"description": "Upsert embedded data and perform similarity search upon query using Pinecone, a leading fully managed hosted vector database",
"description": "Upsert embedded data and perform similarity or mmr search using Pinecone, a leading fully managed hosted vector database",
"inputParams": [
{
"label": "Connect Credential",
@ -582,6 +613,45 @@
"additionalParams": true,
"optional": true,
"id": "pinecone_0-input-topK-number"
},
{
"label": "Search Type",
"name": "searchType",
"type": "options",
"default": "similarity",
"options": [
{
"label": "Similarity",
"name": "similarity"
},
{
"label": "Max Marginal Relevance",
"name": "mmr"
}
],
"additionalParams": true,
"optional": true,
"id": "pinecone_0-input-searchType-options"
},
{
"label": "Fetch K (for MMR Search)",
"name": "fetchK",
"description": "Number of initial documents to fetch for MMR reranking. Default to 20. Used only when the search type is MMR",
"placeholder": "20",
"type": "number",
"additionalParams": true,
"optional": true,
"id": "pinecone_0-input-fetchK-number"
},
{
"label": "Lambda (for MMR Search)",
"name": "lambda",
"description": "Number between 0 and 1 that determines the degree of diversity among the results, where 0 corresponds to maximum diversity and 1 to minimum diversity. Used only when the search type is MMR",
"placeholder": "0.5",
"type": "number",
"additionalParams": true,
"optional": true,
"id": "pinecone_0-input-lambda-number"
}
],
"inputAnchors": [
@ -606,7 +676,10 @@
"pineconeIndex": "",
"pineconeNamespace": "",
"pineconeMetadataFilter": "",
"topK": ""
"topK": "",
"searchType": "similarity",
"fetchK": "",
"lambda": ""
},
"outputAnchors": [
{

View File

@ -376,7 +376,7 @@
"id": "chatOpenAI_0",
"label": "ChatOpenAI",
"name": "chatOpenAI",
"version": 2,
"version": 3,
"type": "ChatOpenAI",
"baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel"],
"category": "Chat Models",
@ -398,6 +398,22 @@
"label": "gpt-4",
"name": "gpt-4"
},
{
"label": "gpt-4-turbo-preview",
"name": "gpt-4-turbo-preview"
},
{
"label": "gpt-4-0125-preview",
"name": "gpt-4-0125-preview"
},
{
"label": "gpt-4-1106-preview",
"name": "gpt-4-1106-preview"
},
{
"label": "gpt-4-vision-preview",
"name": "gpt-4-vision-preview"
},
{
"label": "gpt-4-0613",
"name": "gpt-4-0613"
@ -414,6 +430,10 @@
"label": "gpt-3.5-turbo",
"name": "gpt-3.5-turbo"
},
{
"label": "gpt-3.5-turbo-1106",
"name": "gpt-3.5-turbo-1106"
},
{
"label": "gpt-3.5-turbo-0613",
"name": "gpt-3.5-turbo-0613"
@ -547,7 +567,7 @@
"id": "openAIEmbeddings_0",
"label": "OpenAI Embeddings",
"name": "openAIEmbeddings",
"version": 1,
"version": 2,
"type": "OpenAIEmbeddings",
"baseClasses": ["OpenAIEmbeddings", "Embeddings"],
"category": "Embeddings",
@ -560,6 +580,28 @@
"credentialNames": ["openAIApi"],
"id": "openAIEmbeddings_0-input-credential-credential"
},
{
"label": "Model Name",
"name": "modelName",
"type": "options",
"options": [
{
"label": "text-embedding-3-large",
"name": "text-embedding-3-large"
},
{
"label": "text-embedding-3-small",
"name": "text-embedding-3-small"
},
{
"label": "text-embedding-ada-002",
"name": "text-embedding-ada-002"
}
],
"default": "text-embedding-ada-002",
"optional": true,
"id": "openAIEmbeddings_0-input-modelName-options"
},
{
"label": "Strip New Lines",
"name": "stripNewLines",
@ -598,7 +640,8 @@
"stripNewLines": "",
"batchSize": "",
"timeout": "",
"basepath": ""
"basepath": "",
"modelName": "text-embedding-ada-002"
},
"outputAnchors": [
{

View File

@ -234,13 +234,23 @@
"type": "BaseLLMOutputParser",
"optional": true,
"id": "llmChain_0-input-outputParser-BaseLLMOutputParser"
},
{
"label": "Input Moderation",
"description": "Detect text that could generate harmful output and prevent it from being sent to the language model",
"name": "inputModeration",
"type": "Moderation",
"optional": true,
"list": true,
"id": "llmChain_0-input-inputModeration-Moderation"
}
],
"inputs": {
"model": "{{huggingFaceInference_LLMs_0.data.instance}}",
"prompt": "{{promptTemplate_0.data.instance}}",
"outputParser": "",
"chainName": ""
"chainName": "",
"inputModeration": ""
},
"outputAnchors": [
{

View File

@ -489,13 +489,23 @@
"type": "BaseLLMOutputParser",
"optional": true,
"id": "llmChain_0-input-outputParser-BaseLLMOutputParser"
},
{
"label": "Input Moderation",
"description": "Detect text that could generate harmful output and prevent it from being sent to the language model",
"name": "inputModeration",
"type": "Moderation",
"optional": true,
"list": true,
"id": "llmChain_0-input-inputModeration-Moderation"
}
],
"inputs": {
"model": "{{openAI_1.data.instance}}",
"prompt": "{{promptTemplate_0.data.instance}}",
"outputParser": "",
"chainName": "FirstChain"
"chainName": "FirstChain",
"inputModeration": ""
},
"outputAnchors": [
{
@ -578,13 +588,23 @@
"type": "BaseLLMOutputParser",
"optional": true,
"id": "llmChain_1-input-outputParser-BaseLLMOutputParser"
},
{
"label": "Input Moderation",
"description": "Detect text that could generate harmful output and prevent it from being sent to the language model",
"name": "inputModeration",
"type": "Moderation",
"optional": true,
"list": true,
"id": "llmChain_1-input-inputModeration-Moderation"
}
],
"inputs": {
"model": "{{openAI_2.data.instance}}",
"prompt": "{{promptTemplate_1.data.instance}}",
"outputParser": "",
"chainName": "LastChain"
"chainName": "LastChain",
"inputModeration": ""
},
"outputAnchors": [
{
@ -742,8 +762,8 @@
"model": "{{chatOpenAI_0.data.instance}}",
"prompt": "{{promptTemplate_2.data.instance}}",
"outputParser": "",
"inputModeration": "",
"chainName": "FallbackChain"
"chainName": "FallbackChain",
"inputModeration": ""
},
"outputAnchors": [
{
@ -888,7 +908,7 @@
"data": {
"id": "chatOpenAI_0",
"label": "ChatOpenAI",
"version": 2,
"version": 3,
"name": "chatOpenAI",
"type": "ChatOpenAI",
"baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel", "Runnable"],
@ -911,6 +931,14 @@
"label": "gpt-4",
"name": "gpt-4"
},
{
"label": "gpt-4-turbo-preview",
"name": "gpt-4-turbo-preview"
},
{
"label": "gpt-4-0125-preview",
"name": "gpt-4-0125-preview"
},
{
"label": "gpt-4-1106-preview",
"name": "gpt-4-1106-preview"

View File

@ -289,13 +289,23 @@
"type": "BaseLLMOutputParser",
"optional": true,
"id": "llmChain_0-input-outputParser-BaseLLMOutputParser"
},
{
"label": "Input Moderation",
"description": "Detect text that could generate harmful output and prevent it from being sent to the language model",
"name": "inputModeration",
"type": "Moderation",
"optional": true,
"list": true,
"id": "llmChain_0-input-inputModeration-Moderation"
}
],
"inputs": {
"model": "{{replicate_0.data.instance}}",
"prompt": "{{promptTemplate_0.data.instance}}",
"outputParser": "",
"chainName": ""
"chainName": "",
"inputModeration": ""
},
"outputAnchors": [
{
@ -378,13 +388,23 @@
"type": "BaseLLMOutputParser",
"optional": true,
"id": "llmChain_1-input-outputParser-BaseLLMOutputParser"
},
{
"label": "Input Moderation",
"description": "Detect text that could generate harmful output and prevent it from being sent to the language model",
"name": "inputModeration",
"type": "Moderation",
"optional": true,
"list": true,
"id": "llmChain_1-input-inputModeration-Moderation"
}
],
"inputs": {
"model": "{{chatOpenAI_0.data.instance}}",
"prompt": "{{promptTemplate_1.data.instance}}",
"outputParser": "",
"chainName": ""
"chainName": "",
"inputModeration": ""
},
"outputAnchors": [
{
@ -432,7 +452,7 @@
"data": {
"id": "chatOpenAI_0",
"label": "ChatOpenAI",
"version": 2,
"version": 3,
"name": "chatOpenAI",
"type": "ChatOpenAI",
"baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel", "Runnable"],
@ -455,6 +475,14 @@
"label": "gpt-4",
"name": "gpt-4"
},
{
"label": "gpt-4-turbo-preview",
"name": "gpt-4-turbo-preview"
},
{
"label": "gpt-4-0125-preview",
"name": "gpt-4-0125-preview"
},
{
"label": "gpt-4-1106-preview",
"name": "gpt-4-1106-preview"

View File

@ -164,7 +164,7 @@
"data": {
"id": "chatOpenAI_0",
"label": "ChatOpenAI",
"version": 2,
"version": 3,
"name": "chatOpenAI",
"type": "ChatOpenAI",
"baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel", "Runnable"],
@ -187,6 +187,14 @@
"label": "gpt-4",
"name": "gpt-4"
},
{
"label": "gpt-4-turbo-preview",
"name": "gpt-4-turbo-preview"
},
{
"label": "gpt-4-0125-preview",
"name": "gpt-4-0125-preview"
},
{
"label": "gpt-4-1106-preview",
"name": "gpt-4-1106-preview"

View File

@ -49,13 +49,23 @@
"type": "BaseLLMOutputParser",
"optional": true,
"id": "llmChain_0-input-outputParser-BaseLLMOutputParser"
},
{
"label": "Input Moderation",
"description": "Detect text that could generate harmful output and prevent it from being sent to the language model",
"name": "inputModeration",
"type": "Moderation",
"optional": true,
"list": true,
"id": "llmChain_0-input-inputModeration-Moderation"
}
],
"inputs": {
"model": "{{chatOpenAI_0.data.instance}}",
"prompt": "{{promptTemplate_0.data.instance}}",
"outputParser": "{{csvOutputParser_0.data.instance}}",
"chainName": ""
"chainName": "",
"inputModeration": ""
},
"outputAnchors": [
{
@ -213,7 +223,7 @@
"data": {
"id": "chatOpenAI_0",
"label": "ChatOpenAI",
"version": 2,
"version": 3,
"name": "chatOpenAI",
"type": "ChatOpenAI",
"baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel", "Runnable"],
@ -236,6 +246,22 @@
"label": "gpt-4",
"name": "gpt-4"
},
{
"label": "gpt-4-turbo-preview",
"name": "gpt-4-turbo-preview"
},
{
"label": "gpt-4-0125-preview",
"name": "gpt-4-0125-preview"
},
{
"label": "gpt-4-1106-preview",
"name": "gpt-4-1106-preview"
},
{
"label": "gpt-4-vision-preview",
"name": "gpt-4-vision-preview"
},
{
"label": "gpt-4-0613",
"name": "gpt-4-0613"
@ -252,6 +278,10 @@
"label": "gpt-3.5-turbo",
"name": "gpt-3.5-turbo"
},
{
"label": "gpt-3.5-turbo-1106",
"name": "gpt-3.5-turbo-1106"
},
{
"label": "gpt-3.5-turbo-0613",
"name": "gpt-3.5-turbo-0613"

View File

@ -112,7 +112,7 @@
"data": {
"id": "openAIEmbeddings_0",
"label": "OpenAI Embeddings",
"version": 1,
"version": 2,
"name": "openAIEmbeddings",
"type": "OpenAIEmbeddings",
"baseClasses": ["OpenAIEmbeddings", "Embeddings"],
@ -126,6 +126,28 @@
"credentialNames": ["openAIApi"],
"id": "openAIEmbeddings_0-input-credential-credential"
},
{
"label": "Model Name",
"name": "modelName",
"type": "options",
"options": [
{
"label": "text-embedding-3-large",
"name": "text-embedding-3-large"
},
{
"label": "text-embedding-3-small",
"name": "text-embedding-3-small"
},
{
"label": "text-embedding-ada-002",
"name": "text-embedding-ada-002"
}
],
"default": "text-embedding-ada-002",
"optional": true,
"id": "openAIEmbeddings_0-input-modelName-options"
},
{
"label": "Strip New Lines",
"name": "stripNewLines",
@ -164,7 +186,8 @@
"stripNewLines": "",
"batchSize": "",
"timeout": "",
"basepath": ""
"basepath": "",
"modelName": "text-embedding-ada-002"
},
"outputAnchors": [
{
@ -483,7 +506,7 @@
"data": {
"id": "chatOpenAI_0",
"label": "ChatOpenAI",
"version": 2,
"version": 3,
"name": "chatOpenAI",
"type": "ChatOpenAI",
"baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel", "Runnable"],
@ -506,6 +529,14 @@
"label": "gpt-4",
"name": "gpt-4"
},
{
"label": "gpt-4-turbo-preview",
"name": "gpt-4-turbo-preview"
},
{
"label": "gpt-4-0125-preview",
"name": "gpt-4-0125-preview"
},
{
"label": "gpt-4-1106-preview",
"name": "gpt-4-1106-preview"

View File

@ -346,7 +346,7 @@
"data": {
"id": "openAIEmbeddings_0",
"label": "OpenAI Embeddings",
"version": 1,
"version": 2,
"name": "openAIEmbeddings",
"type": "OpenAIEmbeddings",
"baseClasses": ["OpenAIEmbeddings", "Embeddings"],
@ -360,6 +360,28 @@
"credentialNames": ["openAIApi"],
"id": "openAIEmbeddings_0-input-credential-credential"
},
{
"label": "Model Name",
"name": "modelName",
"type": "options",
"options": [
{
"label": "text-embedding-3-large",
"name": "text-embedding-3-large"
},
{
"label": "text-embedding-3-small",
"name": "text-embedding-3-small"
},
{
"label": "text-embedding-ada-002",
"name": "text-embedding-ada-002"
}
],
"default": "text-embedding-ada-002",
"optional": true,
"id": "openAIEmbeddings_0-input-modelName-options"
},
{
"label": "Strip New Lines",
"name": "stripNewLines",
@ -398,7 +420,8 @@
"stripNewLines": "",
"batchSize": "",
"timeout": "",
"basepath": ""
"basepath": "",
"modelName": "text-embedding-ada-002"
},
"outputAnchors": [
{
@ -430,7 +453,7 @@
"data": {
"id": "chatOpenAI_0",
"label": "ChatOpenAI",
"version": 2,
"version": 3,
"name": "chatOpenAI",
"type": "ChatOpenAI",
"baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel", "Runnable"],
@ -453,6 +476,14 @@
"label": "gpt-4",
"name": "gpt-4"
},
{
"label": "gpt-4-turbo-preview",
"name": "gpt-4-turbo-preview"
},
{
"label": "gpt-4-0125-preview",
"name": "gpt-4-0125-preview"
},
{
"label": "gpt-4-1106-preview",
"name": "gpt-4-1106-preview"
@ -620,12 +651,12 @@
"data": {
"id": "pinecone_0",
"label": "Pinecone",
"version": 1,
"version": 2,
"name": "pinecone",
"type": "Pinecone",
"baseClasses": ["Pinecone", "VectorStoreRetriever", "BaseRetriever"],
"category": "Vector Stores",
"description": "Upsert embedded data and perform similarity search upon query using Pinecone, a leading fully managed hosted vector database",
"description": "Upsert embedded data and perform similarity or mmr search using Pinecone, a leading fully managed hosted vector database",
"inputParams": [
{
"label": "Connect Credential",
@ -666,6 +697,45 @@
"additionalParams": true,
"optional": true,
"id": "pinecone_0-input-topK-number"
},
{
"label": "Search Type",
"name": "searchType",
"type": "options",
"default": "similarity",
"options": [
{
"label": "Similarity",
"name": "similarity"
},
{
"label": "Max Marginal Relevance",
"name": "mmr"
}
],
"additionalParams": true,
"optional": true,
"id": "pinecone_0-input-searchType-options"
},
{
"label": "Fetch K (for MMR Search)",
"name": "fetchK",
"description": "Number of initial documents to fetch for MMR reranking. Default to 20. Used only when the search type is MMR",
"placeholder": "20",
"type": "number",
"additionalParams": true,
"optional": true,
"id": "pinecone_0-input-fetchK-number"
},
{
"label": "Lambda (for MMR Search)",
"name": "lambda",
"description": "Number between 0 and 1 that determines the degree of diversity among the results, where 0 corresponds to maximum diversity and 1 to minimum diversity. Used only when the search type is MMR",
"placeholder": "0.5",
"type": "number",
"additionalParams": true,
"optional": true,
"id": "pinecone_0-input-lambda-number"
}
],
"inputAnchors": [
@ -690,7 +760,10 @@
"pineconeIndex": "",
"pineconeNamespace": "",
"pineconeMetadataFilter": "{\"id\":{\"$in\":[\"doc1\",\"doc2\"]}}",
"topK": ""
"topK": "",
"searchType": "similarity",
"fetchK": "",
"lambda": ""
},
"outputAnchors": [
{

View File

@ -278,7 +278,7 @@
"id": "chatOpenAI_0",
"label": "ChatOpenAI",
"name": "chatOpenAI",
"version": 2,
"version": 3,
"type": "ChatOpenAI",
"baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel"],
"category": "Chat Models",
@ -300,6 +300,22 @@
"label": "gpt-4",
"name": "gpt-4"
},
{
"label": "gpt-4-turbo-preview",
"name": "gpt-4-turbo-preview"
},
{
"label": "gpt-4-0125-preview",
"name": "gpt-4-0125-preview"
},
{
"label": "gpt-4-1106-preview",
"name": "gpt-4-1106-preview"
},
{
"label": "gpt-4-vision-preview",
"name": "gpt-4-vision-preview"
},
{
"label": "gpt-4-0613",
"name": "gpt-4-0613"
@ -316,6 +332,10 @@
"label": "gpt-3.5-turbo",
"name": "gpt-3.5-turbo"
},
{
"label": "gpt-3.5-turbo-1106",
"name": "gpt-3.5-turbo-1106"
},
{
"label": "gpt-3.5-turbo-0613",
"name": "gpt-3.5-turbo-0613"

View File

@ -281,7 +281,7 @@
"data": {
"id": "openAIEmbeddings_0",
"label": "OpenAI Embeddings",
"version": 1,
"version": 2,
"name": "openAIEmbeddings",
"type": "OpenAIEmbeddings",
"baseClasses": ["OpenAIEmbeddings", "Embeddings"],
@ -295,6 +295,28 @@
"credentialNames": ["openAIApi"],
"id": "openAIEmbeddings_0-input-credential-credential"
},
{
"label": "Model Name",
"name": "modelName",
"type": "options",
"options": [
{
"label": "text-embedding-3-large",
"name": "text-embedding-3-large"
},
{
"label": "text-embedding-3-small",
"name": "text-embedding-3-small"
},
{
"label": "text-embedding-ada-002",
"name": "text-embedding-ada-002"
}
],
"default": "text-embedding-ada-002",
"optional": true,
"id": "openAIEmbeddings_0-input-modelName-options"
},
{
"label": "Strip New Lines",
"name": "stripNewLines",
@ -333,7 +355,8 @@
"stripNewLines": "",
"batchSize": "",
"timeout": "",
"basepath": ""
"basepath": "",
"modelName": "text-embedding-ada-002"
},
"outputAnchors": [
{
@ -365,7 +388,7 @@
"data": {
"id": "chatOpenAI_0",
"label": "ChatOpenAI",
"version": 2,
"version": 3,
"name": "chatOpenAI",
"type": "ChatOpenAI",
"baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel", "Runnable"],
@ -388,6 +411,14 @@
"label": "gpt-4",
"name": "gpt-4"
},
{
"label": "gpt-4-turbo-preview",
"name": "gpt-4-turbo-preview"
},
{
"label": "gpt-4-0125-preview",
"name": "gpt-4-0125-preview"
},
{
"label": "gpt-4-1106-preview",
"name": "gpt-4-1106-preview"
@ -555,12 +586,12 @@
"data": {
"id": "pinecone_0",
"label": "Pinecone",
"version": 1,
"version": 2,
"name": "pinecone",
"type": "Pinecone",
"baseClasses": ["Pinecone", "VectorStoreRetriever", "BaseRetriever"],
"category": "Vector Stores",
"description": "Upsert embedded data and perform similarity search upon query using Pinecone, a leading fully managed hosted vector database",
"description": "Upsert embedded data and perform similarity or mmr search using Pinecone, a leading fully managed hosted vector database",
"inputParams": [
{
"label": "Connect Credential",
@ -601,6 +632,45 @@
"additionalParams": true,
"optional": true,
"id": "pinecone_0-input-topK-number"
},
{
"label": "Search Type",
"name": "searchType",
"type": "options",
"default": "similarity",
"options": [
{
"label": "Similarity",
"name": "similarity"
},
{
"label": "Max Marginal Relevance",
"name": "mmr"
}
],
"additionalParams": true,
"optional": true,
"id": "pinecone_0-input-searchType-options"
},
{
"label": "Fetch K (for MMR Search)",
"name": "fetchK",
"description": "Number of initial documents to fetch for MMR reranking. Default to 20. Used only when the search type is MMR",
"placeholder": "20",
"type": "number",
"additionalParams": true,
"optional": true,
"id": "pinecone_0-input-fetchK-number"
},
{
"label": "Lambda (for MMR Search)",
"name": "lambda",
"description": "Number between 0 and 1 that determines the degree of diversity among the results, where 0 corresponds to maximum diversity and 1 to minimum diversity. Used only when the search type is MMR",
"placeholder": "0.5",
"type": "number",
"additionalParams": true,
"optional": true,
"id": "pinecone_0-input-lambda-number"
}
],
"inputAnchors": [
@ -625,7 +695,10 @@
"pineconeIndex": "",
"pineconeNamespace": "",
"pineconeMetadataFilter": "",
"topK": ""
"topK": "",
"searchType": "similarity",
"fetchK": "",
"lambda": ""
},
"outputAnchors": [
{
@ -840,6 +913,45 @@
"additionalParams": true,
"optional": true,
"id": "supabase_0-input-topK-number"
},
{
"label": "Search Type",
"name": "searchType",
"type": "options",
"default": "similarity",
"options": [
{
"label": "Similarity",
"name": "similarity"
},
{
"label": "Max Marginal Relevance",
"name": "mmr"
}
],
"additionalParams": true,
"optional": true,
                "id": "supabase_0-input-searchType-options"
},
{
"label": "Fetch K (for MMR Search)",
"name": "fetchK",
"description": "Number of initial documents to fetch for MMR reranking. Default to 20. Used only when the search type is MMR",
"placeholder": "20",
"type": "number",
"additionalParams": true,
"optional": true,
                "id": "supabase_0-input-fetchK-number"
},
{
"label": "Lambda (for MMR Search)",
"name": "lambda",
"description": "Number between 0 and 1 that determines the degree of diversity among the results, where 0 corresponds to maximum diversity and 1 to minimum diversity. Used only when the search type is MMR",
"placeholder": "0.5",
"type": "number",
"additionalParams": true,
"optional": true,
                "id": "supabase_0-input-lambda-number"
}
],
"inputAnchors": [
@ -865,7 +977,10 @@
"tableName": "",
"queryName": "",
"supabaseMetadataFilter": "",
"topK": ""
"topK": "",
"searchType": "similarity",
"fetchK": "",
"lambda": ""
},
"outputAnchors": [
{

View File

@ -271,7 +271,7 @@
"data": {
"id": "openAIEmbeddings_1",
"label": "OpenAI Embeddings",
"version": 1,
"version": 2,
"name": "openAIEmbeddings",
"type": "OpenAIEmbeddings",
"baseClasses": ["OpenAIEmbeddings", "Embeddings"],
@ -285,6 +285,28 @@
"credentialNames": ["openAIApi"],
"id": "openAIEmbeddings_1-input-credential-credential"
},
{
"label": "Model Name",
"name": "modelName",
"type": "options",
"options": [
{
"label": "text-embedding-3-large",
"name": "text-embedding-3-large"
},
{
"label": "text-embedding-3-small",
"name": "text-embedding-3-small"
},
{
"label": "text-embedding-ada-002",
"name": "text-embedding-ada-002"
}
],
"default": "text-embedding-ada-002",
"optional": true,
"id": "openAIEmbeddings_1-input-modelName-options"
},
{
"label": "Strip New Lines",
"name": "stripNewLines",
@ -323,7 +345,8 @@
"stripNewLines": "",
"batchSize": "",
"timeout": "",
"basepath": ""
"basepath": "",
"modelName": "text-embedding-ada-002"
},
"outputAnchors": [
{
@ -355,7 +378,7 @@
"data": {
"id": "openAIEmbeddings_2",
"label": "OpenAI Embeddings",
"version": 1,
"version": 2,
"name": "openAIEmbeddings",
"type": "OpenAIEmbeddings",
"baseClasses": ["OpenAIEmbeddings", "Embeddings"],
@ -369,6 +392,28 @@
"credentialNames": ["openAIApi"],
"id": "openAIEmbeddings_2-input-credential-credential"
},
{
"label": "Model Name",
"name": "modelName",
"type": "options",
"options": [
{
"label": "text-embedding-3-large",
"name": "text-embedding-3-large"
},
{
"label": "text-embedding-3-small",
"name": "text-embedding-3-small"
},
{
"label": "text-embedding-ada-002",
"name": "text-embedding-ada-002"
}
],
"default": "text-embedding-ada-002",
"optional": true,
"id": "openAIEmbeddings_2-input-modelName-options"
},
{
"label": "Strip New Lines",
"name": "stripNewLines",
@ -407,7 +452,8 @@
"stripNewLines": "",
"batchSize": "",
"timeout": "",
"basepath": ""
"basepath": "",
"modelName": "text-embedding-ada-002"
},
"outputAnchors": [
{
@ -439,7 +485,7 @@
"data": {
"id": "chatOpenAI_0",
"label": "ChatOpenAI",
"version": 2,
"version": 3,
"name": "chatOpenAI",
"type": "ChatOpenAI",
"baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel", "Runnable"],
@ -462,6 +508,14 @@
"label": "gpt-4",
"name": "gpt-4"
},
{
"label": "gpt-4-turbo-preview",
"name": "gpt-4-turbo-preview"
},
{
"label": "gpt-4-0125-preview",
"name": "gpt-4-0125-preview"
},
{
"label": "gpt-4-1106-preview",
"name": "gpt-4-1106-preview"
@ -949,7 +1003,7 @@
"data": {
"id": "chatOpenAI_1",
"label": "ChatOpenAI",
"version": 2,
"version": 3,
"name": "chatOpenAI",
"type": "ChatOpenAI",
"baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel", "Runnable"],
@ -972,6 +1026,14 @@
"label": "gpt-4",
"name": "gpt-4"
},
{
"label": "gpt-4-turbo-preview",
"name": "gpt-4-turbo-preview"
},
{
"label": "gpt-4-0125-preview",
"name": "gpt-4-0125-preview"
},
{
"label": "gpt-4-1106-preview",
"name": "gpt-4-1106-preview"
@ -1139,7 +1201,7 @@
"data": {
"id": "chatOpenAI_2",
"label": "ChatOpenAI",
"version": 2,
"version": 3,
"name": "chatOpenAI",
"type": "ChatOpenAI",
"baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel", "Runnable"],
@ -1162,6 +1224,14 @@
"label": "gpt-4",
"name": "gpt-4"
},
{
"label": "gpt-4-turbo-preview",
"name": "gpt-4-turbo-preview"
},
{
"label": "gpt-4-0125-preview",
"name": "gpt-4-0125-preview"
},
{
"label": "gpt-4-1106-preview",
"name": "gpt-4-1106-preview"

View File

@ -279,7 +279,7 @@
"data": {
"id": "chatOpenAI_0",
"label": "ChatOpenAI",
"version": 2,
"version": 3,
"name": "chatOpenAI",
"type": "ChatOpenAI",
"baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel"],
@ -302,6 +302,22 @@
"label": "gpt-4",
"name": "gpt-4"
},
{
"label": "gpt-4-turbo-preview",
"name": "gpt-4-turbo-preview"
},
{
"label": "gpt-4-0125-preview",
"name": "gpt-4-0125-preview"
},
{
"label": "gpt-4-1106-preview",
"name": "gpt-4-1106-preview"
},
{
"label": "gpt-4-vision-preview",
"name": "gpt-4-vision-preview"
},
{
"label": "gpt-4-0613",
"name": "gpt-4-0613"
@ -318,6 +334,10 @@
"label": "gpt-3.5-turbo",
"name": "gpt-3.5-turbo"
},
{
"label": "gpt-3.5-turbo-1106",
"name": "gpt-3.5-turbo-1106"
},
{
"label": "gpt-3.5-turbo-0613",
"name": "gpt-3.5-turbo-0613"

Some files were not shown because too many files have changed in this diff Show More