Merge pull request #1419 from vinodkiran/FEATURE/Vision

FEATURE: Add Multi Modal Capabilities to Flowise
pull/1832/head
Henry Heng 2024-02-27 11:58:47 +08:00 committed by GitHub
commit a134ea85eb
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
91 changed files with 4345 additions and 353 deletions

View File

@ -125,6 +125,7 @@ Flowise 支持不同的环境变量来配置您的实例。您可以在 `package
| FLOWISE_PASSWORD | 登录密码 | 字符串 | |
| FLOWISE_FILE_SIZE_LIMIT | 上传文件大小限制 | 字符串 | 50mb |
| DEBUG | 打印组件的日志 | 布尔值 | |
| BLOB_STORAGE_PATH | 上传文件的存储位置 | 字符串 | `your-home-dir/.flowise/storage` |
| LOG_PATH | 存储日志文件的位置 | 字符串 | `your-path/Flowise/logs` |
| LOG_LEVEL | 日志的不同级别 | 枚举字符串: `error`, `info`, `verbose`, `debug` | `info` |
| APIKEY_PATH | 存储 API 密钥的位置 | 字符串 | `your-path/Flowise/packages/server` |

View File

@ -129,6 +129,7 @@ Flowise support different environment variables to configure your instance. You
| FLOWISE_PASSWORD | Password to login | String | |
| FLOWISE_FILE_SIZE_LIMIT | Upload File Size Limit | String | 50mb |
| DEBUG | Print logs from components | Boolean | |
| BLOB_STORAGE_PATH | Location where uploaded files are stored | String | `your-home-dir/.flowise/storage` |
| LOG_PATH | Location where log files are stored | String | `your-path/Flowise/logs` |
| LOG_LEVEL | Different levels of logs | Enum String: `error`, `info`, `verbose`, `debug` | `info` |
| APIKEY_PATH | Location where api keys are saved | String | `your-path/Flowise/packages/server` |

View File

@ -3,6 +3,7 @@ DATABASE_PATH=/root/.flowise
APIKEY_PATH=/root/.flowise
SECRETKEY_PATH=/root/.flowise
LOG_PATH=/root/.flowise/logs
BLOB_STORAGE_PATH=/root/.flowise/storage
# CORS_ORIGINS="*"
# IFRAME_ORIGINS="*"

View File

@ -31,5 +31,6 @@ If you like to persist your data (flows, logs, apikeys, credentials), set these
- APIKEY_PATH=/root/.flowise
- LOG_PATH=/root/.flowise/logs
- SECRETKEY_PATH=/root/.flowise
- BLOB_STORAGE_PATH=/root/.flowise/storage
Flowise also supports different environment variables to configure your instance. Read [more](https://docs.flowiseai.com/environment-variables)

View File

@ -26,6 +26,7 @@ services:
- FLOWISE_SECRETKEY_OVERWRITE=${FLOWISE_SECRETKEY_OVERWRITE}
- LOG_LEVEL=${LOG_LEVEL}
- LOG_PATH=${LOG_PATH}
- BLOB_STORAGE_PATH=${BLOB_STORAGE_PATH}
- DISABLE_FLOWISE_TELEMETRY=${DISABLE_FLOWISE_TELEMETRY}
ports:
- '${PORT}:${PORT}'

View File

@ -0,0 +1,23 @@
import { INodeParams, INodeCredential } from '../src/Interface'
// Credential definition that registers the AssemblyAI API key with Flowise,
// consumed by the AssemblyAI speech-to-text node via `credentialNames`.
class AssemblyAIApi implements INodeCredential {
    label: string
    name: string
    version: number
    inputs: INodeParams[]

    constructor() {
        this.name = 'assemblyAIApi'
        this.label = 'AssemblyAI API'
        this.version = 1.0

        // Single masked input holding the AssemblyAI API key.
        const apiKeyField: INodeParams = {
            label: 'AssemblyAI Api Key',
            name: 'assemblyAIApiKey',
            type: 'password'
        }
        this.inputs = [apiKeyField]
    }
}

module.exports = { credClass: AssemblyAIApi }

View File

@ -4,13 +4,15 @@ import { BaseChatModel } from '@langchain/core/language_models/chat_models'
import { AIMessage, BaseMessage, HumanMessage } from '@langchain/core/messages'
import { ChainValues } from '@langchain/core/utils/types'
import { AgentStep } from '@langchain/core/agents'
import { renderTemplate } from '@langchain/core/prompts'
import { renderTemplate, MessagesPlaceholder } from '@langchain/core/prompts'
import { RunnableSequence } from '@langchain/core/runnables'
import { ChatConversationalAgent } from 'langchain/agents'
import { getBaseClasses } from '../../../src/utils'
import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler'
import { FlowiseMemory, ICommonObject, IMessage, INode, INodeData, INodeParams } from '../../../src/Interface'
import { AgentExecutor } from '../../../src/agents'
import { ChatOpenAI } from '../../chatmodels/ChatOpenAI/FlowiseChatOpenAI'
import { addImagesToMessages } from '../../../src/multiModalUtils'
const DEFAULT_PREFIX = `Assistant is a large language model trained by OpenAI.
@ -81,12 +83,18 @@ class ConversationalAgent_Agents implements INode {
}
async init(nodeData: INodeData, input: string, options: ICommonObject): Promise<any> {
return prepareAgent(nodeData, { sessionId: this.sessionId, chatId: options.chatId, input }, options.chatHistory)
return prepareAgent(nodeData, options, { sessionId: this.sessionId, chatId: options.chatId, input }, options.chatHistory)
}
async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string> {
const memory = nodeData.inputs?.memory as FlowiseMemory
const executor = await prepareAgent(nodeData, { sessionId: this.sessionId, chatId: options.chatId, input }, options.chatHistory)
const executor = await prepareAgent(
nodeData,
options,
{ sessionId: this.sessionId, chatId: options.chatId, input },
options.chatHistory
)
const loggerHandler = new ConsoleCallbackHandler(options.logger)
const callbacks = await additionalCallbacks(nodeData, options)
@ -120,6 +128,7 @@ class ConversationalAgent_Agents implements INode {
const prepareAgent = async (
nodeData: INodeData,
options: ICommonObject,
flowObj: { sessionId?: string; chatId?: string; input?: string },
chatHistory: IMessage[] = []
) => {
@ -131,11 +140,6 @@ const prepareAgent = async (
const memoryKey = memory.memoryKey ? memory.memoryKey : 'chat_history'
const inputKey = memory.inputKey ? memory.inputKey : 'input'
/** Bind a stop token to the model */
const modelWithStop = model.bind({
stop: ['\nObservation']
})
const outputParser = ChatConversationalAgent.getDefaultOutputParser({
llm: model,
toolNames: tools.map((tool) => tool.name)
@ -146,6 +150,41 @@ const prepareAgent = async (
outputParser
})
if (model instanceof ChatOpenAI) {
let humanImageMessages: HumanMessage[] = []
const messageContent = addImagesToMessages(nodeData, options, model.multiModalOption)
if (messageContent?.length) {
// Change model to gpt-4-vision
model.modelName = 'gpt-4-vision-preview'
// Change default max token to higher when using gpt-4-vision
model.maxTokens = 1024
for (const msg of messageContent) {
humanImageMessages.push(new HumanMessage({ content: [msg] }))
}
// Pop the `agent_scratchpad` MessagePlaceHolder
let messagePlaceholder = prompt.promptMessages.pop() as MessagesPlaceholder
// Add the HumanMessage for images
prompt.promptMessages.push(...humanImageMessages)
// Add the `agent_scratchpad` MessagePlaceHolder back
prompt.promptMessages.push(messagePlaceholder)
} else {
// revert to previous values if image upload is empty
model.modelName = model.configuredModel
model.maxTokens = model.configuredMaxToken
}
}
/** Bind a stop token to the model */
const modelWithStop = model.bind({
stop: ['\nObservation']
})
const runnableAgent = RunnableSequence.from([
{
[inputKey]: (i: { input: string; steps: AgentStep[] }) => i.input,
@ -166,7 +205,7 @@ const prepareAgent = async (
sessionId: flowObj?.sessionId,
chatId: flowObj?.chatId,
input: flowObj?.input,
verbose: process.env.DEBUG === 'true' ? true : false
verbose: process.env.DEBUG === 'true'
})
return executor

View File

@ -8,6 +8,10 @@ import { additionalCallbacks } from '../../../src/handler'
import { FlowiseMemory, ICommonObject, IMessage, INode, INodeData, INodeParams } from '../../../src/Interface'
import { getBaseClasses } from '../../../src/utils'
import { createReactAgent } from '../../../src/agents'
import { ChatOpenAI } from '../../chatmodels/ChatOpenAI/FlowiseChatOpenAI'
import { HumanMessage } from '@langchain/core/messages'
import { addImagesToMessages } from '../../../src/multiModalUtils'
import { ChatPromptTemplate, HumanMessagePromptTemplate } from 'langchain/prompts'
class MRKLAgentChat_Agents implements INode {
label: string
@ -61,18 +65,39 @@ class MRKLAgentChat_Agents implements INode {
let tools = nodeData.inputs?.tools as Tool[]
tools = flatten(tools)
const promptWithChat = await pull<PromptTemplate>('hwchase17/react-chat')
const prompt = await pull<PromptTemplate>('hwchase17/react-chat')
let chatPromptTemplate = undefined
if (model instanceof ChatOpenAI) {
const messageContent = addImagesToMessages(nodeData, options, model.multiModalOption)
if (messageContent?.length) {
// Change model to gpt-4-vision
model.modelName = 'gpt-4-vision-preview'
// Change default max token to higher when using gpt-4-vision
model.maxTokens = 1024
const oldTemplate = prompt.template as string
chatPromptTemplate = ChatPromptTemplate.fromMessages([HumanMessagePromptTemplate.fromTemplate(oldTemplate)])
chatPromptTemplate.promptMessages.push(new HumanMessage({ content: messageContent }))
} else {
// revert to previous values if image upload is empty
model.modelName = model.configuredModel
model.maxTokens = model.configuredMaxToken
}
}
const agent = await createReactAgent({
llm: model,
tools,
prompt: promptWithChat
prompt: chatPromptTemplate ?? prompt
})
const executor = new AgentExecutor({
agent,
tools,
verbose: process.env.DEBUG === 'true' ? true : false
verbose: process.env.DEBUG === 'true'
})
const callbacks = await additionalCallbacks(nodeData, options)

View File

@ -1,14 +1,16 @@
import { ConversationChain } from 'langchain/chains'
import { ChatPromptTemplate, HumanMessagePromptTemplate, MessagesPlaceholder, SystemMessagePromptTemplate } from '@langchain/core/prompts'
import { BaseChatModel } from '@langchain/core/language_models/chat_models'
import { RunnableSequence } from '@langchain/core/runnables'
import { StringOutputParser } from '@langchain/core/output_parsers'
import { HumanMessage } from '@langchain/core/messages'
import { ConsoleCallbackHandler as LCConsoleCallbackHandler } from '@langchain/core/tracers/console'
import { ConversationChain } from 'langchain/chains'
import { FlowiseMemory, ICommonObject, IMessage, INode, INodeData, INodeParams } from '../../../src/Interface'
import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler'
import { getBaseClasses, handleEscapeCharacters } from '../../../src/utils'
import { checkInputs, Moderation, streamResponse } from '../../moderation/Moderation'
import { formatResponse } from '../../outputparsers/OutputParserHelpers'
import { addImagesToMessages } from '../../../src/multiModalUtils'
import { ChatOpenAI } from '../../chatmodels/ChatOpenAI/FlowiseChatOpenAI'
import { FlowiseMemory, ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler'
import { getBaseClasses, handleEscapeCharacters } from '../../../src/utils'
let systemMessage = `The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.`
const inputKey = 'input'
@ -86,12 +88,14 @@ class ConversationChain_Chains implements INode {
}
async init(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
const chain = prepareChain(nodeData, this.sessionId, options.chatHistory)
const chain = prepareChain(nodeData, options, this.sessionId)
return chain
}
async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string | object> {
const memory = nodeData.inputs?.memory
const chain = prepareChain(nodeData, options, this.sessionId)
const moderations = nodeData.inputs?.inputModeration as Moderation[]
if (moderations && moderations.length > 0) {
@ -105,8 +109,6 @@ class ConversationChain_Chains implements INode {
}
}
const chain = prepareChain(nodeData, this.sessionId, options.chatHistory)
const loggerHandler = new ConsoleCallbackHandler(options.logger)
const additionalCallback = await additionalCallbacks(nodeData, options)
@ -143,7 +145,7 @@ class ConversationChain_Chains implements INode {
}
}
const prepareChatPrompt = (nodeData: INodeData) => {
const prepareChatPrompt = (nodeData: INodeData, humanImageMessages: HumanMessage[]) => {
const memory = nodeData.inputs?.memory as FlowiseMemory
const prompt = nodeData.inputs?.systemMessagePrompt as string
const chatPromptTemplate = nodeData.inputs?.chatPromptTemplate as ChatPromptTemplate
@ -151,12 +153,10 @@ const prepareChatPrompt = (nodeData: INodeData) => {
if (chatPromptTemplate && chatPromptTemplate.promptMessages.length) {
const sysPrompt = chatPromptTemplate.promptMessages[0]
const humanPrompt = chatPromptTemplate.promptMessages[chatPromptTemplate.promptMessages.length - 1]
const chatPrompt = ChatPromptTemplate.fromMessages([
sysPrompt,
new MessagesPlaceholder(memory.memoryKey ?? 'chat_history'),
humanPrompt
])
const messages = [sysPrompt, new MessagesPlaceholder(memory.memoryKey ?? 'chat_history'), humanPrompt]
if (humanImageMessages.length) messages.push(...humanImageMessages)
const chatPrompt = ChatPromptTemplate.fromMessages(messages)
if ((chatPromptTemplate as any).promptValues) {
// @ts-ignore
chatPrompt.promptValues = (chatPromptTemplate as any).promptValues
@ -165,21 +165,46 @@ const prepareChatPrompt = (nodeData: INodeData) => {
return chatPrompt
}
const chatPrompt = ChatPromptTemplate.fromMessages([
const messages = [
SystemMessagePromptTemplate.fromTemplate(prompt ? prompt : systemMessage),
new MessagesPlaceholder(memory.memoryKey ?? 'chat_history'),
HumanMessagePromptTemplate.fromTemplate(`{${inputKey}}`)
])
]
if (humanImageMessages.length) messages.push(...(humanImageMessages as any[]))
const chatPrompt = ChatPromptTemplate.fromMessages(messages)
return chatPrompt
}
const prepareChain = (nodeData: INodeData, sessionId?: string, chatHistory: IMessage[] = []) => {
const model = nodeData.inputs?.model as BaseChatModel
const prepareChain = (nodeData: INodeData, options: ICommonObject, sessionId?: string) => {
const chatHistory = options.chatHistory
let model = nodeData.inputs?.model as ChatOpenAI
const memory = nodeData.inputs?.memory as FlowiseMemory
const memoryKey = memory.memoryKey ?? 'chat_history'
const chatPrompt = prepareChatPrompt(nodeData)
let humanImageMessages: HumanMessage[] = []
if (model instanceof ChatOpenAI) {
const messageContent = addImagesToMessages(nodeData, options, model.multiModalOption)
if (messageContent?.length) {
// Change model to gpt-4-vision
model.modelName = 'gpt-4-vision-preview'
// Change default max token to higher when using gpt-4-vision
model.maxTokens = 1024
for (const msg of messageContent) {
humanImageMessages.push(new HumanMessage({ content: [msg] }))
}
} else {
// revert to previous values if image upload is empty
model.modelName = model.configuredModel
model.maxTokens = model.configuredMaxToken
}
}
const chatPrompt = prepareChatPrompt(nodeData, humanImageMessages)
let promptVariables = {}
const promptValuesRaw = (chatPrompt as any).promptValues
if (promptValuesRaw) {
@ -203,7 +228,7 @@ const prepareChain = (nodeData: INodeData, sessionId?: string, chatHistory: IMes
},
...promptVariables
},
chatPrompt,
prepareChatPrompt(nodeData, humanImageMessages),
model,
new StringOutputParser()
])

View File

@ -1,5 +1,6 @@
import { BaseLanguageModel, BaseLanguageModelCallOptions } from '@langchain/core/language_models/base'
import { BaseLLMOutputParser, BaseOutputParser } from '@langchain/core/output_parsers'
import { ChatPromptTemplate, FewShotPromptTemplate, PromptTemplate, HumanMessagePromptTemplate } from '@langchain/core/prompts'
import { OutputFixingParser } from 'langchain/output_parsers'
import { LLMChain } from 'langchain/chains'
import { ICommonObject, INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface'
@ -7,6 +8,9 @@ import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from
import { getBaseClasses, handleEscapeCharacters } from '../../../src/utils'
import { checkInputs, Moderation, streamResponse } from '../../moderation/Moderation'
import { formatResponse, injectOutputParser } from '../../outputparsers/OutputParserHelpers'
import { ChatOpenAI } from '../../chatmodels/ChatOpenAI/FlowiseChatOpenAI'
import { addImagesToMessages } from '../../../src/multiModalUtils'
import { HumanMessage } from 'langchain/schema'
class LLMChain_Chains implements INode {
label: string
@ -160,12 +164,7 @@ const runPrediction = async (
const socketIO = isStreaming ? options.socketIO : undefined
const socketIOClientId = isStreaming ? options.socketIOClientId : ''
const moderations = nodeData.inputs?.inputModeration as Moderation[]
/**
* Apply string transformation to reverse converted special chars:
* FROM: { "value": "hello i am benFLOWISE_NEWLINEFLOWISE_NEWLINEFLOWISE_TABhow are you?" }
* TO: { "value": "hello i am ben\n\n\thow are you?" }
*/
const promptValues = handleEscapeCharacters(promptValuesRaw, true)
let model = nodeData.inputs?.model as ChatOpenAI
if (moderations && moderations.length > 0) {
try {
@ -178,6 +177,46 @@ const runPrediction = async (
}
}
/**
* Apply string transformation to reverse converted special chars:
* FROM: { "value": "hello i am benFLOWISE_NEWLINEFLOWISE_NEWLINEFLOWISE_TABhow are you?" }
* TO: { "value": "hello i am ben\n\n\thow are you?" }
*/
const promptValues = handleEscapeCharacters(promptValuesRaw, true)
const messageContent = addImagesToMessages(nodeData, options, model.multiModalOption)
if (chain.llm instanceof ChatOpenAI) {
const chatOpenAI = chain.llm as ChatOpenAI
if (messageContent?.length) {
// Change model to gpt-4-vision && max token to higher when using gpt-4-vision
chatOpenAI.modelName = 'gpt-4-vision-preview'
chatOpenAI.maxTokens = 1024
// Add image to the message
if (chain.prompt instanceof PromptTemplate) {
const existingPromptTemplate = chain.prompt.template as string
let newChatPromptTemplate = ChatPromptTemplate.fromMessages([
HumanMessagePromptTemplate.fromTemplate(existingPromptTemplate)
])
newChatPromptTemplate.promptMessages.push(new HumanMessage({ content: messageContent }))
chain.prompt = newChatPromptTemplate
} else if (chain.prompt instanceof ChatPromptTemplate) {
chain.prompt.promptMessages.push(new HumanMessage({ content: messageContent }))
} else if (chain.prompt instanceof FewShotPromptTemplate) {
let existingFewShotPromptTemplate = chain.prompt.examplePrompt.template as string
let newFewShotPromptTemplate = ChatPromptTemplate.fromMessages([
HumanMessagePromptTemplate.fromTemplate(existingFewShotPromptTemplate)
])
newFewShotPromptTemplate.promptMessages.push(new HumanMessage({ content: messageContent }))
// @ts-ignore
chain.prompt.examplePrompt = newFewShotPromptTemplate
}
} else {
// revert to previous values if image upload is empty
chatOpenAI.modelName = model.configuredModel
chatOpenAI.maxTokens = model.configuredMaxToken
}
}
if (promptValues && inputVariables.length > 0) {
let seen: string[] = []

View File

@ -1,8 +1,10 @@
import { ChatOpenAI, OpenAIChatInput } from '@langchain/openai'
import type { ClientOptions } from 'openai'
import { ChatOpenAI as LangchainChatOpenAI, OpenAIChatInput, AzureOpenAIInput, LegacyOpenAIInput } from '@langchain/openai'
import { BaseCache } from '@langchain/core/caches'
import { BaseLLMParams } from '@langchain/core/language_models/llms'
import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
import { BaseChatModelParams } from '@langchain/core/language_models/chat_models'
import { ICommonObject, IMultiModalOption, INode, INodeData, INodeParams } from '../../../src/Interface'
import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
import { ChatOpenAI } from './FlowiseChatOpenAI'
class ChatOpenAI_ChatModels implements INode {
label: string
@ -19,12 +21,12 @@ class ChatOpenAI_ChatModels implements INode {
constructor() {
this.label = 'ChatOpenAI'
this.name = 'chatOpenAI'
this.version = 4.0
this.version = 5.0
this.type = 'ChatOpenAI'
this.icon = 'openai.svg'
this.category = 'Chat Models'
this.description = 'Wrapper around OpenAI large language models that use the Chat endpoint'
this.baseClasses = [this.type, ...getBaseClasses(ChatOpenAI)]
this.baseClasses = [this.type, ...getBaseClasses(LangchainChatOpenAI)]
this.credential = {
label: 'Connect Credential',
name: 'credential',
@ -168,6 +170,38 @@ class ChatOpenAI_ChatModels implements INode {
type: 'json',
optional: true,
additionalParams: true
},
{
label: 'Allow Image Uploads',
name: 'allowImageUploads',
type: 'boolean',
description:
'Automatically uses gpt-4-vision-preview when image is being uploaded from chat. Only works with LLMChain, Conversation Chain, ReAct Agent, and Conversational Agent',
default: false,
optional: true
},
{
label: 'Image Resolution',
description: 'This parameter controls the resolution in which the model views the image.',
name: 'imageResolution',
type: 'options',
options: [
{
label: 'Low',
name: 'low'
},
{
label: 'High',
name: 'high'
},
{
label: 'Auto',
name: 'auto'
}
],
default: 'low',
optional: false,
additionalParams: true
}
]
}
@ -184,12 +218,17 @@ class ChatOpenAI_ChatModels implements INode {
const basePath = nodeData.inputs?.basepath as string
const baseOptions = nodeData.inputs?.baseOptions
const allowImageUploads = nodeData.inputs?.allowImageUploads as boolean
const imageResolution = nodeData.inputs?.imageResolution as string
const credentialData = await getCredentialData(nodeData.credential ?? '', options)
const openAIApiKey = getCredentialParam('openAIApiKey', credentialData, nodeData)
const cache = nodeData.inputs?.cache as BaseCache
const obj: Partial<OpenAIChatInput> & BaseLLMParams & { openAIApiKey?: string } = {
const obj: Partial<OpenAIChatInput> &
Partial<AzureOpenAIInput> &
BaseChatModelParams & { configuration?: ClientOptions & LegacyOpenAIInput; multiModalOption?: IMultiModalOption } = {
temperature: parseFloat(temperature),
modelName,
openAIApiKey,
@ -212,10 +251,24 @@ class ChatOpenAI_ChatModels implements INode {
throw new Error("Invalid JSON in the ChatOpenAI's BaseOptions: " + exception)
}
}
const model = new ChatOpenAI(obj, {
basePath,
baseOptions: parsedBaseOptions
})
if (basePath || parsedBaseOptions) {
obj.configuration = {
baseURL: basePath,
baseOptions: parsedBaseOptions
}
}
const multiModalOption: IMultiModalOption = {
image: {
allowImageUploads: allowImageUploads ?? false,
imageResolution
}
}
obj.multiModalOption = multiModalOption
const model = new ChatOpenAI(nodeData.id, obj)
return model
}
}

View File

@ -0,0 +1,38 @@
import type { ClientOptions } from 'openai'
import {
ChatOpenAI as LangchainChatOpenAI,
OpenAIChatInput,
LegacyOpenAIInput,
AzureOpenAIInput,
ChatOpenAICallOptions
} from '@langchain/openai'
import { BaseChatModelParams } from '@langchain/core/language_models/chat_models'
import { IMultiModalOption } from '../../../src'
import { BaseMessageLike, LLMResult } from 'langchain/schema'
import { Callbacks } from '@langchain/core/callbacks/manager'
/**
 * Flowise wrapper around LangChain's ChatOpenAI.
 *
 * Captures the model name and max-token value the node was configured with
 * (`configuredModel` / `configuredMaxToken`) so chains that temporarily switch
 * the instance to `gpt-4-vision-preview` for image uploads can restore the
 * original values when no image is present. Also carries the node id and the
 * node's multi-modal options on the model instance.
 */
export class ChatOpenAI extends LangchainChatOpenAI {
    // Model name chosen at construction; chains revert to this after a vision override.
    configuredModel: string
    // Max tokens chosen at construction; chains revert to this after a vision override.
    configuredMaxToken?: number
    multiModalOption?: IMultiModalOption
    id: string

    constructor(
        id: string,
        fields?: Partial<OpenAIChatInput> &
            Partial<AzureOpenAIInput> &
            BaseChatModelParams & { configuration?: ClientOptions & LegacyOpenAIInput; multiModalOption?: IMultiModalOption },
        /** @deprecated */
        configuration?: ClientOptions & LegacyOpenAIInput
    ) {
        super(fields, configuration)
        this.id = id
        this.multiModalOption = fields?.multiModalOption
        this.configuredModel = fields?.modelName ?? 'gpt-3.5-turbo'
        this.configuredMaxToken = fields?.maxTokens
    }

    // NOTE(review): the previous `generate` override was a pure pass-through
    // (identical arguments delegated straight to super.generate) and has been
    // removed — callers observe identical behavior via inheritance.
}

View File

@ -0,0 +1,33 @@
import { INode, INodeParams } from '../../../src/Interface'
// Flowise node exposing AssemblyAI as a speech-to-text provider.
// It declares no inputs of its own; it only wires up the AssemblyAI credential
// so the server-side transcription code can authenticate.
class AssemblyAI_SpeechToText implements INode {
    label: string
    name: string
    version: number
    description: string
    type: string
    icon: string
    category: string
    baseClasses: string[]
    inputs?: INodeParams[]
    credential: INodeParams

    constructor() {
        this.name = 'assemblyAI'
        this.label = 'AssemblyAI'
        this.type = 'AssemblyAI'
        this.version = 1.0
        this.icon = 'assemblyai.png'
        this.category = 'SpeechToText'
        this.baseClasses = [this.type]
        this.inputs = []

        // Reference to the AssemblyAI API key credential definition.
        const credentialField: INodeParams = {
            label: 'Connect Credential',
            name: 'credential',
            type: 'credential',
            credentialNames: ['assemblyAIApi']
        }
        this.credential = credentialField
    }
}

module.exports = { nodeClass: AssemblyAI_SpeechToText }

Binary file not shown.

After

Width:  |  Height:  |  Size: 8.5 KiB

View File

@ -46,6 +46,7 @@
"@upstash/redis": "1.22.1",
"@zilliz/milvus2-sdk-node": "^2.2.24",
"apify-client": "^2.7.1",
"assemblyai": "^4.2.2",
"axios": "1.6.2",
"cheerio": "^1.0.0-rc.12",
"chromadb": "^1.5.11",

View File

@ -21,6 +21,8 @@ export type CommonType = string | number | boolean | undefined | null
export type MessageType = 'apiMessage' | 'userMessage'
export type ImageDetail = 'auto' | 'low' | 'high'
/**
* Others
*/
@ -146,6 +148,33 @@ export interface IUsedTool {
toolOutput: string | object
}
// A single file uploaded from the chat UI (image or audio).
// `data` holds the inline payload (e.g. a base64 data URL) and may be absent
// for 'stored-file' uploads that are persisted on the server instead.
export interface IFileUpload {
    data?: string
    type: string
    name: string
    mime: string
}

// Per-modality upload settings attached to a chat model node.
// Consumers read `image.allowImageUploads` and `image.imageResolution`
// (see addImagesToMessages); values are loosely typed on purpose.
export interface IMultiModalOption {
    image?: Record<string, any>
    audio?: Record<string, any>
}

// Text part of an OpenAI-style multi-part message content array.
export type MessageContentText = {
    type: 'text'
    text: string
}

// Image part of an OpenAI-style multi-part message content array.
// `image_url` is either a bare URL string or an object carrying the URL
// plus an optional resolution hint (`detail`).
export type MessageContentImageUrl = {
    type: 'image_url'
    image_url:
        | string
        | {
              url: string
              detail?: ImageDetail
          }
}
/**
* Classes
*/

View File

@ -6,3 +6,4 @@ dotenv.config({ path: envPath, override: true })
export * from './Interface'
export * from './utils'
export * from './speechToText'

View File

@ -0,0 +1,48 @@
import { ICommonObject, IFileUpload, IMultiModalOption, INodeData, MessageContentImageUrl } from './Interface'
import { ChatOpenAI as LangchainChatOpenAI } from 'langchain/chat_models/openai'
import path from 'path'
import { getStoragePath } from './utils'
import fs from 'fs'
/**
 * Build OpenAI-style `image_url` message content parts for images uploaded in chat.
 *
 * Active only when the flow's model is a (LangChain) ChatOpenAI instance and the
 * node's multi-modal options allow image uploads. 'stored-file' uploads are read
 * from blob storage and inlined as base64 data URLs; other uploads use their
 * inline `data` payload as-is.
 *
 * @param nodeData         node whose `inputs.model` is inspected
 * @param options          common flow options; `uploads` carries the files,
 *                         `chatflowid`/`chatId` locate stored files on disk
 * @param multiModalOption image settings (`allowImageUploads`, `imageResolution`)
 * @returns one content part per usable image upload (possibly empty)
 */
export const addImagesToMessages = (
    nodeData: INodeData,
    options: ICommonObject,
    multiModalOption?: IMultiModalOption
): MessageContentImageUrl[] => {
    const imageContent: MessageContentImageUrl[] = []
    const model = nodeData.inputs?.model

    if (model instanceof LangchainChatOpenAI && multiModalOption) {
        // Image Uploaded
        if (multiModalOption.image && multiModalOption.image.allowImageUploads && options?.uploads && options?.uploads.length > 0) {
            const imageUploads = getImageUploads(options.uploads)
            for (const upload of imageUploads) {
                let bf = upload.data
                if (upload.type === 'stored-file') {
                    const filePath = path.join(getStoragePath(), options.chatflowid, options.chatId, upload.name)

                    // as the image is stored in the server, read the file and convert it to base64
                    const contents = fs.readFileSync(filePath)
                    bf = 'data:' + upload.mime + ';base64,' + contents.toString('base64')
                }

                // BUGFIX: the push previously sat inside the 'stored-file' branch,
                // silently dropping uploads whose image arrives inline via
                // `upload.data` (the `bf = upload.data` initializer was dead code).
                if (!bf) continue
                imageContent.push({
                    type: 'image_url',
                    image_url: {
                        url: bf,
                        detail: multiModalOption.image.imageResolution ?? 'low'
                    }
                })
            }
        }
    }
    return imageContent
}
// Select only the audio files from a list of chat uploads (by MIME prefix).
export const getAudioUploads = (uploads: IFileUpload[]) => {
    const isAudio = (upload: IFileUpload): boolean => upload.mime.startsWith('audio/')
    return uploads.filter(isAudio)
}
// Select only the image files from a list of chat uploads (by MIME prefix).
export const getImageUploads = (uploads: IFileUpload[]) => {
    const isImage = (upload: IFileUpload): boolean => upload.mime.startsWith('image/')
    return uploads.filter(isImage)
}

View File

@ -0,0 +1,51 @@
import { ICommonObject, IFileUpload } from './Interface'
import { getCredentialData, getStoragePath } from './utils'
import { type ClientOptions, OpenAIClient } from '@langchain/openai'
import fs from 'fs'
import path from 'path'
import { AssemblyAI } from 'assemblyai'
/**
 * Transcribe a recorded audio upload into text.
 *
 * Reads the stored audio file from blob storage and sends it to the provider
 * selected by `speechToTextConfig.name`:
 *  - 'openAIWhisper'        → OpenAI `whisper-1` transcription endpoint
 *  - 'assemblyAiTranscribe' → AssemblyAI transcription
 *
 * @param upload             recorded audio file metadata (name locates the file)
 * @param speechToTextConfig provider name, credential id and provider options
 * @param options            common flow options; `chatflowid`/`chatId` locate the file
 * @returns the transcription text, or undefined when the provider returned no text
 * @throws Error when no speech-to-text provider is configured at all
 */
export const convertSpeechToText = async (upload: IFileUpload, speechToTextConfig: ICommonObject, options: ICommonObject) => {
    if (speechToTextConfig) {
        const credentialId = speechToTextConfig.credentialId as string
        const credentialData = await getCredentialData(credentialId ?? '', options)
        // Audio is persisted under <storage>/<chatflowid>/<chatId>/<name>
        const filePath = path.join(getStoragePath(), options.chatflowid, options.chatId, upload.name)
        const audio_file = fs.createReadStream(filePath)

        if (speechToTextConfig.name === 'openAIWhisper') {
            const openAIClientOptions: ClientOptions = {
                apiKey: credentialData.openAIApiKey
            }
            const openAIClient = new OpenAIClient(openAIClientOptions)
            // Optional config values (language/temperature/prompt) are passed
            // through; undefined values let the API apply its defaults.
            const transcription = await openAIClient.audio.transcriptions.create({
                file: audio_file,
                model: 'whisper-1',
                language: speechToTextConfig?.language,
                temperature: speechToTextConfig?.temperature ? parseFloat(speechToTextConfig.temperature) : undefined,
                prompt: speechToTextConfig?.prompt
            })
            if (transcription?.text) {
                return transcription.text
            }
        } else if (speechToTextConfig.name === 'assemblyAiTranscribe') {
            const client = new AssemblyAI({
                apiKey: credentialData.assemblyAIApiKey
            })

            const params = {
                audio: audio_file,
                speaker_labels: false
            }
            const transcription = await client.transcripts.transcribe(params)
            if (transcription?.text) {
                return transcription.text
            }
        }
        // NOTE(review): an unrecognized provider name falls through to the final
        // `return undefined` without surfacing an error — confirm this is intended.
    } else {
        throw new Error('Speech to text is not selected, but found a recorded audio file. Please fix the chain.')
    }
    return undefined
}

View File

@ -770,3 +770,10 @@ export const prepareSandboxVars = (variables: IVariable[]) => {
}
return vars
}
/**
 * Resolve the root directory used for blob/file storage.
 * Honors the BLOB_STORAGE_PATH environment variable when set; otherwise
 * defaults to `<user-home>/.flowise/storage`.
 */
export const getStoragePath = (): string => {
    const configuredPath = process.env.BLOB_STORAGE_PATH
    if (configuredPath) {
        return path.join(configuredPath)
    }
    return path.join(getUserHome(), '.flowise', 'storage')
}

View File

@ -5,6 +5,7 @@ PORT=3000
# APIKEY_PATH=/your_api_key_path/.flowise
# SECRETKEY_PATH=/your_api_key_path/.flowise
# LOG_PATH=/your_log_path/.flowise/logs
# BLOB_STORAGE_PATH=/your_database_path/.flowise/storage
# NUMBER_OF_PROXIES= 1

View File

@ -90,7 +90,7 @@
"data": {
"id": "chatOpenAI_1",
"label": "ChatOpenAI",
"version": 4,
"version": 5,
"name": "chatOpenAI",
"type": "ChatOpenAI",
"baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel"],
@ -237,6 +237,39 @@
"optional": true,
"additionalParams": true,
"id": "chatOpenAI_1-input-baseOptions-json"
},
{
"label": "Allow Image Uploads",
"name": "allowImageUploads",
"type": "boolean",
"description": "Automatically uses gpt-4-vision-preview when image is being uploaded from chat. Only works with LLMChain, Conversation Chain, ReAct Agent, and Conversational Agent",
"default": false,
"optional": true,
"id": "chatOpenAI_1-input-allowImageUploads-boolean"
},
{
"label": "Image Resolution",
"description": "This parameter controls the resolution in which the model views the image.",
"name": "imageResolution",
"type": "options",
"options": [
{
"label": "Low",
"name": "low"
},
{
"label": "High",
"name": "high"
},
{
"label": "Auto",
"name": "auto"
}
],
"default": "low",
"optional": false,
"additionalParams": true,
"id": "chatOpenAI_1-input-imageResolution-options"
}
],
"inputAnchors": [
@ -257,7 +290,9 @@
"presencePenalty": "",
"timeout": "",
"basepath": "",
"baseOptions": ""
"baseOptions": "",
"allowImageUploads": true,
"imageResolution": "low"
},
"outputAnchors": [
{
@ -437,7 +472,7 @@
"data": {
"id": "chatOpenAI_2",
"label": "ChatOpenAI",
"version": 4,
"version": 5,
"name": "chatOpenAI",
"type": "ChatOpenAI",
"baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel"],
@ -584,6 +619,39 @@
"optional": true,
"additionalParams": true,
"id": "chatOpenAI_2-input-baseOptions-json"
},
{
"label": "Allow Image Uploads",
"name": "allowImageUploads",
"type": "boolean",
"description": "Automatically uses gpt-4-vision-preview when image is being uploaded from chat. Only works with LLMChain, Conversation Chain, ReAct Agent, and Conversational Agent",
"default": false,
"optional": true,
"id": "chatOpenAI_2-input-allowImageUploads-boolean"
},
{
"label": "Image Resolution",
"description": "This parameter controls the resolution in which the model views the image.",
"name": "imageResolution",
"type": "options",
"options": [
{
"label": "Low",
"name": "low"
},
{
"label": "High",
"name": "high"
},
{
"label": "Auto",
"name": "auto"
}
],
"default": "low",
"optional": false,
"additionalParams": true,
"id": "chatOpenAI_2-input-imageResolution-options"
}
],
"inputAnchors": [
@ -604,7 +672,9 @@
"presencePenalty": "",
"timeout": "",
"basepath": "",
"baseOptions": ""
"baseOptions": "",
"allowImageUploads": true,
"imageResolution": "low"
},
"outputAnchors": [
{

View File

@ -398,7 +398,7 @@
"data": {
"id": "chatOpenAI_2",
"label": "ChatOpenAI",
"version": 4,
"version": 5,
"name": "chatOpenAI",
"type": "ChatOpenAI",
"baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel"],
@ -545,6 +545,39 @@
"optional": true,
"additionalParams": true,
"id": "chatOpenAI_2-input-baseOptions-json"
},
{
"label": "Allow Image Uploads",
"name": "allowImageUploads",
"type": "boolean",
"description": "Automatically uses gpt-4-vision-preview when image is being uploaded from chat. Only works with LLMChain, Conversation Chain, ReAct Agent, and Conversational Agent",
"default": false,
"optional": true,
"id": "chatOpenAI_2-input-allowImageUploads-boolean"
},
{
"label": "Image Resolution",
"description": "This parameter controls the resolution in which the model views the image.",
"name": "imageResolution",
"type": "options",
"options": [
{
"label": "Low",
"name": "low"
},
{
"label": "High",
"name": "high"
},
{
"label": "Auto",
"name": "auto"
}
],
"default": "low",
"optional": false,
"additionalParams": true,
"id": "chatOpenAI_2-input-imageResolution-options"
}
],
"inputAnchors": [
@ -565,7 +598,9 @@
"presencePenalty": "",
"timeout": "",
"basepath": "",
"baseOptions": ""
"baseOptions": "",
"allowImageUploads": true,
"imageResolution": "low"
},
"outputAnchors": [
{
@ -597,7 +632,7 @@
"data": {
"id": "chatOpenAI_1",
"label": "ChatOpenAI",
"version": 4,
"version": 5,
"name": "chatOpenAI",
"type": "ChatOpenAI",
"baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel"],
@ -744,6 +779,39 @@
"optional": true,
"additionalParams": true,
"id": "chatOpenAI_1-input-baseOptions-json"
},
{
"label": "Allow Image Uploads",
"name": "allowImageUploads",
"type": "boolean",
"description": "Automatically uses gpt-4-vision-preview when image is being uploaded from chat. Only works with LLMChain, Conversation Chain, ReAct Agent, and Conversational Agent",
"default": false,
"optional": true,
"id": "chatOpenAI_1-input-allowImageUploads-boolean"
},
{
"label": "Image Resolution",
"description": "This parameter controls the resolution in which the model views the image.",
"name": "imageResolution",
"type": "options",
"options": [
{
"label": "Low",
"name": "low"
},
{
"label": "High",
"name": "high"
},
{
"label": "Auto",
"name": "auto"
}
],
"default": "low",
"optional": false,
"additionalParams": true,
"id": "chatOpenAI_1-input-imageResolution-options"
}
],
"inputAnchors": [
@ -764,7 +832,9 @@
"presencePenalty": "",
"timeout": "",
"basepath": "",
"baseOptions": ""
"baseOptions": "",
"allowImageUploads": true,
"imageResolution": "low"
},
"outputAnchors": [
{
@ -796,7 +866,7 @@
"data": {
"id": "chatOpenAI_3",
"label": "ChatOpenAI",
"version": 4,
"version": 5,
"name": "chatOpenAI",
"type": "ChatOpenAI",
"baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel"],
@ -943,6 +1013,39 @@
"optional": true,
"additionalParams": true,
"id": "chatOpenAI_3-input-baseOptions-json"
},
{
"label": "Allow Image Uploads",
"name": "allowImageUploads",
"type": "boolean",
"description": "Automatically uses gpt-4-vision-preview when image is being uploaded from chat. Only works with LLMChain, Conversation Chain, ReAct Agent, and Conversational Agent",
"default": false,
"optional": true,
"id": "chatOpenAI_3-input-allowImageUploads-boolean"
},
{
"label": "Image Resolution",
"description": "This parameter controls the resolution in which the model views the image.",
"name": "imageResolution",
"type": "options",
"options": [
{
"label": "Low",
"name": "low"
},
{
"label": "High",
"name": "high"
},
{
"label": "Auto",
"name": "auto"
}
],
"default": "low",
"optional": false,
"additionalParams": true,
"id": "chatOpenAI_3-input-imageResolution-options"
}
],
"inputAnchors": [
@ -963,7 +1066,9 @@
"presencePenalty": "",
"timeout": "",
"basepath": "",
"baseOptions": ""
"baseOptions": "",
"allowImageUploads": true,
"imageResolution": "low"
},
"outputAnchors": [
{

View File

@ -181,7 +181,7 @@
"data": {
"id": "chatOpenAI_0",
"label": "ChatOpenAI",
"version": 4,
"version": 5,
"name": "chatOpenAI",
"type": "ChatOpenAI",
"baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel", "Runnable"],
@ -334,6 +334,39 @@
"optional": true,
"additionalParams": true,
"id": "chatOpenAI_0-input-baseOptions-json"
},
{
"label": "Allow Image Uploads",
"name": "allowImageUploads",
"type": "boolean",
"description": "Automatically uses gpt-4-vision-preview when image is being uploaded from chat. Only works with LLMChain, Conversation Chain, ReAct Agent, and Conversational Agent",
"default": false,
"optional": true,
"id": "chatOpenAI_0-input-allowImageUploads-boolean"
},
{
"label": "Image Resolution",
"description": "This parameter controls the resolution in which the model views the image.",
"name": "imageResolution",
"type": "options",
"options": [
{
"label": "Low",
"name": "low"
},
{
"label": "High",
"name": "high"
},
{
"label": "Auto",
"name": "auto"
}
],
"default": "low",
"optional": false,
"additionalParams": true,
"id": "chatOpenAI_0-input-imageResolution-options"
}
],
"inputAnchors": [
@ -355,7 +388,9 @@
"presencePenalty": "",
"timeout": "",
"basepath": "",
"baseOptions": ""
"baseOptions": "",
"allowImageUploads": true,
"imageResolution": "low"
},
"outputAnchors": [
{

View File

@ -177,7 +177,7 @@
"data": {
"id": "chatOpenAI_0",
"label": "ChatOpenAI",
"version": 4,
"version": 5,
"name": "chatOpenAI",
"type": "ChatOpenAI",
"baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel"],
@ -324,6 +324,39 @@
"optional": true,
"additionalParams": true,
"id": "chatOpenAI_0-input-baseOptions-json"
},
{
"label": "Allow Image Uploads",
"name": "allowImageUploads",
"type": "boolean",
"description": "Automatically uses gpt-4-vision-preview when image is being uploaded from chat. Only works with LLMChain, Conversation Chain, ReAct Agent, and Conversational Agent",
"default": false,
"optional": true,
"id": "chatOpenAI_0-input-allowImageUploads-boolean"
},
{
"label": "Image Resolution",
"description": "This parameter controls the resolution in which the model views the image.",
"name": "imageResolution",
"type": "options",
"options": [
{
"label": "Low",
"name": "low"
},
{
"label": "High",
"name": "high"
},
{
"label": "Auto",
"name": "auto"
}
],
"default": "low",
"optional": false,
"additionalParams": true,
"id": "chatOpenAI_0-input-imageResolution-options"
}
],
"inputAnchors": [
@ -344,7 +377,9 @@
"presencePenalty": "",
"timeout": "",
"basepath": "",
"baseOptions": ""
"baseOptions": "",
"allowImageUploads": true,
"imageResolution": "low"
},
"outputAnchors": [
{

View File

@ -253,7 +253,7 @@
"data": {
"id": "chatOpenAI_0",
"label": "ChatOpenAI",
"version": 4,
"version": 5,
"name": "chatOpenAI",
"type": "ChatOpenAI",
"baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel"],
@ -400,6 +400,39 @@
"optional": true,
"additionalParams": true,
"id": "chatOpenAI_0-input-baseOptions-json"
},
{
"label": "Allow Image Uploads",
"name": "allowImageUploads",
"type": "boolean",
"description": "Automatically uses gpt-4-vision-preview when image is being uploaded from chat. Only works with LLMChain, Conversation Chain, ReAct Agent, and Conversational Agent",
"default": false,
"optional": true,
"id": "chatOpenAI_0-input-allowImageUploads-boolean"
},
{
"label": "Image Resolution",
"description": "This parameter controls the resolution in which the model views the image.",
"name": "imageResolution",
"type": "options",
"options": [
{
"label": "Low",
"name": "low"
},
{
"label": "High",
"name": "high"
},
{
"label": "Auto",
"name": "auto"
}
],
"default": "low",
"optional": false,
"additionalParams": true,
"id": "chatOpenAI_0-input-imageResolution-options"
}
],
"inputAnchors": [
@ -420,7 +453,9 @@
"presencePenalty": "",
"timeout": "",
"basepath": "",
"baseOptions": ""
"baseOptions": "",
"allowImageUploads": true,
"imageResolution": "low"
},
"outputAnchors": [
{

View File

@ -346,7 +346,7 @@
"data": {
"id": "chatOpenAI_0",
"label": "ChatOpenAI",
"version": 4,
"version": 5,
"name": "chatOpenAI",
"type": "ChatOpenAI",
"baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel", "Runnable"],
@ -499,6 +499,39 @@
"optional": true,
"additionalParams": true,
"id": "chatOpenAI_0-input-baseOptions-json"
},
{
"label": "Allow Image Uploads",
"name": "allowImageUploads",
"type": "boolean",
"description": "Automatically uses gpt-4-vision-preview when image is being uploaded from chat. Only works with LLMChain, Conversation Chain, ReAct Agent, and Conversational Agent",
"default": false,
"optional": true,
"id": "chatOpenAI_0-input-allowImageUploads-boolean"
},
{
"label": "Image Resolution",
"description": "This parameter controls the resolution in which the model views the image.",
"name": "imageResolution",
"type": "options",
"options": [
{
"label": "Low",
"name": "low"
},
{
"label": "High",
"name": "high"
},
{
"label": "Auto",
"name": "auto"
}
],
"default": "low",
"optional": false,
"additionalParams": true,
"id": "chatOpenAI_0-input-imageResolution-options"
}
],
"inputAnchors": [
@ -520,7 +553,9 @@
"presencePenalty": "",
"timeout": "",
"basepath": "",
"baseOptions": ""
"baseOptions": "",
"allowImageUploads": true,
"imageResolution": "low"
},
"outputAnchors": [
{

View File

@ -72,7 +72,7 @@
"id": "chatOpenAI_0",
"label": "ChatOpenAI",
"name": "chatOpenAI",
"version": 4,
"version": 5,
"type": "ChatOpenAI",
"baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel"],
"category": "Chat Models",
@ -218,6 +218,39 @@
"optional": true,
"additionalParams": true,
"id": "chatOpenAI_0-input-baseOptions-json"
},
{
"label": "Allow Image Uploads",
"name": "allowImageUploads",
"type": "boolean",
"description": "Automatically uses gpt-4-vision-preview when image is being uploaded from chat. Only works with LLMChain, Conversation Chain, ReAct Agent, and Conversational Agent",
"default": false,
"optional": true,
"id": "chatOpenAI_0-input-allowImageUploads-boolean"
},
{
"label": "Image Resolution",
"description": "This parameter controls the resolution in which the model views the image.",
"name": "imageResolution",
"type": "options",
"options": [
{
"label": "Low",
"name": "low"
},
{
"label": "High",
"name": "high"
},
{
"label": "Auto",
"name": "auto"
}
],
"default": "low",
"optional": false,
"additionalParams": true,
"id": "chatOpenAI_0-input-imageResolution-options"
}
],
"inputAnchors": [
@ -238,7 +271,9 @@
"presencePenalty": "",
"timeout": "",
"basepath": "",
"baseOptions": ""
"baseOptions": "",
"allowImageUploads": true,
"imageResolution": "low"
},
"outputAnchors": [
{

View File

@ -196,7 +196,7 @@
"data": {
"id": "chatOpenAI_0",
"label": "ChatOpenAI",
"version": 4,
"version": 5,
"name": "chatOpenAI",
"type": "ChatOpenAI",
"baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel", "Runnable"],
@ -349,6 +349,39 @@
"optional": true,
"additionalParams": true,
"id": "chatOpenAI_0-input-baseOptions-json"
},
{
"label": "Allow Image Uploads",
"name": "allowImageUploads",
"type": "boolean",
"description": "Automatically uses gpt-4-vision-preview when image is being uploaded from chat. Only works with LLMChain, Conversation Chain, ReAct Agent, and Conversational Agent",
"default": false,
"optional": true,
"id": "chatOpenAI_0-input-allowImageUploads-boolean"
},
{
"label": "Image Resolution",
"description": "This parameter controls the resolution in which the model views the image.",
"name": "imageResolution",
"type": "options",
"options": [
{
"label": "Low",
"name": "low"
},
{
"label": "High",
"name": "high"
},
{
"label": "Auto",
"name": "auto"
}
],
"default": "low",
"optional": false,
"additionalParams": true,
"id": "chatOpenAI_0-input-imageResolution-options"
}
],
"inputAnchors": [
@ -370,7 +403,9 @@
"presencePenalty": "",
"timeout": "",
"basepath": "",
"baseOptions": ""
"baseOptions": "",
"allowImageUploads": true,
"imageResolution": "low"
},
"outputAnchors": [
{

View File

@ -217,7 +217,7 @@
"id": "chatOpenAI_0",
"label": "ChatOpenAI",
"name": "chatOpenAI",
"version": 4,
"version": 5,
"type": "ChatOpenAI",
"baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel"],
"category": "Chat Models",
@ -363,6 +363,39 @@
"optional": true,
"additionalParams": true,
"id": "chatOpenAI_0-input-baseOptions-json"
},
{
"label": "Allow Image Uploads",
"name": "allowImageUploads",
"type": "boolean",
"description": "Automatically uses gpt-4-vision-preview when image is being uploaded from chat. Only works with LLMChain, Conversation Chain, ReAct Agent, and Conversational Agent",
"default": false,
"optional": true,
"id": "chatOpenAI_0-input-allowImageUploads-boolean"
},
{
"label": "Image Resolution",
"description": "This parameter controls the resolution in which the model views the image.",
"name": "imageResolution",
"type": "options",
"options": [
{
"label": "Low",
"name": "low"
},
{
"label": "High",
"name": "high"
},
{
"label": "Auto",
"name": "auto"
}
],
"default": "low",
"optional": false,
"additionalParams": true,
"id": "chatOpenAI_0-input-imageResolution-options"
}
],
"inputAnchors": [
@ -383,7 +416,9 @@
"presencePenalty": "",
"timeout": "",
"basepath": "",
"baseOptions": ""
"baseOptions": "",
"allowImageUploads": true,
"imageResolution": "low"
},
"outputAnchors": [
{

View File

@ -158,7 +158,7 @@
"data": {
"id": "chatOpenAI_0",
"label": "ChatOpenAI",
"version": 4,
"version": 5,
"name": "chatOpenAI",
"type": "ChatOpenAI",
"baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel"],
@ -305,6 +305,39 @@
"optional": true,
"additionalParams": true,
"id": "chatOpenAI_0-input-baseOptions-json"
},
{
"label": "Allow Image Uploads",
"name": "allowImageUploads",
"type": "boolean",
"description": "Automatically uses gpt-4-vision-preview when image is being uploaded from chat. Only works with LLMChain, Conversation Chain, ReAct Agent, and Conversational Agent",
"default": false,
"optional": true,
"id": "chatOpenAI_0-input-allowImageUploads-boolean"
},
{
"label": "Image Resolution",
"description": "This parameter controls the resolution in which the model views the image.",
"name": "imageResolution",
"type": "options",
"options": [
{
"label": "Low",
"name": "low"
},
{
"label": "High",
"name": "high"
},
{
"label": "Auto",
"name": "auto"
}
],
"default": "low",
"optional": false,
"additionalParams": true,
"id": "chatOpenAI_0-input-imageResolution-options"
}
],
"inputAnchors": [
@ -325,7 +358,9 @@
"presencePenalty": "",
"timeout": "",
"basepath": "",
"baseOptions": ""
"baseOptions": "",
"allowImageUploads": true,
"imageResolution": "low"
},
"outputAnchors": [
{

View File

@ -489,7 +489,7 @@
"data": {
"id": "chatOpenAI_0",
"label": "ChatOpenAI",
"version": 4,
"version": 5,
"name": "chatOpenAI",
"type": "ChatOpenAI",
"baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel", "Runnable"],
@ -642,6 +642,39 @@
"optional": true,
"additionalParams": true,
"id": "chatOpenAI_0-input-baseOptions-json"
},
{
"label": "Allow Image Uploads",
"name": "allowImageUploads",
"type": "boolean",
"description": "Automatically uses gpt-4-vision-preview when image is being uploaded from chat. Only works with LLMChain, Conversation Chain, ReAct Agent, and Conversational Agent",
"default": false,
"optional": true,
"id": "chatOpenAI_0-input-allowImageUploads-boolean"
},
{
"label": "Image Resolution",
"description": "This parameter controls the resolution in which the model views the image.",
"name": "imageResolution",
"type": "options",
"options": [
{
"label": "Low",
"name": "low"
},
{
"label": "High",
"name": "high"
},
{
"label": "Auto",
"name": "auto"
}
],
"default": "low",
"optional": false,
"additionalParams": true,
"id": "chatOpenAI_0-input-imageResolution-options"
}
],
"inputAnchors": [
@ -663,7 +696,9 @@
"presencePenalty": "",
"timeout": "",
"basepath": "",
"baseOptions": ""
"baseOptions": "",
"allowImageUploads": true,
"imageResolution": "low"
},
"outputAnchors": [
{

View File

@ -371,7 +371,7 @@
"data": {
"id": "chatOpenAI_0",
"label": "ChatOpenAI",
"version": 4,
"version": 5,
"name": "chatOpenAI",
"type": "ChatOpenAI",
"baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel", "Runnable"],
@ -524,6 +524,39 @@
"optional": true,
"additionalParams": true,
"id": "chatOpenAI_0-input-baseOptions-json"
},
{
"label": "Allow Image Uploads",
"name": "allowImageUploads",
"type": "boolean",
"description": "Automatically uses gpt-4-vision-preview when image is being uploaded from chat. Only works with LLMChain, Conversation Chain, ReAct Agent, and Conversational Agent",
"default": false,
"optional": true,
"id": "chatOpenAI_0-input-allowImageUploads-boolean"
},
{
"label": "Image Resolution",
"description": "This parameter controls the resolution in which the model views the image.",
"name": "imageResolution",
"type": "options",
"options": [
{
"label": "Low",
"name": "low"
},
{
"label": "High",
"name": "high"
},
{
"label": "Auto",
"name": "auto"
}
],
"default": "low",
"optional": false,
"additionalParams": true,
"id": "chatOpenAI_0-input-imageResolution-options"
}
],
"inputAnchors": [
@ -545,7 +578,9 @@
"presencePenalty": "",
"timeout": "",
"basepath": "",
"baseOptions": ""
"baseOptions": "",
"allowImageUploads": true,
"imageResolution": "low"
},
"outputAnchors": [
{

View File

@ -378,7 +378,7 @@
"id": "chatOpenAI_0",
"label": "ChatOpenAI",
"name": "chatOpenAI",
"version": 4,
"version": 5,
"type": "ChatOpenAI",
"baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel"],
"category": "Chat Models",
@ -524,6 +524,39 @@
"optional": true,
"additionalParams": true,
"id": "chatOpenAI_0-input-baseOptions-json"
},
{
"label": "Allow Image Uploads",
"name": "allowImageUploads",
"type": "boolean",
"description": "Automatically uses gpt-4-vision-preview when image is being uploaded from chat. Only works with LLMChain, Conversation Chain, ReAct Agent, and Conversational Agent",
"default": false,
"optional": true,
"id": "chatOpenAI_0-input-allowImageUploads-boolean"
},
{
"label": "Image Resolution",
"description": "This parameter controls the resolution in which the model views the image.",
"name": "imageResolution",
"type": "options",
"options": [
{
"label": "Low",
"name": "low"
},
{
"label": "High",
"name": "high"
},
{
"label": "Auto",
"name": "auto"
}
],
"default": "low",
"optional": false,
"additionalParams": true,
"id": "chatOpenAI_0-input-imageResolution-options"
}
],
"inputAnchors": [
@ -544,7 +577,9 @@
"presencePenalty": "",
"timeout": "",
"basepath": "",
"baseOptions": ""
"baseOptions": "",
"allowImageUploads": true,
"imageResolution": "low"
},
"outputAnchors": [
{

View File

@ -910,7 +910,7 @@
"data": {
"id": "chatOpenAI_0",
"label": "ChatOpenAI",
"version": 4,
"version": 5,
"name": "chatOpenAI",
"type": "ChatOpenAI",
"baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel", "Runnable"],
@ -1063,6 +1063,39 @@
"optional": true,
"additionalParams": true,
"id": "chatOpenAI_0-input-baseOptions-json"
},
{
"label": "Allow Image Uploads",
"name": "allowImageUploads",
"type": "boolean",
"description": "Automatically uses gpt-4-vision-preview when image is being uploaded from chat. Only works with LLMChain, Conversation Chain, ReAct Agent, and Conversational Agent",
"default": false,
"optional": true,
"id": "chatOpenAI_0-input-allowImageUploads-boolean"
},
{
"label": "Image Resolution",
"description": "This parameter controls the resolution in which the model views the image.",
"name": "imageResolution",
"type": "options",
"options": [
{
"label": "Low",
"name": "low"
},
{
"label": "High",
"name": "high"
},
{
"label": "Auto",
"name": "auto"
}
],
"default": "low",
"optional": false,
"additionalParams": true,
"id": "chatOpenAI_0-input-imageResolution-options"
}
],
"inputAnchors": [
@ -1084,7 +1117,9 @@
"presencePenalty": "",
"timeout": "",
"basepath": "",
"baseOptions": ""
"baseOptions": "",
"allowImageUploads": true,
"imageResolution": "low"
},
"outputAnchors": [
{

View File

@ -454,7 +454,7 @@
"data": {
"id": "chatOpenAI_0",
"label": "ChatOpenAI",
"version": 4,
"version": 5,
"name": "chatOpenAI",
"type": "ChatOpenAI",
"baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel", "Runnable"],
@ -607,6 +607,39 @@
"optional": true,
"additionalParams": true,
"id": "chatOpenAI_0-input-baseOptions-json"
},
{
"label": "Allow Image Uploads",
"name": "allowImageUploads",
"type": "boolean",
"description": "Automatically uses gpt-4-vision-preview when image is being uploaded from chat. Only works with LLMChain, Conversation Chain, ReAct Agent, and Conversational Agent",
"default": false,
"optional": true,
"id": "chatOpenAI_0-input-allowImageUploads-boolean"
},
{
"label": "Image Resolution",
"description": "This parameter controls the resolution in which the model views the image.",
"name": "imageResolution",
"type": "options",
"options": [
{
"label": "Low",
"name": "low"
},
{
"label": "High",
"name": "high"
},
{
"label": "Auto",
"name": "auto"
}
],
"default": "low",
"optional": false,
"additionalParams": true,
"id": "chatOpenAI_0-input-imageResolution-options"
}
],
"inputAnchors": [
@ -628,7 +661,9 @@
"presencePenalty": "",
"timeout": "",
"basepath": "",
"baseOptions": ""
"baseOptions": "",
"allowImageUploads": true,
"imageResolution": "low"
},
"outputAnchors": [
{

View File

@ -166,7 +166,7 @@
"data": {
"id": "chatOpenAI_0",
"label": "ChatOpenAI",
"version": 4,
"version": 5,
"name": "chatOpenAI",
"type": "ChatOpenAI",
"baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel", "Runnable"],
@ -319,6 +319,39 @@
"optional": true,
"additionalParams": true,
"id": "chatOpenAI_0-input-baseOptions-json"
},
{
"label": "Allow Image Uploads",
"name": "allowImageUploads",
"type": "boolean",
"description": "Automatically uses gpt-4-vision-preview when image is being uploaded from chat. Only works with LLMChain, Conversation Chain, ReAct Agent, and Conversational Agent",
"default": false,
"optional": true,
"id": "chatOpenAI_0-input-allowImageUploads-boolean"
},
{
"label": "Image Resolution",
"description": "This parameter controls the resolution in which the model views the image.",
"name": "imageResolution",
"type": "options",
"options": [
{
"label": "Low",
"name": "low"
},
{
"label": "High",
"name": "high"
},
{
"label": "Auto",
"name": "auto"
}
],
"default": "low",
"optional": false,
"additionalParams": true,
"id": "chatOpenAI_0-input-imageResolution-options"
}
],
"inputAnchors": [
@ -340,7 +373,9 @@
"presencePenalty": "",
"timeout": "",
"basepath": "",
"baseOptions": ""
"baseOptions": "",
"allowImageUploads": true,
"imageResolution": "low"
},
"outputAnchors": [
{

View File

@ -225,7 +225,7 @@
"data": {
"id": "chatOpenAI_0",
"label": "ChatOpenAI",
"version": 4,
"version": 5,
"name": "chatOpenAI",
"type": "ChatOpenAI",
"baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel", "Runnable"],
@ -378,6 +378,39 @@
"optional": true,
"additionalParams": true,
"id": "chatOpenAI_0-input-baseOptions-json"
},
{
"label": "Allow Image Uploads",
"name": "allowImageUploads",
"type": "boolean",
"description": "Automatically uses gpt-4-vision-preview when image is being uploaded from chat. Only works with LLMChain, Conversation Chain, ReAct Agent, and Conversational Agent",
"default": false,
"optional": true,
"id": "chatOpenAI_0-input-allowImageUploads-boolean"
},
{
"label": "Image Resolution",
"description": "This parameter controls the resolution in which the model views the image.",
"name": "imageResolution",
"type": "options",
"options": [
{
"label": "Low",
"name": "low"
},
{
"label": "High",
"name": "high"
},
{
"label": "Auto",
"name": "auto"
}
],
"default": "low",
"optional": false,
"additionalParams": true,
"id": "chatOpenAI_0-input-imageResolution-options"
}
],
"inputAnchors": [
@ -399,7 +432,9 @@
"presencePenalty": "",
"timeout": "",
"basepath": "",
"baseOptions": ""
"baseOptions": "",
"allowImageUploads": true,
"imageResolution": "low"
},
"outputAnchors": [
{

View File

@ -508,7 +508,7 @@
"data": {
"id": "chatOpenAI_0",
"label": "ChatOpenAI",
"version": 4,
"version": 5,
"name": "chatOpenAI",
"type": "ChatOpenAI",
"baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel", "Runnable"],
@ -661,6 +661,39 @@
"optional": true,
"additionalParams": true,
"id": "chatOpenAI_0-input-baseOptions-json"
},
{
"label": "Allow Image Uploads",
"name": "allowImageUploads",
"type": "boolean",
"description": "Automatically uses gpt-4-vision-preview when image is being uploaded from chat. Only works with LLMChain, Conversation Chain, ReAct Agent, and Conversational Agent",
"default": false,
"optional": true,
"id": "chatOpenAI_0-input-allowImageUploads-boolean"
},
{
"label": "Image Resolution",
"description": "This parameter controls the resolution in which the model views the image.",
"name": "imageResolution",
"type": "options",
"options": [
{
"label": "Low",
"name": "low"
},
{
"label": "High",
"name": "high"
},
{
"label": "Auto",
"name": "auto"
}
],
"default": "low",
"optional": false,
"additionalParams": true,
"id": "chatOpenAI_0-input-imageResolution-options"
}
],
"inputAnchors": [
@ -682,7 +715,9 @@
"presencePenalty": "",
"timeout": "",
"basepath": "",
"baseOptions": ""
"baseOptions": "",
"allowImageUploads": true,
"imageResolution": "low"
},
"outputAnchors": [
{

View File

@ -455,7 +455,7 @@
"data": {
"id": "chatOpenAI_0",
"label": "ChatOpenAI",
"version": 4,
"version": 5,
"name": "chatOpenAI",
"type": "ChatOpenAI",
"baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel", "Runnable"],
@ -608,6 +608,39 @@
"optional": true,
"additionalParams": true,
"id": "chatOpenAI_0-input-baseOptions-json"
},
{
"label": "Allow Image Uploads",
"name": "allowImageUploads",
"type": "boolean",
"description": "Automatically uses gpt-4-vision-preview when image is being uploaded from chat. Only works with LLMChain, Conversation Chain, ReAct Agent, and Conversational Agent",
"default": false,
"optional": true,
"id": "chatOpenAI_0-input-allowImageUploads-boolean"
},
{
"label": "Image Resolution",
"description": "This parameter controls the resolution in which the model views the image.",
"name": "imageResolution",
"type": "options",
"options": [
{
"label": "Low",
"name": "low"
},
{
"label": "High",
"name": "high"
},
{
"label": "Auto",
"name": "auto"
}
],
"default": "low",
"optional": false,
"additionalParams": true,
"id": "chatOpenAI_0-input-imageResolution-options"
}
],
"inputAnchors": [
@ -629,7 +662,9 @@
"presencePenalty": "",
"timeout": "",
"basepath": "",
"baseOptions": ""
"baseOptions": "",
"allowImageUploads": true,
"imageResolution": "low"
},
"outputAnchors": [
{

View File

@ -280,7 +280,7 @@
"id": "chatOpenAI_0",
"label": "ChatOpenAI",
"name": "chatOpenAI",
"version": 4,
"version": 5,
"type": "ChatOpenAI",
"baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel"],
"category": "Chat Models",
@ -426,6 +426,39 @@
"optional": true,
"additionalParams": true,
"id": "chatOpenAI_0-input-baseOptions-json"
},
{
"label": "Allow Image Uploads",
"name": "allowImageUploads",
"type": "boolean",
"description": "Automatically uses gpt-4-vision-preview when image is being uploaded from chat. Only works with LLMChain, Conversation Chain, ReAct Agent, and Conversational Agent",
"default": false,
"optional": true,
"id": "chatOpenAI_0-input-allowImageUploads-boolean"
},
{
"label": "Image Resolution",
"description": "This parameter controls the resolution in which the model views the image.",
"name": "imageResolution",
"type": "options",
"options": [
{
"label": "Low",
"name": "low"
},
{
"label": "High",
"name": "high"
},
{
"label": "Auto",
"name": "auto"
}
],
"default": "low",
"optional": false,
"additionalParams": true,
"id": "chatOpenAI_0-input-imageResolution-options"
}
],
"inputAnchors": [
@ -446,7 +479,9 @@
"presencePenalty": "",
"timeout": "",
"basepath": "",
"baseOptions": ""
"baseOptions": "",
"allowImageUploads": true,
"imageResolution": "low"
},
"outputAnchors": [
{

View File

@ -390,7 +390,7 @@
"data": {
"id": "chatOpenAI_0",
"label": "ChatOpenAI",
"version": 4,
"version": 5,
"name": "chatOpenAI",
"type": "ChatOpenAI",
"baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel", "Runnable"],
@ -543,6 +543,39 @@
"optional": true,
"additionalParams": true,
"id": "chatOpenAI_0-input-baseOptions-json"
},
{
"label": "Allow Image Uploads",
"name": "allowImageUploads",
"type": "boolean",
"description": "Automatically uses gpt-4-vision-preview when image is being uploaded from chat. Only works with LLMChain, Conversation Chain, ReAct Agent, and Conversational Agent",
"default": false,
"optional": true,
"id": "chatOpenAI_0-input-allowImageUploads-boolean"
},
{
"label": "Image Resolution",
"description": "This parameter controls the resolution in which the model views the image.",
"name": "imageResolution",
"type": "options",
"options": [
{
"label": "Low",
"name": "low"
},
{
"label": "High",
"name": "high"
},
{
"label": "Auto",
"name": "auto"
}
],
"default": "low",
"optional": false,
"additionalParams": true,
"id": "chatOpenAI_0-input-imageResolution-options"
}
],
"inputAnchors": [
@ -564,7 +597,9 @@
"presencePenalty": "",
"timeout": "",
"basepath": "",
"baseOptions": ""
"baseOptions": "",
"allowImageUploads": true,
"imageResolution": "low"
},
"outputAnchors": [
{

View File

@ -487,7 +487,7 @@
"data": {
"id": "chatOpenAI_0",
"label": "ChatOpenAI",
"version": 4,
"version": 5,
"name": "chatOpenAI",
"type": "ChatOpenAI",
"baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel", "Runnable"],
@ -640,6 +640,39 @@
"optional": true,
"additionalParams": true,
"id": "chatOpenAI_0-input-baseOptions-json"
},
{
"label": "Allow Image Uploads",
"name": "allowImageUploads",
"type": "boolean",
"description": "Automatically uses gpt-4-vision-preview when image is being uploaded from chat. Only works with LLMChain, Conversation Chain, ReAct Agent, and Conversational Agent",
"default": false,
"optional": true,
"id": "chatOpenAI_0-input-allowImageUploads-boolean"
},
{
"label": "Image Resolution",
"description": "This parameter controls the resolution in which the model views the image.",
"name": "imageResolution",
"type": "options",
"options": [
{
"label": "Low",
"name": "low"
},
{
"label": "High",
"name": "high"
},
{
"label": "Auto",
"name": "auto"
}
],
"default": "low",
"optional": false,
"additionalParams": true,
"id": "chatOpenAI_0-input-imageResolution-options"
}
],
"inputAnchors": [
@ -661,7 +694,9 @@
"presencePenalty": "",
"timeout": "",
"basepath": "",
"baseOptions": ""
"baseOptions": "",
"allowImageUploads": true,
"imageResolution": "low"
},
"outputAnchors": [
{
@ -1013,7 +1048,7 @@
"data": {
"id": "chatOpenAI_1",
"label": "ChatOpenAI",
"version": 4,
"version": 5,
"name": "chatOpenAI",
"type": "ChatOpenAI",
"baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel", "Runnable"],
@ -1166,6 +1201,39 @@
"optional": true,
"additionalParams": true,
"id": "chatOpenAI_1-input-baseOptions-json"
},
{
"label": "Allow Image Uploads",
"name": "allowImageUploads",
"type": "boolean",
"description": "Automatically uses gpt-4-vision-preview when image is being uploaded from chat. Only works with LLMChain, Conversation Chain, ReAct Agent, and Conversational Agent",
"default": false,
"optional": true,
"id": "chatOpenAI_1-input-allowImageUploads-boolean"
},
{
"label": "Image Resolution",
"description": "This parameter controls the resolution in which the model views the image.",
"name": "imageResolution",
"type": "options",
"options": [
{
"label": "Low",
"name": "low"
},
{
"label": "High",
"name": "high"
},
{
"label": "Auto",
"name": "auto"
}
],
"default": "low",
"optional": false,
"additionalParams": true,
"id": "chatOpenAI_1-input-imageResolution-options"
}
],
"inputAnchors": [
@ -1187,7 +1255,9 @@
"presencePenalty": "",
"timeout": "",
"basepath": "",
"baseOptions": ""
"baseOptions": "",
"allowImageUploads": true,
"imageResolution": "low"
},
"outputAnchors": [
{
@ -1219,7 +1289,7 @@
"data": {
"id": "chatOpenAI_2",
"label": "ChatOpenAI",
"version": 4,
"version": 5,
"name": "chatOpenAI",
"type": "ChatOpenAI",
"baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel", "Runnable"],
@ -1372,6 +1442,39 @@
"optional": true,
"additionalParams": true,
"id": "chatOpenAI_2-input-baseOptions-json"
},
{
"label": "Allow Image Uploads",
"name": "allowImageUploads",
"type": "boolean",
"description": "Automatically uses gpt-4-vision-preview when image is being uploaded from chat. Only works with LLMChain, Conversation Chain, ReAct Agent, and Conversational Agent",
"default": false,
"optional": true,
"id": "chatOpenAI_2-input-allowImageUploads-boolean"
},
{
"label": "Image Resolution",
"description": "This parameter controls the resolution in which the model views the image.",
"name": "imageResolution",
"type": "options",
"options": [
{
"label": "Low",
"name": "low"
},
{
"label": "High",
"name": "high"
},
{
"label": "Auto",
"name": "auto"
}
],
"default": "low",
"optional": false,
"additionalParams": true,
"id": "chatOpenAI_2-input-imageResolution-options"
}
],
"inputAnchors": [
@ -1393,7 +1496,9 @@
"presencePenalty": "",
"timeout": "",
"basepath": "",
"baseOptions": ""
"baseOptions": "",
"allowImageUploads": true,
"imageResolution": "low"
},
"outputAnchors": [
{

View File

@ -281,7 +281,7 @@
"data": {
"id": "chatOpenAI_0",
"label": "ChatOpenAI",
"version": 4,
"version": 5,
"name": "chatOpenAI",
"type": "ChatOpenAI",
"baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel"],
@ -428,6 +428,39 @@
"optional": true,
"additionalParams": true,
"id": "chatOpenAI_0-input-baseOptions-json"
},
{
"label": "Allow Image Uploads",
"name": "allowImageUploads",
"type": "boolean",
"description": "Automatically uses gpt-4-vision-preview when image is being uploaded from chat. Only works with LLMChain, Conversation Chain, ReAct Agent, and Conversational Agent",
"default": false,
"optional": true,
"id": "chatOpenAI_0-input-allowImageUploads-boolean"
},
{
"label": "Image Resolution",
"description": "This parameter controls the resolution in which the model views the image.",
"name": "imageResolution",
"type": "options",
"options": [
{
"label": "Low",
"name": "low"
},
{
"label": "High",
"name": "high"
},
{
"label": "Auto",
"name": "auto"
}
],
"default": "low",
"optional": false,
"additionalParams": true,
"id": "chatOpenAI_0-input-imageResolution-options"
}
],
"inputAnchors": [
@ -448,7 +481,9 @@
"presencePenalty": "",
"timeout": "",
"basepath": "",
"baseOptions": ""
"baseOptions": "",
"allowImageUploads": true,
"imageResolution": "low"
},
"outputAnchors": [
{

View File

@ -429,7 +429,7 @@
"data": {
"id": "chatOpenAI_0",
"label": "ChatOpenAI",
"version": 4,
"version": 5,
"name": "chatOpenAI",
"type": "ChatOpenAI",
"baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel", "Runnable"],
@ -582,6 +582,39 @@
"optional": true,
"additionalParams": true,
"id": "chatOpenAI_0-input-baseOptions-json"
},
{
"label": "Allow Image Uploads",
"name": "allowImageUploads",
"type": "boolean",
"description": "Automatically uses gpt-4-vision-preview when image is being uploaded from chat. Only works with LLMChain, Conversation Chain, ReAct Agent, and Conversational Agent",
"default": false,
"optional": true,
"id": "chatOpenAI_0-input-allowImageUploads-boolean"
},
{
"label": "Image Resolution",
"description": "This parameter controls the resolution in which the model views the image.",
"name": "imageResolution",
"type": "options",
"options": [
{
"label": "Low",
"name": "low"
},
{
"label": "High",
"name": "high"
},
{
"label": "Auto",
"name": "auto"
}
],
"default": "low",
"optional": false,
"additionalParams": true,
"id": "chatOpenAI_0-input-imageResolution-options"
}
],
"inputAnchors": [
@ -603,7 +636,9 @@
"presencePenalty": "",
"timeout": "",
"basepath": "",
"baseOptions": ""
"baseOptions": "",
"allowImageUploads": true,
"imageResolution": "low"
},
"outputAnchors": [
{
@ -635,7 +670,7 @@
"data": {
"id": "chatOpenAI_1",
"label": "ChatOpenAI",
"version": 4,
"version": 5,
"name": "chatOpenAI",
"type": "ChatOpenAI",
"baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel", "Runnable"],
@ -788,6 +823,39 @@
"optional": true,
"additionalParams": true,
"id": "chatOpenAI_1-input-baseOptions-json"
},
{
"label": "Allow Image Uploads",
"name": "allowImageUploads",
"type": "boolean",
"description": "Automatically uses gpt-4-vision-preview when image is being uploaded from chat. Only works with LLMChain, Conversation Chain, ReAct Agent, and Conversational Agent",
"default": false,
"optional": true,
"id": "chatOpenAI_1-input-allowImageUploads-boolean"
},
{
"label": "Image Resolution",
"description": "This parameter controls the resolution in which the model views the image.",
"name": "imageResolution",
"type": "options",
"options": [
{
"label": "Low",
"name": "low"
},
{
"label": "High",
"name": "high"
},
{
"label": "Auto",
"name": "auto"
}
],
"default": "low",
"optional": false,
"additionalParams": true,
"id": "chatOpenAI_1-input-imageResolution-options"
}
],
"inputAnchors": [
@ -809,7 +877,9 @@
"presencePenalty": "",
"timeout": "",
"basepath": "",
"baseOptions": ""
"baseOptions": "",
"allowImageUploads": true,
"imageResolution": "low"
},
"outputAnchors": [
{

View File

@ -206,7 +206,7 @@
"data": {
"id": "chatOpenAI_0",
"label": "ChatOpenAI",
"version": 4,
"version": 5,
"name": "chatOpenAI",
"type": "ChatOpenAI",
"baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel", "Runnable"],
@ -359,6 +359,39 @@
"optional": true,
"additionalParams": true,
"id": "chatOpenAI_0-input-baseOptions-json"
},
{
"label": "Allow Image Uploads",
"name": "allowImageUploads",
"type": "boolean",
"description": "Automatically uses gpt-4-vision-preview when image is being uploaded from chat. Only works with LLMChain, Conversation Chain, ReAct Agent, and Conversational Agent",
"default": false,
"optional": true,
"id": "chatOpenAI_0-input-allowImageUploads-boolean"
},
{
"label": "Image Resolution",
"description": "This parameter controls the resolution in which the model views the image.",
"name": "imageResolution",
"type": "options",
"options": [
{
"label": "Low",
"name": "low"
},
{
"label": "High",
"name": "high"
},
{
"label": "Auto",
"name": "auto"
}
],
"default": "low",
"optional": false,
"additionalParams": true,
"id": "chatOpenAI_0-input-imageResolution-options"
}
],
"inputAnchors": [
@ -380,7 +413,9 @@
"presencePenalty": "",
"timeout": "",
"basepath": "",
"baseOptions": ""
"baseOptions": "",
"allowImageUploads": true,
"imageResolution": "low"
},
"outputAnchors": [
{

View File

@ -15,7 +15,7 @@
"data": {
"id": "chatOpenAI_0",
"label": "ChatOpenAI",
"version": 4,
"version": 5,
"name": "chatOpenAI",
"type": "ChatOpenAI",
"baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel"],
@ -162,6 +162,39 @@
"optional": true,
"additionalParams": true,
"id": "chatOpenAI_0-input-baseOptions-json"
},
{
"label": "Allow Image Uploads",
"name": "allowImageUploads",
"type": "boolean",
"description": "Automatically uses gpt-4-vision-preview when image is being uploaded from chat. Only works with LLMChain, Conversation Chain, ReAct Agent, and Conversational Agent",
"default": false,
"optional": true,
"id": "chatOpenAI_0-input-allowImageUploads-boolean"
},
{
"label": "Image Resolution",
"description": "This parameter controls the resolution in which the model views the image.",
"name": "imageResolution",
"type": "options",
"options": [
{
"label": "Low",
"name": "low"
},
{
"label": "High",
"name": "high"
},
{
"label": "Auto",
"name": "auto"
}
],
"default": "low",
"optional": false,
"additionalParams": true,
"id": "chatOpenAI_0-input-imageResolution-options"
}
],
"inputAnchors": [
@ -182,7 +215,9 @@
"presencePenalty": "",
"timeout": "",
"basepath": "",
"baseOptions": ""
"baseOptions": "",
"allowImageUploads": true,
"imageResolution": "low"
},
"outputAnchors": [
{

View File

@ -175,7 +175,7 @@
"data": {
"id": "chatOpenAI_0",
"label": "ChatOpenAI",
"version": 4,
"version": 5,
"name": "chatOpenAI",
"type": "ChatOpenAI",
"baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel", "Runnable"],
@ -328,6 +328,39 @@
"optional": true,
"additionalParams": true,
"id": "chatOpenAI_0-input-baseOptions-json"
},
{
"label": "Allow Image Uploads",
"name": "allowImageUploads",
"type": "boolean",
"description": "Automatically uses gpt-4-vision-preview when image is being uploaded from chat. Only works with LLMChain, Conversation Chain, ReAct Agent, and Conversational Agent",
"default": false,
"optional": true,
"id": "chatOpenAI_0-input-allowImageUploads-boolean"
},
{
"label": "Image Resolution",
"description": "This parameter controls the resolution in which the model views the image.",
"name": "imageResolution",
"type": "options",
"options": [
{
"label": "Low",
"name": "low"
},
{
"label": "High",
"name": "high"
},
{
"label": "Auto",
"name": "auto"
}
],
"default": "low",
"optional": false,
"additionalParams": true,
"id": "chatOpenAI_0-input-imageResolution-options"
}
],
"inputAnchors": [
@ -349,7 +382,9 @@
"presencePenalty": "",
"timeout": "",
"basepath": "",
"baseOptions": ""
"baseOptions": "",
"allowImageUploads": true,
"imageResolution": "low"
},
"outputAnchors": [
{
@ -381,7 +416,7 @@
"data": {
"id": "chatOpenAI_1",
"label": "ChatOpenAI",
"version": 4,
"version": 5,
"name": "chatOpenAI",
"type": "ChatOpenAI",
"baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel", "Runnable"],
@ -534,6 +569,39 @@
"optional": true,
"additionalParams": true,
"id": "chatOpenAI_1-input-baseOptions-json"
},
{
"label": "Allow Image Uploads",
"name": "allowImageUploads",
"type": "boolean",
"description": "Automatically uses gpt-4-vision-preview when image is being uploaded from chat. Only works with LLMChain, Conversation Chain, ReAct Agent, and Conversational Agent",
"default": false,
"optional": true,
"id": "chatOpenAI_1-input-allowImageUploads-boolean"
},
{
"label": "Image Resolution",
"description": "This parameter controls the resolution in which the model views the image.",
"name": "imageResolution",
"type": "options",
"options": [
{
"label": "Low",
"name": "low"
},
{
"label": "High",
"name": "high"
},
{
"label": "Auto",
"name": "auto"
}
],
"default": "low",
"optional": false,
"additionalParams": true,
"id": "chatOpenAI_1-input-imageResolution-options"
}
],
"inputAnchors": [
@ -555,7 +623,9 @@
"presencePenalty": "",
"timeout": "",
"basepath": "",
"baseOptions": ""
"baseOptions": "",
"allowImageUploads": true,
"imageResolution": "low"
},
"outputAnchors": [
{
@ -1313,7 +1383,7 @@
"data": {
"id": "chatOpenAI_2",
"label": "ChatOpenAI",
"version": 4,
"version": 5,
"name": "chatOpenAI",
"type": "ChatOpenAI",
"baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel", "Runnable"],
@ -1466,6 +1536,39 @@
"optional": true,
"additionalParams": true,
"id": "chatOpenAI_2-input-baseOptions-json"
},
{
"label": "Allow Image Uploads",
"name": "allowImageUploads",
"type": "boolean",
"description": "Automatically uses gpt-4-vision-preview when image is being uploaded from chat. Only works with LLMChain, Conversation Chain, ReAct Agent, and Conversational Agent",
"default": false,
"optional": true,
"id": "chatOpenAI_2-input-allowImageUploads-boolean"
},
{
"label": "Image Resolution",
"description": "This parameter controls the resolution in which the model views the image.",
"name": "imageResolution",
"type": "options",
"options": [
{
"label": "Low",
"name": "low"
},
{
"label": "High",
"name": "high"
},
{
"label": "Auto",
"name": "auto"
}
],
"default": "low",
"optional": false,
"additionalParams": true,
"id": "chatOpenAI_2-input-imageResolution-options"
}
],
"inputAnchors": [
@ -1487,7 +1590,9 @@
"presencePenalty": "",
"timeout": "",
"basepath": "",
"baseOptions": ""
"baseOptions": "",
"allowImageUploads": true,
"imageResolution": "low"
},
"outputAnchors": [
{

View File

@ -16,7 +16,7 @@
"data": {
"id": "chatOpenAI_0",
"label": "ChatOpenAI",
"version": 4,
"version": 5,
"name": "chatOpenAI",
"type": "ChatOpenAI",
"baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel", "Runnable"],
@ -169,6 +169,39 @@
"optional": true,
"additionalParams": true,
"id": "chatOpenAI_0-input-baseOptions-json"
},
{
"label": "Allow Image Uploads",
"name": "allowImageUploads",
"type": "boolean",
"description": "Automatically uses gpt-4-vision-preview when image is being uploaded from chat. Only works with LLMChain, Conversation Chain, ReAct Agent, and Conversational Agent",
"default": false,
"optional": true,
"id": "chatOpenAI_0-input-allowImageUploads-boolean"
},
{
"label": "Image Resolution",
"description": "This parameter controls the resolution in which the model views the image.",
"name": "imageResolution",
"type": "options",
"options": [
{
"label": "Low",
"name": "low"
},
{
"label": "High",
"name": "high"
},
{
"label": "Auto",
"name": "auto"
}
],
"default": "low",
"optional": false,
"additionalParams": true,
"id": "chatOpenAI_0-input-imageResolution-options"
}
],
"inputAnchors": [
@ -190,7 +223,9 @@
"presencePenalty": "",
"timeout": "",
"basepath": "",
"baseOptions": ""
"baseOptions": "",
"allowImageUploads": true,
"imageResolution": "low"
},
"outputAnchors": [
{

View File

@ -16,7 +16,7 @@
"data": {
"id": "chatOpenAI_0",
"label": "ChatOpenAI",
"version": 4,
"version": 5,
"name": "chatOpenAI",
"type": "ChatOpenAI",
"baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel", "Runnable"],
@ -169,6 +169,39 @@
"optional": true,
"additionalParams": true,
"id": "chatOpenAI_0-input-baseOptions-json"
},
{
"label": "Allow Image Uploads",
"name": "allowImageUploads",
"type": "boolean",
"description": "Automatically uses gpt-4-vision-preview when image is being uploaded from chat. Only works with LLMChain, Conversation Chain, ReAct Agent, and Conversational Agent",
"default": false,
"optional": true,
"id": "chatOpenAI_0-input-allowImageUploads-boolean"
},
{
"label": "Image Resolution",
"description": "This parameter controls the resolution in which the model views the image.",
"name": "imageResolution",
"type": "options",
"options": [
{
"label": "Low",
"name": "low"
},
{
"label": "High",
"name": "high"
},
{
"label": "Auto",
"name": "auto"
}
],
"default": "low",
"optional": false,
"additionalParams": true,
"id": "chatOpenAI_0-input-imageResolution-options"
}
],
"inputAnchors": [
@ -190,7 +223,9 @@
"presencePenalty": "",
"timeout": "",
"basepath": "",
"baseOptions": ""
"baseOptions": "",
"allowImageUploads": true,
"imageResolution": "low"
},
"outputAnchors": [
{

View File

@ -84,7 +84,7 @@
"data": {
"id": "chatOpenAI_0",
"label": "ChatOpenAI",
"version": 4,
"version": 5,
"name": "chatOpenAI",
"type": "ChatOpenAI",
"baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel"],
@ -231,6 +231,39 @@
"optional": true,
"additionalParams": true,
"id": "chatOpenAI_0-input-baseOptions-json"
},
{
"label": "Allow Image Uploads",
"name": "allowImageUploads",
"type": "boolean",
"description": "Automatically uses gpt-4-vision-preview when image is being uploaded from chat. Only works with LLMChain, Conversation Chain, ReAct Agent, and Conversational Agent",
"default": false,
"optional": true,
"id": "chatOpenAI_0-input-allowImageUploads-boolean"
},
{
"label": "Image Resolution",
"description": "This parameter controls the resolution in which the model views the image.",
"name": "imageResolution",
"type": "options",
"options": [
{
"label": "Low",
"name": "low"
},
{
"label": "High",
"name": "high"
},
{
"label": "Auto",
"name": "auto"
}
],
"default": "low",
"optional": false,
"additionalParams": true,
"id": "chatOpenAI_0-input-imageResolution-options"
}
],
"inputAnchors": [
@ -251,7 +284,9 @@
"presencePenalty": "",
"timeout": "",
"basepath": "",
"baseOptions": ""
"baseOptions": "",
"allowImageUploads": true,
"imageResolution": "low"
},
"outputAnchors": [
{

View File

@ -127,7 +127,7 @@
"data": {
"id": "chatOpenAI_0",
"label": "ChatOpenAI",
"version": 4,
"version": 5,
"name": "chatOpenAI",
"type": "ChatOpenAI",
"baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel"],
@ -274,6 +274,39 @@
"optional": true,
"additionalParams": true,
"id": "chatOpenAI_0-input-baseOptions-json"
},
{
"label": "Allow Image Uploads",
"name": "allowImageUploads",
"type": "boolean",
"description": "Automatically uses gpt-4-vision-preview when image is being uploaded from chat. Only works with LLMChain, Conversation Chain, ReAct Agent, and Conversational Agent",
"default": false,
"optional": true,
"id": "chatOpenAI_0-input-allowImageUploads-boolean"
},
{
"label": "Image Resolution",
"description": "This parameter controls the resolution in which the model views the image.",
"name": "imageResolution",
"type": "options",
"options": [
{
"label": "Low",
"name": "low"
},
{
"label": "High",
"name": "high"
},
{
"label": "Auto",
"name": "auto"
}
],
"default": "low",
"optional": false,
"additionalParams": true,
"id": "chatOpenAI_0-input-imageResolution-options"
}
],
"inputAnchors": [
@ -294,7 +327,9 @@
"presencePenalty": "",
"timeout": "",
"basepath": "",
"baseOptions": ""
"baseOptions": "",
"allowImageUploads": true,
"imageResolution": "low"
},
"outputAnchors": [
{
@ -433,7 +468,7 @@
"data": {
"id": "chatOpenAI_1",
"label": "ChatOpenAI",
"version": 4,
"version": 5,
"name": "chatOpenAI",
"type": "ChatOpenAI",
"baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel"],
@ -580,6 +615,39 @@
"optional": true,
"additionalParams": true,
"id": "chatOpenAI_1-input-baseOptions-json"
},
{
"label": "Allow Image Uploads",
"name": "allowImageUploads",
"type": "boolean",
"description": "Automatically uses gpt-4-vision-preview when image is being uploaded from chat. Only works with LLMChain, Conversation Chain, ReAct Agent, and Conversational Agent",
"default": false,
"optional": true,
"id": "chatOpenAI_1-input-allowImageUploads-boolean"
},
{
"label": "Image Resolution",
"description": "This parameter controls the resolution in which the model views the image.",
"name": "imageResolution",
"type": "options",
"options": [
{
"label": "Low",
"name": "low"
},
{
"label": "High",
"name": "high"
},
{
"label": "Auto",
"name": "auto"
}
],
"default": "low",
"optional": false,
"additionalParams": true,
"id": "chatOpenAI_1-input-imageResolution-options"
}
],
"inputAnchors": [
@ -600,7 +668,9 @@
"presencePenalty": "",
"timeout": "",
"basepath": "",
"baseOptions": ""
"baseOptions": "",
"allowImageUploads": true,
"imageResolution": "low"
},
"outputAnchors": [
{

View File

@ -394,7 +394,7 @@
"data": {
"id": "chatOpenAI_0",
"label": "ChatOpenAI",
"version": 4,
"version": 5,
"name": "chatOpenAI",
"type": "ChatOpenAI",
"baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel", "Runnable"],
@ -547,6 +547,39 @@
"optional": true,
"additionalParams": true,
"id": "chatOpenAI_0-input-baseOptions-json"
},
{
"label": "Allow Image Uploads",
"name": "allowImageUploads",
"type": "boolean",
"description": "Automatically uses gpt-4-vision-preview when image is being uploaded from chat. Only works with LLMChain, Conversation Chain, ReAct Agent, and Conversational Agent",
"default": false,
"optional": true,
"id": "chatOpenAI_0-input-allowImageUploads-boolean"
},
{
"label": "Image Resolution",
"description": "This parameter controls the resolution in which the model views the image.",
"name": "imageResolution",
"type": "options",
"options": [
{
"label": "Low",
"name": "low"
},
{
"label": "High",
"name": "high"
},
{
"label": "Auto",
"name": "auto"
}
],
"default": "low",
"optional": false,
"additionalParams": true,
"id": "chatOpenAI_0-input-imageResolution-options"
}
],
"inputAnchors": [
@ -568,7 +601,9 @@
"presencePenalty": "",
"timeout": "",
"basepath": "",
"baseOptions": ""
"baseOptions": "",
"allowImageUploads": true,
"imageResolution": "low"
},
"outputAnchors": [
{

View File

@ -48,6 +48,7 @@
"@oclif/core": "^1.13.10",
"async-mutex": "^0.4.0",
"axios": "1.6.2",
"content-disposition": "0.5.4",
"cors": "^2.8.5",
"crypto-js": "^4.1.1",
"dotenv": "^16.0.0",
@ -70,6 +71,7 @@
"winston": "^3.9.0"
},
"devDependencies": {
"@types/content-disposition": "0.5.8",
"@types/cors": "^2.8.12",
"@types/crypto-js": "^4.1.1",
"@types/multer": "^1.4.7",

View File

@ -1,4 +1,4 @@
import { ICommonObject, INode, INodeData as INodeDataFromComponent, INodeParams } from 'flowise-components'
import { ICommonObject, IFileUpload, INode, INodeData as INodeDataFromComponent, INodeParams } from 'flowise-components'
export type MessageType = 'apiMessage' | 'userMessage'
@ -31,6 +31,7 @@ export interface IChatMessage {
sourceDocuments?: string
usedTools?: string
fileAnnotations?: string
fileUploads?: string
chatType: string
chatId: string
memoryType?: string
@ -176,6 +177,7 @@ export interface IncomingInput {
socketIOClientId?: string
chatId?: string
stopNodeId?: string
uploads?: IFileUpload[]
}
export interface IActiveChatflows {
@ -212,3 +214,8 @@ export interface ICredentialReqBody {
export interface ICredentialReturnResponse extends ICredential {
plainDataObj: ICredentialDataDecrypted
}
/**
 * Upload constraints advertised to the chat UI for a chatflow:
 * which file types may be uploaded and how large they may be.
 */
export interface IUploadFileSizeAndTypes {
    // Accepted file types for chat uploads (e.g. image MIME types)
    fileTypes: string[]
    // Maximum allowed upload size — units are not visible here; presumably MB, TODO confirm against callers
    maxUploadSize: number
}

View File

@ -56,7 +56,7 @@ export class NodesPool {
}
}
const skipCategories = ['Analytic']
const skipCategories = ['Analytic', 'SpeechToText']
if (!skipCategories.includes(newNodeInstance.category)) {
this.componentNodes[newNodeInstance.name] = newNodeInstance
}

View File

@ -23,6 +23,7 @@ export default class Start extends Command {
CORS_ORIGINS: Flags.string(),
IFRAME_ORIGINS: Flags.string(),
DEBUG: Flags.string(),
BLOB_STORAGE_PATH: Flags.string(),
APIKEY_PATH: Flags.string(),
SECRETKEY_PATH: Flags.string(),
FLOWISE_SECRETKEY_OVERWRITE: Flags.string(),
@ -92,6 +93,9 @@ export default class Start extends Command {
if (flags.FLOWISE_PASSWORD) process.env.FLOWISE_PASSWORD = flags.FLOWISE_PASSWORD
if (flags.APIKEY_PATH) process.env.APIKEY_PATH = flags.APIKEY_PATH
// Storage
if (flags.BLOB_STORAGE_PATH) process.env.BLOB_STORAGE_PATH = flags.BLOB_STORAGE_PATH
//API Configuration
if (flags.FLOWISE_FILE_SIZE_LIMIT) process.env.FLOWISE_FILE_SIZE_LIMIT = flags.FLOWISE_FILE_SIZE_LIMIT

View File

@ -31,6 +31,9 @@ export class ChatFlow implements IChatFlow {
@Column({ nullable: true, type: 'text' })
analytic?: string
@Column({ nullable: true, type: 'text' })
speechToText?: string
@CreateDateColumn()
createdDate: Date

View File

@ -26,6 +26,9 @@ export class ChatMessage implements IChatMessage {
@Column({ nullable: true, type: 'text' })
fileAnnotations?: string
@Column({ nullable: true, type: 'text' })
fileUploads?: string
@Column()
chatType: string

View File

@ -0,0 +1,12 @@
import { MigrationInterface, QueryRunner } from 'typeorm'
export class AddFileUploadsToChatMessage1701788586491 implements MigrationInterface {
    /**
     * Adds the nullable `fileUploads` TEXT column to `chat_message` (MySQL),
     * used to persist metadata about files uploaded from the chat.
     * Guarded by `hasColumn` so a re-run after partial failure is a no-op.
     */
    public async up(queryRunner: QueryRunner): Promise<void> {
        const columnExists = await queryRunner.hasColumn('chat_message', 'fileUploads')
        // Await the ALTER: previously the promise was fire-and-forget, so a
        // failed ALTER surfaced as an unhandled rejection after up() resolved
        // and TypeORM recorded the migration as successful anyway.
        if (!columnExists) await queryRunner.query(`ALTER TABLE \`chat_message\` ADD COLUMN \`fileUploads\` TEXT;`)
    }

    /** Reverts the migration by dropping the `fileUploads` column. */
    public async down(queryRunner: QueryRunner): Promise<void> {
        await queryRunner.query(`ALTER TABLE \`chat_message\` DROP COLUMN \`fileUploads\`;`)
    }
}

View File

@ -0,0 +1,12 @@
import { MigrationInterface, QueryRunner } from 'typeorm'
export class AddSpeechToText1706364937060 implements MigrationInterface {
    /**
     * Adds the nullable `speechToText` TEXT column to `chat_flow` (MySQL),
     * storing the per-chatflow speech-to-text configuration.
     * Guarded by `hasColumn` so a re-run after partial failure is a no-op.
     */
    public async up(queryRunner: QueryRunner): Promise<void> {
        const columnExists = await queryRunner.hasColumn('chat_flow', 'speechToText')
        // Await the ALTER: previously the promise was fire-and-forget, so a
        // failed ALTER surfaced as an unhandled rejection after up() resolved
        // and TypeORM recorded the migration as successful anyway.
        if (!columnExists) await queryRunner.query(`ALTER TABLE \`chat_flow\` ADD COLUMN \`speechToText\` TEXT;`)
    }

    /** Reverts the migration by dropping the `speechToText` column. */
    public async down(queryRunner: QueryRunner): Promise<void> {
        await queryRunner.query(`ALTER TABLE \`chat_flow\` DROP COLUMN \`speechToText\`;`)
    }
}

View File

@ -10,7 +10,9 @@ import { AddAssistantEntity1699325775451 } from './1699325775451-AddAssistantEnt
import { AddUsedToolsToChatMessage1699481607341 } from './1699481607341-AddUsedToolsToChatMessage'
import { AddCategoryToChatFlow1699900910291 } from './1699900910291-AddCategoryToChatFlow'
import { AddFileAnnotationsToChatMessage1700271021237 } from './1700271021237-AddFileAnnotationsToChatMessage'
import { AddFileUploadsToChatMessage1701788586491 } from './1701788586491-AddFileUploadsToChatMessage'
import { AddVariableEntity1699325775451 } from './1702200925471-AddVariableEntity'
import { AddSpeechToText1706364937060 } from './1706364937060-AddSpeechToText'
export const mysqlMigrations = [
Init1693840429259,
@ -25,5 +27,7 @@ export const mysqlMigrations = [
AddUsedToolsToChatMessage1699481607341,
AddCategoryToChatFlow1699900910291,
AddFileAnnotationsToChatMessage1700271021237,
AddVariableEntity1699325775451
AddFileUploadsToChatMessage1701788586491,
AddVariableEntity1699325775451,
AddSpeechToText1706364937060
]

View File

@ -0,0 +1,11 @@
import { MigrationInterface, QueryRunner } from 'typeorm'
export class AddFileUploadsToChatMessage1701788586491 implements MigrationInterface {
    /**
     * Adds the nullable `fileUploads` TEXT column to `chat_message` (Postgres),
     * used to persist metadata about files uploaded from the chat.
     * `IF NOT EXISTS` makes a re-run after partial failure a no-op.
     */
    public async up(queryRunner: QueryRunner): Promise<void> {
        await queryRunner.query(`ALTER TABLE "chat_message" ADD COLUMN IF NOT EXISTS "fileUploads" TEXT;`)
    }

    /** Reverts the migration by dropping the `fileUploads` column. */
    public async down(queryRunner: QueryRunner): Promise<void> {
        await queryRunner.query(`ALTER TABLE "chat_message" DROP COLUMN "fileUploads";`)
    }
}

View File

@ -0,0 +1,11 @@
import { MigrationInterface, QueryRunner } from 'typeorm'
export class AddSpeechToText1706364937060 implements MigrationInterface {
    /**
     * Adds the nullable `speechToText` TEXT column to `chat_flow` (Postgres),
     * storing the per-chatflow speech-to-text configuration.
     * `IF NOT EXISTS` makes a re-run after partial failure a no-op.
     */
    public async up(queryRunner: QueryRunner): Promise<void> {
        await queryRunner.query(`ALTER TABLE "chat_flow" ADD COLUMN IF NOT EXISTS "speechToText" TEXT;`)
    }

    /** Reverts the migration by dropping the `speechToText` column. */
    public async down(queryRunner: QueryRunner): Promise<void> {
        await queryRunner.query(`ALTER TABLE "chat_flow" DROP COLUMN "speechToText";`)
    }
}

View File

@ -10,7 +10,9 @@ import { AddAssistantEntity1699325775451 } from './1699325775451-AddAssistantEnt
import { AddUsedToolsToChatMessage1699481607341 } from './1699481607341-AddUsedToolsToChatMessage'
import { AddCategoryToChatFlow1699900910291 } from './1699900910291-AddCategoryToChatFlow'
import { AddFileAnnotationsToChatMessage1700271021237 } from './1700271021237-AddFileAnnotationsToChatMessage'
import { AddFileUploadsToChatMessage1701788586491 } from './1701788586491-AddFileUploadsToChatMessage'
import { AddVariableEntity1699325775451 } from './1702200925471-AddVariableEntity'
import { AddSpeechToText1706364937060 } from './1706364937060-AddSpeechToText'
export const postgresMigrations = [
Init1693891895163,
@ -25,5 +27,7 @@ export const postgresMigrations = [
AddUsedToolsToChatMessage1699481607341,
AddCategoryToChatFlow1699900910291,
AddFileAnnotationsToChatMessage1700271021237,
AddVariableEntity1699325775451
AddFileUploadsToChatMessage1701788586491,
AddVariableEntity1699325775451,
AddSpeechToText1706364937060
]

View File

@ -0,0 +1,20 @@
import { MigrationInterface, QueryRunner } from 'typeorm'
export class AddFileUploadsToChatMessage1701788586491 implements MigrationInterface {
    /**
     * Adds the nullable `fileUploads` TEXT column to `chat_message` (SQLite).
     * SQLite has limited ALTER TABLE support, so the column is added by
     * rebuilding the table: create a temp table with the new schema, copy
     * the rows across, drop the old table, and rename the temp table back.
     */
    public async up(queryRunner: QueryRunner): Promise<void> {
        await queryRunner.query(
            `CREATE TABLE "temp_chat_message" ("id" varchar PRIMARY KEY NOT NULL, "role" varchar NOT NULL, "chatflowid" varchar NOT NULL, "content" text NOT NULL, "sourceDocuments" text, "usedTools" text, "fileAnnotations" text, "fileUploads" text, "createdDate" datetime NOT NULL DEFAULT (datetime('now')), "chatType" VARCHAR NOT NULL DEFAULT 'INTERNAL', "chatId" VARCHAR NOT NULL, "memoryType" VARCHAR, "sessionId" VARCHAR);`
        )
        // The INSERT column list and the SELECT list must line up positionally.
        // Previously "fileAnnotations" and "usedTools" were transposed between
        // the two lists, which silently swapped the data of those two columns
        // for every existing row during the migration.
        await queryRunner.query(
            `INSERT INTO "temp_chat_message" ("id", "role", "chatflowid", "content", "sourceDocuments", "usedTools", "fileAnnotations", "createdDate", "chatType", "chatId", "memoryType", "sessionId") SELECT "id", "role", "chatflowid", "content", "sourceDocuments", "usedTools", "fileAnnotations", "createdDate", "chatType", "chatId", "memoryType", "sessionId" FROM "chat_message";`
        )
        await queryRunner.query(`DROP TABLE "chat_message";`)
        await queryRunner.query(`ALTER TABLE "temp_chat_message" RENAME TO "chat_message";`)
        // Recreate the chatflowid index lost when the original table was dropped.
        await queryRunner.query(`CREATE INDEX "IDX_e574527322272fd838f4f0f3d3" ON "chat_message" ("chatflowid") ;`)
    }

    /** Reverts the migration: clean up any leftover temp table, then drop the column. */
    public async down(queryRunner: QueryRunner): Promise<void> {
        await queryRunner.query(`DROP TABLE IF EXISTS "temp_chat_message";`)
        // NOTE(review): ALTER TABLE ... DROP COLUMN requires SQLite >= 3.35 —
        // confirm the minimum supported SQLite version; otherwise down() needs
        // the same table-rebuild approach as up().
        await queryRunner.query(`ALTER TABLE "chat_message" DROP COLUMN "fileUploads";`)
    }
}

View File

@ -0,0 +1,11 @@
import { MigrationInterface, QueryRunner } from 'typeorm'
export class AddSpeechToText1706364937060 implements MigrationInterface {
public async up(queryRunner: QueryRunner): Promise<void> {
await queryRunner.query(`ALTER TABLE "chat_flow" ADD COLUMN "speechToText" TEXT;`)
}
public async down(queryRunner: QueryRunner): Promise<void> {
await queryRunner.query(`ALTER TABLE "chat_flow" DROP COLUMN "speechToText";`)
}
}

View File

@ -10,7 +10,9 @@ import { AddAssistantEntity1699325775451 } from './1699325775451-AddAssistantEnt
import { AddUsedToolsToChatMessage1699481607341 } from './1699481607341-AddUsedToolsToChatMessage'
import { AddCategoryToChatFlow1699900910291 } from './1699900910291-AddCategoryToChatFlow'
import { AddFileAnnotationsToChatMessage1700271021237 } from './1700271021237-AddFileAnnotationsToChatMessage'
import { AddFileUploadsToChatMessage1701788586491 } from './1701788586491-AddFileUploadsToChatMessage'
import { AddVariableEntity1699325775451 } from './1702200925471-AddVariableEntity'
import { AddSpeechToText1706364937060 } from './1706364937060-AddSpeechToText'
export const sqliteMigrations = [
Init1693835579790,
@ -25,5 +27,7 @@ export const sqliteMigrations = [
AddUsedToolsToChatMessage1699481607341,
AddCategoryToChatFlow1699900910291,
AddFileAnnotationsToChatMessage1700271021237,
AddVariableEntity1699325775451
AddFileUploadsToChatMessage1701788586491,
AddVariableEntity1699325775451,
AddSpeechToText1706364937060
]

View File

@ -5,6 +5,7 @@ import cors from 'cors'
import http from 'http'
import * as fs from 'fs'
import basicAuth from 'express-basic-auth'
import contentDisposition from 'content-disposition'
import { Server } from 'socket.io'
import logger from './utils/logger'
import { expressRequestLogger } from './utils/logger'
@ -21,7 +22,8 @@ import {
chatType,
IChatMessage,
IDepthQueue,
INodeDirectedGraph
INodeDirectedGraph,
IUploadFileSizeAndTypes
} from './Interface'
import {
getNodeModulesPackagePath,
@ -46,6 +48,7 @@ import {
getAllConnectedNodes,
clearSessionMemory,
findMemoryNode,
deleteFolderRecursive,
getTelemetryFlowObj,
getAppVersion
} from './utils'
@ -59,7 +62,18 @@ import { Tool } from './database/entities/Tool'
import { Assistant } from './database/entities/Assistant'
import { ChatflowPool } from './ChatflowPool'
import { CachePool } from './CachePool'
import { ICommonObject, IMessage, INodeOptionsValue, handleEscapeCharacters, webCrawl, xmlScrape } from 'flowise-components'
import {
ICommonObject,
IMessage,
INodeOptionsValue,
INodeParams,
handleEscapeCharacters,
convertSpeechToText,
xmlScrape,
webCrawl,
getStoragePath,
IFileUpload
} from 'flowise-components'
import { createRateLimiter, getRateLimiter, initializeRateLimiter } from './utils/rateLimit'
import { addAPIKey, compareKeys, deleteAPIKey, getApiKey, getAPIKeys, updateAPIKey } from './utils/apiKey'
import { sanitizeMiddleware, getCorsOptions, getAllowedIframeOrigins } from './utils/XSS'
@ -168,7 +182,9 @@ export class App {
'/api/v1/node-icon/',
'/api/v1/components-credentials-icon/',
'/api/v1/chatflows-streaming',
'/api/v1/chatflows-uploads',
'/api/v1/openai-assistants-file',
'/api/v1/get-upload-file',
'/api/v1/ip'
]
this.app.use((req, res, next) => {
@ -389,10 +405,13 @@ export class App {
id: req.params.id
})
if (!chatflow) return res.status(404).send(`Chatflow ${req.params.id} not found`)
if (chatflow.chatbotConfig) {
const uploadsConfig = await this.getUploadsConfig(req.params.id)
// even if chatbotConfig is not set but uploads are enabled
// send uploadsConfig to the chatbot
if (chatflow.chatbotConfig || uploadsConfig) {
try {
const parsedConfig = JSON.parse(chatflow.chatbotConfig)
return res.json(parsedConfig)
const parsedConfig = chatflow.chatbotConfig ? JSON.parse(chatflow.chatbotConfig) : {}
return res.json({ ...parsedConfig, uploads: uploadsConfig })
} catch (e) {
return res.status(500).send(`Error parsing Chatbot Config for Chatflow ${req.params.id}`)
}
@ -452,6 +471,15 @@ export class App {
// Delete chatflow via id
// Removes the chatflow row, then best-effort deletes its upload storage folder.
this.app.delete('/api/v1/chatflows/:id', async (req: Request, res: Response) => {
    const results = await this.AppDataSource.getRepository(ChatFlow).delete({ id: req.params.id })
    try {
        // Delete all uploads corresponding to this chatflow
        const directory = path.join(getStoragePath(), req.params.id)
        deleteFolderRecursive(directory)
    } catch (e) {
        // Storage cleanup is best-effort: the DB row is already deleted, so log and continue
        logger.error(`[server]: Error deleting file storage for chatflow ${req.params.id}: ${e}`)
    }
    return res.json(results)
})
@ -502,6 +530,16 @@ export class App {
return res.json(obj)
})
// Check if chatflow valid for uploads
// Responds with the uploads capability object from getUploadsConfig:
// { isSpeechToTextEnabled, isImageUploadAllowed, imgUploadSizeAndTypes }.
// NOTE(review): getUploadsConfig resolves to a plain message string when the
// chatflow id is unknown; that string is still sent with HTTP 200 here — confirm intended.
this.app.get('/api/v1/chatflows-uploads/:id', async (req: Request, res: Response) => {
    try {
        const uploadsConfig = await this.getUploadsConfig(req.params.id)
        return res.json(uploadsConfig)
    } catch (e) {
        return res.status(500).send(e)
    }
})
// ----------------------------------------
// ChatMessage
// ----------------------------------------
@ -599,6 +637,16 @@ export class App {
if (sessionId) deleteOptions.sessionId = sessionId
if (chatType) deleteOptions.chatType = chatType
// Delete all uploads corresponding to this chatflow/chatId
if (chatId) {
try {
const directory = path.join(getStoragePath(), chatflowid, chatId)
deleteFolderRecursive(directory)
} catch (e) {
logger.error(`[server]: Error deleting file storage for chatflow ${chatflowid}, chatId ${chatId}: ${e}`)
}
}
const results = await this.AppDataSource.getRepository(ChatMessage).delete(deleteOptions)
return res.json(results)
})
@ -1081,6 +1129,11 @@ export class App {
}
})
// Pipe the file at `filePath` straight into the HTTP response stream.
function streamFileToUser(res: Response, filePath: string) {
    fs.createReadStream(filePath).pipe(res)
}
// Download file from assistant
this.app.post('/api/v1/openai-assistants-file', async (req: Request, res: Response) => {
const filePath = path.join(getUserHome(), '.flowise', 'openai-assistant', req.body.fileName)
@ -1090,9 +1143,48 @@ export class App {
if (filePath.includes('..')) return res.status(500).send(`Invalid file path`)
//only return from the .flowise openai-assistant folder
if (!(filePath.includes('.flowise') && filePath.includes('openai-assistant'))) return res.status(500).send(`Invalid file path`)
res.setHeader('Content-Disposition', 'attachment; filename=' + path.basename(filePath))
const fileStream = fs.createReadStream(filePath)
fileStream.pipe(res)
if (fs.existsSync(filePath)) {
res.setHeader('Content-Disposition', contentDisposition(path.basename(filePath)))
streamFileToUser(res, filePath)
} else {
return res.status(404).send(`File ${req.body.fileName} not found`)
}
})
// Expose the server-side storage root so the UI can reconstruct absolute
// paths of stored upload files (used when exporting chat messages).
this.app.get('/api/v1/get-upload-path', async (req: Request, res: Response) => {
    return res.json({
        storagePath: getStoragePath()
    })
})
// stream uploaded image
// Streams a previously stored upload (image/audio file) back to the browser.
// Required query params: chatflowId, chatId, fileName — the file is looked up
// under <storagePath>/<chatflowId>/<chatId>/<fileName>.
this.app.get('/api/v1/get-upload-file', async (req: Request, res: Response) => {
    try {
        if (!req.query.chatflowId || !req.query.chatId || !req.query.fileName) {
            return res.status(500).send(`Invalid file path`)
        }
        const chatflowId = req.query.chatflowId as string
        const chatId = req.query.chatId as string
        const fileName = req.query.fileName as string
        const filePath = path.join(getStoragePath(), chatflowId, chatId, fileName)
        //raise error if file path is not absolute
        if (!path.isAbsolute(filePath)) return res.status(500).send(`Invalid file path`)
        //raise error if file path contains '..'
        if (filePath.includes('..')) return res.status(500).send(`Invalid file path`)
        //only return from the storage folder
        // NOTE(review): startsWith is a plain prefix check — a sibling directory whose
        // name begins with the storage path (e.g. `storage2`) would pass; consider a
        // path.relative containment check — TODO confirm
        if (!filePath.startsWith(getStoragePath())) return res.status(500).send(`Invalid file path`)
        if (fs.existsSync(filePath)) {
            res.setHeader('Content-Disposition', contentDisposition(path.basename(filePath)))
            streamFileToUser(res, filePath)
        } else {
            return res.status(404).send(`File ${fileName} not found`)
        }
    } catch (error) {
        return res.status(500).send(`Invalid file path`)
    }
})
// ----------------------------------------
@ -1424,6 +1516,74 @@ export class App {
return false
}
/**
 * Method that checks if uploads are enabled in the chatflow
 * @param {string} chatflowid
 * @returns a capability object { isSpeechToTextEnabled, isImageUploadAllowed, imgUploadSizeAndTypes },
 *          or a plain message string when the chatflow does not exist
 *          (kept as-is for backward compatibility with existing callers).
 */
async getUploadsConfig(chatflowid: string): Promise<any> {
    const chatflow = await this.AppDataSource.getRepository(ChatFlow).findOneBy({
        id: chatflowid
    })
    if (!chatflow) return `Chatflow ${chatflowid} not found`

    // Nodes that are able to receive uploaded files in a prediction request
    const uploadAllowedNodes = ['llmChain', 'conversationChain', 'mrklAgentChat', 'conversationalAgent']
    // Nodes that can actually process image uploads (must also have allowImageUploads ON)
    const uploadProcessingNodes = ['chatOpenAI']

    const flowObj = JSON.parse(chatflow.flowData)
    const imgUploadSizeAndTypes: IUploadFileSizeAndTypes[] = []

    // Speech-to-text is enabled when any provider other than 'none' has status ON
    let isSpeechToTextEnabled = false
    if (chatflow.speechToText) {
        const speechToTextProviders = JSON.parse(chatflow.speechToText)
        for (const provider in speechToTextProviders) {
            if (provider !== 'none' && speechToTextProviders[provider]?.status) {
                isSpeechToTextEnabled = true
                break
            }
        }
    }

    const nodes: IReactFlowNode[] = flowObj.nodes
    /*
     * Condition for isImageUploadAllowed
     * 1.) one of the uploadAllowedNodes exists
     * 2.) one of the uploadProcessingNodes exists + allowImageUploads is ON
     */
    if (!nodes.some((node) => uploadAllowedNodes.includes(node.data.name))) {
        return {
            isSpeechToTextEnabled,
            isImageUploadAllowed: false,
            imgUploadSizeAndTypes
        }
    }

    let isImageUploadAllowed = false
    for (const node of nodes) {
        if (uploadProcessingNodes.includes(node.data.name)) {
            // TODO: for now the maxUploadSize is hardcoded to 5MB, we need to add it to the node properties
            for (const param of node.data.inputParams) {
                if (param.name === 'allowImageUploads' && node.data.inputs?.['allowImageUploads']) {
                    imgUploadSizeAndTypes.push({
                        fileTypes: 'image/gif;image/jpeg;image/png;image/webp;'.split(';'),
                        maxUploadSize: 5
                    })
                    isImageUploadAllowed = true
                }
            }
        }
    }

    return {
        isSpeechToTextEnabled,
        isImageUploadAllowed,
        imgUploadSizeAndTypes
    }
}
/**
* Method that get chat messages.
* @param {string} chatflowid
@ -1637,6 +1797,57 @@ export class App {
if (!isKeyValidated) return res.status(401).send('Unauthorized')
}
let fileUploads: IFileUpload[] = []
if (incomingInput.uploads) {
fileUploads = incomingInput.uploads
for (let i = 0; i < fileUploads.length; i += 1) {
const upload = fileUploads[i]
if ((upload.type === 'file' || upload.type === 'audio') && upload.data) {
const filename = upload.name
const dir = path.join(getStoragePath(), chatflowid, chatId)
if (!fs.existsSync(dir)) {
fs.mkdirSync(dir, { recursive: true })
}
const filePath = path.join(dir, filename)
const splitDataURI = upload.data.split(',')
const bf = Buffer.from(splitDataURI.pop() || '', 'base64')
fs.writeFileSync(filePath, bf)
// Omit upload.data since we don't store the content in database
upload.type = 'stored-file'
fileUploads[i] = omit(upload, ['data'])
}
// Run Speech to Text conversion
if (upload.mime === 'audio/webm') {
let speechToTextConfig: ICommonObject = {}
if (chatflow.speechToText) {
const speechToTextProviders = JSON.parse(chatflow.speechToText)
for (const provider in speechToTextProviders) {
const providerObj = speechToTextProviders[provider]
if (providerObj.status) {
speechToTextConfig = providerObj
speechToTextConfig['name'] = provider
break
}
}
}
if (speechToTextConfig) {
const options: ICommonObject = {
chatId,
chatflowid,
appDataSource: this.AppDataSource,
databaseEntities: databaseEntities
}
const speechToTextResult = await convertSpeechToText(upload, speechToTextConfig, options)
if (speechToTextResult) {
incomingInput.question = speechToTextResult
}
}
}
}
}
let isStreamValid = false
const files = (req.files as any[]) || []
@ -1802,7 +2013,10 @@ export class App {
chatflowid,
this.AppDataSource,
incomingInput?.overrideConfig,
this.cachePool
this.cachePool,
false,
undefined,
incomingInput.uploads
)
const nodeToExecute =
@ -1841,6 +2055,7 @@ export class App {
appDataSource: this.AppDataSource,
databaseEntities,
analytic: chatflow.analytic,
uploads: incomingInput.uploads,
socketIO,
socketIOClientId: incomingInput.socketIOClientId
})
@ -1851,7 +2066,8 @@ export class App {
logger,
appDataSource: this.AppDataSource,
databaseEntities,
analytic: chatflow.analytic
analytic: chatflow.analytic,
uploads: incomingInput.uploads
})
result = typeof result === 'string' ? { text: result } : result
@ -1869,7 +2085,8 @@ export class App {
chatId,
memoryType,
sessionId,
createdDate: userMessageDateTime
createdDate: userMessageDateTime,
fileUploads: incomingInput.uploads ? JSON.stringify(fileUploads) : undefined
}
await this.addChatMessage(userMessage)
@ -1891,7 +2108,6 @@ export class App {
if (result?.usedTools) apiMessage.usedTools = JSON.stringify(result.usedTools)
if (result?.fileAnnotations) apiMessage.fileAnnotations = JSON.stringify(result.fileAnnotations)
const chatMessage = await this.addChatMessage(apiMessage)
result.chatMessageId = chatMessage.id
logger.debug(`[server]: Finished running ${nodeToExecuteData.label} (${nodeToExecuteData.id})`)
await this.telemetry.sendTelemetry('prediction_sent', {
@ -1903,7 +2119,11 @@ export class App {
})
// Prepare response
// return the question in the response
// this is used when input text is empty but question is in audio format
result.question = incomingInput.question
result.chatId = chatId
result.chatMessageId = chatMessage.id
if (sessionId) result.sessionId = sessionId
if (memoryType) result.memoryType = memoryType

View File

@ -27,7 +27,8 @@ import {
ICommonObject,
IDatabaseEntity,
IMessage,
FlowiseMemory
FlowiseMemory,
IFileUpload
} from 'flowise-components'
import { randomBytes } from 'crypto'
import { AES, enc } from 'crypto-js'
@ -279,7 +280,8 @@ export const buildFlow = async (
overrideConfig?: ICommonObject,
cachePool?: CachePool,
isUpsert?: boolean,
stopNodeId?: string
stopNodeId?: string,
uploads?: IFileUpload[]
) => {
const flowNodes = cloneDeep(reactFlowNodes)
@ -325,7 +327,8 @@ export const buildFlow = async (
appDataSource,
databaseEntities,
cachePool,
dynamicVariables
dynamicVariables,
uploads
})
logger.debug(`[server]: Finished upserting ${reactFlowNode.data.label} (${reactFlowNode.data.id})`)
break
@ -340,7 +343,8 @@ export const buildFlow = async (
appDataSource,
databaseEntities,
cachePool,
dynamicVariables
dynamicVariables,
uploads
})
// Save dynamic variables
@ -596,7 +600,6 @@ export const resolveVariables = (
}
const paramsObj = flowNodeData[types] ?? {}
getParamValues(paramsObj)
return flowNodeData
@ -1128,6 +1131,34 @@ export const getAllValuesFromJson = (obj: any): any[] => {
return values
}
/**
 * Delete file & folder recursively
 *
 * The previous implementation used callback-based fs APIs, so errors were
 * thrown inside callbacks (uncaught — they could crash the process), recursion
 * raced against the surrounding readdir, and empty directories — including the
 * target directory itself — were never removed, defeating the callers' intent
 * of deleting a chatflow's upload folder.
 *
 * This version works synchronously and depth-first: files (and symlinks) are
 * unlinked, subdirectories are cleared recursively, and finally the directory
 * itself is removed. It is a no-op when the directory does not exist.
 * @param {string} directory
 */
export const deleteFolderRecursive = (directory: string) => {
    if (!fs.existsSync(directory)) return
    for (const entry of fs.readdirSync(directory)) {
        const entryPath = path.join(directory, entry)
        // lstat so a symlink to a directory is unlinked rather than followed
        if (fs.lstatSync(entryPath).isDirectory()) {
            deleteFolderRecursive(entryPath)
        } else {
            fs.unlinkSync(entryPath)
        }
    }
    // Remove the (now empty) directory itself
    fs.rmdirSync(directory)
}
/**
* Get only essential flow data items for telemetry
* @param {IReactFlowNode[]} nodes

View File

@ -14,6 +14,8 @@ const deleteChatflow = (id) => client.delete(`/chatflows/${id}`)
const getIsChatflowStreaming = (id) => client.get(`/chatflows-streaming/${id}`)
// Whether the chatflow accepts file/audio uploads (GET /chatflows-uploads/:id)
const getAllowChatflowUploads = (id) => client.get(`/chatflows-uploads/${id}`)
export default {
getAllChatflows,
getSpecificChatflow,
@ -21,5 +23,6 @@ export default {
createNewChatflow,
updateChatflow,
deleteChatflow,
getIsChatflowStreaming
getIsChatflowStreaming,
getAllowChatflowUploads
}

View File

@ -4,10 +4,12 @@ const getInternalChatmessageFromChatflow = (id) => client.get(`/internal-chatmes
const getAllChatmessageFromChatflow = (id, params = {}) => client.get(`/chatmessage/${id}`, { params: { order: 'DESC', ...params } })
const getChatmessageFromPK = (id, params = {}) => client.get(`/chatmessage/${id}`, { params: { order: 'ASC', ...params } })
const deleteChatmessage = (id, params = {}) => client.delete(`/chatmessage/${id}`, { params: { ...params } })
// Server-side storage root for uploaded files (used to build export file paths)
const getStoragePath = () => client.get(`/get-upload-path`)
export default {
getInternalChatmessageFromChatflow,
getAllChatmessageFromChatflow,
getChatmessageFromPK,
deleteChatmessage
deleteChatmessage,
getStoragePath
}

Binary file not shown.

After

Width:  |  Height:  |  Size: 8.5 KiB

View File

@ -0,0 +1,8 @@
<svg width="32" height="32" viewBox="0 0 32 32" fill="none" xmlns="http://www.w3.org/2000/svg">
<path d="M14.9814 25.2665C15.353 26.2672 16.0645 27.1054 16.9914 27.6347C17.9183 28.164 19.0018 28.3507 20.0524 28.1622C21.103 27.9737 22.054 27.422 22.7391 26.6034C23.4242 25.7849 23.7998 24.7517 23.8004 23.6842V17.5533C23.8004 17.1909 23.6043 16.8569 23.2879 16.6802L15.9995 12.6108" stroke="black" stroke-width="2" stroke-linecap="round" stroke-linejoin="round"/>
<path d="M7.17701 19.5848C6.49568 20.4069 6.12505 21.4424 6.12993 22.5101C6.13481 23.5779 6.51489 24.6099 7.2037 25.4258C7.89252 26.2416 8.84622 26.7893 9.89802 26.9732C10.9498 27.157 12.0328 26.9653 12.9575 26.4314L18.1044 23.4263C18.4114 23.247 18.6002 22.9182 18.6002 22.5627V14.106" stroke="black" stroke-width="2" stroke-linecap="round" stroke-linejoin="round"/>
<path d="M8.19877 9.98459C6.39026 9.67775 4.57524 10.4982 3.60403 12.1806C3.00524 13.2178 2.84295 14.4504 3.15284 15.6073C3.46273 16.7642 4.21943 17.7507 5.25652 18.3498L10.3049 21.3269C10.6109 21.5074 10.9898 21.5119 11.3001 21.3388L18.6 17.2655" stroke="black" stroke-width="2" stroke-linecap="round" stroke-linejoin="round"/>
<path d="M17.0172 6.06585C16.6456 5.06522 15.9342 4.227 15.0072 3.6977C14.0803 3.1684 12.9969 2.98168 11.9462 3.17018C10.8956 3.35869 9.94464 3.91042 9.25954 4.72895C8.57444 5.54747 8.19879 6.58074 8.19824 7.64814V13.6575C8.19824 14.0154 8.38951 14.346 8.69977 14.5244L15.9992 18.7215" stroke="black" stroke-width="2" stroke-linecap="round" stroke-linejoin="round"/>
<path d="M24.8216 11.7476C25.5029 10.9255 25.8735 9.89004 25.8687 8.8223C25.8638 7.75457 25.4837 6.72253 24.7949 5.90667C24.1061 5.09082 23.1524 4.54308 22.1006 4.35924C21.0488 4.17541 19.9658 4.36718 19.0411 4.90101L13.8942 7.90613C13.5872 8.08539 13.3984 8.41418 13.3984 8.76971V17.2265" stroke="black" stroke-width="2" stroke-linecap="round" stroke-linejoin="round"/>
<path d="M23.7997 21.2595C25.6082 21.5663 27.4232 20.7459 28.3944 19.0635C28.9932 18.0263 29.1555 16.7937 28.8456 15.6368C28.5357 14.4799 27.779 13.4934 26.7419 12.8943L21.6409 9.91752C21.3316 9.73703 20.9494 9.7357 20.6388 9.91405L17.2696 11.849L13.3984 14.0723" stroke="black" stroke-width="2" stroke-linecap="round" stroke-linejoin="round"/>
</svg>

After

Width:  |  Height:  |  Size: 2.2 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 323 KiB

View File

@ -1,8 +1,17 @@
// assets
import { IconTrash, IconFileUpload, IconFileExport, IconCopy, IconSearch, IconMessage, IconPictureInPictureOff } from '@tabler/icons'
import {
IconTrash,
IconFileUpload,
IconFileExport,
IconCopy,
IconSearch,
IconMessage,
IconPictureInPictureOff,
IconMicrophone
} from '@tabler/icons'
// constant
const icons = { IconTrash, IconFileUpload, IconFileExport, IconCopy, IconSearch, IconMessage, IconPictureInPictureOff }
const icons = { IconTrash, IconFileUpload, IconFileExport, IconCopy, IconSearch, IconMessage, IconPictureInPictureOff, IconMicrophone }
// ==============================|| SETTINGS MENU ITEMS ||============================== //
@ -25,6 +34,13 @@ const settings = {
url: '',
icon: icons.IconMessage
},
{
id: 'enableSpeechToText',
title: 'Speech to Text',
type: 'item',
url: '',
icon: icons.IconMicrophone
},
{
id: 'duplicateChatflow',
title: 'Duplicate Chatflow',

View File

@ -0,0 +1,57 @@
import { styled } from '@mui/material/styles'
import ButtonBase from '@mui/material/ButtonBase'
// Clickable image tile: on hover/focus the backdrop dims, the marker and the
// typography border become visible. Collapses to full width on small screens.
export const ImageButton = styled(ButtonBase)(({ theme }) => ({
    position: 'relative',
    height: 200,
    borderRadius: '10px',
    [theme.breakpoints.down('sm')]: {
        width: '100% !important', // Overrides inline-style
        height: 100
    },
    '&:hover, &.Mui-focusVisible': {
        zIndex: 1,
        '& .MuiImageBackdrop-root': {
            opacity: 0.4
        },
        '& .MuiImageMarked-root': {
            opacity: 1
        },
        '& .MuiTypography-root': {
            border: '4px solid currentColor'
        }
    }
}))
// Absolutely-positioned background-image layer that fills the parent button
export const ImageSrc = styled('span')({
    position: 'absolute',
    borderRadius: '10px',
    left: 0,
    right: 0,
    top: 0,
    bottom: 0,
    backgroundSize: 'cover',
    backgroundPosition: 'center 40%'
})
// Semi-transparent black overlay; its opacity is raised on hover (see ImageButton)
export const ImageBackdrop = styled('span')(({ theme }) => ({
    position: 'absolute',
    borderRadius: '10px',
    left: 0,
    right: 0,
    top: 0,
    bottom: 0,
    backgroundColor: theme.palette.common.black,
    opacity: 0.1,
    transition: theme.transitions.create('opacity')
}))
// Small marker element, invisible until the button is hovered/focused (see ImageButton)
export const ImageMarked = styled('span')(() => ({
    height: 25,
    width: 25,
    backgroundColor: 'transparent',
    position: 'absolute',
    top: 'auto',
    left: 'auto',
    opacity: 0
}))

View File

@ -1,7 +1,4 @@
.button-container {
position: absolute;
bottom: 0;
z-index: 1000;
display: flex;
overflow-x: auto;
-webkit-overflow-scrolling: touch; /* For momentum scroll on mobile devices */
@ -10,5 +7,4 @@
.button {
flex: 0 0 auto; /* Don't grow, don't shrink, base width on content */
margin: 5px; /* Adjust as needed for spacing between buttons */
}

View File

@ -3,9 +3,12 @@ import PropTypes from 'prop-types'
import { Chip } from '@mui/material'
import './StarterPromptsCard.css'
const StarterPromptsCard = ({ isGrid, starterPrompts, onPromptClick }) => {
const StarterPromptsCard = ({ isGrid, starterPrompts, sx, onPromptClick }) => {
return (
<Box className={'button-container'} sx={{ maxWidth: isGrid ? 'inherit' : '400px', m: 1 }}>
<Box
className={'button-container'}
sx={{ width: '100%', maxWidth: isGrid ? 'inherit' : '400px', p: 1.5, display: 'flex', gap: 1, ...sx }}
>
{starterPrompts.map((sp, index) => (
<Chip label={sp.prompt} className={'button'} key={index} onClick={(e) => onPromptClick(sp.prompt, e)} />
))}
@ -15,7 +18,8 @@ const StarterPromptsCard = ({ isGrid, starterPrompts, onPromptClick }) => {
StarterPromptsCard.propTypes = {
isGrid: PropTypes.bool,
starterPrompts: PropTypes.arrayOf(PropTypes.string),
starterPrompts: PropTypes.array,
sx: PropTypes.object,
onPromptClick: PropTypes.func
}

View File

@ -0,0 +1,348 @@
import { createPortal } from 'react-dom'
import { useDispatch } from 'react-redux'
import { useState, useEffect } from 'react'
import PropTypes from 'prop-types'
import { enqueueSnackbar as enqueueSnackbarAction, closeSnackbar as closeSnackbarAction, SET_CHATFLOW } from 'store/actions'
// material-ui
import {
Typography,
Box,
Button,
Dialog,
DialogContent,
DialogTitle,
DialogActions,
FormControl,
ListItem,
ListItemAvatar,
ListItemText,
MenuItem,
Select
} from '@mui/material'
import { IconX } from '@tabler/icons'
// Project import
import CredentialInputHandler from 'views/canvas/CredentialInputHandler'
import { TooltipWithParser } from 'ui-component/tooltip/TooltipWithParser'
import { SwitchInput } from 'ui-component/switch/Switch'
import { Input } from 'ui-component/input/Input'
import { StyledButton } from 'ui-component/button/StyledButton'
import { Dropdown } from 'ui-component/dropdown/Dropdown'
import openAISVG from 'assets/images/openai.svg'
import assemblyAIPng from 'assets/images/assemblyai.png'
// store
import { HIDE_CANVAS_DIALOG, SHOW_CANVAS_DIALOG } from 'store/actions'
import useNotifier from 'utils/useNotifier'
// API
import chatflowsApi from 'api/chatflows'
// Registry of supported speech-to-text providers.
// Each entry carries display metadata (label, icon, docs url) plus the list of
// input fields rendered by SpeechToTextDialog; 'credential' inputs resolve to
// stored credentials, the remaining fields map 1:1 to provider options.
const speechToTextProviders = {
    openAIWhisper: {
        label: 'OpenAI Whisper',
        name: 'openAIWhisper',
        icon: openAISVG,
        url: 'https://platform.openai.com/docs/guides/speech-to-text',
        inputs: [
            {
                label: 'Connect Credential',
                name: 'credential',
                type: 'credential',
                credentialNames: ['openAIApi']
            },
            {
                label: 'Language',
                name: 'language',
                type: 'string',
                description:
                    'The language of the input audio. Supplying the input language in ISO-639-1 format will improve accuracy and latency.',
                placeholder: 'en',
                optional: true
            },
            {
                label: 'Prompt',
                name: 'prompt',
                type: 'string',
                rows: 4,
                description: `An optional text to guide the model's style or continue a previous audio segment. The prompt should match the audio language.`,
                optional: true
            },
            {
                label: 'Temperature',
                name: 'temperature',
                type: 'number',
                step: 0.1,
                description: `The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.`,
                optional: true
            }
        ]
    },
    assemblyAiTranscribe: {
        label: 'Assembly AI',
        name: 'assemblyAiTranscribe',
        icon: assemblyAIPng,
        url: 'https://www.assemblyai.com/',
        inputs: [
            {
                label: 'Connect Credential',
                name: 'credential',
                type: 'credential',
                credentialNames: ['assemblyAIApi']
            }
        ]
    }
}
// Dialog for selecting and configuring the speech-to-text provider of a chatflow.
// The configuration is persisted on the chatflow record as a JSON string
// (`speechToText`); at most one provider has `status: true` at a time.
const SpeechToTextDialog = ({ show, dialogProps, onCancel }) => {
    const portalElement = document.getElementById('portal')
    const dispatch = useDispatch()

    useNotifier()

    const enqueueSnackbar = (...args) => dispatch(enqueueSnackbarAction(...args))
    const closeSnackbar = (...args) => dispatch(closeSnackbarAction(...args))

    // Map of providerName -> saved input values (credentialId, language, ...)
    const [speechToText, setSpeechToText] = useState({})
    const [selectedProvider, setSelectedProvider] = useState('none')

    const onSave = async () => {
        // Flag the selected provider active (others inactive), then persist the result
        const speechToText = setValue(true, selectedProvider, 'status')
        try {
            const saveResp = await chatflowsApi.updateChatflow(dialogProps.chatflow.id, {
                speechToText: JSON.stringify(speechToText)
            })
            if (saveResp.data) {
                enqueueSnackbar({
                    message: 'Speech To Text Configuration Saved',
                    options: {
                        key: new Date().getTime() + Math.random(),
                        variant: 'success',
                        action: (key) => (
                            <Button style={{ color: 'white' }} onClick={() => closeSnackbar(key)}>
                                <IconX />
                            </Button>
                        )
                    }
                })
                // Keep the redux copy of the chatflow in sync with the saved record
                dispatch({ type: SET_CHATFLOW, chatflow: saveResp.data })
            }
            onCancel()
        } catch (error) {
            // NOTE(review): assumes error.response is always present (axios-style error) — TODO confirm
            const errorData = error.response.data || `${error.response.status}: ${error.response.statusText}`
            enqueueSnackbar({
                message: `Failed to save Speech To Text Configuration: ${errorData}`,
                options: {
                    key: new Date().getTime() + Math.random(),
                    variant: 'error',
                    persist: true,
                    action: (key) => (
                        <Button style={{ color: 'white' }} onClick={() => closeSnackbar(key)}>
                            <IconX />
                        </Button>
                    )
                }
            })
        }
    }

    // Record one input value for a provider and return the updated config object.
    // Setting `status: true` also switches every other known provider's status
    // off, enforcing the single-active-provider invariant.
    const setValue = (value, providerName, inputParamName) => {
        let newVal = {}
        if (!Object.prototype.hasOwnProperty.call(speechToText, providerName)) {
            newVal = { ...speechToText, [providerName]: {} }
        } else {
            newVal = { ...speechToText }
        }

        newVal[providerName][inputParamName] = value
        if (inputParamName === 'status' && value === true) {
            // ensure that the others are turned off
            Object.keys(speechToTextProviders).forEach((key) => {
                const provider = speechToTextProviders[key]
                if (provider.name !== providerName) {
                    newVal[provider.name] = { ...speechToText[provider.name], status: false }
                }
            })
        }
        setSpeechToText(newVal)
        return newVal
    }

    const handleProviderChange = (event) => {
        setSelectedProvider(event.target.value)
    }

    // Hydrate local state from the chatflow's stored configuration whenever the
    // dialog props change; reset to defaults on cleanup.
    useEffect(() => {
        if (dialogProps.chatflow && dialogProps.chatflow.speechToText) {
            try {
                const speechToText = JSON.parse(dialogProps.chatflow.speechToText)
                let selectedProvider = 'none'
                Object.keys(speechToTextProviders).forEach((key) => {
                    const providerConfig = speechToText[key]
                    if (providerConfig && providerConfig.status) {
                        selectedProvider = key
                    }
                })
                setSelectedProvider(selectedProvider)
                setSpeechToText(speechToText)
            } catch (e) {
                // Malformed stored JSON: fall back to an unconfigured state
                setSpeechToText({})
                setSelectedProvider('none')
                console.error(e)
            }
        }

        return () => {
            setSpeechToText({})
            setSelectedProvider('none')
        }
    }, [dialogProps])

    // Tell the canvas a modal dialog is open/closed (disables canvas shortcuts)
    useEffect(() => {
        if (show) dispatch({ type: SHOW_CANVAS_DIALOG })
        else dispatch({ type: HIDE_CANVAS_DIALOG })
        return () => dispatch({ type: HIDE_CANVAS_DIALOG })
    }, [show, dispatch])

    const component = (
        <Dialog
            onClose={onCancel}
            open={show}
            fullWidth
            maxWidth='sm'
            aria-labelledby='alert-dialog-title'
            aria-describedby='alert-dialog-description'
        >
            <DialogTitle sx={{ fontSize: '1rem' }} id='alert-dialog-title'>
                Speech To Text Configuration
            </DialogTitle>
            <DialogContent>
                <Box fullWidth sx={{ my: 2, display: 'flex', flexDirection: 'column', gap: 1 }}>
                    <Typography>Speech To Text Providers</Typography>
                    <FormControl fullWidth>
                        <Select value={selectedProvider} onChange={handleProviderChange}>
                            <MenuItem value='none'>None</MenuItem>
                            <MenuItem value='openAIWhisper'>OpenAI Whisper</MenuItem>
                            <MenuItem value='assemblyAiTranscribe'>Assembly AI</MenuItem>
                        </Select>
                    </FormControl>
                </Box>
                {selectedProvider !== 'none' && (
                    <>
                        <ListItem style={{ padding: 0, margin: 0 }} alignItems='center'>
                            <ListItemAvatar>
                                <div
                                    style={{
                                        width: 50,
                                        height: 50,
                                        borderRadius: '50%',
                                        backgroundColor: 'white'
                                    }}
                                >
                                    <img
                                        style={{
                                            width: '100%',
                                            height: '100%',
                                            padding: 10,
                                            objectFit: 'contain'
                                        }}
                                        alt='AI'
                                        src={speechToTextProviders[selectedProvider].icon}
                                    />
                                </div>
                            </ListItemAvatar>
                            <ListItemText
                                sx={{ ml: 1 }}
                                primary={speechToTextProviders[selectedProvider].label}
                                secondary={
                                    <a target='_blank' rel='noreferrer' href={speechToTextProviders[selectedProvider].url}>
                                        {speechToTextProviders[selectedProvider].url}
                                    </a>
                                }
                            />
                        </ListItem>
                        {speechToTextProviders[selectedProvider].inputs.map((inputParam, index) => (
                            <Box key={index} sx={{ p: 2 }}>
                                <div style={{ display: 'flex', flexDirection: 'row' }}>
                                    <Typography>
                                        {inputParam.label}
                                        {!inputParam.optional && <span style={{ color: 'red' }}>&nbsp;*</span>}
                                        {inputParam.description && (
                                            <TooltipWithParser style={{ marginLeft: 10 }} title={inputParam.description} />
                                        )}
                                    </Typography>
                                </div>
                                {inputParam.type === 'credential' && (
                                    <CredentialInputHandler
                                        key={speechToText[selectedProvider]?.credentialId}
                                        data={
                                            speechToText[selectedProvider]?.credentialId
                                                ? { credential: speechToText[selectedProvider].credentialId }
                                                : {}
                                        }
                                        inputParam={inputParam}
                                        onSelect={(newValue) => setValue(newValue, selectedProvider, 'credentialId')}
                                    />
                                )}
                                {inputParam.type === 'boolean' && (
                                    <SwitchInput
                                        onChange={(newValue) => setValue(newValue, selectedProvider, inputParam.name)}
                                        value={
                                            speechToText[selectedProvider]
                                                ? speechToText[selectedProvider][inputParam.name]
                                                : inputParam.default ?? false
                                        }
                                    />
                                )}
                                {(inputParam.type === 'string' || inputParam.type === 'password' || inputParam.type === 'number') && (
                                    <Input
                                        inputParam={inputParam}
                                        onChange={(newValue) => setValue(newValue, selectedProvider, inputParam.name)}
                                        value={
                                            speechToText[selectedProvider]
                                                ? speechToText[selectedProvider][inputParam.name]
                                                : inputParam.default ?? ''
                                        }
                                    />
                                )}
                                {inputParam.type === 'options' && (
                                    <Dropdown
                                        name={inputParam.name}
                                        options={inputParam.options}
                                        onSelect={(newValue) => setValue(newValue, selectedProvider, inputParam.name)}
                                        value={
                                            speechToText[selectedProvider]
                                                ? speechToText[selectedProvider][inputParam.name]
                                                : inputParam.default ?? 'choose an option'
                                        }
                                    />
                                )}
                            </Box>
                        ))}
                    </>
                )}
            </DialogContent>
            <DialogActions>
                <StyledButton
                    disabled={selectedProvider !== 'none' && !speechToText[selectedProvider]?.credentialId}
                    variant='contained'
                    onClick={onSave}
                >
                    Save
                </StyledButton>
            </DialogActions>
        </Dialog>
    )

    return createPortal(component, portalElement)
}

SpeechToTextDialog.propTypes = {
    show: PropTypes.bool,
    dialogProps: PropTypes.object,
    onCancel: PropTypes.func
}

export default SpeechToTextDialog

View File

@ -21,7 +21,9 @@ import {
DialogTitle,
ListItem,
ListItemText,
Chip
Chip,
Card,
CardMedia
} from '@mui/material'
import { useTheme } from '@mui/material/styles'
import DatePicker from 'react-datepicker'
@ -47,7 +49,7 @@ import useApi from 'hooks/useApi'
import useConfirm from 'hooks/useConfirm'
// Utils
import { isValidURL, removeDuplicateURL } from 'utils/genericHelper'
import { getOS, isValidURL, removeDuplicateURL } from 'utils/genericHelper'
import useNotifier from 'utils/useNotifier'
import { baseURL } from 'store/constant'
@ -69,6 +71,12 @@ DatePickerCustomInput.propTypes = {
onClick: PropTypes.func
}
const messageImageStyle = {
width: '128px',
height: '128px',
objectFit: 'cover'
}
const ViewMessagesDialog = ({ show, dialogProps, onCancel }) => {
const portalElement = document.getElementById('portal')
const dispatch = useDispatch()
@ -92,6 +100,8 @@ const ViewMessagesDialog = ({ show, dialogProps, onCancel }) => {
const getChatmessageApi = useApi(chatmessageApi.getAllChatmessageFromChatflow)
const getChatmessageFromPKApi = useApi(chatmessageApi.getChatmessageFromPK)
const getStoragePathFromServer = useApi(chatmessageApi.getStoragePath)
let storagePath = ''
const onStartDateSelected = (date) => {
setStartDate(date)
@ -120,16 +130,35 @@ const ViewMessagesDialog = ({ show, dialogProps, onCancel }) => {
})
}
const exportMessages = () => {
const exportMessages = async () => {
if (!storagePath && getStoragePathFromServer.data) {
storagePath = getStoragePathFromServer.data.storagePath
}
const obj = {}
let fileSeparator = '/'
if ('windows' === getOS()) {
fileSeparator = '\\'
}
for (let i = 0; i < allChatlogs.length; i += 1) {
const chatmsg = allChatlogs[i]
const chatPK = getChatPK(chatmsg)
let filePaths = []
if (chatmsg.fileUploads) {
chatmsg.fileUploads = JSON.parse(chatmsg.fileUploads)
chatmsg.fileUploads.forEach((file) => {
if (file.type === 'stored-file') {
filePaths.push(
`${storagePath}${fileSeparator}${chatmsg.chatflowid}${fileSeparator}${chatmsg.chatId}${fileSeparator}${file.name}`
)
}
})
}
const msg = {
content: chatmsg.content,
role: chatmsg.role === 'apiMessage' ? 'bot' : 'user',
time: chatmsg.createdDate
}
if (filePaths.length) msg.filePaths = filePaths
if (chatmsg.sourceDocuments) msg.sourceDocuments = JSON.parse(chatmsg.sourceDocuments)
if (chatmsg.usedTools) msg.usedTools = JSON.parse(chatmsg.usedTools)
if (chatmsg.fileAnnotations) msg.fileAnnotations = JSON.parse(chatmsg.fileAnnotations)
@ -249,6 +278,14 @@ const ViewMessagesDialog = ({ show, dialogProps, onCancel }) => {
})
}
}
if (chatmsg.fileUploads) {
chatmsg.fileUploads = JSON.parse(chatmsg.fileUploads)
chatmsg.fileUploads.forEach((file) => {
if (file.type === 'stored-file') {
file.data = `${baseURL}/api/v1/get-upload-file?chatflowId=${chatmsg.chatflowid}&chatId=${chatmsg.chatId}&fileName=${file.name}`
}
})
}
const obj = {
...chatmsg,
message: chatmsg.content,
@ -357,6 +394,8 @@ const ViewMessagesDialog = ({ show, dialogProps, onCancel }) => {
useEffect(() => {
if (getChatmessageApi.data) {
getStoragePathFromServer.request()
setAllChatLogs(getChatmessageApi.data)
const chatPK = processChatLogs(getChatmessageApi.data)
setSelectedMessageIndex(0)
@ -593,8 +632,8 @@ const ViewMessagesDialog = ({ show, dialogProps, onCancel }) => {
sx={{
background:
message.type === 'apiMessage' ? theme.palette.asyncSelect.main : '',
pl: 1,
pr: 1
py: '1rem',
px: '1.5rem'
}}
key={index}
style={{ display: 'flex', justifyContent: 'center', alignContent: 'center' }}
@ -644,6 +683,51 @@ const ViewMessagesDialog = ({ show, dialogProps, onCancel }) => {
})}
</div>
)}
{message.fileUploads && message.fileUploads.length > 0 && (
<div
style={{
display: 'flex',
flexWrap: 'wrap',
flexDirection: 'column',
width: '100%',
gap: '8px'
}}
>
{message.fileUploads.map((item, index) => {
return (
<>
{item.mime.startsWith('image/') ? (
<Card
key={index}
sx={{
p: 0,
m: 0,
maxWidth: 128,
marginRight: '10px',
flex: '0 0 auto'
}}
>
<CardMedia
component='img'
image={item.data}
sx={{ height: 64 }}
alt={'preview'}
style={messageImageStyle}
/>
</Card>
) : (
// eslint-disable-next-line jsx-a11y/media-has-caption
<audio controls='controls'>
Your browser does not support the &lt;audio&gt;
tag.
<source src={item.data} type={item.mime} />
</audio>
)}
</>
)
})}
</div>
)}
<div className='markdownanswer'>
{/* Messages are being rendered in Markdown format */}
<MemoizedReactMarkdown

View File

@ -607,3 +607,25 @@ export const getConfigExamplesForCurl = (configData, bodyType, isMultiple, stopN
}
return finalStr
}
export const getOS = () => {
let userAgent = window.navigator.userAgent.toLowerCase(),
macosPlatforms = /(macintosh|macintel|macppc|mac68k|macos)/i,
windowsPlatforms = /(win32|win64|windows|wince)/i,
iosPlatforms = /(iphone|ipad|ipod)/i,
os = null
if (macosPlatforms.test(userAgent)) {
os = 'macos'
} else if (iosPlatforms.test(userAgent)) {
os = 'ios'
} else if (windowsPlatforms.test(userAgent)) {
os = 'windows'
} else if (/android/.test(userAgent)) {
os = 'android'
} else if (!os && /linux/.test(userAgent)) {
os = 'linux'
}
return os
}

View File

@ -28,6 +28,7 @@ import useApi from 'hooks/useApi'
import { generateExportFlowData } from 'utils/genericHelper'
import { uiBaseURL } from 'store/constant'
import { SET_CHATFLOW } from 'store/actions'
import SpeechToTextDialog from '../../ui-component/dialog/SpeechToTextDialog'
// ==============================|| CANVAS HEADER ||============================== //
@ -46,6 +47,8 @@ const CanvasHeader = ({ chatflow, handleSaveFlow, handleDeleteFlow, handleLoadFl
const [apiDialogProps, setAPIDialogProps] = useState({})
const [analyseDialogOpen, setAnalyseDialogOpen] = useState(false)
const [analyseDialogProps, setAnalyseDialogProps] = useState({})
const [speechToAudioDialogOpen, setSpeechToAudioDialogOpen] = useState(false)
const [speechToAudioDialogProps, setSpeechToAudioialogProps] = useState({})
const [conversationStartersDialogOpen, setConversationStartersDialogOpen] = useState(false)
const [conversationStartersDialogProps, setConversationStartersDialogProps] = useState({})
const [viewMessagesDialogOpen, setViewMessagesDialogOpen] = useState(false)
@ -71,6 +74,12 @@ const CanvasHeader = ({ chatflow, handleSaveFlow, handleDeleteFlow, handleLoadFl
chatflow: chatflow
})
setAnalyseDialogOpen(true)
} else if (setting === 'enableSpeechToText') {
setSpeechToAudioialogProps({
title: 'Speech to Text',
chatflow: chatflow
})
setSpeechToAudioDialogOpen(true)
} else if (setting === 'viewMessages') {
setViewMessagesDialogProps({
title: 'View Messages',
@ -385,6 +394,11 @@ const CanvasHeader = ({ chatflow, handleSaveFlow, handleDeleteFlow, handleLoadFl
/>
<APICodeDialog show={apiDialogOpen} dialogProps={apiDialogProps} onCancel={() => setAPIDialogOpen(false)} />
<AnalyseFlowDialog show={analyseDialogOpen} dialogProps={analyseDialogProps} onCancel={() => setAnalyseDialogOpen(false)} />
<SpeechToTextDialog
show={speechToAudioDialogOpen}
dialogProps={speechToAudioDialogProps}
onCancel={() => setSpeechToAudioDialogOpen(false)}
/>
<StarterPromptsDialog
show={conversationStartersDialogOpen}
dialogProps={conversationStartersDialogProps}

View File

@ -1,5 +1,5 @@
import PropTypes from 'prop-types'
import { useRef, useState } from 'react'
import { useEffect, useRef, useState } from 'react'
// material-ui
import { IconButton } from '@mui/material'
@ -88,6 +88,10 @@ const CredentialInputHandler = ({ inputParam, data, onSelect, disabled = false }
setShowSpecificCredentialDialog(true)
}
useEffect(() => {
setCredentialId(data?.credential ?? '')
}, [data])
return (
<div ref={ref}>
{inputParam && (

View File

@ -7,7 +7,7 @@ import { ChatMessage } from './ChatMessage'
import { StyledButton } from 'ui-component/button/StyledButton'
import { IconEraser } from '@tabler/icons'
const ChatExpandDialog = ({ show, dialogProps, onClear, onCancel }) => {
const ChatExpandDialog = ({ show, dialogProps, onClear, onCancel, previews, setPreviews }) => {
const portalElement = document.getElementById('portal')
const customization = useSelector((state) => state.customization)
@ -21,7 +21,7 @@ const ChatExpandDialog = ({ show, dialogProps, onClear, onCancel }) => {
aria-describedby='alert-dialog-description'
sx={{ overflow: 'visible' }}
>
<DialogTitle sx={{ fontSize: '1rem' }} id='alert-dialog-title'>
<DialogTitle sx={{ fontSize: '1rem', p: 1.5 }} id='alert-dialog-title'>
<div style={{ display: 'flex', flexDirection: 'row' }}>
{dialogProps.title}
<div style={{ flex: 1 }}></div>
@ -43,8 +43,17 @@ const ChatExpandDialog = ({ show, dialogProps, onClear, onCancel }) => {
)}
</div>
</DialogTitle>
<DialogContent sx={{ display: 'flex', justifyContent: 'flex-end', flexDirection: 'column' }}>
<ChatMessage isDialog={true} open={dialogProps.open} chatflowid={dialogProps.chatflowid} />
<DialogContent
className='cloud-dialog-wrapper'
sx={{ display: 'flex', justifyContent: 'flex-end', flexDirection: 'column', p: 0 }}
>
<ChatMessage
isDialog={true}
open={dialogProps.open}
chatflowid={dialogProps.chatflowid}
previews={previews}
setPreviews={setPreviews}
/>
</DialogContent>
</Dialog>
) : null
@ -56,7 +65,9 @@ ChatExpandDialog.propTypes = {
show: PropTypes.bool,
dialogProps: PropTypes.object,
onClear: PropTypes.func,
onCancel: PropTypes.func
onCancel: PropTypes.func,
previews: PropTypes.array,
setPreviews: PropTypes.func
}
export default ChatExpandDialog

View File

@ -1,8 +1,6 @@
.messagelist {
width: 100%;
height: 100%;
overflow-y: scroll;
overflow-x: hidden;
height: auto;
border-radius: 0.5rem;
}
@ -108,31 +106,57 @@
}
.center {
width: 100%;
display: flex;
justify-content: center;
align-items: center;
position: relative;
flex-direction: column;
padding: 10px;
padding: 12px;
}
.cloud {
.cloud-wrapper {
width: 400px;
height: calc(100vh - 260px);
border-radius: 0.5rem;
display: flex;
justify-content: center;
align-items: center;
height: calc(100vh - 180px);
}
.cloud-dialog-wrapper {
width: 100%;
height: calc(100vh - 120px);
}
.cloud-wrapper > div,
.cloud-dialog-wrapper > div {
width: 100%;
height: 100%;
display: flex;
align-items: flex-start;
justify-content: flex-start;
flex-direction: column;
position: relative;
}
.image-dropzone {
position: absolute;
width: 100%;
height: 100%;
top: 0;
left: 0;
bottom: 0;
right: 0;
z-index: 2001; /* Ensure it's above other content */
}
.cloud,
.cloud-dialog {
width: 100%;
height: 100vh;
height: auto;
max-height: calc(100% - 54px);
overflow-y: scroll;
border-radius: 0.5rem;
display: flex;
justify-content: center;
align-items: center;
align-items: flex-start;
flex-grow: 1;
}
.cloud-message {
@ -144,3 +168,38 @@
justify-content: center;
align-items: center;
}
.preview {
position: absolute;
bottom: 0;
z-index: 1000;
display: flex;
overflow-x: auto;
-webkit-overflow-scrolling: touch; /* For momentum scroll on mobile devices */
scrollbar-width: none; /* For Firefox */
}
.file-drop-field {
position: relative; /* Needed to position the icon correctly */
/* Other styling for the field */
}
.drop-overlay {
position: absolute;
top: 0;
left: 0;
right: 0;
bottom: 0;
background-color: rgba(137, 134, 134, 0.83); /* Semi-transparent white */
display: flex;
flex-direction: column;
justify-content: center;
align-items: center;
z-index: 2000; /* Ensure it's above other content */
border: 2px dashed #0094ff; /* Example style */
}
.center audio {
height: 100%;
border-radius: 0;
}

View File

@ -1,4 +1,4 @@
import { useState, useRef, useEffect, useCallback } from 'react'
import { useState, useRef, useEffect, useCallback, Fragment } from 'react'
import { useSelector } from 'react-redux'
import PropTypes from 'prop-types'
import socketIOClient from 'socket.io-client'
@ -9,15 +9,34 @@ import remarkGfm from 'remark-gfm'
import remarkMath from 'remark-math'
import axios from 'axios'
import { CircularProgress, OutlinedInput, Divider, InputAdornment, IconButton, Box, Chip, Button } from '@mui/material'
import {
Box,
Button,
Card,
CardMedia,
Chip,
CircularProgress,
Divider,
IconButton,
InputAdornment,
OutlinedInput,
Typography
} from '@mui/material'
import { useTheme } from '@mui/material/styles'
import { IconSend, IconDownload } from '@tabler/icons'
import { IconCircleDot, IconDownload, IconSend, IconMicrophone, IconPhotoPlus, IconTrash, IconX } from '@tabler/icons'
import robotPNG from 'assets/images/robot.png'
import userPNG from 'assets/images/account.png'
import audioUploadSVG from 'assets/images/wave-sound.jpg'
// project import
import { CodeBlock } from 'ui-component/markdown/CodeBlock'
import { MemoizedReactMarkdown } from 'ui-component/markdown/MemoizedReactMarkdown'
import SourceDocDialog from 'ui-component/dialog/SourceDocDialog'
import StarterPromptsCard from 'ui-component/cards/StarterPromptsCard'
import { cancelAudioRecording, startAudioRecording, stopAudioRecording } from './audio-recording'
import { ImageButton, ImageSrc, ImageBackdrop, ImageMarked } from 'ui-component/button/ImageButton'
import './ChatMessage.css'
import './audio-recording.css'
// api
import chatmessageApi from 'api/chatmessage'
@ -30,12 +49,16 @@ import useApi from 'hooks/useApi'
// Const
import { baseURL, maxScroll } from 'store/constant'
import robotPNG from 'assets/images/robot.png'
import userPNG from 'assets/images/account.png'
import StarterPromptsCard from '../../ui-component/cards/StarterPromptsCard'
// Utils
import { isValidURL, removeDuplicateURL, setLocalStorageChatflow } from 'utils/genericHelper'
export const ChatMessage = ({ open, chatflowid, isDialog }) => {
const messageImageStyle = {
width: '128px',
height: '128px',
objectFit: 'cover'
}
export const ChatMessage = ({ open, chatflowid, isDialog, previews, setPreviews }) => {
const theme = useTheme()
const customization = useSelector((state) => state.customization)
@ -51,6 +74,7 @@ export const ChatMessage = ({ open, chatflowid, isDialog }) => {
])
const [socketIOClientId, setSocketIOClientId] = useState('')
const [isChatFlowAvailableToStream, setIsChatFlowAvailableToStream] = useState(false)
const [isChatFlowAvailableForSpeech, setIsChatFlowAvailableForSpeech] = useState(false)
const [sourceDialogOpen, setSourceDialogOpen] = useState(false)
const [sourceDialogProps, setSourceDialogProps] = useState({})
const [chatId, setChatId] = useState(undefined)
@ -58,10 +82,220 @@ export const ChatMessage = ({ open, chatflowid, isDialog }) => {
const inputRef = useRef(null)
const getChatmessageApi = useApi(chatmessageApi.getInternalChatmessageFromChatflow)
const getIsChatflowStreamingApi = useApi(chatflowsApi.getIsChatflowStreaming)
const getAllowChatFlowUploads = useApi(chatflowsApi.getAllowChatflowUploads)
const getChatflowConfig = useApi(chatflowsApi.getSpecificChatflow)
const [starterPrompts, setStarterPrompts] = useState([])
// drag & drop and file input
const fileUploadRef = useRef(null)
const [isChatFlowAvailableForUploads, setIsChatFlowAvailableForUploads] = useState(false)
const [isDragActive, setIsDragActive] = useState(false)
// recording
const [isRecording, setIsRecording] = useState(false)
const [recordingNotSupported, setRecordingNotSupported] = useState(false)
const [isLoadingRecording, setIsLoadingRecording] = useState(false)
/**
 * Validates a candidate upload against the chatflow's allowed image types and
 * maximum size (in MB). Alerts the user and returns false on rejection.
 *
 * @param {File} file - browser File selected or dropped by the user
 * @returns {boolean} true when the file may be uploaded
 */
const isFileAllowedForUpload = (file) => {
    // Expected shape (from the uploads-capability endpoint):
    // { isImageUploadAllowed: boolean,
    //   imgUploadSizeAndTypes: Array<{ fileTypes: string[], maxUploadSize: number }> }
    const constraints = getAllowChatFlowUploads.data
    let acceptFile = false
    // Optional chaining guards the case where the capability data has not
    // loaded yet (previously this would throw on undefined).
    if (constraints?.isImageUploadAllowed) {
        const fileType = file.type
        const sizeInMB = file.size / 1024 / 1024
        // some() replaces the previous side-effecting map(): stop at the
        // first constraint entry that accepts this type and size.
        acceptFile = constraints.imgUploadSizeAndTypes.some(
            (allowed) => allowed.fileTypes.includes(fileType) && sizeInMB <= allowed.maxUploadSize
        )
    }
    if (!acceptFile) {
        alert(`Cannot upload file. Kindly check the allowed file types and maximum allowed size.`)
    }
    return acceptFile
}
/**
 * Drop handler for the chat surface. Dropped files are validated, read as
 * data URLs, and appended to `previews`; dropped links (text/uri-list items,
 * or text/html fragments containing an href) are appended as 'url' uploads.
 * No-op when uploads are disabled for this chatflow.
 *
 * @param {DragEvent} e - native drop event from the dropzone
 */
const handleDrop = async (e) => {
    if (!isChatFlowAvailableForUploads) {
        return
    }
    e.preventDefault()
    setIsDragActive(false)
    let files = []
    if (e.dataTransfer.files.length > 0) {
        for (const file of e.dataTransfer.files) {
            // Reject the whole drop as soon as any one file fails validation
            // (isFileAllowedForUpload already alerted the user).
            if (isFileAllowedForUpload(file) === false) {
                return
            }
            const reader = new FileReader()
            const { name } = file
            // Wrap each async FileReader in a Promise so all reads can be
            // awaited together below.
            files.push(
                new Promise((resolve) => {
                    reader.onload = (evt) => {
                        if (!evt?.target?.result) {
                            return
                        }
                        const { result } = evt.target
                        let previewUrl
                        // Audio gets a static waveform thumbnail; images get a
                        // live object URL. NOTE(review): other mime types leave
                        // previewUrl undefined — presumably intentional, confirm.
                        if (file.type.startsWith('audio/')) {
                            previewUrl = audioUploadSVG
                        } else if (file.type.startsWith('image/')) {
                            previewUrl = URL.createObjectURL(file)
                        }
                        resolve({
                            data: result, // base64 data URL from readAsDataURL
                            preview: previewUrl,
                            type: 'file',
                            name: name,
                            mime: file.type
                        })
                    }
                    reader.readAsDataURL(file)
                })
            )
        }
        const newFiles = await Promise.all(files)
        setPreviews((prevPreviews) => [...prevPreviews, ...newFiles])
    }
    if (e.dataTransfer.items) {
        for (const item of e.dataTransfer.items) {
            if (item.kind === 'string' && item.type.match('^text/uri-list')) {
                // Plain URL drop: use the URL itself as both data and preview.
                item.getAsString((s) => {
                    let upload = {
                        data: s,
                        preview: s,
                        type: 'url',
                        name: s.substring(s.lastIndexOf('/') + 1)
                    }
                    setPreviews((prevPreviews) => [...prevPreviews, upload])
                })
            } else if (item.kind === 'string' && item.type.match('^text/html')) {
                item.getAsString((s) => {
                    if (s.indexOf('href') === -1) return
                    // Extract the href value: skip past `href="` (indexOf + 6)
                    // and read up to the closing double quote.
                    let start = s.substring(s.indexOf('href') + 6)
                    let hrefStr = start.substring(0, start.indexOf('"'))
                    let upload = {
                        data: hrefStr,
                        preview: hrefStr,
                        type: 'url',
                        name: hrefStr.substring(hrefStr.lastIndexOf('/') + 1)
                    }
                    setPreviews((prevPreviews) => [...prevPreviews, upload])
                })
            }
        }
    }
}
/**
 * Change handler for the hidden <input type='file'>. Validates each selected
 * file, reads it as a data URL, and appends the results to `previews`.
 *
 * @param {Event} event - change event from the file input
 */
const handleFileChange = async (event) => {
    // Quick emptiness guard (user cancelled the picker).
    const fileObj = event.target.files && event.target.files[0]
    if (!fileObj) {
        return
    }
    let files = []
    for (const file of event.target.files) {
        // Abort the whole selection as soon as one file fails validation
        // (isFileAllowedForUpload already alerted the user).
        if (isFileAllowedForUpload(file) === false) {
            return
        }
        const reader = new FileReader()
        const { name } = file
        // Wrap each async FileReader in a Promise so all reads can be
        // awaited together below.
        files.push(
            new Promise((resolve) => {
                reader.onload = (evt) => {
                    if (!evt?.target?.result) {
                        return
                    }
                    const { result } = evt.target
                    resolve({
                        data: result, // base64 data URL from readAsDataURL
                        preview: URL.createObjectURL(file),
                        type: 'file',
                        name: name,
                        mime: file.type
                    })
                }
                reader.readAsDataURL(file)
            })
        )
    }
    const newFiles = await Promise.all(files)
    setPreviews((prevPreviews) => [...prevPreviews, ...newFiles])
    // 👇️ reset file input so selecting the same file again re-triggers change
    event.target.value = null
}
/**
 * Converts a finished audio recording Blob into a base64 data-URL upload and
 * appends it to `previews` (which in turn triggers the auto-send effect that
 * watches for audio previews).
 *
 * @param {Blob} blob - recording produced by the audio-recording module
 */
const addRecordingToPreviews = (blob) => {
    // Recorder blob types often look like 'audio/webm;codecs=opus' — strip
    // the codec suffix, but keep the full type when there is no ';'.
    // (Previously substring(0, indexOf(';')) produced an EMPTY mime when the
    // type had no ';', since indexOf returns -1 and substring(0, -1) === ''.)
    const semicolonIdx = blob.type.indexOf(';')
    const mimeType = semicolonIdx === -1 ? blob.type : blob.type.substring(0, semicolonIdx)
    // Read the blob as a data URL and add it to previews once loaded.
    const reader = new FileReader()
    reader.readAsDataURL(blob)
    reader.onloadend = () => {
        const base64data = reader.result
        const upload = {
            data: base64data,
            preview: audioUploadSVG, // static waveform thumbnail for audio
            type: 'audio',
            name: 'audio.wav',
            mime: mimeType
        }
        setPreviews((prevPreviews) => [...prevPreviews, upload])
    }
}
/**
 * Drag-lifecycle handler for the chat surface: toggles the drop overlay on
 * dragenter/dragover and hides it on dragleave. Does nothing at all when
 * uploads are disabled for this chatflow (event propagates untouched).
 *
 * @param {DragEvent} e - native drag event
 */
const handleDrag = (e) => {
    if (!isChatFlowAvailableForUploads) return
    e.preventDefault()
    e.stopPropagation()
    const isEntering = e.type === 'dragenter' || e.type === 'dragover'
    if (isEntering) {
        setIsDragActive(true)
    } else if (e.type === 'dragleave') {
        setIsDragActive(false)
    }
}
/**
 * Removes a single pending upload from `previews`. Object URLs created for
 * local files are revoked first to release the underlying blob memory.
 *
 * @param {object} itemToDelete - the preview entry to remove
 */
const handleDeletePreview = (itemToDelete) => {
    const isLocalFile = itemToDelete.type === 'file'
    if (isLocalFile) {
        // Clean up the object URL so the browser can free the blob.
        URL.revokeObjectURL(itemToDelete.preview)
    }
    const remaining = previews.filter((preview) => preview !== itemToDelete)
    setPreviews(remaining)
}
/**
 * Proxies a click on the visible upload icon button to the hidden
 * <input type='file'> element so the native file picker opens.
 */
const handleUploadClick = () => {
    // 👇️ open file input box on click of another element
    fileUploadRef.current.click()
}
/**
 * Drops every pending upload. Each preview's object URL is revoked first so
 * the associated blob memory is released (avoids leaks on repeated sends).
 */
const clearPreviews = () => {
    for (const file of previews) {
        URL.revokeObjectURL(file.preview)
    }
    setPreviews([])
}
/**
 * Starts microphone capture. The audio-recording module receives the two
 * state setters so it can flip the UI back if recording fails or is
 * unsupported by the browser.
 */
const onMicrophonePressed = () => {
    setIsRecording(true)
    startAudioRecording(setIsRecording, setRecordingNotSupported)
}
/**
 * Aborts an in-progress recording, or simply dismisses the "recording not
 * supported" notice (in which case there is no active recorder to cancel).
 */
const onRecordingCancelled = () => {
    if (!recordingNotSupported) cancelAudioRecording()
    setIsRecording(false)
    setRecordingNotSupported(false)
}
/**
 * Finishes the recording and shows the "Sending..." indicator. The recorded
 * blob is handed to addRecordingToPreviews, whose previews update triggers
 * the effect that auto-sends audio messages.
 */
const onRecordingStopped = async () => {
    setIsLoadingRecording(true)
    stopAudioRecording(addRecordingToPreviews)
}
const onSourceDialogClick = (data, title) => {
setSourceDialogProps({ data, title })
setSourceDialogOpen(true)
@ -118,7 +352,10 @@ export const ChatMessage = ({ open, chatflowid, isDialog }) => {
if (e) e.preventDefault()
if (!promptStarterInput && userInput.trim() === '') {
return
const containsAudio = previews.filter((item) => item.type === 'audio').length > 0
if (!(previews.length >= 1 && containsAudio)) {
return
}
}
let input = userInput
@ -126,7 +363,16 @@ export const ChatMessage = ({ open, chatflowid, isDialog }) => {
if (promptStarterInput !== undefined && promptStarterInput.trim() !== '') input = promptStarterInput
setLoading(true)
setMessages((prevMessages) => [...prevMessages, { message: input, type: 'userMessage' }])
const urls = previews.map((item) => {
return {
data: item.data,
type: item.type,
name: item.name,
mime: item.mime
}
})
clearPreviews()
setMessages((prevMessages) => [...prevMessages, { message: input, type: 'userMessage', fileUploads: urls }])
// Send user question and history to API
try {
@ -135,6 +381,7 @@ export const ChatMessage = ({ open, chatflowid, isDialog }) => {
history: messages.filter((msg) => msg.message !== 'Hi there! How can I help?'),
chatId
}
if (urls && urls.length > 0) params.uploads = urls
if (isChatFlowAvailableToStream) params.socketIOClientId = socketIOClientId
const response = await predictionApi.sendMessageAndGetPrediction(chatflowid, params)
@ -144,6 +391,17 @@ export const ChatMessage = ({ open, chatflowid, isDialog }) => {
if (!chatId) setChatId(data.chatId)
if (input === '' && data.question) {
// the response contains the question even if it was in an audio format
// so if input is empty but the response contains the question, update the user message to show the question
setMessages((prevMessages) => {
let allMessages = [...cloneDeep(prevMessages)]
if (allMessages[allMessages.length - 2].type === 'apiMessage') return allMessages
allMessages[allMessages.length - 2].message = data.question
return allMessages
})
}
if (!isChatFlowAvailableToStream) {
let text = ''
if (data.text) text = data.text
@ -222,6 +480,14 @@ export const ChatMessage = ({ open, chatflowid, isDialog }) => {
if (message.sourceDocuments) obj.sourceDocuments = JSON.parse(message.sourceDocuments)
if (message.usedTools) obj.usedTools = JSON.parse(message.usedTools)
if (message.fileAnnotations) obj.fileAnnotations = JSON.parse(message.fileAnnotations)
if (message.fileUploads) {
obj.fileUploads = JSON.parse(message.fileUploads)
obj.fileUploads.forEach((file) => {
if (file.type === 'stored-file') {
file.data = `${baseURL}/api/v1/get-upload-file?chatflowId=${chatflowid}&chatId=${chatId}&fileName=${file.name}`
}
})
}
return obj
})
setMessages((prevMessages) => [...prevMessages, ...loadedMessages])
@ -239,6 +505,15 @@ export const ChatMessage = ({ open, chatflowid, isDialog }) => {
// eslint-disable-next-line react-hooks/exhaustive-deps
}, [getIsChatflowStreamingApi.data])
// Get chatflow uploads capability
useEffect(() => {
if (getAllowChatFlowUploads.data) {
setIsChatFlowAvailableForUploads(getAllowChatFlowUploads.data?.isImageUploadAllowed ?? false)
setIsChatFlowAvailableForSpeech(getAllowChatFlowUploads.data?.isSpeechToTextEnabled ?? false)
}
// eslint-disable-next-line react-hooks/exhaustive-deps
}, [getAllowChatFlowUploads.data])
useEffect(() => {
if (getChatflowConfig.data) {
if (getChatflowConfig.data?.chatbotConfig && JSON.parse(getChatflowConfig.data?.chatbotConfig)) {
@ -273,11 +548,18 @@ export const ChatMessage = ({ open, chatflowid, isDialog }) => {
useEffect(() => {
let socket
if (open && chatflowid) {
// API request
getChatmessageApi.request(chatflowid)
getIsChatflowStreamingApi.request(chatflowid)
getAllowChatFlowUploads.request(chatflowid)
getChatflowConfig.request(chatflowid)
// Scroll to bottom
scrollToBottom()
setIsRecording(false)
// SocketIO
socket = socketIOClient(baseURL)
socket.on('connect', () => {
@ -311,147 +593,330 @@ export const ChatMessage = ({ open, chatflowid, isDialog }) => {
// eslint-disable-next-line react-hooks/exhaustive-deps
}, [open, chatflowid])
useEffect(() => {
// wait for audio recording to load and then send
const containsAudio = previews.filter((item) => item.type === 'audio').length > 0
if (previews.length >= 1 && containsAudio) {
setIsRecording(false)
setRecordingNotSupported(false)
handlePromptClick('')
}
// eslint-disable-next-line
}, [previews])
return (
<>
<div className={isDialog ? 'cloud-dialog' : 'cloud'}>
<div ref={ps} className='messagelist'>
<div onDragEnter={handleDrag}>
{isDragActive && (
<div
className='image-dropzone'
onDragEnter={handleDrag}
onDragLeave={handleDrag}
onDragEnd={handleDrag}
onDrop={handleDrop}
/>
)}
{isDragActive && getAllowChatFlowUploads.data?.isImageUploadAllowed && (
<Box className='drop-overlay'>
<Typography variant='h2'>Drop here to upload</Typography>
{getAllowChatFlowUploads.data.imgUploadSizeAndTypes.map((allowed) => {
return (
<>
<Typography variant='subtitle1'>{allowed.fileTypes?.join(', ')}</Typography>
<Typography variant='subtitle1'>Max Allowed Size: {allowed.maxUploadSize} MB</Typography>
</>
)
})}
</Box>
)}
<div ref={ps} className={`${isDialog ? 'cloud-dialog' : 'cloud'}`}>
<div id='messagelist' className={'messagelist'}>
{messages &&
messages.map((message, index) => {
return (
// The latest message sent by the user will be animated while waiting for a response
<>
<Box
sx={{
background: message.type === 'apiMessage' ? theme.palette.asyncSelect.main : ''
}}
key={index}
style={{ display: 'flex' }}
className={
message.type === 'userMessage' && loading && index === messages.length - 1
? customization.isDarkMode
? 'usermessagewaiting-dark'
: 'usermessagewaiting-light'
: message.type === 'usermessagewaiting'
? 'apimessage'
: 'usermessage'
}
>
{/* Display the correct icon depending on the message type */}
{message.type === 'apiMessage' ? (
<img src={robotPNG} alt='AI' width='30' height='30' className='boticon' />
) : (
<img src={userPNG} alt='Me' width='30' height='30' className='usericon' />
)}
<div style={{ display: 'flex', flexDirection: 'column', width: '100%' }}>
{message.usedTools && (
<div style={{ display: 'block', flexDirection: 'row', width: '100%' }}>
{message.usedTools.map((tool, index) => {
return (
<Chip
size='small'
key={index}
label={tool.tool}
component='a'
sx={{ mr: 1, mt: 1 }}
variant='outlined'
clickable
onClick={() => onSourceDialogClick(tool, 'Used Tools')}
/>
)
})}
</div>
)}
<div className='markdownanswer'>
{/* Messages are being rendered in Markdown format */}
<MemoizedReactMarkdown
remarkPlugins={[remarkGfm, remarkMath]}
rehypePlugins={[rehypeMathjax, rehypeRaw]}
components={{
code({ inline, className, children, ...props }) {
const match = /language-(\w+)/.exec(className || '')
return !inline ? (
<CodeBlock
key={Math.random()}
chatflowid={chatflowid}
isDialog={isDialog}
language={(match && match[1]) || ''}
value={String(children).replace(/\n$/, '')}
{...props}
/>
) : (
<code className={className} {...props}>
{children}
</code>
)
}
}}
>
{message.message}
</MemoizedReactMarkdown>
<Box
sx={{
background: message.type === 'apiMessage' ? theme.palette.asyncSelect.main : ''
}}
key={index}
style={{ display: 'flex' }}
className={
message.type === 'userMessage' && loading && index === messages.length - 1
? customization.isDarkMode
? 'usermessagewaiting-dark'
: 'usermessagewaiting-light'
: message.type === 'usermessagewaiting'
? 'apimessage'
: 'usermessage'
}
>
{/* Display the correct icon depending on the message type */}
{message.type === 'apiMessage' ? (
<img src={robotPNG} alt='AI' width='30' height='30' className='boticon' />
) : (
<img src={userPNG} alt='Me' width='30' height='30' className='usericon' />
)}
<div style={{ display: 'flex', flexDirection: 'column', width: '100%' }}>
{message.usedTools && (
<div style={{ display: 'block', flexDirection: 'row', width: '100%' }}>
{message.usedTools.map((tool, index) => {
return (
<Chip
size='small'
key={index}
label={tool.tool}
component='a'
sx={{ mr: 1, mt: 1 }}
variant='outlined'
clickable
onClick={() => onSourceDialogClick(tool, 'Used Tools')}
/>
)
})}
</div>
{message.fileAnnotations && (
<div style={{ display: 'block', flexDirection: 'row', width: '100%' }}>
{message.fileAnnotations.map((fileAnnotation, index) => {
return (
<Button
sx={{ fontSize: '0.85rem', textTransform: 'none', mb: 1 }}
key={index}
variant='outlined'
onClick={() => downloadFile(fileAnnotation)}
endIcon={<IconDownload color={theme.palette.primary.main} />}
>
{fileAnnotation.fileName}
</Button>
)
})}
</div>
)}
{message.sourceDocuments && (
<div style={{ display: 'block', flexDirection: 'row', width: '100%' }}>
{removeDuplicateURL(message).map((source, index) => {
const URL =
source.metadata && source.metadata.source
? isValidURL(source.metadata.source)
: undefined
return (
<Chip
size='small'
key={index}
label={
URL
? URL.pathname.substring(0, 15) === '/'
? URL.host
: `${URL.pathname.substring(0, 15)}...`
: `${source.pageContent.substring(0, 15)}...`
}
component='a'
sx={{ mr: 1, mb: 1 }}
variant='outlined'
clickable
onClick={() =>
URL ? onURLClick(source.metadata.source) : onSourceDialogClick(source)
}
)}
{message.fileUploads && message.fileUploads.length > 0 && (
<div
style={{
display: 'flex',
flexWrap: 'wrap',
flexDirection: 'column',
width: '100%',
gap: '8px'
}}
>
{message.fileUploads.map((item, index) => {
return (
<>
{item.mime.startsWith('image/') ? (
<Card
key={index}
sx={{
p: 0,
m: 0,
maxWidth: 128,
marginRight: '10px',
flex: '0 0 auto'
}}
>
<CardMedia
component='img'
image={item.data}
sx={{ height: 64 }}
alt={'preview'}
style={messageImageStyle}
/>
</Card>
) : (
// eslint-disable-next-line jsx-a11y/media-has-caption
<audio controls='controls'>
Your browser does not support the &lt;audio&gt; tag.
<source src={item.data} type={item.mime} />
</audio>
)}
</>
)
})}
</div>
)}
<div className='markdownanswer'>
{/* Messages are being rendered in Markdown format */}
<MemoizedReactMarkdown
remarkPlugins={[remarkGfm, remarkMath]}
rehypePlugins={[rehypeMathjax, rehypeRaw]}
components={{
code({ inline, className, children, ...props }) {
const match = /language-(\w+)/.exec(className || '')
return !inline ? (
<CodeBlock
key={Math.random()}
chatflowid={chatflowid}
isDialog={isDialog}
language={(match && match[1]) || ''}
value={String(children).replace(/\n$/, '')}
{...props}
/>
) : (
<code className={className} {...props}>
{children}
</code>
)
})}
</div>
)}
}
}}
>
{message.message}
</MemoizedReactMarkdown>
</div>
</Box>
</>
{message.fileAnnotations && (
<div style={{ display: 'block', flexDirection: 'row', width: '100%' }}>
{message.fileAnnotations.map((fileAnnotation, index) => {
return (
<Button
sx={{ fontSize: '0.85rem', textTransform: 'none', mb: 1 }}
key={index}
variant='outlined'
onClick={() => downloadFile(fileAnnotation)}
endIcon={<IconDownload color={theme.palette.primary.main} />}
>
{fileAnnotation.fileName}
</Button>
)
})}
</div>
)}
{message.sourceDocuments && (
<div style={{ display: 'block', flexDirection: 'row', width: '100%' }}>
{removeDuplicateURL(message).map((source, index) => {
const URL =
source.metadata && source.metadata.source
? isValidURL(source.metadata.source)
: undefined
return (
<Chip
size='small'
key={index}
label={
URL
? URL.pathname.substring(0, 15) === '/'
? URL.host
: `${URL.pathname.substring(0, 15)}...`
: `${source.pageContent.substring(0, 15)}...`
}
component='a'
sx={{ mr: 1, mb: 1 }}
variant='outlined'
clickable
onClick={() =>
URL ? onURLClick(source.metadata.source) : onSourceDialogClick(source)
}
/>
)
})}
</div>
)}
</div>
</Box>
)
})}
</div>
</div>
<div style={{ position: 'relative' }}>
{messages && messages.length === 1 && (
<StarterPromptsCard starterPrompts={starterPrompts || []} onPromptClick={handlePromptClick} isGrid={isDialog} />
)}
<Divider />
</div>
{messages && messages.length === 1 && starterPrompts.length > 0 && (
<div style={{ position: 'relative' }}>
<StarterPromptsCard
sx={{ bottom: previews && previews.length > 0 ? 70 : 0 }}
starterPrompts={starterPrompts || []}
onPromptClick={handlePromptClick}
isGrid={isDialog}
/>
</div>
)}
<Divider sx={{ width: '100%' }} />
<div className='center'>
<div style={{ width: '100%' }}>
{previews && previews.length > 0 && (
<Box sx={{ width: '100%', mb: 1.5, display: 'flex', alignItems: 'center' }}>
{previews.map((item, index) => (
<Fragment key={index}>
{item.mime.startsWith('image/') ? (
<ImageButton
focusRipple
style={{
width: '48px',
height: '48px',
marginRight: '10px',
flex: '0 0 auto'
}}
onClick={() => handleDeletePreview(item)}
>
<ImageSrc style={{ backgroundImage: `url(${item.data})` }} />
<ImageBackdrop className='MuiImageBackdrop-root' />
<ImageMarked className='MuiImageMarked-root'>
<IconTrash size={20} color='white' />
</ImageMarked>
</ImageButton>
) : (
<Card
sx={{
display: 'inline-flex',
alignItems: 'center',
height: '48px',
width: isDialog ? ps?.current?.offsetWidth / 4 : ps?.current?.offsetWidth / 2,
p: 0.5,
mr: 1,
backgroundColor: theme.palette.grey[500],
flex: '0 0 auto'
}}
variant='outlined'
>
<CardMedia component='audio' sx={{ color: 'transparent' }} controls src={item.data} />
<IconButton onClick={() => handleDeletePreview(item)} size='small'>
<IconTrash size={20} color='white' />
</IconButton>
</Card>
)}
</Fragment>
))}
</Box>
)}
{isRecording ? (
<>
{recordingNotSupported ? (
<div className='overlay'>
<div className='browser-not-supporting-audio-recording-box'>
<Typography variant='body1'>
To record audio, use modern browsers like Chrome or Firefox that support audio recording.
</Typography>
<Button
variant='contained'
color='error'
size='small'
type='button'
onClick={() => onRecordingCancelled()}
>
Okay
</Button>
</div>
</div>
) : (
<Box
sx={{
width: '100%',
height: '54px',
px: 2,
border: '1px solid',
borderRadius: 3,
backgroundColor: customization.isDarkMode ? '#32353b' : '#fafafa',
borderColor: 'rgba(0, 0, 0, 0.23)',
display: 'flex',
alignItems: 'center',
justifyContent: 'space-between'
}}
>
<div className='recording-elapsed-time'>
<span className='red-recording-dot'>
<IconCircleDot />
</span>
<Typography id='elapsed-time'>00:00</Typography>
{isLoadingRecording && <Typography ml={1.5}>Sending...</Typography>}
</div>
<div className='recording-control-buttons-container'>
<IconButton onClick={onRecordingCancelled} size='small'>
<IconX
color={loading || !chatflowid ? '#9e9e9e' : customization.isDarkMode ? 'white' : '#1e88e5'}
/>
</IconButton>
<IconButton onClick={onRecordingStopped} size='small'>
<IconSend
color={loading || !chatflowid ? '#9e9e9e' : customization.isDarkMode ? 'white' : '#1e88e5'}
/>
</IconButton>
</div>
</Box>
)}
</>
) : (
<form style={{ width: '100%' }} onSubmit={handleSubmit}>
<OutlinedInput
inputRef={inputRef}
@ -467,33 +932,75 @@ export const ChatMessage = ({ open, chatflowid, isDialog }) => {
onChange={onChange}
multiline={true}
maxRows={isDialog ? 7 : 2}
endAdornment={
<InputAdornment position='end' sx={{ padding: '15px' }}>
<IconButton type='submit' disabled={loading || !chatflowid} edge='end'>
{loading ? (
<div>
<CircularProgress color='inherit' size={20} />
</div>
) : (
// Send icon SVG in input field
<IconSend
startAdornment={
isChatFlowAvailableForUploads && (
<InputAdornment position='start' sx={{ pl: 2 }}>
<IconButton
onClick={handleUploadClick}
type='button'
disabled={loading || !chatflowid}
edge='start'
>
<IconPhotoPlus
color={loading || !chatflowid ? '#9e9e9e' : customization.isDarkMode ? 'white' : '#1e88e5'}
/>
)}
</IconButton>
</InputAdornment>
</IconButton>
</InputAdornment>
)
}
endAdornment={
<>
{isChatFlowAvailableForSpeech && (
<InputAdornment position='end'>
<IconButton
onClick={() => onMicrophonePressed()}
type='button'
disabled={loading || !chatflowid}
edge='end'
>
<IconMicrophone
className={'start-recording-button'}
color={
loading || !chatflowid ? '#9e9e9e' : customization.isDarkMode ? 'white' : '#1e88e5'
}
/>
</IconButton>
</InputAdornment>
)}
<InputAdornment position='end' sx={{ padding: '15px' }}>
<IconButton type='submit' disabled={loading || !chatflowid} edge='end'>
{loading ? (
<div>
<CircularProgress color='inherit' size={20} />
</div>
) : (
// Send icon SVG in input field
<IconSend
color={
loading || !chatflowid ? '#9e9e9e' : customization.isDarkMode ? 'white' : '#1e88e5'
}
/>
)}
</IconButton>
</InputAdornment>
</>
}
/>
{isChatFlowAvailableForUploads && (
<input style={{ display: 'none' }} multiple ref={fileUploadRef} type='file' onChange={handleFileChange} />
)}
</form>
</div>
)}
</div>
<SourceDocDialog show={sourceDialogOpen} dialogProps={sourceDialogProps} onCancel={() => setSourceDialogOpen(false)} />
</>
</div>
)
}
ChatMessage.propTypes = {
open: PropTypes.bool,
chatflowid: PropTypes.string,
isDialog: PropTypes.bool
isDialog: PropTypes.bool,
previews: PropTypes.array,
setPreviews: PropTypes.func
}

View File

@ -35,6 +35,7 @@ export const ChatPopUp = ({ chatflowid }) => {
const [open, setOpen] = useState(false)
const [showExpandDialog, setShowExpandDialog] = useState(false)
const [expandDialogProps, setExpandDialogProps] = useState({})
const [previews, setPreviews] = useState([])
const anchorRef = useRef(null)
const prevOpen = useRef(open)
@ -191,8 +192,15 @@ export const ChatPopUp = ({ chatflowid }) => {
<Transitions in={open} {...TransitionProps}>
<Paper>
<ClickAwayListener onClickAway={handleClose}>
<MainCard border={false} elevation={16} content={false} boxShadow shadow={theme.shadows[16]}>
<ChatMessage chatflowid={chatflowid} open={open} />
<MainCard
border={false}
className='cloud-wrapper'
elevation={16}
content={false}
boxShadow
shadow={theme.shadows[16]}
>
<ChatMessage chatflowid={chatflowid} open={open} previews={previews} setPreviews={setPreviews} />
</MainCard>
</ClickAwayListener>
</Paper>
@ -204,6 +212,8 @@ export const ChatPopUp = ({ chatflowid }) => {
dialogProps={expandDialogProps}
onClear={clearChat}
onCancel={() => setShowExpandDialog(false)}
previews={previews}
setPreviews={setPreviews}
></ChatExpandDialog>
</>
)

View File

@ -0,0 +1,249 @@
/* style.css*/
/* Media Queries */
/* Small Devices*/
@media (min-width: 0px) {
* {
box-sizing: border-box;
}
.start-recording-button {
font-size: 70px;
color: #435f7a;
cursor: pointer;
}
.start-recording-button:hover {
opacity: 1;
}
.recording-control-buttons-container {
/*targeting Chrome & Safari*/
display: -webkit-flex;
/*targeting IE10*/
display: -ms-flex;
display: flex;
justify-content: center;
/*horizontal centering*/
align-items: center;
gap: 12px;
}
.recording-elapsed-time {
font-size: 16px;
/*targeting Chrome & Safari*/
display: -webkit-flex;
/*targeting IE10*/
display: -ms-flex;
display: flex;
justify-content: center;
/*horizontal centering*/
align-items: center;
}
.recording-elapsed-time #elapsed-time {
margin: 0;
}
.recording-indicator-wrapper {
position: relative;
display: flex;
width: 16px;
height: 16px;
}
.red-recording-dot {
font-size: 25px;
color: red;
margin-right: 12px;
/*transitions with Firefox, IE and Opera Support browser support*/
animation-name: flashing-recording-dot;
-webkit-animation-name: flashing-recording-dot;
-moz-animation-name: flashing-recording-dot;
-o-animation-name: flashing-recording-dot;
animation-duration: 2s;
-webkit-animation-duration: 2s;
-moz-animation-duration: 2s;
-o-animation-duration: 2s;
animation-iteration-count: infinite;
-webkit-animation-iteration-count: infinite;
-moz-animation-iteration-count: infinite;
-o-animation-iteration-count: infinite;
}
/* The animation code */
@keyframes flashing-recording-dot {
0% {
opacity: 1;
}
50% {
opacity: 0;
}
100% {
opacity: 1;
}
}
@-webkit-keyframes flashing-recording-dot {
0% {
opacity: 1;
}
50% {
opacity: 0;
}
100% {
opacity: 1;
}
}
@-moz-keyframes flashing-recording-dot {
0% {
opacity: 1;
}
50% {
opacity: 0;
}
100% {
opacity: 1;
}
}
@-o-keyframes flashing-recording-dot {
0% {
opacity: 1;
}
50% {
opacity: 0;
}
100% {
opacity: 1;
}
}
.recording-control-buttons-container.hide {
display: none;
}
/* Overlay that replaces the input bar while recording is active */
.overlay {
    width: 100%;
    /* was "height: '54px'": quoted lengths are invalid CSS, so the declaration was silently dropped */
    height: 54px;
    /*targeting Chrome & Safari*/
    display: -webkit-flex;
    /*targeting IE10*/
    display: -ms-flex;
    display: flex;
    /*horizontal centering*/
    justify-content: center;
    /*vertical centering*/
    align-items: center;
}
.overlay.hide {
display: none;
}
.browser-not-supporting-audio-recording-box {
/*targeting Chrome & Safari*/
display: -webkit-flex;
/*targeting IE10*/
display: -ms-flex;
display: flex;
justify-content: space-between;
/*horizontal centering*/
align-items: center;
width: 100%;
font-size: 16px;
gap: 12px;
}
.browser-not-supporting-audio-recording-box > p {
margin: 0;
}
.close-browser-not-supported-box {
cursor: pointer;
background-color: #abc1c05c;
border-radius: 10px;
font-size: 16px;
border: none;
}
.close-browser-not-supported-box:hover {
background-color: #92a5a45c;
}
.close-browser-not-supported-box:focus {
outline: none;
border: none;
}
.audio-element.hide {
display: none;
}
.text-indication-of-audio-playing-container {
height: 20px;
}
.text-indication-of-audio-playing {
font-size: 20px;
}
.text-indication-of-audio-playing.hide {
display: none;
}
/* 3 Dots animation*/
.text-indication-of-audio-playing span {
/*transitions with Firefox, IE and Opera Support browser support*/
animation-name: blinking-dot;
-webkit-animation-name: blinking-dot;
-moz-animation-name: blinking-dot;
-o-animation-name: blinking-dot;
animation-duration: 2s;
-webkit-animation-duration: 2s;
-moz-animation-duration: 2s;
-o-animation-duration: 2s;
animation-iteration-count: infinite;
-webkit-animation-iteration-count: infinite;
-moz-animation-iteration-count: infinite;
-o-animation-iteration-count: infinite;
}
.text-indication-of-audio-playing span:nth-child(2) {
animation-delay: 0.4s;
-webkit-animation-delay: 0.4s;
-moz-animation-delay: 0.4s;
-o-animation-delay: 0.4s;
}
.text-indication-of-audio-playing span:nth-child(3) {
animation-delay: 0.8s;
-webkit-animation-delay: 0.8s;
-moz-animation-delay: 0.8s;
-o-animation-delay: 0.8s;
}
/* The animation code */
@keyframes blinking-dot {
0% {
opacity: 0;
}
50% {
opacity: 1;
}
100% {
opacity: 0;
}
}
/* The animation code */
@-webkit-keyframes blinking-dot {
0% {
opacity: 0;
}
50% {
opacity: 1;
}
100% {
opacity: 0;
}
}
/* The animation code */
@-moz-keyframes blinking-dot {
0% {
opacity: 0;
}
50% {
opacity: 1;
}
100% {
opacity: 0;
}
}
/* The animation code */
@-o-keyframes blinking-dot {
0% {
opacity: 0;
}
50% {
opacity: 1;
}
100% {
opacity: 0;
}
}
}

View File

@ -0,0 +1,337 @@
/**
* @fileoverview This file contains the API to handle audio recording.
* Originally from 'https://ralzohairi.medium.com/audio-recording-in-javascript-96eed45b75ee'
*/
// audio-recording.js ---------------
// Cached DOM references: the mic button that starts a recording, and the element
// (id "elapsed-time") that displays the running recording duration.
let microphoneButton, elapsedTimeTag
/** Caches the DOM reference to the microphone button rendered by the chat input bar. */
function initializeControls() {
    // first (and only) element carrying the start-recording class
    microphoneButton = document.querySelector('.start-recording-button')
}
/** Swaps the idle microphone button for the active-recording UI (elapsed-time counter). */
function handleDisplayingRecordingControlButtons() {
    // hide the microphone button that starts audio recording
    microphoneButton.style.display = 'none'
    // start the once-a-second elapsed-time display for the recording in progress
    handleElapsedRecordingTime()
}
/** Restores the idle microphone button and stops the elapsed-time ticker. */
function handleHidingRecordingControlButtons() {
    // show the microphone button that starts audio recording again
    microphoneButton.style.display = 'block'
    // stop the interval that refreshes the elapsed-time display every second
    clearInterval(elapsedTimeTimer)
}
/** Wall-clock moment the current recording actually began; elapsed time is computed against it */
let audioRecordStartTime
/** Hard cap on recording length, in hours; the recording is force-stopped once this is reached */
let maximumRecordingTimeInHours = 1
/** Handle returned by setInterval driving the once-a-second elapsed-time refresh; cleared on stop/cancel */
let elapsedTimeTimer
/** Starts a new audio recording.
 * @param {Function} onRecordingStart - called with true once the recorder is live
 * @param {Function} onUnsupportedBrowser - called with true when getUserMedia is unavailable
 */
export function startAudioRecording(onRecordingStart, onUnsupportedBrowser) {
    initializeControls()
    audioRecorder
        .start()
        .then(() => {
            // recorder is live: notify the caller, remember the start moment for the
            // elapsed-time display, and swap the mic button for the recording controls
            if (onRecordingStart) {
                onRecordingStart(true)
            }
            audioRecordStartTime = new Date()
            handleDisplayingRecordingControlButtons()
        })
        .catch((error) => {
            // getUserMedia missing entirely — let the caller show an "unsupported browser" notice
            if (error.message.includes('mediaDevices API or getUserMedia method is not supported in this browser.')) {
                if (onUnsupportedBrowser) {
                    onUnsupportedBrowser(true)
                }
            }
            // known getUserMedia / MediaRecorder.start failure modes, keyed by DOMException name
            const messagesByErrorName = {
                AbortError: 'An AbortError has occurred.',
                NotAllowedError: 'A NotAllowedError has occurred. User might have denied permission.',
                NotFoundError: 'A NotFoundError has occurred.',
                NotReadableError: 'A NotReadableError has occurred.',
                SecurityError: 'A SecurityError has occurred.',
                TypeError: 'A TypeError has occurred.',
                InvalidStateError: 'An InvalidStateError has occurred.',
                UnknownError: 'An UnknownError has occurred.'
            }
            // own-property check so inherited Object.prototype members never shadow the fallback
            const message = Object.prototype.hasOwnProperty.call(messagesByErrorName, error.name)
                ? messagesByErrorName[error.name]
                : 'An error occurred with the error name ' + error.name
            // eslint-disable-next-line no-console
            console.log(message)
        })
}
/** Stops the in-progress recording and hands the resulting audio blob to the caller.
 * @param {Function} addRecordingToPreviews - receives the recorded Blob on success
 */
export function stopAudioRecording(addRecordingToPreviews) {
    audioRecorder
        .stop()
        .then((audioBlob) => {
            // restore the idle mic button, then pass the finished recording upward
            handleHidingRecordingControlButtons()
            if (addRecordingToPreviews) {
                addRecordingToPreviews(audioBlob)
            }
        })
        .catch((error) => {
            // InvalidStateError is the documented MediaRecorder.stop failure; anything else is generic
            const message =
                error.name === 'InvalidStateError'
                    ? 'An InvalidStateError has occurred.'
                    : 'An error occurred with the error name ' + error.name
            // eslint-disable-next-line no-console
            console.log(message)
        })
}
/** Discards the in-progress recording: stops the recorder and stream without
 * producing a blob, then restores the idle microphone button. */
export function cancelAudioRecording() {
    // tear down the MediaRecorder and its stream; recorded chunks are thrown away
    audioRecorder.cancel()
    // swap the recording controls back to the idle mic button (also clears the elapsed-time timer)
    handleHidingRecordingControlButtons()
}
/** Drives the elapsed-time display: shows "00:00" immediately, then refreshes
 * it every second from audioRecordStartTime until the timer is cleared. */
function handleElapsedRecordingTime() {
    // the counter element is (re)looked up each time recording starts
    elapsedTimeTag = document.getElementById('elapsed-time')
    // display initial time when recording begins
    displayElapsedTimeDuringAudioRecording('00:00')
    // interval handle is stored module-wide so stop/cancel can clear it
    elapsedTimeTimer = setInterval(() => {
        // recompute from the recorded start moment rather than accumulating, so drift cannot build up
        let elapsedTime = computeElapsedTime(audioRecordStartTime) //pass the actual record start time
        // display the elapsed time
        displayElapsedTimeDuringAudioRecording(elapsedTime)
    }, 1000) //every second
}
/** Renders the elapsed time and enforces the maximum recording length.
 * @param {String} elapsedTime - elapsed time in the format mm:ss or hh:mm:ss
 */
function displayElapsedTimeDuringAudioRecording(elapsedTime) {
    // write the formatted time into the counter element
    elapsedTimeTag.innerHTML = elapsedTime
    // force-stop once the configured maximum number of hours is reached
    if (elapsedTimeReachedMaximumNumberOfHours(elapsedTime)) {
        // NOTE(review): called without a preview callback, so the auto-stopped
        // recording's blob is discarded — confirm this is intended
        stopAudioRecording()
    }
}
/** Checks whether the formatted elapsed time has hit the configured hour limit.
 * @param {String} elapsedTime - elapsed time in the format mm:ss or hh:mm:ss
 * @returns {Boolean} true when the hour segment equals maximumRecordingTimeInHours
 */
function elapsedTimeReachedMaximumNumberOfHours(elapsedTime) {
    const segments = elapsedTime.split(':')
    // an hour segment only exists in the three-part hh:mm:ss form
    if (segments.length !== 3) {
        return false
    }
    // zero-pad the limit so it compares against the zero-padded hour segment
    const limitAsString = maximumRecordingTimeInHours < 10 ? '0' + maximumRecordingTimeInHours : maximumRecordingTimeInHours.toString()
    return segments[0] === limitAsString
}
/** Computes the elapsed time since startTime, formatted "mm:ss", or "hh:mm:ss"
 * when at least one full hour has passed (days are folded into the hour count).
 * @param {Date} startTime - moment the recording started
 * @returns {String} zero-padded elapsed time
 */
function computeElapsedTime(startTime) {
    // explicit getTime() instead of the original implicit Date-minus-Date coercion;
    // whole seconds only — incomplete seconds are floored away
    const totalSeconds = Math.floor((Date.now() - startTime.getTime()) / 1000)
    // left-pad a non-negative integer to at least two digits
    const pad = (value) => (value < 10 ? '0' + value : String(value))
    const seconds = totalSeconds % 60
    const minutes = Math.floor(totalSeconds / 60) % 60
    // unbounded hour count: equivalent to the original hours + days * 24 folding
    const hours = Math.floor(totalSeconds / 3600)
    if (hours === 0) {
        return pad(minutes) + ':' + pad(seconds)
    }
    return pad(hours) + ':' + pad(minutes) + ':' + pad(seconds)
}
// API to handle audio recording: thin wrapper around getUserMedia + MediaRecorder
export const audioRecorder = {
    /** Stores the recorded audio as Blob objects of audio data as the recording continues*/
    audioBlobs: [] /*of type Blob[]*/,
    /** Stores the reference of the MediaRecorder instance that handles the MediaStream when recording starts*/
    mediaRecorder: null /*of type MediaRecorder*/,
    /** Stores the reference to the stream currently capturing the audio*/
    streamBeingCaptured: null /*of type MediaStream*/,
    /** Start recording the audio
     * @returns {Promise} - returns a promise that resolves if audio recording successfully started
     */
    start: function () {
        // Feature detection: reject with a sentinel message that startAudioRecording matches on
        if (!(navigator.mediaDevices && navigator.mediaDevices.getUserMedia)) {
            //Feature is not supported in browser
            //return a custom error
            return Promise.reject(new Error('mediaDevices API or getUserMedia method is not supported in this browser.'))
        } else {
            //Feature is supported in browser
            //create an audio stream
            return (
                navigator.mediaDevices
                    .getUserMedia({ audio: true } /*of type MediaStreamConstraints*/)
                    //returns a promise that resolves to the audio stream
                    .then((stream) /*of type MediaStream*/ => {
                        //save the reference of the stream to be able to stop it when necessary
                        audioRecorder.streamBeingCaptured = stream
                        //create a media recorder instance by passing that stream into the MediaRecorder constructor
                        audioRecorder.mediaRecorder = new MediaRecorder(stream)
                        /*the MediaRecorder interface of the MediaStream Recording API provides functionality to easily record media*/
                        //clear previously saved audio Blobs, if any
                        audioRecorder.audioBlobs = []
                        //add a dataavailable event listener in order to store the audio data Blobs when recording
                        audioRecorder.mediaRecorder.addEventListener('dataavailable', (event) => {
                            //store audio Blob object
                            audioRecorder.audioBlobs.push(event.data)
                        })
                        //start the recording by calling the start method on the media recorder
                        audioRecorder.mediaRecorder.start()
                    })
            )
            /* errors are not handled in the API because if its handled and the promise is chained, the .then after the catch will be executed*/
        }
    },
    /** Stop the started audio recording
     * @returns {Promise} - returns a promise that resolves to the audio as a blob file
     */
    stop: function () {
        //return a promise that would return the blob or URL of the recording
        return new Promise((resolve) => {
            //save audio type to pass to set the Blob type
            let mimeType = audioRecorder.mediaRecorder.mimeType
            //listen to the stop event in order to create & return a single Blob object
            audioRecorder.mediaRecorder.addEventListener('stop', () => {
                //create a single blob object, as we might have gathered a few Blob objects that needs to be joined as one
                let audioBlob = new Blob(audioRecorder.audioBlobs, { type: mimeType })
                //resolve promise with the single audio blob representing the recorded audio
                resolve(audioBlob)
            })
            // NOTE(review): cancel() both stops the recorder and resets the properties; the
            // 'stop' event still fires on the MediaRecorder instance captured above even
            // though audioRecorder.mediaRecorder has already been nulled by then
            audioRecorder.cancel()
        })
    },
    /** Cancel audio recording: stop the recorder and its stream without resolving a blob */
    cancel: function () {
        //stop the recording feature
        audioRecorder.mediaRecorder.stop()
        //stop all the tracks on the active stream in order to stop the stream
        audioRecorder.stopStream()
        //reset API properties for next recording
        audioRecorder.resetRecordingProperties()
    },
    /** Stop all the tracks on the active stream in order to stop the stream and remove
     * the red flashing dot showing in the tab
     */
    stopStream: function () {
        //stopping the capturing request by stopping all the tracks on the active stream
        audioRecorder.streamBeingCaptured
            .getTracks() //get all tracks from the stream
            .forEach((track) /*of type MediaStreamTrack*/ => track.stop()) //stop each one
    },
    /** Reset all the recording properties including the media recorder and stream being captured*/
    resetRecordingProperties: function () {
        audioRecorder.mediaRecorder = null
        audioRecorder.streamBeingCaptured = null
        /*No need to remove event listeners attached to mediaRecorder as
        If a DOM element which is removed is reference-free (no references pointing to it), the element itself is picked
        up by the garbage collector as well as any event handlers/listeners associated with it.
        getEventListeners(audioRecorder.mediaRecorder) will return an empty array of events.*/
    }
}

View File

@ -1,9 +1,11 @@
import { useState, useEffect } from 'react'
import { useState, useEffect, useRef } from 'react'
import PropTypes from 'prop-types'
import { useSelector } from 'react-redux'
// material-ui
import { useTheme } from '@mui/material/styles'
import { Box, List, Paper, Popper, ClickAwayListener } from '@mui/material'
import { ListItemButton, ListItemIcon, ListItemText, Typography, Box, List, Paper, Popper, ClickAwayListener } from '@mui/material'
import FiberManualRecordIcon from '@mui/icons-material/FiberManualRecord'
// third-party
import PerfectScrollbar from 'react-perfect-scrollbar'
@ -11,8 +13,6 @@ import PerfectScrollbar from 'react-perfect-scrollbar'
// project imports
import MainCard from 'ui-component/cards/MainCard'
import Transitions from 'ui-component/extended/Transitions'
import NavItem from 'layout/MainLayout/Sidebar/MenuList/NavItem'
import settings from 'menu-items/settings'
// ==============================|| SETTINGS ||============================== //
@ -20,9 +20,26 @@ import settings from 'menu-items/settings'
const Settings = ({ chatflow, isSettingsOpen, anchorEl, onSettingsItemClick, onUploadFile, onClose }) => {
const theme = useTheme()
const [settingsMenu, setSettingsMenu] = useState([])
const customization = useSelector((state) => state.customization)
const inputFile = useRef(null)
const [open, setOpen] = useState(false)
// Reads the selected file as text and forwards its contents to the parent via
// onUploadFile — presumably a chatflow JSON export, given the '.json' accept
// filter on the hidden input below; verify against the caller.
const handleFileUpload = (e) => {
    // picker dismissed without a selection
    if (!e.target.files) return
    // only the first selected file is loaded
    const file = e.target.files[0]
    const reader = new FileReader()
    reader.onload = (evt) => {
        // guard against aborted/failed reads that produce no result
        if (!evt?.target?.result) {
            return
        }
        // result is the file's text content (readAsText below)
        const { result } = evt.target
        onUploadFile(result)
    }
    reader.readAsText(file)
}
useEffect(() => {
if (chatflow && !chatflow.id) {
const settingsMenu = settings.children.filter((menu) => menu.id === 'loadChatflow')
@ -39,16 +56,40 @@ const Settings = ({ chatflow, isSettingsOpen, anchorEl, onSettingsItemClick, onU
// settings list items
const items = settingsMenu.map((menu) => {
return (
<NavItem
key={menu.id}
item={menu}
level={1}
navType='SETTINGS'
onClick={(id) => onSettingsItemClick(id)}
onUploadFile={onUploadFile}
const Icon = menu.icon
const itemIcon = menu?.icon ? (
<Icon stroke={1.5} size='1.3rem' />
) : (
<FiberManualRecordIcon
sx={{
width: customization.isOpen.findIndex((id) => id === menu?.id) > -1 ? 8 : 6,
height: customization.isOpen.findIndex((id) => id === menu?.id) > -1 ? 8 : 6
}}
fontSize={level > 0 ? 'inherit' : 'medium'}
/>
)
return (
<ListItemButton
key={menu.id}
sx={{
borderRadius: `${customization.borderRadius}px`,
mb: 0.5,
alignItems: 'flex-start',
py: 1.25,
pl: `24px`
}}
onClick={() => {
if (menu.id === 'loadChatflow' && inputFile) {
inputFile?.current.click()
} else {
onSettingsItemClick(menu.id)
}
}}
>
<ListItemIcon sx={{ my: 'auto', minWidth: !menu?.icon ? 18 : 36 }}>{itemIcon}</ListItemIcon>
<ListItemText primary={<Typography color='inherit'>{menu.title}</Typography>} />
</ListItemButton>
)
})
return (
@ -82,6 +123,14 @@ const Settings = ({ chatflow, isSettingsOpen, anchorEl, onSettingsItemClick, onU
<List>{items}</List>
</Box>
</PerfectScrollbar>
<input
type='file'
hidden
accept='.json'
ref={inputFile}
style={{ display: 'none' }}
onChange={(e) => handleFileUpload(e)}
/>
</MainCard>
</ClickAwayListener>
</Paper>