diff --git a/packages/components/nodes/agents/ConversationalAgent/ConversationalAgent.ts b/packages/components/nodes/agents/ConversationalAgent/ConversationalAgent.ts index 62c46878..6bb042b6 100644 --- a/packages/components/nodes/agents/ConversationalAgent/ConversationalAgent.ts +++ b/packages/components/nodes/agents/ConversationalAgent/ConversationalAgent.ts @@ -4,21 +4,15 @@ import { BaseChatModel } from '@langchain/core/language_models/chat_models' import { AIMessage, BaseMessage, HumanMessage } from '@langchain/core/messages' import { ChainValues } from '@langchain/core/utils/types' import { AgentStep } from '@langchain/core/agents' -import { - renderTemplate, - MessagesPlaceholder, - HumanMessagePromptTemplate, - PromptTemplate -} from "@langchain/core/prompts"; +import { renderTemplate, MessagesPlaceholder, HumanMessagePromptTemplate, PromptTemplate } from '@langchain/core/prompts' import { RunnableSequence } from '@langchain/core/runnables' import { ChatConversationalAgent } from 'langchain/agents' import { getBaseClasses } from '../../../src/utils' import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler' import { FlowiseMemory, ICommonObject, IMessage, INode, INodeData, INodeParams } from '../../../src/Interface' import { AgentExecutor } from '../../../src/agents' -import { ChatOpenAI } from '../../chatmodels/ChatOpenAI/FlowiseChatOpenAI' -import { addImagesToMessages, llmSupportsVision } from "../../../src/multiModalUtils"; -import { IVisionChatModal } from "../../../src/IVisionChatModal"; +import { addImagesToMessages, llmSupportsVision } from '../../../src/multiModalUtils' +import { IVisionChatModal } from '../../../src/IVisionChatModal' const DEFAULT_PREFIX = `Assistant is a large language model trained by OpenAI. 
diff --git a/packages/components/nodes/chains/ConversationChain/ConversationChain.ts b/packages/components/nodes/chains/ConversationChain/ConversationChain.ts index 0d572eca..814fc1cc 100644 --- a/packages/components/nodes/chains/ConversationChain/ConversationChain.ts +++ b/packages/components/nodes/chains/ConversationChain/ConversationChain.ts @@ -11,7 +11,6 @@ import { FlowiseMemory, ICommonObject, INode, INodeData, INodeParams, MessageCon import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler' import { getBaseClasses, handleEscapeCharacters } from '../../../src/utils' import { IVisionChatModal } from '../../../src/IVisionChatModal' -import { MessageContent } from 'llamaindex' let systemMessage = `The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.` const inputKey = 'input' diff --git a/packages/components/nodes/chains/LLMChain/LLMChain.ts b/packages/components/nodes/chains/LLMChain/LLMChain.ts index a70a308e..738a8c4b 100644 --- a/packages/components/nodes/chains/LLMChain/LLMChain.ts +++ b/packages/components/nodes/chains/LLMChain/LLMChain.ts @@ -1,22 +1,17 @@ -import { BaseLanguageModel, BaseLanguageModelCallOptions } from "@langchain/core/language_models/base"; -import { BaseLLMOutputParser, BaseOutputParser } from "@langchain/core/output_parsers"; -import { HumanMessage } from "@langchain/core/messages"; -import { - ChatPromptTemplate, - FewShotPromptTemplate, - HumanMessagePromptTemplate, - PromptTemplate -} from "@langchain/core/prompts"; -import { OutputFixingParser } from "langchain/output_parsers"; -import { LLMChain } from "langchain/chains"; -import { ICommonObject, INode, INodeData, INodeOutputsValue, INodeParams } from "../../../src/Interface"; -import { additionalCallbacks, ConsoleCallbackHandler, CustomChainHandler } from 
"../../../src/handler"; -import { getBaseClasses, handleEscapeCharacters } from "../../../src/utils"; -import { checkInputs, Moderation, streamResponse } from "../../moderation/Moderation"; -import { formatResponse, injectOutputParser } from "../../outputparsers/OutputParserHelpers"; -import { ChatOpenAI } from "../../chatmodels/ChatOpenAI/FlowiseChatOpenAI"; -import { addImagesToMessages, llmSupportsVision } from "../../../src/multiModalUtils"; -import { IVisionChatModal } from "../../../src/IVisionChatModal"; +import { BaseLanguageModel, BaseLanguageModelCallOptions } from '@langchain/core/language_models/base' +import { BaseLLMOutputParser, BaseOutputParser } from '@langchain/core/output_parsers' +import { HumanMessage } from '@langchain/core/messages' +import { ChatPromptTemplate, FewShotPromptTemplate, HumanMessagePromptTemplate, PromptTemplate } from '@langchain/core/prompts' +import { OutputFixingParser } from 'langchain/output_parsers' +import { LLMChain } from 'langchain/chains' +import { ICommonObject, INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface' +import { additionalCallbacks, ConsoleCallbackHandler, CustomChainHandler } from '../../../src/handler' +import { getBaseClasses, handleEscapeCharacters } from '../../../src/utils' +import { checkInputs, Moderation, streamResponse } from '../../moderation/Moderation' +import { formatResponse, injectOutputParser } from '../../outputparsers/OutputParserHelpers' +import { ChatOpenAI } from '../../chatmodels/ChatOpenAI/FlowiseChatOpenAI' +import { addImagesToMessages, llmSupportsVision } from '../../../src/multiModalUtils' +import { IVisionChatModal } from '../../../src/IVisionChatModal' class LLMChain_Chains implements INode { label: string diff --git a/packages/components/nodes/chatmodels/AzureChatOpenAI/AzureChatOpenAI.ts b/packages/components/nodes/chatmodels/AzureChatOpenAI/AzureChatOpenAI.ts index 155469ef..785bd3c5 100644 --- 
a/packages/components/nodes/chatmodels/AzureChatOpenAI/AzureChatOpenAI.ts +++ b/packages/components/nodes/chatmodels/AzureChatOpenAI/AzureChatOpenAI.ts @@ -103,38 +103,6 @@ class AzureChatOpenAI_ChatModels implements INode { step: 1, optional: true, additionalParams: true - // }, - // { - // label: 'Allow Image Uploads', - // name: 'allowImageUploads', - // type: 'boolean', - // description: - // 'Automatically uses gpt-4-vision-preview when image is being uploaded from chat. Only works with LLMChain, Conversation Chain, ReAct Agent, and Conversational Agent', - // default: false, - // optional: true - // }, - // { - // label: 'Image Resolution', - // description: 'This parameter controls the resolution in which the model views the image.', - // name: 'imageResolution', - // type: 'options', - // options: [ - // { - // label: 'Low', - // name: 'low' - // }, - // { - // label: 'High', - // name: 'high' - // }, - // { - // label: 'Auto', - // name: 'auto' - // } - // ], - // default: 'low', - // optional: false, - // additionalParams: true } ] } @@ -155,9 +123,6 @@ class AzureChatOpenAI_ChatModels implements INode { const azureOpenAIApiDeploymentName = getCredentialParam('azureOpenAIApiDeploymentName', credentialData, nodeData) const azureOpenAIApiVersion = getCredentialParam('azureOpenAIApiVersion', credentialData, nodeData) - // const allowImageUploads = nodeData.inputs?.allowImageUploads as boolean - // const imageResolution = nodeData.inputs?.imageResolution as string - const obj: Partial<AzureOpenAIInput> & BaseLLMParams & Partial<OpenAIChatInput> = { temperature: parseFloat(temperature), modelName, @@ -176,7 +141,7 @@ class AzureChatOpenAI_ChatModels implements INode { const multiModalOption: IMultiModalOption = { image: { - allowImageUploads: false, + allowImageUploads: false } }