lint fixes

pull/1898/head
vinodkiran 2024-03-07 20:09:05 +05:30
parent 7ab96a4c39
commit 2b0ca60686
4 changed files with 18 additions and 65 deletions

View File

@@ -4,21 +4,15 @@ import { BaseChatModel } from '@langchain/core/language_models/chat_models'
import { AIMessage, BaseMessage, HumanMessage } from '@langchain/core/messages'
import { ChainValues } from '@langchain/core/utils/types'
import { AgentStep } from '@langchain/core/agents'
import {
renderTemplate,
MessagesPlaceholder,
HumanMessagePromptTemplate,
PromptTemplate
} from "@langchain/core/prompts";
import { renderTemplate, MessagesPlaceholder, HumanMessagePromptTemplate, PromptTemplate } from '@langchain/core/prompts'
import { RunnableSequence } from '@langchain/core/runnables'
import { ChatConversationalAgent } from 'langchain/agents'
import { getBaseClasses } from '../../../src/utils'
import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler'
import { FlowiseMemory, ICommonObject, IMessage, INode, INodeData, INodeParams } from '../../../src/Interface'
import { AgentExecutor } from '../../../src/agents'
import { ChatOpenAI } from '../../chatmodels/ChatOpenAI/FlowiseChatOpenAI'
import { addImagesToMessages, llmSupportsVision } from "../../../src/multiModalUtils";
import { IVisionChatModal } from "../../../src/IVisionChatModal";
import { addImagesToMessages, llmSupportsVision } from '../../../src/multiModalUtils'
import { IVisionChatModal } from '../../../src/IVisionChatModal'
const DEFAULT_PREFIX = `Assistant is a large language model trained by OpenAI.

View File

@@ -11,7 +11,6 @@ import { FlowiseMemory, ICommonObject, INode, INodeData, INodeParams, MessageCon
import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler'
import { getBaseClasses, handleEscapeCharacters } from '../../../src/utils'
import { IVisionChatModal } from '../../../src/IVisionChatModal'
import { MessageContent } from 'llamaindex'
let systemMessage = `The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.`
const inputKey = 'input'

View File

@@ -1,22 +1,17 @@
import { BaseLanguageModel, BaseLanguageModelCallOptions } from "@langchain/core/language_models/base";
import { BaseLLMOutputParser, BaseOutputParser } from "@langchain/core/output_parsers";
import { HumanMessage } from "@langchain/core/messages";
import {
ChatPromptTemplate,
FewShotPromptTemplate,
HumanMessagePromptTemplate,
PromptTemplate
} from "@langchain/core/prompts";
import { OutputFixingParser } from "langchain/output_parsers";
import { LLMChain } from "langchain/chains";
import { ICommonObject, INode, INodeData, INodeOutputsValue, INodeParams } from "../../../src/Interface";
import { additionalCallbacks, ConsoleCallbackHandler, CustomChainHandler } from "../../../src/handler";
import { getBaseClasses, handleEscapeCharacters } from "../../../src/utils";
import { checkInputs, Moderation, streamResponse } from "../../moderation/Moderation";
import { formatResponse, injectOutputParser } from "../../outputparsers/OutputParserHelpers";
import { ChatOpenAI } from "../../chatmodels/ChatOpenAI/FlowiseChatOpenAI";
import { addImagesToMessages, llmSupportsVision } from "../../../src/multiModalUtils";
import { IVisionChatModal } from "../../../src/IVisionChatModal";
import { BaseLanguageModel, BaseLanguageModelCallOptions } from '@langchain/core/language_models/base'
import { BaseLLMOutputParser, BaseOutputParser } from '@langchain/core/output_parsers'
import { HumanMessage } from '@langchain/core/messages'
import { ChatPromptTemplate, FewShotPromptTemplate, HumanMessagePromptTemplate, PromptTemplate } from '@langchain/core/prompts'
import { OutputFixingParser } from 'langchain/output_parsers'
import { LLMChain } from 'langchain/chains'
import { ICommonObject, INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface'
import { additionalCallbacks, ConsoleCallbackHandler, CustomChainHandler } from '../../../src/handler'
import { getBaseClasses, handleEscapeCharacters } from '../../../src/utils'
import { checkInputs, Moderation, streamResponse } from '../../moderation/Moderation'
import { formatResponse, injectOutputParser } from '../../outputparsers/OutputParserHelpers'
import { ChatOpenAI } from '../../chatmodels/ChatOpenAI/FlowiseChatOpenAI'
import { addImagesToMessages, llmSupportsVision } from '../../../src/multiModalUtils'
import { IVisionChatModal } from '../../../src/IVisionChatModal'
class LLMChain_Chains implements INode {
label: string

View File

@@ -103,38 +103,6 @@ class AzureChatOpenAI_ChatModels implements INode {
step: 1,
optional: true,
additionalParams: true
// },
// {
// label: 'Allow Image Uploads',
// name: 'allowImageUploads',
// type: 'boolean',
// description:
// 'Automatically uses gpt-4-vision-preview when image is being uploaded from chat. Only works with LLMChain, Conversation Chain, ReAct Agent, and Conversational Agent',
// default: false,
// optional: true
// },
// {
// label: 'Image Resolution',
// description: 'This parameter controls the resolution in which the model views the image.',
// name: 'imageResolution',
// type: 'options',
// options: [
// {
// label: 'Low',
// name: 'low'
// },
// {
// label: 'High',
// name: 'high'
// },
// {
// label: 'Auto',
// name: 'auto'
// }
// ],
// default: 'low',
// optional: false,
// additionalParams: true
}
]
}
@@ -155,9 +123,6 @@ class AzureChatOpenAI_ChatModels implements INode {
const azureOpenAIApiDeploymentName = getCredentialParam('azureOpenAIApiDeploymentName', credentialData, nodeData)
const azureOpenAIApiVersion = getCredentialParam('azureOpenAIApiVersion', credentialData, nodeData)
// const allowImageUploads = nodeData.inputs?.allowImageUploads as boolean
// const imageResolution = nodeData.inputs?.imageResolution as string
const obj: Partial<AzureOpenAIInput> & BaseLLMParams & Partial<OpenAIChatInput> = {
temperature: parseFloat(temperature),
modelName,
@@ -176,7 +141,7 @@ class AzureChatOpenAI_ChatModels implements INode {
const multiModalOption: IMultiModalOption = {
image: {
allowImageUploads: false,
allowImageUploads: false
}
}