Update input type from LLMCache to BaseCache and rename cache variables for clarity

pull/989/head
vinodkiran 2023-10-08 22:59:43 +05:30
parent 12159f6730
commit 7d4337724d
19 changed files with 63 additions and 54 deletions

View File

@ -44,7 +44,7 @@ class AWSChatBedrock_ChatModels implements INode {
{
label: 'Cache',
name: 'cache',
type: 'LLMCache',
type: 'BaseCache',
optional: true
},
{
@ -138,7 +138,7 @@ class AWSChatBedrock_ChatModels implements INode {
const iModel = nodeData.inputs?.model as string
const iTemperature = nodeData.inputs?.temperature as string
const iMax_tokens_to_sample = nodeData.inputs?.max_tokens_to_sample as string
const cache = nodeData.inputs?.llmCache as BaseCache
const cache = nodeData.inputs?.cache as BaseCache
const obj: BaseBedrockInput & BaseLLMParams = {
region: iRegion,

View File

@ -36,7 +36,7 @@ class AzureChatOpenAI_ChatModels implements INode {
{
label: 'Cache',
name: 'cache',
type: 'LLMCache',
type: 'BaseCache',
optional: true
},
{
@ -115,7 +115,7 @@ class AzureChatOpenAI_ChatModels implements INode {
const presencePenalty = nodeData.inputs?.presencePenalty as string
const timeout = nodeData.inputs?.timeout as string
const streaming = nodeData.inputs?.streaming as boolean
const cache = nodeData.inputs?.llmCache as BaseCache
const cache = nodeData.inputs?.cache as BaseCache
const credentialData = await getCredentialData(nodeData.credential ?? '', options)
const azureOpenAIApiKey = getCredentialParam('azureOpenAIApiKey', credentialData, nodeData)

View File

@ -27,7 +27,7 @@ class Bittensor_ChatModels implements INode {
{
label: 'Cache',
name: 'cache',
type: 'LLMCache',
type: 'BaseCache',
optional: true
},
{
@ -42,7 +42,7 @@ class Bittensor_ChatModels implements INode {
async init(nodeData: INodeData, _: string): Promise<any> {
const system_prompt = nodeData.inputs?.system_prompt as string
const cache = nodeData.inputs?.llmCache as BaseCache
const cache = nodeData.inputs?.cache as BaseCache
const obj: Partial<BittensorInput> = {
systemPrompt: system_prompt

View File

@ -35,7 +35,7 @@ class ChatAnthropic_ChatModels implements INode {
{
label: 'Cache',
name: 'cache',
type: 'LLMCache',
type: 'BaseCache',
optional: true
},
{
@ -143,7 +143,7 @@ class ChatAnthropic_ChatModels implements INode {
const topP = nodeData.inputs?.topP as string
const topK = nodeData.inputs?.topK as string
const streaming = nodeData.inputs?.streaming as boolean
const cache = nodeData.inputs?.llmCache as BaseCache
const cache = nodeData.inputs?.cache as BaseCache
const credentialData = await getCredentialData(nodeData.credential ?? '', options)
const anthropicApiKey = getCredentialParam('anthropicApiKey', credentialData, nodeData)

View File

@ -34,7 +34,7 @@ class ChatGooglePaLM_ChatModels implements INode {
{
label: 'Cache',
name: 'cache',
type: 'LLMCache',
type: 'BaseCache',
optional: true
},
{
@ -103,7 +103,7 @@ class ChatGooglePaLM_ChatModels implements INode {
const temperature = nodeData.inputs?.temperature as string
const topP = nodeData.inputs?.topP as string
const topK = nodeData.inputs?.topK as string
const cache = nodeData.inputs?.llmCache as BaseCache
const cache = nodeData.inputs?.cache as BaseCache
const credentialData = await getCredentialData(nodeData.credential ?? '', options)
const googleMakerSuiteKey = getCredentialParam('googleMakerSuiteKey', credentialData, nodeData)

View File

@ -38,7 +38,7 @@ class GoogleVertexAI_ChatModels implements INode {
{
label: 'Cache',
name: 'cache',
type: 'LLMCache',
type: 'BaseCache',
optional: true
},
{
@ -120,7 +120,7 @@ class GoogleVertexAI_ChatModels implements INode {
const modelName = nodeData.inputs?.modelName as string
const maxOutputTokens = nodeData.inputs?.maxOutputTokens as string
const topP = nodeData.inputs?.topP as string
const cache = nodeData.inputs?.llmCache as BaseCache
const cache = nodeData.inputs?.cache as BaseCache
const obj: GoogleVertexAIChatInput<GoogleAuthOptions> = {
temperature: parseFloat(temperature),

View File

@ -34,7 +34,7 @@ class ChatHuggingFace_ChatModels implements INode {
{
label: 'Cache',
name: 'cache',
type: 'LLMCache',
type: 'BaseCache',
optional: true
},
{
@ -109,7 +109,7 @@ class ChatHuggingFace_ChatModels implements INode {
const hfTopK = nodeData.inputs?.hfTopK as string
const frequencyPenalty = nodeData.inputs?.frequencyPenalty as string
const endpoint = nodeData.inputs?.endpoint as string
const cache = nodeData.inputs?.llmCache as BaseCache
const cache = nodeData.inputs?.cache as BaseCache
const credentialData = await getCredentialData(nodeData.credential ?? '', options)
const huggingFaceApiKey = getCredentialParam('huggingFaceApiKey', credentialData, nodeData)

View File

@ -29,7 +29,7 @@ class ChatLocalAI_ChatModels implements INode {
{
label: 'Cache',
name: 'cache',
type: 'LLMCache',
type: 'BaseCache',
optional: true
},
{
@ -86,7 +86,7 @@ class ChatLocalAI_ChatModels implements INode {
const topP = nodeData.inputs?.topP as string
const timeout = nodeData.inputs?.timeout as string
const basePath = nodeData.inputs?.basePath as string
const cache = nodeData.inputs?.llmCache as BaseCache
const cache = nodeData.inputs?.cache as BaseCache
const obj: Partial<OpenAIChatInput> & BaseLLMParams & { openAIApiKey?: string } = {
temperature: parseFloat(temperature),

View File

@ -35,7 +35,7 @@ class ChatOpenAI_ChatModels implements INode {
{
label: 'Cache',
name: 'cache',
type: 'LLMCache',
type: 'BaseCache',
optional: true
},
{
@ -159,7 +159,7 @@ class ChatOpenAI_ChatModels implements INode {
const credentialData = await getCredentialData(nodeData.credential ?? '', options)
const openAIApiKey = getCredentialParam('openAIApiKey', credentialData, nodeData)
const cache = nodeData.inputs?.llmCache as BaseCache
const cache = nodeData.inputs?.cache as BaseCache
const obj: Partial<OpenAIChatInput> & BaseLLMParams & { openAIApiKey?: string } = {
temperature: parseFloat(temperature),

View File

@ -36,7 +36,7 @@ class ChatOpenAICustom_ChatModels implements INode {
{
label: 'Cache',
name: 'cache',
type: 'LLMCache',
type: 'BaseCache',
optional: true
},
{
@ -121,7 +121,7 @@ class ChatOpenAICustom_ChatModels implements INode {
const streaming = nodeData.inputs?.streaming as boolean
const basePath = nodeData.inputs?.basepath as string
const baseOptions = nodeData.inputs?.baseOptions
const cache = nodeData.inputs?.llmCache as BaseCache
const cache = nodeData.inputs?.cache as BaseCache
const credentialData = await getCredentialData(nodeData.credential ?? '', options)
const openAIApiKey = getCredentialParam('openAIApiKey', credentialData, nodeData)

View File

@ -2,6 +2,8 @@ import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Inter
import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
import { Bedrock } from 'langchain/llms/bedrock'
import { BaseBedrockInput } from 'langchain/dist/util/bedrock'
import { BaseCache } from 'langchain/schema'
import { BaseLLMParams } from 'langchain/llms/base'
/**
* I had to run the following to build the component
@ -39,6 +41,12 @@ class AWSBedrock_LLMs implements INode {
optional: true
}
this.inputs = [
{
label: 'Cache',
name: 'cache',
type: 'BaseCache',
optional: true
},
{
label: 'Region',
name: 'region',
@ -130,8 +138,8 @@ class AWSBedrock_LLMs implements INode {
const iModel = nodeData.inputs?.model as string
const iTemperature = nodeData.inputs?.temperature as string
const iMax_tokens_to_sample = nodeData.inputs?.max_tokens_to_sample as string
const obj: Partial<BaseBedrockInput> = {
const cache = nodeData.inputs?.cache as BaseCache
const obj: Partial<BaseBedrockInput> & BaseLLMParams = {
model: iModel,
region: iRegion,
temperature: parseFloat(iTemperature),
@ -157,6 +165,7 @@ class AWSBedrock_LLMs implements INode {
sessionToken: credentialApiSession
}
}
if (cache) obj.cache = cache
const amazonBedrock = new Bedrock(obj)
return amazonBedrock

View File

@ -33,8 +33,8 @@ class AzureOpenAI_LLMs implements INode {
this.inputs = [
{
label: 'Cache',
name: 'llmCache',
type: 'LLMCache',
name: 'cache',
type: 'BaseCache',
optional: true
},
{
@ -170,7 +170,7 @@ class AzureOpenAI_LLMs implements INode {
const azureOpenAIApiDeploymentName = getCredentialParam('azureOpenAIApiDeploymentName', credentialData, nodeData)
const azureOpenAIApiVersion = getCredentialParam('azureOpenAIApiVersion', credentialData, nodeData)
const llmCache = nodeData.inputs?.llmCache as BaseCache
const cache = nodeData.inputs?.cache as BaseCache
const obj: Partial<AzureOpenAIInput> & BaseLLMParams & Partial<OpenAIInput> = {
temperature: parseFloat(temperature),
@ -188,7 +188,7 @@ class AzureOpenAI_LLMs implements INode {
if (presencePenalty) obj.presencePenalty = parseFloat(presencePenalty)
if (timeout) obj.timeout = parseInt(timeout, 10)
if (bestOf) obj.bestOf = parseInt(bestOf, 10)
if (llmCache) obj.cache = llmCache
if (cache) obj.cache = cache
const model = new OpenAI(obj)
return model

View File

@ -27,8 +27,8 @@ class Bittensor_LLMs implements INode {
this.inputs = [
{
label: 'Cache',
name: 'llmCache',
type: 'LLMCache',
name: 'cache',
type: 'BaseCache',
optional: true
},
{
@ -52,13 +52,13 @@ class Bittensor_LLMs implements INode {
async init(nodeData: INodeData, _: string): Promise<any> {
const system_prompt = nodeData.inputs?.system_prompt as string
const topResponses = Number(nodeData.inputs?.topResponses as number)
const llmCache = nodeData.inputs?.llmCache as BaseCache
const cache = nodeData.inputs?.cache as BaseCache
const obj: Partial<BittensorInput> & BaseLLMParams = {
systemPrompt: system_prompt,
topResponses: topResponses
}
if (llmCache) obj.cache = llmCache
if (cache) obj.cache = cache
const model = new NIBittensorLLM(obj)
return model

View File

@ -33,8 +33,8 @@ class Cohere_LLMs implements INode {
this.inputs = [
{
label: 'Cache',
name: 'llmCache',
type: 'LLMCache',
name: 'cache',
type: 'BaseCache',
optional: true
},
{
@ -92,7 +92,7 @@ class Cohere_LLMs implements INode {
const temperature = nodeData.inputs?.temperature as string
const modelName = nodeData.inputs?.modelName as string
const maxTokens = nodeData.inputs?.maxTokens as string
const llmCache = nodeData.inputs?.llmCache as BaseCache
const cache = nodeData.inputs?.cache as BaseCache
const credentialData = await getCredentialData(nodeData.credential ?? '', options)
const cohereApiKey = getCredentialParam('cohereApiKey', credentialData, nodeData)
@ -103,7 +103,7 @@ class Cohere_LLMs implements INode {
if (maxTokens) obj.maxTokens = parseInt(maxTokens, 10)
if (modelName) obj.model = modelName
if (temperature) obj.temperature = parseFloat(temperature)
if (llmCache) obj.cache = llmCache
if (cache) obj.cache = cache
const model = new Cohere(obj)
return model
}

View File

@ -32,8 +32,8 @@ class GooglePaLM_LLMs implements INode {
this.inputs = [
{
label: 'Cache',
name: 'llmCache',
type: 'LLMCache',
name: 'cache',
type: 'BaseCache',
optional: true
},
{
@ -132,7 +132,7 @@ class GooglePaLM_LLMs implements INode {
const topP = nodeData.inputs?.topP as string
const topK = nodeData.inputs?.topK as string
const stopSequencesObj = nodeData.inputs?.stopSequencesObj
const llmCache = nodeData.inputs?.llmCache as BaseCache
const cache = nodeData.inputs?.cache as BaseCache
const credentialData = await getCredentialData(nodeData.credential ?? '', options)
const googleMakerSuiteKey = getCredentialParam('googleMakerSuiteKey', credentialData, nodeData)
@ -146,7 +146,7 @@ class GooglePaLM_LLMs implements INode {
if (maxOutputTokens) obj.maxOutputTokens = parseInt(maxOutputTokens, 10)
if (topP) obj.topP = parseFloat(topP)
if (topK) obj.topK = parseFloat(topK)
if (llmCache) obj.cache = llmCache
if (cache) obj.cache = cache
let parsedStopSequences: any | undefined = undefined
if (stopSequencesObj) {

View File

@ -37,8 +37,8 @@ class GoogleVertexAI_LLMs implements INode {
this.inputs = [
{
label: 'Cache',
name: 'llmCache',
type: 'LLMCache',
name: 'cache',
type: 'BaseCache',
optional: true
},
{
@ -127,7 +127,7 @@ class GoogleVertexAI_LLMs implements INode {
const modelName = nodeData.inputs?.modelName as string
const maxOutputTokens = nodeData.inputs?.maxOutputTokens as string
const topP = nodeData.inputs?.topP as string
const llmCache = nodeData.inputs?.llmCache as BaseCache
const cache = nodeData.inputs?.cache as BaseCache
const obj: Partial<GoogleVertexAITextInput> = {
temperature: parseFloat(temperature),
@ -137,7 +137,7 @@ class GoogleVertexAI_LLMs implements INode {
if (maxOutputTokens) obj.maxOutputTokens = parseInt(maxOutputTokens, 10)
if (topP) obj.topP = parseFloat(topP)
if (llmCache) obj.cache = llmCache
if (cache) obj.cache = cache
const model = new GoogleVertexAI(obj)
return model

View File

@ -33,8 +33,8 @@ class HuggingFaceInference_LLMs implements INode {
this.inputs = [
{
label: 'Cache',
name: 'llmCache',
type: 'LLMCache',
name: 'cache',
type: 'BaseCache',
optional: true
},
{
@ -113,7 +113,7 @@ class HuggingFaceInference_LLMs implements INode {
const credentialData = await getCredentialData(nodeData.credential ?? '', options)
const huggingFaceApiKey = getCredentialParam('huggingFaceApiKey', credentialData, nodeData)
const llmCache = nodeData.inputs?.llmCache as BaseCache
const cache = nodeData.inputs?.cache as BaseCache
const obj: Partial<HFInput> = {
model,
@ -128,7 +128,7 @@ class HuggingFaceInference_LLMs implements INode {
if (endpoint) obj.endpoint = endpoint
const huggingFace = new HuggingFaceInference(obj)
if (llmCache) huggingFace.cache = llmCache
if (cache) huggingFace.cache = cache
return huggingFace
}

View File

@ -34,8 +34,8 @@ class OpenAI_LLMs implements INode {
this.inputs = [
{
label: 'Cache',
name: 'llmCache',
type: 'LLMCache',
name: 'cache',
type: 'BaseCache',
optional: true
},
{
@ -157,7 +157,7 @@ class OpenAI_LLMs implements INode {
const credentialData = await getCredentialData(nodeData.credential ?? '', options)
const openAIApiKey = getCredentialParam('openAIApiKey', credentialData, nodeData)
const llmCache = nodeData.inputs?.llmCache as BaseCache
const cache = nodeData.inputs?.cache as BaseCache
const obj: Partial<OpenAIInput> & BaseLLMParams & { openAIApiKey?: string } = {
temperature: parseFloat(temperature),
@ -174,7 +174,7 @@ class OpenAI_LLMs implements INode {
if (batchSize) obj.batchSize = parseInt(batchSize, 10)
if (bestOf) obj.bestOf = parseInt(bestOf, 10)
if (llmCache) obj.cache = llmCache
if (cache) obj.cache = cache
let parsedBaseOptions: any | undefined = undefined
if (baseOptions) {

View File

@ -34,8 +34,8 @@ class Replicate_LLMs implements INode {
this.inputs = [
{
label: 'Cache',
name: 'llmCache',
type: 'LLMCache',
name: 'cache',
type: 'BaseCache',
optional: true
},
{
@ -111,7 +111,7 @@ class Replicate_LLMs implements INode {
const name = modelName.split(':')[0].split('/').pop()
const org = modelName.split(':')[0].split('/')[0]
const llmCache = nodeData.inputs?.llmCache as BaseCache
const cache = nodeData.inputs?.cache as BaseCache
const obj: ReplicateInput & BaseLLMParams = {
model: `${org}/${name}:${version}`,
@ -130,7 +130,7 @@ class Replicate_LLMs implements INode {
}
if (Object.keys(inputs).length) obj.input = inputs
if (llmCache) obj.cache = llmCache
if (cache) obj.cache = cache
const model = new Replicate(obj)
return model