Skip to main content

Class: LlamaDeuce

Llama2 LLM implementation

Implements

Constructors

constructor

new LlamaDeuce(init?)

Parameters

Name — Type
init? — Partial<LlamaDeuce>

Defined in

packages/core/src/llm/LLM.ts:434

Properties

chatStrategy

chatStrategy: DeuceChatStrategy

Defined in

packages/core/src/llm/LLM.ts:427


hasStreaming

hasStreaming: boolean

Implementation of

LLM.hasStreaming

Defined in

packages/core/src/llm/LLM.ts:432


maxTokens

Optional maxTokens: number

Defined in

packages/core/src/llm/LLM.ts:430


model

model: "Llama-2-70b-chat-old" | "Llama-2-70b-chat-4bit" | "Llama-2-13b-chat-old" | "Llama-2-13b-chat-4bit" | "Llama-2-7b-chat-old" | "Llama-2-7b-chat-4bit"

Defined in

packages/core/src/llm/LLM.ts:426


replicateSession

replicateSession: ReplicateSession

Defined in

packages/core/src/llm/LLM.ts:431


temperature

temperature: number

Defined in

packages/core/src/llm/LLM.ts:428


topP

topP: number

Defined in

packages/core/src/llm/LLM.ts:429

Accessors

metadata

get metadata(): Object

Returns

Object

Name — Type
contextWindow — number
maxTokens — undefined | number
model — "Llama-2-70b-chat-old" | "Llama-2-70b-chat-4bit" | "Llama-2-13b-chat-old" | "Llama-2-13b-chat-4bit" | "Llama-2-7b-chat-old" | "Llama-2-7b-chat-4bit"
temperature — number
tokenizer — undefined
topP — number

Implementation of

LLM.metadata

Defined in

packages/core/src/llm/LLM.ts:454

Methods

chat

chat<T, R>(messages, _parentEvent?, streaming?): Promise<R>

Get a chat response from the LLM

Type parameters

Name — Type
T — extends undefined | boolean = undefined
R — T extends true ? AsyncGenerator<string, void, unknown> : ChatResponse

Parameters

Name — Type — Description
messages — ChatMessage[] — The return type of chat() and complete() is set by the "streaming" parameter being set to True.
_parentEvent? — Event — -
streaming? — T — -

Returns

Promise<R>

Implementation of

LLM.chat

Defined in

packages/core/src/llm/LLM.ts:592


complete

complete<T, R>(prompt, parentEvent?, streaming?): Promise<R>

Get a prompt completion from the LLM

Type parameters

Name — Type
T — extends undefined | boolean = undefined
R — T extends true ? AsyncGenerator<string, void, unknown> : ChatResponse

Parameters

Name — Type — Description
prompt — string — the prompt to complete
parentEvent? — Event — -
streaming? — T — -

Returns

Promise<R>

Implementation of

LLM.complete

Defined in

packages/core/src/llm/LLM.ts:632


mapMessageTypeA16Z

mapMessageTypeA16Z(messageType): string

Parameters

Name — Type
messageType — MessageType

Returns

string

Defined in

packages/core/src/llm/LLM.ts:501


mapMessagesToPrompt

mapMessagesToPrompt(messages): Object

Parameters

Name — Type
messages — ChatMessage[]

Returns

Object

Name — Type
prompt — string
systemPrompt — any

Defined in

packages/core/src/llm/LLM.ts:465


mapMessagesToPromptA16Z

mapMessagesToPromptA16Z(messages): Object

Parameters

Name — Type
messages — ChatMessage[]

Returns

Object

Name — Type
prompt — string
systemPrompt — undefined

Defined in

packages/core/src/llm/LLM.ts:487


mapMessagesToPromptMeta

mapMessagesToPromptMeta(messages, opts?): Object

Parameters

Name — Type
messages — ChatMessage[]
opts? — Object
opts.replicate4Bit? — boolean
opts.withBos? — boolean
opts.withNewlines? — boolean

Returns

Object

Name — Type
prompt — string
systemPrompt — any

Defined in

packages/core/src/llm/LLM.ts:514


tokens

tokens(messages): number

Calculates the number of tokens needed for the given chat messages

Parameters

Name — Type
messages — ChatMessage[]

Returns

number

Implementation of

LLM.tokens

Defined in

packages/core/src/llm/LLM.ts:450