feat(gpt-runner-core): add Anthropic model support

This commit is contained in:
JinmingYang
2023-07-08 15:05:29 +08:00
parent 73418a6508
commit d5ed8690b1
25 changed files with 427 additions and 67 deletions

View File

@@ -12,9 +12,9 @@ export function mapStoredMessagesToChatMessages(
switch (message.name) {
case ChatRole.User:
return new HumanChatMessage(message.text)
case ChatRole.ASSISTANT:
case ChatRole.Assistant:
return new AIChatMessage(message.text)
case ChatRole.SYSTEM:
case ChatRole.System:
return new SystemChatMessage(message.text)
default:
throw new Error('Role must be defined for generic messages')
@@ -29,9 +29,9 @@ export function mapStoredMessageToChatTemplateMessages(
switch (message.name) {
case ChatRole.User:
return HumanMessagePromptTemplate.fromTemplate(message.text)
case ChatRole.ASSISTANT:
case ChatRole.Assistant:
return AIMessagePromptTemplate.fromTemplate(message.text)
case ChatRole.SYSTEM:
case ChatRole.System:
return SystemMessagePromptTemplate.fromTemplate(message.text)
default:
throw new Error('Role must be defined for generic messages')

View File

@@ -1,13 +1,15 @@
import type { BaseLanguageModel } from 'langchain/dist/base_language'
import { ChatModelType } from '@nicepkg/gpt-runner-shared/common'
import type { GetLLMChainParams } from './type'
import { getOpenaiLLM } from './models/openai.chain'
import { getHuggingFaceLLM } from './models/hugging-face.chain'
import { getAnthropicLLM } from './models/anthropic.model'
import { getHuggingFaceLLM } from './models/hugging-face.model'
import { getOpenaiLLM } from './models/openai.model'
export function getLLM(params: GetLLMChainParams): BaseLanguageModel {
const getLLMFns: ((params: GetLLMChainParams) => BaseLanguageModel | null)[] = [
getOpenaiLLM,
getAnthropicLLM,
getHuggingFaceLLM,
getOpenaiLLM,
]
if (!params.model.type) {

View File

@@ -0,0 +1,51 @@
import type { BaseLanguageModel } from 'langchain/dist/base_language'
import { ChatModelType } from '@nicepkg/gpt-runner-shared/common'
import { ChatAnthropic } from 'langchain/chat_models/anthropic'
import { CallbackManager } from 'langchain/callbacks'
import type { GetLLMChainParams } from '../type'
/**
 * Create an Anthropic (Claude) chat model instance from the resolved model config.
 *
 * Part of the getLLM provider chain: returns null when `params.model.type` is
 * not `ChatModelType.Anthropic`, so the next provider factory can be tried.
 *
 * @param params model config plus streaming/completion/error callbacks
 * @returns a streaming ChatAnthropic instance, or null for other providers
 */
export function getAnthropicLLM(params: GetLLMChainParams): BaseLanguageModel | null {
  const { model, onTokenStream, onComplete, onError } = params

  if (model.type === ChatModelType.Anthropic) {
    const { secrets, modelName, temperature, maxTokens, topP, topK } = model

    // NOTE: never log `model` here — `model.secrets` carries the API key,
    // and dumping it to the console would leak credentials.
    return new ChatAnthropic({
      streaming: true,
      maxRetries: 1,
      anthropicApiKey: secrets?.apiKey,
      anthropicApiUrl: secrets?.basePath,
      modelName,
      temperature,
      maxTokensToSample: maxTokens,
      topP,
      topK,
      callbackManager: CallbackManager.fromHandlers({
        // forward each generated token to the caller as it streams in
        handleLLMNewToken: async (token: string) => {
          onTokenStream?.(token)
        },
        handleLLMEnd: async () => {
          onComplete?.()
        },
        handleLLMError: async (e) => {
          console.log('handleLLMError Error: ', e)
          onError?.(e)
        },
        handleChainError: async (err) => {
          // When a chain/agent cannot parse the raw LLM output, surface the raw
          // text to the user instead of failing the whole request.
          if (err.message.includes('Could not parse LLM output: ')) {
            const output = err.message.split('Could not parse LLM output: ')[1]
            onTokenStream?.(`${output} \n\n`)
          }
          else {
            console.log('Chain Error: ', err)
            onError?.(err)
          }
        },
      }),
    })
  }

  return null
}

View File

@@ -2,6 +2,7 @@ export const MIN_NODE_VERSION = '16.15.0'
export const SECRET_KEY_PLACEHOLDER = '********'
export const STREAM_DONE_FLAG = '[DONE]'
export const GPT_RUNNER_OFFICIAL_FOLDER = '.gpt-runner'
export const DEFAULT_ANTHROPIC_API_BASE_PATH = 'https://api.anthropic.com'
export const DEFAULT_OPENAI_API_BASE_PATH = 'https://api.openai.com/v1'
export const DEFAULT_EXCLUDE_FILES = [

View File

@@ -0,0 +1,56 @@
import type { ChatModelType } from '../enum'
import type { BaseModelConfig } from './base.config'
export interface AnthropicSecrets {
  /**
   * The API key to use for Anthropic API requests.
   */
  apiKey?: string

  /**
   * Overrides the Anthropic base API URL (defaults to the official endpoint).
   */
  basePath?: string
}
export interface AnthropicModelConfig extends BaseModelConfig {
  /**
   * model type discriminator: always ChatModelType.Anthropic for this config
   */
  type: ChatModelType.Anthropic

  /**
   * Anthropic secrets config (API key, base URL override)
   */
  secrets?: AnthropicSecrets

  /** Amount of randomness injected into the response. Ranges
   * from 0 to 1. Use temp closer to 0 for analytical /
   * multiple choice, and temp closer to 1 for creative
   * and generative tasks.
   */
  temperature?: number

  /**
   * Maximum number of tokens to generate in the completion. -1 returns as many
   * tokens as possible given the prompt and the model's maximum context size.
   */
  maxTokens?: number

  /** Does nucleus sampling, in which we compute the
   * cumulative distribution over all the options for each
   * subsequent token in decreasing probability order and
   * cut it off once it reaches a particular probability
   * specified by top_p. Defaults to -1, which disables it.
   * Note that you should either alter temperature or top_p,
   * but not both.
   */
  topP?: number

  /**
   * Only sample from the top K options for each subsequent
   * token. Used to remove "long tail" low probability
   * responses. Defaults to -1, which disables it.
   */
  topK?: number
}

View File

@@ -1,4 +1,5 @@
import type { ChatModelType } from '../enum'
import type { AnthropicModelConfig } from './anthropic.config'
import type { HuggingFaceModelConfig } from './hugging-face.config'
import type { OpenaiModelConfig } from './openai.config'
@@ -20,8 +21,9 @@ export interface BaseModelConfig {
}
export interface ChatModelTypeMap {
[ChatModelType.Openai]: OpenaiModelConfig
[ChatModelType.Anthropic]: AnthropicModelConfig
[ChatModelType.HuggingFace]: HuggingFaceModelConfig
[ChatModelType.Openai]: OpenaiModelConfig
}
export type PartialChatModelTypeMap = Partial<ChatModelTypeMap>

View File

@@ -1,3 +1,4 @@
export * from './anthropic.config'
export * from './base.config'
export * from './hugging-face.config'
export * from './openai.config'

View File

@@ -1,12 +1,13 @@
export enum ChatModelType {
Openai = 'openai',
HuggingFace = 'huggingFace',
Anthropic = 'anthropic',
}
export enum ChatRole {
User = 'user',
ASSISTANT = 'assistant',
SYSTEM = 'system',
Assistant = 'assistant',
System = 'system',
}
export enum ChatMessageStatus {
@@ -58,6 +59,8 @@ export enum LocaleLang {
}
export enum SecretStorageKey {
Anthropic = 'anthropic',
HuggingFace = 'huggingFace',
Openai = 'openai',
Proxy = 'proxy',
}

View File

@@ -0,0 +1,18 @@
import { z } from 'zod'
import { type AnthropicModelConfig, type AnthropicSecrets, ChatModelType } from '../../types'
import { DEFAULT_ANTHROPIC_API_BASE_PATH } from '../../helpers'
import { BaseModelConfigSchema } from './base.zod'
// Runtime validation schema for AnthropicSecrets.
// Note: `.optional().default(...)` means a missing basePath is filled with the
// official endpoint at parse time, while the input type still allows omission.
export const AnthropicSecretsSchema = z.object({
  apiKey: z.string().optional().describe('The Anthropic API key'),
  basePath: z.string().optional().default(DEFAULT_ANTHROPIC_API_BASE_PATH).describe('The Anthropic base API url'),
}) satisfies z.ZodType<AnthropicSecrets>
// Runtime validation schema for AnthropicModelConfig; the `type` literal acts
// as the discriminator when this schema participates in the ChatModel union.
export const AnthropicModelConfigSchema = BaseModelConfigSchema.extend({
  type: z.literal(ChatModelType.Anthropic).describe('Use Anthropic model'),
  secrets: AnthropicSecretsSchema.optional().describe('The Anthropic API secrets config'),
  temperature: z.number().optional().describe('The temperature for the Anthropic model'),
  maxTokens: z.number().optional().describe('The maximum number of tokens for the Anthropic model'),
  topP: z.number().optional().describe('The top P value for the Anthropic model'),
  topK: z.number().optional().describe('The top K value for the Anthropic model'),
}) satisfies z.ZodType<AnthropicModelConfig>

View File

@@ -1,8 +1,9 @@
import type { z } from 'zod'
import type { GetModelConfigType } from '../../types'
import { ChatModelType } from '../../types'
import { OpenaiModelConfigSchema, OpenaiSecretsSchema } from './openai.zod'
import { AnthropicModelConfigSchema, AnthropicSecretsSchema } from './anthropic.zod'
import { HuggingFaceModelConfigSchema, HuggingFaceSecretsSchema } from './hugging-face.zod'
import { OpenaiModelConfigSchema, OpenaiSecretsSchema } from './openai.zod'
export * from './base.zod'
export * from './openai.zod'
@@ -15,14 +16,18 @@ export function getModelConfigTypeSchema<T extends ChatModelType>(modelType: T,
secrets: z.ZodType<GetModelConfigType<key, 'secrets'>>
}
} = {
[ChatModelType.Openai]: {
config: OpenaiModelConfigSchema,
secrets: OpenaiSecretsSchema,
[ChatModelType.Anthropic]: {
config: AnthropicModelConfigSchema,
secrets: AnthropicSecretsSchema,
},
[ChatModelType.HuggingFace]: {
config: HuggingFaceModelConfigSchema,
secrets: HuggingFaceSecretsSchema,
},
[ChatModelType.Openai]: {
config: OpenaiModelConfigSchema,
secrets: OpenaiSecretsSchema,
},
}
return chatModelTypeSchemaMap[modelType][schemaType]
}

View File

@@ -9,11 +9,11 @@ export const OpenaiSecretsSchema = z.object({
// username: z.string().optional().describe('The OpenAI username'),
// password: z.string().optional().describe('The OpenAI password'),
accessToken: z.string().optional().describe('The OpenAI access token'),
basePath: z.string().optional().default(DEFAULT_OPENAI_API_BASE_PATH).describe('The Chatgpt base path'),
basePath: z.string().optional().default(DEFAULT_OPENAI_API_BASE_PATH).describe('The OpenAI base API path'),
}) satisfies z.ZodType<OpenaiSecrets>
export const OpenaiModelConfigSchema = BaseModelConfigSchema.extend({
type: z.literal(ChatModelType.Openai).describe('Use Open AI model'),
type: z.literal(ChatModelType.Openai).describe('Use OpenAI model'),
secrets: OpenaiSecretsSchema.optional().describe('The OpenAI API secrets config'),
temperature: z.number().optional().describe('The temperature for the OpenAI model'),
maxTokens: z.number().optional().describe('The maximum number of tokens for the OpenAI model'),

View File

@@ -2,8 +2,9 @@ import { z } from 'zod'
import { type ChatModel, ChatModelType, type FilterPattern, type FormCheckboxGroupConfig, type FormFieldBaseConfig, type FormInputConfig, type FormItemConfig, type FormOption, type FormRadioGroupConfig, type FormSelectConfig, type FormTextareaConfig, type SingleChatMessage, type SingleFileConfig, type UserConfig, type UserConfigForUser } from '../../types'
import { ChatRoleSchema } from '../enum.zod'
import type { PartialChatModelTypeMap } from './../../types/config/base.config'
import { OpenaiModelConfigSchema } from './openai.zod'
import { AnthropicModelConfigSchema } from './anthropic.zod'
import { HuggingFaceModelConfigSchema } from './hugging-face.zod'
import { OpenaiModelConfigSchema } from './openai.zod'
export const FilterPatternSchema = z.union([
z.array(z.union([z.string(), z.instanceof(RegExp)])),
@@ -14,15 +15,16 @@ export const FilterPatternSchema = z.union([
z.undefined(),
]) satisfies z.ZodType<FilterPattern>
// OpenaiModelConfigSchema or HuggingFaceModelConfigSchema
export const ChatModelSchema = z.union([
OpenaiModelConfigSchema,
AnthropicModelConfigSchema,
HuggingFaceModelConfigSchema,
OpenaiModelConfigSchema,
]) satisfies z.ZodType<ChatModel>
export const PartialChatModelTypeMapSchema = z.object({
[ChatModelType.Openai]: OpenaiModelConfigSchema.optional(),
[ChatModelType.Anthropic]: AnthropicModelConfigSchema.optional(),
[ChatModelType.HuggingFace]: HuggingFaceModelConfigSchema.optional(),
[ChatModelType.Openai]: OpenaiModelConfigSchema.optional(),
}) satisfies z.ZodType<PartialChatModelTypeMap>
export const UserConfigSchema = z.object({

View File

@@ -59,17 +59,21 @@
"context_settings_selected_files_checkbox_label": "Ausgewählte Dateien als Aufforderung. Aktuell ausgewählte<FileNumWrapper>{{fileNum}}</FileNumWrapper>Dateien<TokenNumWrapper>{{tokenNum}}</TokenNumWrapper>Token.",
"context_settings_all_file_paths_checkbox_label": "Alle Dateipfade als Aufforderung<TokenNumWrapper>{{tokenNum}}</TokenNumWrapper>Token.",
"model_settings_btn": "Modell-Einstellungen",
"openai_model_name": "Modellname",
"openai_temperature": "Temperatur",
"openai_max_tokens": "Maximale Antwort-Token",
"openai_top_p": "Top P",
"openai_frequency_penalty": "Frequenzstrafe",
"openai_presence_penalty": "Anwesenheitsstrafe",
"model_name": "Modellname",
"temperature": "Temperatur",
"max_tokens": "Maximale Antwort-Token",
"top_p": "Top P",
"top_k": "Top K",
"frequency_penalty": "Frequenzstrafe",
"presence_penalty": "Anwesenheitsstrafe",
"version": "Version",
"github": "Github",
"reward": "Belohnung",
"contributors": "Mitwirkende",
"buy_me_a_coffee": "Kauf mir einen Kaffee",
"anthropic_api_key": "Anthropic API-Schlüssel",
"anthropic_api_key_placeholder": "Bitte geben Sie den Anthropic API-Schlüssel ein",
"anthropic_api_base_path": "Anthropic API-Basispfad",
"openai_api_key": "OpenAI API-Schlüssel",
"openai_api_key_placeholder": "Bitte geben Sie den OpenAI API-Schlüssel ein",
"openai_api_base_path": "OpenAI API-Basispfad",
@@ -90,4 +94,4 @@
"file_editor_forgot_save_tips_title": "Möchten Sie die Änderungen an {{fileName}} speichern?",
"file_editor_forgot_save_tips_content": "Ihre Änderungen gehen verloren, wenn Sie sie nicht speichern."
}
}
}

View File

@@ -59,17 +59,21 @@
"context_settings_selected_files_checkbox_label": "Selected Files As Prompt. Current Selected <FileNumWrapper>{{fileNum}}</FileNumWrapper> Files <TokenNumWrapper>{{tokenNum}}</TokenNumWrapper> Tokens.",
"context_settings_all_file_paths_checkbox_label": "All File Path As Prompt <TokenNumWrapper>{{tokenNum}}</TokenNumWrapper> Tokens.",
"model_settings_btn": "Model Settings",
"openai_model_name": "Model Name",
"openai_temperature": "Temperature",
"openai_max_tokens": "Max Reply Tokens",
"openai_top_p": "Top P",
"openai_frequency_penalty": "Frequency Penalty",
"openai_presence_penalty": "Presence Penalty",
"model_name": "Model Name",
"temperature": "Temperature",
"max_tokens": "Max Reply Tokens",
"top_p": "Top P",
"top_k": "Top K",
"frequency_penalty": "Frequency Penalty",
"presence_penalty": "Presence Penalty",
"version": "Version",
"github": "Github",
"reward": "Reward",
"contributors": "Contributors",
"buy_me_a_coffee": "buy me a coffee",
"anthropic_api_key": "Anthropic API Key",
"anthropic_api_key_placeholder": "Please input Anthropic API Key",
"anthropic_api_base_path": "Anthropic API Base Path",
"openai_api_key": "OpenAI API Key",
"openai_api_key_placeholder": "Please input OpenAI API Key",
"openai_api_base_path": "OpenAI API Base Path",
@@ -90,4 +94,4 @@
"file_editor_forgot_save_tips_title": "Do you want to save changes to {{fileName}}?",
"file_editor_forgot_save_tips_content": "Your changes will be lost if you don't save them."
}
}
}

View File

@@ -59,17 +59,21 @@
"context_settings_selected_files_checkbox_label": "選択したファイルをプロンプトとして使用。現在選択中<FileNumWrapper>{{fileNum}}</FileNumWrapper>ファイル<TokenNumWrapper>{{tokenNum}}</TokenNumWrapper>トークン。",
"context_settings_all_file_paths_checkbox_label": "すべてのファイルパスをプロンプトとして使用<TokenNumWrapper>{{tokenNum}}</TokenNumWrapper>トークン。",
"model_settings_btn": "モデル設定",
"openai_model_name": "モデル名",
"openai_temperature": "温度",
"openai_max_tokens": "最大回答トークン数",
"openai_top_p": "トップP",
"openai_frequency_penalty": "頻度ペナルティ",
"openai_presence_penalty": "存在ペナルティ",
"model_name": "モデル名",
"temperature": "温度",
"max_tokens": "最大回答トークン数",
"top_p": "Top P",
"top_k": "Top K",
"frequency_penalty": "頻度ペナルティ",
"presence_penalty": "存在ペナルティ",
"version": "バージョン",
"github": "GitHub",
"reward": "寄付",
"contributors": "貢献者",
"buy_me_a_coffee": "コーヒーを買ってください",
"anthropic_api_key": "Anthropic APIキー",
"anthropic_api_key_placeholder": "Anthropic API キーを入力してください",
"anthropic_api_base_path": "Anthropic API ベースパス",
"openai_api_key": "OpenAI APIキー",
"openai_api_key_placeholder": "OpenAI API キーを入力してください",
"openai_api_base_path": "OpenAI API ベースパス",
@@ -90,4 +94,4 @@
"file_editor_forgot_save_tips_title": "変更を{{fileName}}に保存しますか?",
"file_editor_forgot_save_tips_content": "保存しない場合、変更は失われます。"
}
}
}

View File

@@ -59,17 +59,21 @@
"context_settings_selected_files_checkbox_label": "将选定的文件作为提示,当前选定 <FileNumWrapper>{{fileNum}}</FileNumWrapper> 个文件,<TokenNumWrapper>{{tokenNum}}</TokenNumWrapper> tokens",
"context_settings_all_file_paths_checkbox_label": "将所有文件路径作为提示,<TokenNumWrapper>{{tokenNum}}</TokenNumWrapper> tokens",
"model_settings_btn": "模型设置",
"openai_model_name": "模型名称",
"openai_temperature": "温度",
"openai_max_tokens": "最大回复令牌",
"openai_top_p": "Top P",
"openai_frequency_penalty": "频率惩罚",
"openai_presence_penalty": "存在惩罚",
"model_name": "模型名称",
"temperature": "温度",
"max_tokens": "最大回复令牌",
"top_p": "Top P",
"top_k": "Top K",
"frequency_penalty": "频率惩罚",
"presence_penalty": "存在惩罚",
"version": "版本",
"github": "GitHub",
"reward": "赞赏",
"contributors": "贡献者",
"buy_me_a_coffee": "请我喝杯咖啡",
"anthropic_api_key": "Anthropic API Key",
"anthropic_api_key_placeholder": "请输入 Anthropic API Key",
"anthropic_api_base_path": "Anthropic API 基础路径",
"openai_api_key": "OpenAI API Key",
"openai_api_key_placeholder": "请输入 OpenAI API Key",
"openai_api_base_path": "OpenAI API 基础路径",
@@ -90,4 +94,4 @@
"file_editor_forgot_save_tips_title": "你想要保存对{{fileName}}的更改吗?",
"file_editor_forgot_save_tips_content": "如果你不保存,你的改动将会丢失."
}
}
}

View File

@@ -59,17 +59,21 @@
"context_settings_selected_files_checkbox_label": "將選定的文件作為提示,當前選定 <FileNumWrapper>{{fileNum}}</FileNumWrapper> 個文件, <TokenNumWrapper>{{tokenNum}}</TokenNumWrapper> tokens",
"context_settings_all_file_paths_checkbox_label": "將所有文件路徑作為提示,<TokenNumWrapper>{{tokenNum}}</TokenNumWrapper> tokens",
"model_settings_btn": "模型設置",
"openai_model_name": "模型名稱",
"openai_temperature": "溫度",
"openai_max_tokens": "最大回覆令牌",
"openai_top_p": "P",
"openai_frequency_penalty": "頻率處罰",
"openai_presence_penalty": "存在處罰",
"model_name": "模型名稱",
"temperature": "溫度",
"max_tokens": "最大回覆令牌",
"top_p": "Top P",
"top_k": "Top K",
"frequency_penalty": "頻率處罰",
"presence_penalty": "存在處罰",
"version": "版本",
"github": "GitHub",
"reward": "贊賞",
"contributors": "貢獻者",
"buy_me_a_coffee": "請我喝杯咖啡",
"anthropic_api_key": "Anthropic API Key",
"anthropic_api_key_placeholder": "請輸入 Anthropic API Key",
"anthropic_api_base_path": "Anthropic API 基礎路徑",
"openai_api_key": "OpenAI API Key",
"openai_api_key_placeholder": "請輸入 OpenAI API Key",
"openai_api_base_path": "OpenAI API 基礎路徑",
@@ -90,4 +94,4 @@
"file_editor_forgot_save_tips_title": "你想要保存對{{fileName}}的更改嗎?",
"file_editor_forgot_save_tips_content": "如果你不保存,你的改動將會丟失."
}
}
}

View File

@@ -253,7 +253,7 @@ export const ChatPanel: FC<ChatPanelProps> = memo((props) => {
messageItems: chatInstance?.messages.map((message, i) => {
const isLast = i === chatInstance.messages.length - 1
const isLastTwo = i >= chatInstance.messages.length - 2
const isAi = message.name === ChatRole.ASSISTANT
const isAi = message.name === ChatRole.Assistant
const handleRegenerateMessage = () => {
if (!isLast)

View File

@@ -0,0 +1,137 @@
import type { AnthropicModelConfig, SingleFileConfig } from '@nicepkg/gpt-runner-shared/common'
import { memo, useState } from 'react'
import type { FC } from 'react'
import { useTranslation } from 'react-i18next'
import { HookFormInput } from '../../../../../../../components/hook-form/hook-form-input'
import { type ISelectOption, SelectOption } from '../../../../../../../components/select-option'
import { BaseModelSettings, type BaseModelSettingsFormItemConfig } from '../base-model-settings'
// Form fields editable in the Anthropic model-settings panel.
interface FormData extends Pick<AnthropicModelConfig, 'modelName' | 'temperature' | 'maxTokens' | 'topP' | 'topK'> {
}

export interface AnthropicModelSettingsProps {
  singleFileConfig?: SingleFileConfig
}

/**
 * Model-parameter settings form for the Anthropic (Claude) provider:
 * model name (with quick-pick suggestions), temperature, max reply tokens,
 * top P and top K.
 */
export const AnthropicModelSettings: FC<AnthropicModelSettingsProps> = memo((props) => {
  const { singleFileConfig } = props
  const { t } = useTranslation()

  // Suggested Claude model names shown below the free-form model name input.
  const [modelTipOptions] = useState<ISelectOption[]>([
    {
      label: 'claude-1',
      value: 'claude-1',
    },
    {
      label: 'claude-1-100k',
      value: 'claude-1-100k',
    },
    {
      label: 'claude-instant-1',
      value: 'claude-instant-1',
    },
    {
      label: 'claude-instant-1-100k',
      value: 'claude-instant-1-100k',
    },
  ])

  const formConfig: BaseModelSettingsFormItemConfig<FormData>[] = [
    {
      name: 'modelName',
      buildView: ({ buildLabel, useFormReturns: { control, formState, watch, setValue } }) => {
        return <>
          <HookFormInput
            name="modelName"
            label={buildLabel(t('chat_page.model_name'))}
            labelInLeft
            placeholder={''}
            errors={formState.errors}
            control={control}
          />
          <SelectOption
            options={modelTipOptions}
            value={watch('modelName')}
            onChange={(value) => {
              setValue('modelName', value)
            }} />
        </>
      },
    },
    {
      name: 'temperature',
      buildView: ({ buildLabel, useFormReturns: { control, formState } }) => {
        return <>
          <HookFormInput
            name="temperature"
            label={buildLabel(t('chat_page.temperature'))}
            labelInLeft
            isNumber
            placeholder={'0 ~ 1'}
            errors={formState.errors}
            control={control}
          />
        </>
      },
    },
    {
      name: 'maxTokens',
      buildView: ({ buildLabel, useFormReturns: { control, formState } }) => {
        return <>
          <HookFormInput
            name="maxTokens"
            label={buildLabel(t('chat_page.max_tokens'))}
            labelInLeft
            isNumber
            minNumber={0}
            placeholder={'0 ~ 2048'}
            errors={formState.errors}
            control={control}
          />
        </>
      },
    },
    {
      name: 'topP',
      buildView: ({ buildLabel, useFormReturns: { control, formState } }) => {
        return <>
          <HookFormInput
            name="topP"
            label={buildLabel(t('chat_page.top_p'))}
            labelInLeft
            minNumber={0}
            maxNumber={1}
            placeholder={'0 ~ 1'}
            isNumber
            errors={formState.errors}
            control={control}
          />
        </>
      },
    },
    {
      name: 'topK',
      buildView: ({ buildLabel, useFormReturns: { control, formState } }) => {
        // Unlike topP, topK is an integer count of candidate tokens, not a
        // 0~1 probability — so no maxNumber={1} cap here.
        return <>
          <HookFormInput
            name="topK"
            label={buildLabel(t('chat_page.top_k'))}
            labelInLeft
            minNumber={0}
            placeholder={''}
            isNumber
            errors={formState.errors}
            control={control}
          />
        </>
      },
    },
  ]

  return <BaseModelSettings singleFileConfig={singleFileConfig} formConfig={formConfig} />
})

AnthropicModelSettings.displayName = 'AnthropicModelSettings'

View File

@@ -0,0 +1,55 @@
import { DEFAULT_ANTHROPIC_API_BASE_PATH } from '@nicepkg/gpt-runner-shared/common'
import type { AnthropicSecrets, SingleFileConfig } from '@nicepkg/gpt-runner-shared/common'
import { type FC, memo } from 'react'
import { useTranslation } from 'react-i18next'
import { HookFormInput } from '../../../../../../../components/hook-form/hook-form-input'
import { BaseSecretsSettings, type BaseSecretsSettingsFormItemConfig } from '../base-secrets-settings'
interface FormData extends Pick<AnthropicSecrets, 'apiKey' | 'basePath'> {
}
export interface AnthropicSecretsSettingsProps {
singleFileConfig: SingleFileConfig
}
export const AnthropicSecretsSettings: FC<AnthropicSecretsSettingsProps> = memo((props) => {
const { singleFileConfig } = props
const { t } = useTranslation()
const formConfig: BaseSecretsSettingsFormItemConfig<FormData>[] = [
{
name: 'apiKey',
buildView: ({ useFormReturns: { control, formState } }) => {
return <>
<HookFormInput
label={t('chat_page.anthropic_api_key')}
placeholder={t('chat_page.anthropic_api_key_placeholder')}
name="apiKey"
errors={formState.errors}
control={control}
type="password"
/>
</>
},
},
{
name: 'basePath',
buildView: ({ useFormReturns: { control, formState } }) => {
return <>
<HookFormInput
label={t('chat_page.anthropic_api_base_path')}
placeholder={DEFAULT_ANTHROPIC_API_BASE_PATH}
name="basePath"
errors={formState.errors}
control={control}
/>
</>
},
},
]
return <BaseSecretsSettings singleFileConfig={singleFileConfig} formConfig={formConfig} />
})
AnthropicSecretsSettings.displayName = 'AnthropicSecretsSettings'

View File

@@ -3,6 +3,8 @@ import { ChatModelType, resolveSingleFileConfig } from '@nicepkg/gpt-runner-shar
import type { FC, ReactNode } from 'react'
import { memo, useMemo } from 'react'
import { useUserConfig } from '../../../../../../hooks/use-user-config.hook'
import { AnthropicSecretsSettings } from './anthropic-settings/secrets-settings'
import { AnthropicModelSettings } from './anthropic-settings/model-settings'
import { OpenaiSecretsSettings } from './openai-settings/secrets-settings'
import { OpenaiModelSettings } from './openai-settings/model-settings'
@@ -41,16 +43,21 @@ export const ModelSettings: FC<ModelSettingsProps> = memo((props) => {
const finalModelType = resolvedSingleFileConfig?.model?.type || ChatModelType.Openai
const modelTypeViewMap: Record<ChatModelType, Record<ModelSettingsViewType, () => ReactNode>> = {
[ChatModelType.Openai]: {
secrets: () => <OpenaiSecretsSettings singleFileConfig={resolvedSingleFileConfig} />,
model: () => <OpenaiModelSettings singleFileConfig={resolvedSingleFileConfig} />,
title: () => <>OpenAI</>,
[ChatModelType.Anthropic]: {
secrets: () => <AnthropicSecretsSettings singleFileConfig={resolvedSingleFileConfig} />,
model: () => <AnthropicModelSettings singleFileConfig={resolvedSingleFileConfig} />,
title: () => <>Anthropic</>,
},
[ChatModelType.HuggingFace]: {
secrets: () => <></>,
model: () => <></>,
title: () => <>Hugging Face</>,
},
[ChatModelType.Openai]: {
secrets: () => <OpenaiSecretsSettings singleFileConfig={resolvedSingleFileConfig} />,
model: () => <OpenaiModelSettings singleFileConfig={resolvedSingleFileConfig} />,
title: () => <>OpenAI</>,
},
}
return <>{modelTypeViewMap[finalModelType][viewType]()}</>

View File

@@ -41,7 +41,7 @@ export const OpenaiModelSettings: FC<OpenaiModelSettingsProps> = memo((props) =>
return <>
<HookFormInput
name="modelName"
label={buildLabel(t('chat_page.openai_model_name'))}
label={buildLabel(t('chat_page.model_name'))}
labelInLeft
placeholder={''}
errors={formState.errors}
@@ -62,7 +62,7 @@ export const OpenaiModelSettings: FC<OpenaiModelSettingsProps> = memo((props) =>
return <>
<HookFormInput
name="temperature"
label={buildLabel(t('chat_page.openai_temperature'))}
label={buildLabel(t('chat_page.temperature'))}
labelInLeft
isNumber
placeholder={'0 ~ 1'}
@@ -78,7 +78,7 @@ export const OpenaiModelSettings: FC<OpenaiModelSettingsProps> = memo((props) =>
return <>
<HookFormInput
name="maxTokens"
label={buildLabel(t('chat_page.openai_max_tokens'))}
label={buildLabel(t('chat_page.max_tokens'))}
labelInLeft
isNumber
minNumber={0}
@@ -95,7 +95,7 @@ export const OpenaiModelSettings: FC<OpenaiModelSettingsProps> = memo((props) =>
return <>
<HookFormInput
name="topP"
label={buildLabel(t('chat_page.openai_top_p'))}
label={buildLabel(t('chat_page.top_p'))}
labelInLeft
minNumber={0}
maxNumber={1}
@@ -113,7 +113,7 @@ export const OpenaiModelSettings: FC<OpenaiModelSettingsProps> = memo((props) =>
return <>
<HookFormInput
name="frequencyPenalty"
label={buildLabel(t('chat_page.openai_frequency_penalty'))}
label={buildLabel(t('chat_page.frequency_penalty'))}
labelInLeft
isNumber
minNumber={-2}
@@ -131,7 +131,7 @@ export const OpenaiModelSettings: FC<OpenaiModelSettingsProps> = memo((props) =>
return <>
<HookFormInput
name="presencePenalty"
label={buildLabel(t('chat_page.openai_presence_penalty'))}
label={buildLabel(t('chat_page.presence_penalty'))}
labelInLeft
isNumber
minNumber={-2}

View File

@@ -228,7 +228,7 @@ export const createChatSlice: StateCreator<
}
finalMessages.push({
name: ChatRole.ASSISTANT,
name: ChatRole.Assistant,
text: '',
})