refactor(gpt-runner-core): rename the langchain utils and update docs

This commit is contained in:
JinmingYang
2023-07-20 00:14:16 +08:00
parent 62b90dc233
commit acfc12c6e3
24 changed files with 648 additions and 1206 deletions

View File

@@ -97,7 +97,7 @@ You can see the web interface in your browser at [http://localhost:3003](http://
### The second way: VSCode Extension
> Requirements VSCode >= 1.72.0
> Requirements VSCode >= 1.78.0
Install the [GPT-Runner VSCode Extension](https://marketplace.visualstudio.com/items?itemName=nicepkg.gpt-runner) from the VSCode Marketplace.

View File

@@ -91,7 +91,7 @@ npx gptr
### 方式二：VSCode 扩展
> 要求 VSCode >= 1.72.0
> 要求 VSCode >= 1.78.0
从 VSCode Marketplace 安装 [GPT-Runner VSCode 扩展](https://marketplace.visualstudio.com/items?itemName=nicepkg.gpt-runner)。

View File

@@ -25,7 +25,7 @@
"test:ci": "pnpm build && pnpm typecheck && pnpm lint && pnpm test"
},
"devDependencies": {
"@antfu/eslint-config": "^0.39.7",
"@antfu/eslint-config": "^0.39.8",
"@nicepkg/gpt-runner": "workspace:*",
"@nicepkg/gpt-runner-cli": "workspace:*",
"@nicepkg/gpt-runner-core": "workspace:*",
@@ -33,7 +33,7 @@
"@types/fs-extra": "^11.0.1",
"@types/node": "^18.16.19",
"@types/prettier": "^2.7.3",
"@types/react": "^18.2.14",
"@types/react": "^18.2.15",
"@vitejs/plugin-legacy": "^4.1.0",
"@vitest/ui": "^0.33.0",
"bumpp": "^9.1.1",
@@ -46,20 +46,20 @@
"jsdom": "^22.1.0",
"lint-staged": "^13.2.3",
"msw": "1.2.2",
"pnpm": "8.6.7",
"pnpm": "8.6.9",
"prettier": "^3.0.0",
"react": "^18.2.0",
"rollup": "^3.26.2",
"rollup": "^3.26.3",
"semver": "^7.5.4",
"simple-git-hooks": "^2.8.1",
"taze": "^0.11.2",
"terser": "^5.19.0",
"terser": "^5.19.1",
"tsup": "^7.1.0",
"typescript": "^5.1.6",
"unbuild": "^0.8.11",
"unplugin-auto-import": "^0.16.6",
"vite": "^4.4.3",
"vite-plugin-inspect": "^0.7.32",
"vite": "^4.4.4",
"vite-plugin-inspect": "^0.7.33",
"vite-plugin-pages": "^0.31.0",
"vitest": "^0.33.0"
},
@@ -77,4 +77,4 @@
"eslint --cache --fix"
]
}
}
}

View File

@@ -48,7 +48,8 @@
"dependencies": {
"@nicepkg/gpt-runner-shared": "workspace:*",
"ignore": "^5.2.4",
"langchain": "^0.0.107",
"unconfig": "^0.3.9"
"langchain": "^0.0.112",
"unconfig": "^0.3.9",
"zod": "^3.21.4"
}
}
}

View File

@@ -0,0 +1,20 @@
import { DynamicStructuredTool } from 'langchain/tools'
import { z } from 'zod'
/**
 * Build the structured tool used by the agent executor to extract
 * file CRUD operations from a model answer.
 *
 * The tool's schema accepts a list of operations, each with an operation
 * type, a full file path, and (optionally) file content. Its `func`
 * simply serialises the extracted operations back to JSON so the caller
 * can act on them.
 *
 * Fix: removed a leftover debug `console.log` from the tool callback.
 *
 * @returns a `DynamicStructuredTool` named `curd-files`.
 */
export function getCurdFilesAgent() {
  return new DynamicStructuredTool({
    name: 'curd-files',
    description: 'Please extract the core information of curd files based on your own answer to the user, and I will Create, update, read, and delete files based on the core information you gave. You need to give full file path and file content to me',
    schema: z.object({
      data: z.array(z.object({
        type: z.enum(['create', 'update', 'read', 'delete']).describe('The type of operation to perform'),
        filePath: z.string().describe('The path to the file to be edited'),
        content: z.string().optional().describe('The content to be written to the file'),
      })).optional().default([]).describe('The operations to perform on the files'),
    }),
    // Echo the parsed operations back as JSON; the controller consumes them.
    func: async ({ data }) => {
      return JSON.stringify(data)
    },
  })
}

View File

@@ -0,0 +1,28 @@
import type { BaseLanguageModel } from 'langchain/dist/base_language'
import { ChatModelType } from '@nicepkg/gpt-runner-shared/common'
import type { GetModelParams } from './type'
import { getAnthropicModel } from './models/anthropic.model'
import { getHuggingFaceModel } from './models/hugging-face.model'
import { getOpenaiModel } from './models/openai.model'
/**
 * Resolve a concrete LangChain language model for the given parameters.
 *
 * Tries each provider factory in order (Anthropic, HuggingFace, OpenAI);
 * a factory returns `null` when `params.model.type` is not its own.
 * When the caller did not specify a model type, OpenAI is assumed.
 *
 * @throws Error when no factory recognises the model type.
 */
export function getModel(params: GetModelParams): BaseLanguageModel {
  // Default to OpenAI when no model type was provided.
  if (!params.model.type)
    Object.assign(params.model, { type: ChatModelType.Openai })

  const factories: ((p: GetModelParams) => BaseLanguageModel | null)[] = [
    getAnthropicModel,
    getHuggingFaceModel,
    getOpenaiModel,
  ]

  for (const factory of factories) {
    const candidate = factory(params)
    if (candidate)
      return candidate
  }

  throw new Error(`No LLM provided, model type ${params.model.type}`)
}

View File

@@ -0,0 +1,2 @@
export * from './llm.chain'
export * from './struct-data.chain'

View File

@@ -5,22 +5,22 @@ import {
import { LLMChain } from 'langchain/chains'
import { ChatRole } from '@nicepkg/gpt-runner-shared/common'
import { mapStoredMessageToChatTemplateMessages } from '../helper'
import type { GetLLMChainParams } from './type'
import { getLLM } from './get-llm'
import type { GetModelParams } from './type'
import { getModel } from './get-model'
export interface LLmChainParams extends GetLLMChainParams {}
export interface LLMChainParams extends GetModelParams {}
export async function llmChain(params: LLmChainParams) {
export async function getLLMChain(params: LLMChainParams) {
const {
messages,
systemPrompt,
systemPromptAsUserPrompt,
} = params
const llm = getLLM(params)
const llm = getModel({ ...params, streaming: true })
const DEFAULT_SYSTEM_PROMPT = 'You are a friendly assistant.'
const finalMessages = [...messages]
const finalMessages = [...messages || []]
let finalSystemPrompt = systemPrompt || DEFAULT_SYSTEM_PROMPT
if (systemPromptAsUserPrompt) {

View File

@@ -2,18 +2,16 @@ import type { BaseLanguageModel } from 'langchain/dist/base_language'
import { ChatModelType } from '@nicepkg/gpt-runner-shared/common'
import { ChatAnthropic } from 'langchain/chat_models/anthropic'
import { CallbackManager } from 'langchain/callbacks'
import type { GetLLMChainParams } from '../type'
import type { GetModelParams } from '../type'
export function getAnthropicLLM(params: GetLLMChainParams): BaseLanguageModel | null {
const { model, onTokenStream, onComplete, onError } = params
export function getAnthropicModel(params: GetModelParams): BaseLanguageModel | null {
const { streaming, model, onTokenStream, onComplete, onError } = params
if (model.type === ChatModelType.Anthropic) {
const { secrets, modelName, temperature, maxTokens, topP, topK } = model
console.log('Anthropic model: ', model)
return new ChatAnthropic({
streaming: true,
streaming,
maxRetries: 1,
anthropicApiKey: secrets?.apiKey,
anthropicApiUrl: secrets?.basePath,

View File

@@ -0,0 +1,66 @@
import type { BaseLanguageModel } from 'langchain/dist/base_language'
import { ChatModelType, getErrorMsg } from '@nicepkg/gpt-runner-shared/common'
import { HuggingFaceInference } from 'langchain/llms/hf'
import type { GetModelParams } from '../type'
/**
 * Build a HuggingFace inference model when `model.type` matches,
 * otherwise return `null` so the next factory can try.
 *
 * Token streaming is not wired up for this provider (the `streaming`
 * flag and the callback-manager block below are commented out); instead
 * `_call` is wrapped so the full completion is forwarded once via
 * `onTokenStream` after the call finishes.
 */
export function getHuggingFaceModel(params: GetModelParams): BaseLanguageModel | null {
  const { model, onTokenStream, onComplete, onError } = params

  if (model.type === ChatModelType.HuggingFace) {
    const { secrets, modelName, temperature, maxTokens, topP, topK, frequencyPenalty } = model

    const huggingFaceModel = new HuggingFaceInference({
      // streaming: true,
      maxRetries: 1,
      apiKey: secrets?.apiKey,
      model: modelName,
      temperature,
      maxTokens,
      topP,
      topK,
      frequencyPenalty,
      // callbackManager: CallbackManager.fromHandlers({
      //   handleLLMNewToken: async (token: string) => {
      //     onTokenStream?.(token)
      //   },
      //   handleLLMEnd: async (output) => {
      //     onComplete?.()
      //   },
      //   handleLLMError: async (e) => {
      //     console.log('handleLLMError Error: ', e)
      //     onError?.(e)
      //   },
      //   handleChainError: async (err) => {
      //     if (err.message.includes('Could not parse LLM output: ')) {
      //       const output = err.message.split('Could not parse LLM output: ')[1]
      //       onTokenStream?.(`${output} \n\n`)
      //     }
      //     else {
      //       console.log('Chain Error: ', err)
      //       onError?.(err)
      //     }
      //   },
      // }),
    })

    // Monkey-patch `_call` to report results/errors through the stream
    // callbacks that the rest of the pipeline expects.
    const oldCall = huggingFaceModel._call
    huggingFaceModel._call = async function (...args) {
      try {
        const result = await oldCall.apply(this, args)
        // Emit the whole completion as a single "token".
        onTokenStream?.(result)
        return result
      }
      catch (err) {
        // NOTE(review): the error is reported via onError but also
        // swallowed — its message is returned as if it were model
        // output. Confirm downstream callers expect this.
        onError?.(err)
        return getErrorMsg(err)
      }
      finally {
        onComplete?.()
      }
    }

    return huggingFaceModel
  }

  return null
}

View File

@@ -2,10 +2,10 @@ import type { BaseLanguageModel } from 'langchain/dist/base_language'
import { ChatModelType } from '@nicepkg/gpt-runner-shared/common'
import { ChatOpenAI } from 'langchain/chat_models/openai'
import { CallbackManager } from 'langchain/callbacks'
import type { GetLLMChainParams } from '../type'
import type { GetModelParams } from '../type'
export function getOpenaiLLM(params: GetLLMChainParams): BaseLanguageModel | null {
const { model, onTokenStream, onComplete, onError } = params
export function getOpenaiModel(params: GetModelParams): BaseLanguageModel | null {
const { streaming, model, onTokenStream, onComplete, onError } = params
if (model.type === ChatModelType.Openai) {
const { secrets, modelName, temperature, maxTokens, topP, frequencyPenalty, presencePenalty } = model
@@ -22,7 +22,7 @@ export function getOpenaiLLM(params: GetLLMChainParams): BaseLanguageModel | nul
}
return new ChatOpenAI({
streaming: true,
streaming,
maxRetries: 1,
openAIApiKey: secrets?.apiKey,
modelName,

View File

@@ -0,0 +1,23 @@
import '../fixes'
import { initializeAgentExecutorWithOptions } from 'langchain/agents'
import type { StructuredTool } from 'langchain/tools'
import type { GetModelParams } from './type'
import { getModel } from './get-model'
import { getCurdFilesAgent } from './agents/curd-files.agent'
/** Parameters for building the structured-data agent chain. */
export interface StructChainParams extends GetModelParams {}

/**
 * Create an agent executor that extracts structured file-CRUD data
 * from a model answer using the `curd-files` tool.
 */
export async function getStructDataChain(params: StructChainParams) {
  const tools: StructuredTool[] = [getCurdFilesAgent()]
  const llm = getModel(params)

  return await initializeAgentExecutorWithOptions(tools, llm, {
    agentType: 'structured-chat-zero-shot-react-description',
  })
}

View File

@@ -0,0 +1,6 @@
import type { ChatModel, SingleChatMessage } from '@nicepkg/gpt-runner-shared/common'
import type { BaseModelParams } from '../types'
/**
 * Parameters accepted by `getModel` and the chain builders:
 * the shared base model options plus the chat model configuration.
 */
export interface GetModelParams extends BaseModelParams<SingleChatMessage> {
  // Provider, secrets and sampling settings for the chat model.
  model: ChatModel
}

View File

@@ -1 +1 @@
export * from './llm-chain'
export * from './chains'

View File

@@ -1,28 +0,0 @@
import type { BaseLanguageModel } from 'langchain/dist/base_language'
import { ChatModelType } from '@nicepkg/gpt-runner-shared/common'
import type { GetLLMChainParams } from './type'
import { getAnthropicLLM } from './models/anthropic.model'
import { getHuggingFaceLLM } from './models/hugging-face.model'
import { getOpenaiLLM } from './models/openai.model'
/**
 * Pick the first provider factory that recognises `params.model.type`
 * and return its language model. Defaults the type to OpenAI when unset.
 * (Pre-rename version; this commit replaces it with `getModel`.)
 *
 * @throws Error when no factory returns a model.
 */
export function getLLM(params: GetLLMChainParams): BaseLanguageModel {
  // Candidate factories, tried in order; each returns null on mismatch.
  const getLLMFns: ((params: GetLLMChainParams) => BaseLanguageModel | null)[] = [
    getAnthropicLLM,
    getHuggingFaceLLM,
    getOpenaiLLM,
  ]
  // Fall back to OpenAI when the caller did not specify a type.
  if (!params.model.type) {
    Object.assign(params.model, {
      type: ChatModelType.Openai,
    })
  }
  for (const getLLMFn of getLLMFns) {
    const llm = getLLMFn(params)
    if (llm)
      return llm
  }
  throw new Error(`No LLM provided, model type ${params.model.type}`)
}

View File

@@ -1 +0,0 @@
export * from './llm-chain'

View File

@@ -1,49 +0,0 @@
import type { BaseLanguageModel } from 'langchain/dist/base_language'
import { ChatModelType } from '@nicepkg/gpt-runner-shared/common'
import { CallbackManager } from 'langchain/callbacks'
import { HuggingFaceInference } from 'langchain/llms/hf'
import type { GetLLMChainParams } from '../type'
/**
 * Build a HuggingFace inference model when `model.type` matches,
 * otherwise return `null` so the next factory can try.
 * (Pre-rename version; this commit replaces it with `getHuggingFaceModel`.)
 */
export function getHuggingFaceLLM(params: GetLLMChainParams): BaseLanguageModel | null {
  const { model, onTokenStream, onComplete, onError } = params
  if (model.type === ChatModelType.HuggingFace) {
    const { secrets, modelName, temperature, maxTokens, topP, topK, frequencyPenalty } = model
    return new HuggingFaceInference({
      // streaming: true,
      maxRetries: 1,
      apiKey: secrets?.apiKey,
      model: modelName,
      temperature,
      maxTokens,
      topP,
      topK,
      frequencyPenalty,
      // Route LangChain callbacks into the streaming hooks the caller supplied.
      callbackManager: CallbackManager.fromHandlers({
        handleLLMNewToken: async (token: string) => {
          onTokenStream?.(token)
        },
        handleLLMEnd: async () => {
          onComplete?.()
        },
        handleLLMError: async (e) => {
          console.log('handleLLMError Error: ', e)
          onError?.(e)
        },
        handleChainError: async (err) => {
          // Agent parse failures embed the raw output in the message;
          // surface that output to the stream instead of failing.
          if (err.message.includes('Could not parse LLM output: ')) {
            const output = err.message.split('Could not parse LLM output: ')[1]
            onTokenStream?.(`${output} \n\n`)
          }
          else {
            console.log('Chain Error: ', err)
            onError?.(err)
          }
        },
      }),
    })
  }
  return null
}

View File

@@ -1,6 +0,0 @@
import type { ChatModel, SingleChatMessage } from '@nicepkg/gpt-runner-shared/common'
import type { BaseStreamChainParams } from '../types'
/**
 * Parameters for the LLM chain builders: shared streaming options plus
 * the chat model configuration. (Replaced by `GetModelParams` in this commit.)
 */
export interface GetLLMChainParams extends BaseStreamChainParams<SingleChatMessage> {
  // Provider, secrets and sampling settings for the chat model.
  model: ChatModel
}

View File

@@ -1,5 +1,6 @@
export interface BaseStreamChainParams<Message> {
messages: Message[]
export interface BaseModelParams<Message> {
streaming?: boolean
messages?: Message[]
systemPrompt?: string
systemPromptAsUserPrompt?: boolean
onTokenStream?: (token: string) => void

View File

@@ -47,7 +47,7 @@
## 📦 Installation
> 1. Requires VSCode >= 1.72.0
> 1. Requires VSCode >= 1.78.0
> 2. Make sure you have an Open AI Key or Anthropic Key. If you do not have one, please visit [Open AI](https://platform.openai.com/account/api-keys) or [Anthropic](https://www.anthropic.com/product/) to apply.
You can search `GPT Runner` in the VSCode Extension Marketplace for installation.

View File

@@ -46,7 +46,7 @@
## 📦 安装
> 1. 要求 VSCode >= 1.72.0
> 1. 要求 VSCode >= 1.78.0
> 2. 确保你有一个 Open AI Key 或 Anthropic Key，如果没有请访问 [Open AI](https://platform.openai.com/account/api-keys) 或 [Anthropic](https://www.anthropic.com/product/) 申请。

View File

@@ -79,31 +79,31 @@
"@monaco-editor/react": "^4.5.1",
"@nicepkg/gpt-runner-core": "workspace:*",
"@nicepkg/gpt-runner-shared": "workspace:*",
"@tanstack/react-query": "^4.29.19",
"@tanstack/react-query": "^4.29.25",
"@types/connect-history-api-fallback": "^1.5.0",
"@types/cors": "^2.8.13",
"@types/express": "^4.17.17",
"@types/global-agent": "^2.1.1",
"@types/keyboardjs": "^2.5.1",
"@types/lodash-es": "^4.17.7",
"@types/react": "^18.2.14",
"@types/react-dom": "^18.2.6",
"@types/lodash-es": "^4.17.8",
"@types/react": "^18.2.15",
"@types/react-dom": "^18.2.7",
"@types/react-syntax-highlighter": "^15.5.7",
"@types/uuid": "^9.0.2",
"@use-gesture/react": "^10.2.27",
"@vitejs/plugin-react": "^4.0.3",
"@vscode/webview-ui-toolkit": "^1.2.2",
"clsx": "^1.2.1",
"clsx": "^2.0.0",
"commander": "^10.0.1",
"connect-history-api-fallback": "^2.0.0",
"cors": "^2.8.5",
"cross-env": "^7.0.3",
"eventemitter": "^0.3.3",
"express": "^4.18.2",
"framer-motion": "^10.12.18",
"framer-motion": "^10.13.0",
"fs-extra": "^11.1.1",
"global-agent": "^3.0.0",
"i18next": "^23.2.10",
"i18next": "^23.2.11",
"i18next-browser-languagedetector": "^7.1.0",
"i18next-http-backend": "^2.2.1",
"keyboardjs": "^2.7.0",
@@ -112,22 +112,22 @@
"react": "^18.2.0",
"react-dom": "^18.2.0",
"react-error-boundary": "^4.0.10",
"react-hook-form": "^7.45.1",
"react-hook-form": "^7.45.2",
"react-hot-toast": "^2.4.1",
"react-i18next": "^13.0.2",
"react-markdown": "^8.0.7",
"react-router-dom": "^6.14.1",
"react-router-dom": "^6.14.2",
"react-syntax-highlighter": "^15.5.0",
"react-tiny-popover": "^7.2.4",
"react-use": "^17.4.0",
"remark-gfm": "^3.0.1",
"styled-components": "^6.0.3",
"styled-components": "^6.0.4",
"undici": "^5.22.1",
"unist-util-visit": "^5.0.0",
"uuid": "^9.0.0",
"vite": "^4.4.3",
"vite": "^4.4.4",
"vite-plugin-monaco-editor": "^1.1.0",
"vite-plugin-svgr": "^3.2.0",
"zustand": "^4.3.9"
}
}
}

View File

@@ -2,7 +2,7 @@ import type { Request, Response } from 'express'
import type { ChatModelType, ChatStreamReqParams, FailResponse, SingleFileConfig, SuccessResponse } from '@nicepkg/gpt-runner-shared/common'
import { ChatStreamReqParamsSchema, Debug, STREAM_DONE_FLAG, buildFailResponse, buildSuccessResponse, toUnixPath } from '@nicepkg/gpt-runner-shared/common'
import { PathUtils, verifyParamsByZod } from '@nicepkg/gpt-runner-shared/node'
import { createFileContext, getSecrets, llmChain, loadUserConfig, parseGptFile } from '@nicepkg/gpt-runner-core'
import { createFileContext, getLLMChain, getSecrets, loadUserConfig, parseGptFile } from '@nicepkg/gpt-runner-core'
import { getValidFinalPath } from '../services/valid-path'
import type { ControllerConfig } from '../types'
@@ -108,7 +108,7 @@ export const llmControllers: ControllerConfig = {
finalSystemPrompt += appendSystemPrompt
const chain = await llmChain({
const llmChain = await getLLMChain({
messages,
systemPrompt: finalSystemPrompt,
systemPromptAsUserPrompt,
@@ -124,9 +124,22 @@ export const llmControllers: ControllerConfig = {
},
})
await chain.call({
await llmChain.call({
'global.input': prompt,
})
// const structDataChain = await getStructDataChain({
// model: {
// ...model!,
// secrets: finalSecrets,
// },
// })
// const structDataChainAnswer = await structDataChain.call({
// input: answer,
// })
// console.log('structDataChainAnswer', structDataChainAnswer)
}
catch (error: any) {
console.log('chatgptChain error', error)

1514
pnpm-lock.yaml generated

File diff suppressed because it is too large Load Diff