mirror of https://github.com/THUDM/ChatGLM-6B
parent f06df225dd
commit e570c166fe
@ -0,0 +1,14 @@
node_modules
npm-debug.log
yarn-debug.log
yarn-error.log
tsconfig.tsbuildinfo

.eslintcache
.DS_Store
.idea
.vscode
*.suo
*.ntvs*
*.njsproj
*.sln
@ -0,0 +1,76 @@
# glm-bot

A QQ chat bot built on the koishi framework.

## Requirements

* Node.js 14 or later
* go-cqhttp

## Usage

* 1. Start the API server

```
python fastapi.py
```
If you start flask.py instead, you need to change this block in index.ts:

```
// enable glm-bot
ctx.plugin(glm_bot, {
  type: 'fastapi',
  myServerUrl: 'http://wx.blockelite.cn:10269/chatglm',
  publicUrl: 'http://127.0.0.1:10269/chat',
  send_glmmtg_response: true,
  prefix: '',
  defaultText: '',
  output: 'quote'
})
```

to

```
// enable glm-bot
ctx.plugin(glm_bot, {
  type: 'flaskapi',
  myServerUrl: 'http://wx.blockelite.cn:10269/chatglm',
  publicUrl: 'http://127.0.0.1:10269/chat',
  send_glmmtg_response: true,
  prefix: '',
  defaultText: '',
  output: 'quote'
})
```
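
Before wiring the bot up, you can check that the API server answers. A minimal smoke-test sketch (not part of the repo), assuming Node.js 18+ for the built-in fetch and fastapi.py listening on its default port 10269; the uid value is arbitrary and only identifies the conversation:

```
// POST one message to the /chat route exposed by fastapi.py and print the reply.
async function checkApi(): Promise<void> {
  const res = await fetch('http://127.0.0.1:10269/chat', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ msg: 'Hello', uid: 'smoke-test' }),
  })
  console.log(await res.json()) // the reply is returned as a JSON-encoded string
}

checkApi()
```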

* 2. Start [go-cqhttp](https://github.com/Mrs4s/go-cqhttp) and enable its forward WebSocket service

* 2-1. Configure onebot

Change the following in index.ts

```
endpoint: 'ws://127.0.0.1:32333'
```

to the address of go-cqhttp's forward WebSocket service.
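
For reference, that endpoint line sits inside the onebot adapter block of index.ts; the values below are the ones shipped in this repo, so replace selfId with your bot's QQ account and endpoint with your own go-cqhttp forward ws address:

```
// OneBot adapter configuration as it appears in index.ts
ctx.plugin(onebot, {
  protocol: 'ws',                    // forward WebSocket
  selfId: '3111720341',              // QQ account of the bot
  endpoint: 'ws://127.0.0.1:32333',  // go-cqhttp forward ws address
})
```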

* 3. Install the [koishi](https://koishi.chat) dependencies

```
cd glm-bot && npm i
```

* 4. Start the bot (esbuild-register lets Node run the TypeScript entry point directly)

```
node -r esbuild-register .
```

## Thanks

* [koishi](https://koishi.chat)

* [go-cqhttp](https://github.com/Mrs4s/go-cqhttp)

* [glm-bot](https://github.com/wochenlong/glm-bot)

* [t4wefan](https://github.com/t4wefan/ChatGLM-6B-with-flask-api)
@ -0,0 +1,82 @@
from fastapi import FastAPI
from pydantic import BaseModel
import uvicorn
import json
from transformers import AutoModel, AutoTokenizer
from typing import List, Optional, Tuple


max_length = 4096

# Fetch the stored conversation history for a session id.
# If the serialized history grows past max_length characters, the session is reset.
def get_history(id: str) -> Optional[List[Tuple[str, str]]]:
    if id in sessions.keys():
        length = len(json.dumps(sessions[id], indent=2))
        if length > max_length:
            sessions[id] = []
            return None
        if sessions[id] == []:
            return None
        return sessions[id]
    else:
        sessions[id] = []
        return None


# Clear the conversation history for a session id.
def clear(id: str) -> str:
    sessions[id] = []
    return 'Context has been reset'


tokenizer = AutoTokenizer.from_pretrained(
    "THUDM/chatglm-6b", trust_remote_code=True)
model = AutoModel.from_pretrained(
    "THUDM/chatglm-6b", trust_remote_code=True).half().cuda()
model = model.eval()

MAX_TURNS = 20
MAX_BOXES = MAX_TURNS * 2

# uid -> list of (prompt, response) pairs
sessions = {}


def predict(prompt: str, uid: str, max_length: int = 2048, top_p: float = 0.7, temperature: float = 0.95) -> str:
    history = get_history(uid)
    print(history)
    response, history = model.chat(tokenizer, prompt, history=history, max_length=max_length, top_p=top_p,
                                   temperature=temperature)
    sessions[uid].append((prompt, response))
    print(get_history(uid))
    return response


# while 1:
#     uid = input("uid:")
#     prompt = input('msg:')
#     msg = predict(prompt=prompt, uid=uid)
#     print(msg)


app = FastAPI()


class Item_chat(BaseModel):
    msg: str
    uid: str


@app.post("/chat")
def chat(item: Item_chat):
    msg = predict(prompt=item.msg, uid=item.uid)
    print(msg)
    return msg


class Item_clear(BaseModel):
    uid: str


@app.post("/clear")
def clear_session(item: Item_clear):
    return clear(item.uid)


uvicorn.run(app, host="0.0.0.0", port=10269)
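
To show how the two routes above fit together, here is a small client sketch (not part of the repo): two turns in the same session followed by a reset. It assumes Node.js 18+ for the built-in fetch and the server above running locally on port 10269; the uid value is arbitrary and only keys the server-side sessions dict.

```
// Hypothetical client for the /chat and /clear routes defined above.
const base = 'http://127.0.0.1:10269'

async function post(path: string, payload: object): Promise<string> {
  const res = await fetch(base + path, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify(payload),
  })
  return res.json() // both routes return a plain string, JSON-encoded
}

async function main(): Promise<void> {
  const uid = 'demo'
  console.log(await post('/chat', { uid, msg: 'Hello' }))
  console.log(await post('/chat', { uid, msg: 'What did I just ask you?' })) // answered with stored history
  console.log(await post('/clear', { uid }))                                 // resets the history for this uid
}

main()
```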
@ -0,0 +1,5 @@
# koishi-plugin-glm-bot

[![npm](https://img.shields.io/npm/v/koishi-plugin-glm-bot?style=flat-square)](https://www.npmjs.com/package/koishi-plugin-glm-bot)

The stable release of glm-bot.
@ -0,0 +1,38 @@
import { Context } from 'koishi'
import console from '@koishijs/plugin-console'
import * as sandbox from '@koishijs/plugin-sandbox'
import * as echo from '@koishijs/plugin-echo'

import onebot from '@koishijs/plugin-adapter-onebot'

import glm_bot from './glm-bot'

// create a Koishi app
const ctx = new Context({
  port: 5140,
})

// bot using the OneBot adapter
ctx.plugin(onebot, {
  protocol: 'ws',
  selfId: '3111720341',
  endpoint: 'ws://127.0.0.1:32333', // forward ws address of go-cqhttp
})

// enable the plugins above
ctx.plugin(console) // web console
ctx.plugin(sandbox) // debugging sandbox
ctx.plugin(echo)    // echo command

// enable glm-bot
ctx.plugin(glm_bot, {
  type: 'fastapi',
  myServerUrl: 'http://wx.blockelite.cn:10269/chatglm',
  publicUrl: 'http://127.0.0.1:10269/chat', // /chat route served by fastapi.py
  send_glmmtg_response: true,
  prefix: '',
  defaultText: '',
  output: 'quote'
})

// start the app
ctx.start()
File diff suppressed because it is too large
@ -0,0 +1,20 @@
{
  "name": "glm-bot",
  "version": "1.0.0",
  "description": "glm koishi bot",
  "main": "index.js",
  "scripts": {
    "test": "echo \"Error: no test specified\" && exit 1"
  },
  "author": "",
  "license": "MIT",
  "dependencies": {
    "@koishijs/plugin-adapter-onebot": "^5.6.6",
    "@koishijs/plugin-console": "^5.6.1",
    "@koishijs/plugin-echo": "^2.2.3",
    "@koishijs/plugin-market": "^1.12.5",
    "@koishijs/plugin-sandbox": "^3.0.0",
    "koishi": "^4.12.4",
    "koishi-plugin-glm-bot": "^1.1.2"
  }
}