add a chatbot

pull/561/head
initialencounter 2 years ago
parent f06df225dd
commit e570c166fe

glm-bot/.gitignore vendored

@@ -0,0 +1,14 @@
node_modules
npm-debug.log
yarn-debug.log
yarn-error.log
tsconfig.tsbuildinfo
.eslintcache
.DS_Store
.idea
.vscode
*.suo
*.ntvs*
*.njsproj
*.sln

@@ -0,0 +1,76 @@
# glm-bot
A QQ chatbot based on the koishi framework
## Requirements
* Node.js 14 or later
* go-cqhttp
## Usage
* 1. Start the API server (a smoke-test sketch for the backend follows this list)
```
python fastapi.py
```
If you started flask.py instead, you need to change the following in index.ts:
```
// Enable glm-bot
ctx.plugin(glm_bot,{
type: 'fastapi',
myServerUrl: 'http://wx.blockelite.cn:10269/chatglm',
publicUrl: 'http://127.0.0.1:10269/chat',
send_glmmtg_response: true,
prefix: '',
defaultText: '',
output: 'quote'
})
```
to:
```
// Enable glm-bot
ctx.plugin(glm_bot,{
type: 'flaskapi',
myServerUrl: 'http://wx.blockelite.cn:10269/chatglm',
publicUrl: 'http://127.0.0.1:10269/chat',
send_glmmtg_response: true,
prefix: '',
defaultText: '',
output: 'quote'
})
```
* 2. Start [go-cqhttp](https://github.com/Mrs4s/go-cqhttp) and enable its forward WebSocket service
* 2-1. Configure OneBot: change the following in index.ts
```
endpoint: 'ws://127.0.0.1:32333'
```
to the forward WebSocket address of your go-cqhttp service
* 3. Install the [koishi](https://koishi.chat) dependencies
```
cd glm-bot && npm i
```
* 4. Start the bot
```
node -r esbuild-register .
```
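
Before starting the bot, it is worth checking that the backend answers on its own. Below is a minimal smoke-test sketch in TypeScript (Node 18+ for the built-in `fetch`), assuming the fastapi.py server from step 1 is listening on port 10269; the file name `smoke-test.ts` is made up for illustration:
```
// smoke-test.ts (hypothetical file): send one message to the /chat route
// of fastapi.py and print the reply.
async function main() {
  const res = await fetch('http://127.0.0.1:10269/chat', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ msg: 'hello', uid: 'smoke-test' }),
  })
  console.log(await res.text())
}

main()
```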
## Acknowledgements
* [koishi](https://koishi.chat)
* [go-cqhttp](https://github.com/Mrs4s/go-cqhttp)
* [glm-bot](https://github.com/wochenlong/glm-bot)
* [t4wefan](https://github.com/t4wefan/ChatGLM-6B-with-flask-api)

@@ -0,0 +1,82 @@
from fastapi import FastAPI
from pydantic import BaseModel
import uvicorn
import json
from transformers import AutoModel, AutoTokenizer
from typing import List, Optional, Tuple
max_length = 4096
# Look up the stored conversation history for a session id.
def get_history(id: str) -> Optional[List[Tuple[str, str]]]:
    if id in sessions:
        # Reset the session once its serialized form grows too large.
        length = len(json.dumps(sessions[id], indent=2))
        if length > max_length:
            sessions[id] = []
            return None
        if sessions[id] == []:
            return None
        return sessions[id]
    else:
        sessions[id] = []
        return None
# Clear the stored context for a session id.
def clear(id: str) -> str:
    sessions[id] = []
    return 'Session reset'
tokenizer = AutoTokenizer.from_pretrained(
    "THUDM/chatglm-6b", trust_remote_code=True)
model = AutoModel.from_pretrained(
    "THUDM/chatglm-6b", trust_remote_code=True).half().cuda()
model = model.eval()
MAX_TURNS = 20
MAX_BOXES = MAX_TURNS * 2
sessions = {}
def predict(prompt: str, uid: str, max_length: int = 2048, top_p: float = 0.7, temperature: float = 0.95) -> str:
    history = get_history(uid)
    print(history)
    response, history = model.chat(tokenizer, prompt, history=history, max_length=max_length, top_p=top_p,
                                   temperature=temperature)
    sessions[uid].append((prompt, response))
    print(get_history(uid))
    return response
# while 1:
# uid = input("uid:")
# prompt = input('msg:')
# msg = predict(prompt=prompt,uid = uid)
# print(msg)
app = FastAPI()

class Item_chat(BaseModel):
    msg: str
    uid: str

@app.post("/chat")
def chat(item: Item_chat):
    msg = predict(prompt=item.msg, uid=item.uid)
    print(msg)
    return msg

class Item_clear(BaseModel):
    uid: str

@app.post("/clear")
def clear_session(item: Item_clear):
    return clear(item.uid)

uvicorn.run(app, host="0.0.0.0", port=10269)
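
The two POST routes are the whole client contract: `/chat` takes a JSON body `{msg, uid}` and returns the model reply, while `/clear` takes `{uid}` and resets that session. A minimal TypeScript sketch of the reset call, assuming the server above is reachable on port 10269 (`resetSession` is a hypothetical helper name):
```
// Hypothetical helper: ask the FastAPI backend to forget a session's history.
async function resetSession(baseUrl: string, uid: string): Promise<string> {
  const res = await fetch(`${baseUrl}/clear`, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ uid }),
  })
  return res.text() // the server replies with a confirmation string
}

resetSession('http://127.0.0.1:10269', 'smoke-test').then(console.log)
```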

@@ -0,0 +1,48 @@
from transformers import AutoTokenizer, AutoModel
from flask import Flask, request

tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True)
model = AutoModel.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True).half().cuda()
model = model.eval()

preset = []  # initial (empty) history handed to new sessions
port = 7860
app = Flask(__name__)
history = {}
# Chat endpoint: the koishi plugin calls this route with msg/usrid/source query parameters.
@app.route('/chatglm', methods=["GET"])
def delete_msg():
    global history
    query = request.args.get('msg')
    usrid = request.args.get('usrid')
    source = request.args.get('source')
    if query is None:
        return 'Please provide a message'
    if query == 'ping':
        return 'pong! The server is running normally!'
    if source is None:
        return 'Request without a source; please update the plugin'
    if usrid is None:
        return 'Please provide a user id'
    if usrid not in history:
        history[usrid] = list(preset)  # copy, so sessions never share one list
    print(f"usrid: {usrid}, content: {query}")
    if query == "clear":
        history[usrid] = list(preset)
        print(f"usrid: {usrid}, history cleared")
        return 'Current conversation has been reset'
    response, history[usrid] = model.chat(tokenizer, query, history=history[usrid])
    print(f"ChatGLM-6B: {response}")
    return response
if __name__ == '__main__':
    print(f"Welcome to the ChatGLM-6B API. Send a GET request to http://127.0.0.1:{port}/chatglm to call it.")
    app.run(host='0.0.0.0', port=port)
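
This backend speaks a different protocol from the FastAPI one: a single GET route whose `msg`, `usrid`, and `source` query parameters drive everything, where `source` only has to be non-empty and `msg=clear` resets a session. A minimal TypeScript health-check sketch, assuming a local instance on the port 7860 configured above:
```
// Ping the Flask backend; per the route above it answers 'pong! ...' when healthy.
async function pingFlaskBackend(): Promise<string> {
  const params = new URLSearchParams({ msg: 'ping', usrid: 'smoke-test', source: 'glm-bot' })
  const res = await fetch(`http://127.0.0.1:7860/chatglm?${params}`)
  return res.text()
}

pingFlaskBackend().then(console.log)
```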

@@ -0,0 +1,33 @@
{
  "name": "koishi-plugin-glm-bot",
  "description": "Stable version of glm-testbot, with support for self-hosted backends",
  "version": "1.0.3",
  "main": "lib/index.js",
  "typings": "lib/index.d.ts",
  "files": [
    "lib",
    "dist"
  ],
  "license": "MIT",
  "scripts": {},
  "keywords": [
    "chatbot",
    "koishi",
    "plugin"
  ],
  "peerDependencies": {
    "koishi": "4.12.0"
  },
  "koishi": {
    "description": "GLM plugin",
    "browser": true,
    "service": {
      "optional": [
        "puppeteer"
      ],
      "implements": [
        "glm"
      ]
    }
  }
}

@@ -0,0 +1,5 @@
# koishi-plugin-glm-bot
[![npm](https://img.shields.io/npm/v/koishi-plugin-glm-bot?style=flat-square)](https://www.npmjs.com/package/koishi-plugin-glm-bot)
The stable version of glm-bot

@@ -0,0 +1,38 @@
import { Context } from 'koishi'
import console from '@koishijs/plugin-console'
import * as sandbox from '@koishijs/plugin-sandbox'
import * as echo from '@koishijs/plugin-echo'
import onebot from '@koishijs/plugin-adapter-onebot'
import glm_bot from './glm-bot'

// Create a Koishi app
const ctx = new Context({
  port: 5140,
})

// Bot that connects through the OneBot adapter
ctx.plugin(onebot, {
  protocol: 'ws',
  selfId: '3111720341',
  endpoint: 'ws://127.0.0.1:32333',
})

// Enable the bundled plugins
ctx.plugin(console) // web console
ctx.plugin(sandbox) // debugging sandbox
ctx.plugin(echo) // echo command

// Enable glm-bot
ctx.plugin(glm_bot, {
  type: 'fastapi',
  myServerUrl: 'http://wx.blockelite.cn:10269/chatglm',
  publicUrl: 'http://127.0.0.1:10269/chat',
  send_glmmtg_response: true,
  prefix: '',
  defaultText: '',
  output: 'quote'
})

// Start the app
ctx.start()
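
Rather than editing index.ts by hand when switching between the FastAPI and Flask backends (step 1 of the README), the `type` option can be picked at startup. A minimal sketch reusing the same `glm_bot` options shown above; the `GLM_BACKEND` environment variable name is made up for illustration:
```
// Run with: GLM_BACKEND=flaskapi node -r esbuild-register .
const backend = process.env.GLM_BACKEND === 'flaskapi' ? 'flaskapi' : 'fastapi'
ctx.plugin(glm_bot, {
  type: backend,
  myServerUrl: 'http://wx.blockelite.cn:10269/chatglm',
  publicUrl: 'http://127.0.0.1:10269/chat',
  send_glmmtg_response: true,
  prefix: '',
  defaultText: '',
  output: 'quote',
})
```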

File diff suppressed because it is too large

@@ -0,0 +1,20 @@
{
  "name": "glm-bot",
  "version": "1.0.0",
  "description": "glm koishi bot",
  "main": "index.js",
  "scripts": {
    "test": "echo \"Error: no test specified\" && exit 1"
  },
  "author": "",
  "license": "MIT",
  "dependencies": {
    "@koishijs/plugin-adapter-onebot": "^5.6.6",
    "@koishijs/plugin-console": "^5.6.1",
    "@koishijs/plugin-echo": "^2.2.3",
    "@koishijs/plugin-market": "^1.12.5",
    "@koishijs/plugin-sandbox": "^3.0.0",
    "koishi": "^4.12.4",
    "koishi-plugin-glm-bot": "^1.1.2"
  }
}