ChatGLM-6B/api.py

import datetime
import json
import uvicorn
from fastapi import FastAPI, Request
from utils import load_mode_and_tokenizer

app = FastAPI()

@app.post("/")
async def create_item(request: Request):
    global model, tokenizer
    # Parse the JSON request body and read the prompt and optional chat history.
    json_post = await request.json()
    prompt = json_post.get('prompt')
    history = json_post.get('history')
    # Run one round of chat; model.chat returns the reply and the updated history.
    response, history = model.chat(tokenizer, prompt, history=history)
    now = datetime.datetime.now()
    time = now.strftime("%Y-%m-%d %H:%M:%S")
    answer = {
        "response": response,
        "history": history,
        "status": 200,
        "time": time
    }
    # Log the prompt and response to stdout for simple request tracing.
    log = "[" + time + "] " + 'prompt: "' + prompt + '", response: "' + repr(response) + '"'
    print(log)
    return answer

if __name__ == '__main__':
    uvicorn.run('api:app', host='0.0.0.0', port=8000, workers=1)

# uvicorn is given the 'api:app' import string, so the serving process
# re-imports this module; the model and tokenizer are therefore loaded at
# module level (outside the __main__ guard) so the request handler can see them.
model, tokenizer = load_mode_and_tokenizer("THUDM/chatglm-6b", num_gpus=1)
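
For reference, a minimal client call against this endpoint might look like the following sketch. It assumes the requests package is installed and the server is reachable on localhost port 8000; the prompt text is purely illustrative, while the prompt/history keys and the response/history fields match the handler above.

import requests

# Hypothetical client call; adjust host/port to match the uvicorn settings above.
payload = {"prompt": "Hello, ChatGLM", "history": []}
resp = requests.post("http://127.0.0.1:8000/", json=payload)
data = resp.json()
print(data["response"])   # generated reply
print(data["history"])    # updated chat history to send back on the next turn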