From acbc2e178a1aae2fcd44030e93c0de5733f5a1c8 Mon Sep 17 00:00:00 2001 From: ZhangErling <45256786+ZhangErling@users.noreply.github.com> Date: Fri, 24 Mar 2023 15:33:42 +0800 Subject: [PATCH 001/110] =?UTF-8?q?=E4=BF=AE=E5=A4=8Dapi=E6=9C=8D=E5=8A=A1?= =?UTF-8?q?=E5=90=AF=E5=8A=A8=E9=94=99=E8=AF=AF?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 第31行【API】->【api】会因为大小写原因找不到API 第34行的下划线【chatglm_6b】->【chatglm-6b】会导致模型加载错误 --- api.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/api.py b/api.py index 14a2d57..10f70b6 100644 --- a/api.py +++ b/api.py @@ -28,8 +28,8 @@ async def create_item(request: Request): if __name__ == '__main__': - uvicorn.run('API:app', host='0.0.0.0', port=8000, workers=1) + uvicorn.run('api:app', host='0.0.0.0', port=8000, workers=1) tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) -model = AutoModel.from_pretrained("THUDM/chatglm_6b", trust_remote_code=True).half().cuda() +model = AutoModel.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True).half().cuda() model.eval() From bf39dac0670d4cbb734aaa4664001f1863cecf14 Mon Sep 17 00:00:00 2001 From: holk-h Date: Fri, 24 Mar 2023 18:34:09 +0800 Subject: [PATCH 002/110] Support stream out interruption by using Ctrl+C --- cli_demo.py | 24 +++++++++++++++++------- 1 file changed, 17 insertions(+), 7 deletions(-) diff --git a/cli_demo.py b/cli_demo.py index 8a043fb..fea47fc 100644 --- a/cli_demo.py +++ b/cli_demo.py @@ -1,14 +1,15 @@ import os import platform +import signal from transformers import AutoTokenizer, AutoModel -tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) -model = AutoModel.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True).half().cuda() +tokenizer = AutoTokenizer.from_pretrained("./model", trust_remote_code=True) +model = AutoModel.from_pretrained("./model", trust_remote_code=True).half().cuda() model = model.eval() os_name = platform.system() clear_command = 'cls' if os_name == 'Windows' else 'clear' - +stop_stream = False def build_prompt(history): prompt = "欢迎使用 ChatGLM-6B 模型,输入内容即可进行对话,clear 清空对话历史,stop 终止程序" @@ -17,9 +18,13 @@ def build_prompt(history): prompt += f"\n\nChatGLM-6B:{response}" return prompt +def signal_handler(signal, frame): + global stop_stream + stop_stream = True def main(): history = [] + global stop_stream print("欢迎使用 ChatGLM-6B 模型,输入内容即可进行对话,clear 清空对话历史,stop 终止程序") while True: query = input("\n用户:") @@ -32,10 +37,15 @@ def main(): continue count = 0 for response, history in model.stream_chat(tokenizer, query, history=history): - count += 1 - if count % 8 == 0: - os.system(clear_command) - print(build_prompt(history), flush=True) + if stop_stream: + stop_stream = False + break + else: + count += 1 + if count % 8 == 0: + os.system(clear_command) + print(build_prompt(history), flush=True) + signal.signal(signal.SIGINT,signal_handler) os.system(clear_command) print(build_prompt(history), flush=True) From 023c46a317537c9f2a6bdfc4916c77b24a00d868 Mon Sep 17 00:00:00 2001 From: littlepanda0716 Date: Sat, 25 Mar 2023 18:59:11 +0800 Subject: [PATCH 003/110] update api.py --- api.py | 28 +++++++++++++++++++++++++--- 1 file changed, 25 insertions(+), 3 deletions(-) diff --git a/api.py b/api.py index 10f70b6..4ad1db6 100644 --- a/api.py +++ b/api.py @@ -1,6 +1,19 @@ from fastapi import FastAPI, Request from transformers import AutoTokenizer, AutoModel import uvicorn, json, datetime +import torch + +DEVICE = "cuda" +DEVICE_ID = "0" 
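+# CUDA_DEVICE below resolves to "cuda:0" when DEVICE_ID is set, and to plain "cuda" otherwise;
+# torch_gc() empties the CUDA cache on that device after each request, so the long-running
+# API process does not keep hold of memory that has already been freed.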
+CUDA_DEVICE = f"{DEVICE}:{DEVICE_ID}" if DEVICE_ID else DEVICE + + +def torch_gc(): + if torch.cuda.is_available(): + with torch.cuda.device(CUDA_DEVICE): + torch.cuda.empty_cache() + torch.cuda.ipc_collect() + app = FastAPI() @@ -13,7 +26,15 @@ async def create_item(request: Request): json_post_list = json.loads(json_post) prompt = json_post_list.get('prompt') history = json_post_list.get('history') - response, history = model.chat(tokenizer, prompt, history=history) + max_length = json_post_list.get('max_length') + top_p = json_post_list.get('top_p') + temperature = json_post_list.get('temperature') + response, history = model.chat(tokenizer, + prompt, + history=history, + max_length=max_length if max_length else 2048, + top_p=top_p if top_p else 0.7, + temperature=temperature if temperature else 0.95) now = datetime.datetime.now() time = now.strftime("%Y-%m-%d %H:%M:%S") answer = { @@ -24,12 +45,13 @@ async def create_item(request: Request): } log = "[" + time + "] " + '", prompt:"' + prompt + '", response:"' + repr(response) + '"' print(log) + torch_gc() return answer if __name__ == '__main__': - uvicorn.run('api:app', host='0.0.0.0', port=8000, workers=1) + uvicorn.run(app, host='0.0.0.0', port=8000, workers=1) tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) -model = AutoModel.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True).half().cuda() +model = AutoModel.from_pretrained("THUDM/chatglm_6b", trust_remote_code=True).half().cuda() model.eval() From 1c6002f3f18496217c7c0d147ef34c291c6da65d Mon Sep 17 00:00:00 2001 From: duzx16 Date: Tue, 28 Mar 2023 19:29:41 +0800 Subject: [PATCH 004/110] Fix typo --- cli_demo.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cli_demo.py b/cli_demo.py index 8a043fb..0c6ed13 100644 --- a/cli_demo.py +++ b/cli_demo.py @@ -2,8 +2,8 @@ import os import platform from transformers import AutoTokenizer, AutoModel -tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) -model = AutoModel.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True).half().cuda() +tokenizer = AutoTokenizer.from_pretrained("/Users/zhengxiaodu/Downloads/chatglm-6b", trust_remote_code=True) +model = AutoModel.from_pretrained("/Users/zhengxiaodu/Downloads/chatglm-6b", trust_remote_code=True).half().to("mps") model = model.eval() os_name = platform.system() From c6790a09f05ba6a23b073021bfa6c3df177442fc Mon Sep 17 00:00:00 2001 From: duzx16 Date: Tue, 28 Mar 2023 19:45:17 +0800 Subject: [PATCH 005/110] Fix typos Move model instantiation --- api.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/api.py b/api.py index 4ad1db6..693c70a 100644 --- a/api.py +++ b/api.py @@ -50,8 +50,7 @@ async def create_item(request: Request): if __name__ == '__main__': + tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) + model = AutoModel.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True).half().cuda() + model.eval() uvicorn.run(app, host='0.0.0.0', port=8000, workers=1) - -tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) -model = AutoModel.from_pretrained("THUDM/chatglm_6b", trust_remote_code=True).half().cuda() -model.eval() From 6fc8141a9c9c57fe2e4897ff93491a9a61b0fb16 Mon Sep 17 00:00:00 2001 From: duzx16 Date: Tue, 28 Mar 2023 19:48:21 +0800 Subject: [PATCH 006/110] Revert "Fix typo" This reverts commit 1c6002f3f18496217c7c0d147ef34c291c6da65d. 
--- cli_demo.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cli_demo.py b/cli_demo.py index 0c6ed13..8a043fb 100644 --- a/cli_demo.py +++ b/cli_demo.py @@ -2,8 +2,8 @@ import os import platform from transformers import AutoTokenizer, AutoModel -tokenizer = AutoTokenizer.from_pretrained("/Users/zhengxiaodu/Downloads/chatglm-6b", trust_remote_code=True) -model = AutoModel.from_pretrained("/Users/zhengxiaodu/Downloads/chatglm-6b", trust_remote_code=True).half().to("mps") +tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) +model = AutoModel.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True).half().cuda() model = model.eval() os_name = platform.system() From 343e7bc7b6126718f10dc57ef3c911958c4b273b Mon Sep 17 00:00:00 2001 From: duzx16 Date: Tue, 28 Mar 2023 19:52:32 +0800 Subject: [PATCH 007/110] Fix model path --- cli_demo.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/cli_demo.py b/cli_demo.py index fea47fc..1c3ff2b 100644 --- a/cli_demo.py +++ b/cli_demo.py @@ -3,14 +3,15 @@ import platform import signal from transformers import AutoTokenizer, AutoModel -tokenizer = AutoTokenizer.from_pretrained("./model", trust_remote_code=True) -model = AutoModel.from_pretrained("./model", trust_remote_code=True).half().cuda() +tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) +model = AutoModel.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True).half().cuda() model = model.eval() os_name = platform.system() clear_command = 'cls' if os_name == 'Windows' else 'clear' stop_stream = False + def build_prompt(history): prompt = "欢迎使用 ChatGLM-6B 模型,输入内容即可进行对话,clear 清空对话历史,stop 终止程序" for query, response in history: @@ -18,10 +19,12 @@ def build_prompt(history): prompt += f"\n\nChatGLM-6B:{response}" return prompt + def signal_handler(signal, frame): global stop_stream stop_stream = True + def main(): history = [] global stop_stream @@ -45,7 +48,7 @@ def main(): if count % 8 == 0: os.system(clear_command) print(build_prompt(history), flush=True) - signal.signal(signal.SIGINT,signal_handler) + signal.signal(signal.SIGINT, signal_handler) os.system(clear_command) print(build_prompt(history), flush=True) From 7d7d87c4bd26d84485e09d284437fc0682deb45f Mon Sep 17 00:00:00 2001 From: duzx16 Date: Tue, 28 Mar 2023 21:18:59 +0800 Subject: [PATCH 008/110] Update README --- README.md | 2 +- README_en.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 075ce78..2f59ad6 100644 --- a/README.md +++ b/README.md @@ -16,7 +16,7 @@ ChatGLM-6B 使用了和 ChatGPT 相似的技术,针对中文问答和对话进 ## 友情链接 以下是部分基于本仓库开发的开源项目: -* [ChatGLM-MNN](https://github.com/wangzhaode/ChatGLM-MNN): 一个基于 MNN 的 ChatGLM-6B C++ 推理实现,支持根据显存大小动态分配计算任务给 GPU 和 CPU +* [ChatGLM-MNN](https://github.com/wangzhaode/ChatGLM-MNN): 一个基于 MNN 的 ChatGLM-6B C++ 推理实现,支持根据显存大小自动分配计算任务给 GPU 和 CPU * [ChatGLM-Tuning](https://github.com/mymusise/ChatGLM-Tuning): 基于 LoRA 对 ChatGLM-6B 进行微调 如果你有其他好的项目的话,欢迎参照上述格式添加到README中并提出 [PR](https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/proposing-changes-to-your-work-with-pull-requests/creating-a-pull-request-from-a-fork). 
diff --git a/README_en.md b/README_en.md index 584e97f..7b84f9c 100644 --- a/README_en.md +++ b/README_en.md @@ -15,7 +15,7 @@ Try the [online demo](https://huggingface.co/spaces/ysharma/ChatGLM-6b_Gradio_St ## Projects The following are some open source projects developed based on this repository: -* [ChatGLM-MNN](https://github.com/wangzhaode/ChatGLM-MNN): An [MNN](https://github.com/alibaba/MNN)-based implementation of ChatGLM-6B C++ inference, which supports dynamic allocation of computing tasks to GPU and CPU according to the size of GPU memory +* [ChatGLM-MNN](https://github.com/wangzhaode/ChatGLM-MNN): An [MNN](https://github.com/alibaba/MNN)-based implementation of ChatGLM-6B C++ inference, which supports automatic allocation of computing tasks to GPU and CPU according to the size of GPU memory * [ChatGLM-Tuning](https://github.com/mymusise/ChatGLM-Tuning): Fine-tuning ChatGLM-6B based on LoRA If you have other good projects, please refer to the above format to add to README and propose [PR](https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/proposing-changes-to-your-work-with-pull-requests/creating-a-pull-request-from-a-fork). From fc55c075fe8a2c403917d2c7b04d3f0ef3b0e0f2 Mon Sep 17 00:00:00 2001 From: duzx16 Date: Tue, 28 Mar 2023 21:35:52 +0800 Subject: [PATCH 009/110] Update README --- README.md | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 2f59ad6..814277d 100644 --- a/README.md +++ b/README.md @@ -19,7 +19,10 @@ ChatGLM-6B 使用了和 ChatGPT 相似的技术,针对中文问答和对话进 * [ChatGLM-MNN](https://github.com/wangzhaode/ChatGLM-MNN): 一个基于 MNN 的 ChatGLM-6B C++ 推理实现,支持根据显存大小自动分配计算任务给 GPU 和 CPU * [ChatGLM-Tuning](https://github.com/mymusise/ChatGLM-Tuning): 基于 LoRA 对 ChatGLM-6B 进行微调 -如果你有其他好的项目的话,欢迎参照上述格式添加到README中并提出 [PR](https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/proposing-changes-to-your-work-with-pull-requests/creating-a-pull-request-from-a-fork). +以下是部分针对本项目的教程/文档: +* [Windows部署文档](https://github.com/ZhangErling/ChatGLM-6B/blob/main/deployment_windows.md) + +如果你有其他好的项目/教程的话,欢迎参照上述格式添加到README中并提出 [PR](https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/proposing-changes-to-your-work-with-pull-requests/creating-a-pull-request-from-a-fork). ## 使用方式 From 323ce7c86530ea3be87f70eabcee27ee38ec29e9 Mon Sep 17 00:00:00 2001 From: duzx16 Date: Wed, 29 Mar 2023 23:24:33 +0800 Subject: [PATCH 010/110] Add instructions for installing Git LFS --- README.md | 3 ++- README_en.md | 18 ++++++++++++++++-- 2 files changed, 18 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index 814277d..ec90fb8 100644 --- a/README.md +++ b/README.md @@ -160,8 +160,9 @@ model = AutoModel.from_pretrained("THUDM/chatglm-6b-int4",trust_remote_code=True 如果遇到了报错 `Could not find module 'nvcuda.dll'` 或者 `RuntimeError: Unknown platform: darwin` (MacOS) 的话请参考这个[Issue](https://github.com/THUDM/ChatGLM-6B/issues/6#issuecomment-1470060041). 
### Mac 上的 GPU 加速 -对于搭载了Apple Silicon的Mac(以及MacBook),可以使用 MPS 后端来在 GPU 上运行 ChatGLM-6B。首先需要参考 Apple 的 [官方说明](https://developer.apple.com/metal/pytorch) 安装 PyTorch-Nightly。然后将模型仓库 clone 到本地 +对于搭载了Apple Silicon的Mac(以及MacBook),可以使用 MPS 后端来在 GPU 上运行 ChatGLM-6B。首先需要参考 Apple 的 [官方说明](https://developer.apple.com/metal/pytorch) 安装 PyTorch-Nightly。然后将模型仓库 clone 到本地(需要先[安装Git LFS](https://docs.github.com/zh/repositories/working-with-files/managing-large-files/installing-git-large-file-storage)) ```shell +git lfs install git clone https://huggingface.co/THUDM/chatglm-6b ``` 将代码中的模型加载改为从本地加载,并使用 mps 后端 diff --git a/README_en.md b/README_en.md index 7b84f9c..b5b4b62 100644 --- a/README_en.md +++ b/README_en.md @@ -9,7 +9,7 @@ ChatGLM-6B uses technology similar to ChatGPT, optimized for Chinese QA and dial Try the [online demo](https://huggingface.co/spaces/ysharma/ChatGLM-6b_Gradio_Streaming) on Huggingface Spaces. ## Update -**[2023/03/23]** Add API deployment, thanks to [@LemonQu-GIT](https://github.com/LemonQu-GIT). Add embedding-quantized model [ChatGLM-6B-INT4-QE](https://huggingface.co/THUDM/chatglm-6b-int4-qe) +**[2023/03/23]** Add API deployment, thanks to [@LemonQu-GIT](https://github.com/LemonQu-GIT). Add embedding-quantized model [ChatGLM-6B-INT4-QE](https://huggingface.co/THUDM/chatglm-6b-int4-qe). Add support for GPU inference on Mac with Apple Silicon. **[2023/03/19]** Add streaming output function `stream_chat`, already applied in web and CLI demo. Fix Chinese punctuations in output. Add quantized model [ChatGLM-6B-INT4](https://huggingface.co/THUDM/chatglm-6b-int4). @@ -154,7 +154,21 @@ model = AutoModel.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True).fl model = AutoModel.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True).float() ``` -**For Mac users**: if your encounter the error `RuntimeError: Unknown platform: darwin`, please refer to this [Issue](https://github.com/THUDM/ChatGLM-6B/issues/6#issuecomment-1470060041). +If your encounter the error `Could not find module 'nvcuda.dll'` or `RuntimeError: Unknown platform: darwin`(MacOS), please refer to this [Issue](https://github.com/THUDM/ChatGLM-6B/issues/6#issuecomment-1470060041). + +### GPU Inference on Mac +For Macs (and MacBooks) with Apple Silicon, it is possible to use the MPS backend to run ChatGLM-6B on the GPU. First, you need to refer to Apple's [official instructions](https://developer.apple.com/metal/pytorch) to install PyTorch-Nightly. Then clone the model repository locally (you need to [install Git LFS](https://docs.github.com/zh/repositories/working-with-files/managing-large-files/installing-git-large-file-storage)) +```shell +git lfs install +git clone https://huggingface.co/THUDM/chatglm-6b +``` +Change the code to load the model from your local path, and use the mps backend: +```python +model = AutoModel.from_pretrained("your local path", trust_remote_code=True).half().to('mps') +``` +Then you can use GPU-accelerated model inference on Mac. + + ## ChatGLM-6B Examples From 32d625463ce53ac5986c1ea4974dda3d7687f1a0 Mon Sep 17 00:00:00 2001 From: Shaw Date: Thu, 30 Mar 2023 11:49:07 +0800 Subject: [PATCH 011/110] Update README.md --- README.md | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index ec90fb8..0bcfc06 100644 --- a/README.md +++ b/README.md @@ -1,11 +1,15 @@ # ChatGLM-6B +
+ 🌐 Blog • 🤗 HF Repo • 🐦 Twitter • 📃 [GLM@ACL 22] [GitHub] • 📃 [GLM-130B@ICLR 23] [GitHub]
+
+ ## 介绍 ChatGLM-6B 是一个开源的、支持中英双语的对话语言模型,基于 [General Language Model (GLM)](https://github.com/THUDM/GLM) 架构,具有 62 亿参数。结合模型量化技术,用户可以在消费级的显卡上进行本地部署(INT4 量化级别下最低只需 6GB 显存)。 ChatGLM-6B 使用了和 ChatGPT 相似的技术,针对中文问答和对话进行了优化。经过约 1T 标识符的中英双语训练,辅以监督微调、反馈自助、人类反馈强化学习等技术的加持,62 亿参数的 ChatGLM-6B 已经能生成相当符合人类偏好的回答。更多信息请参考我们的[博客](https://chatglm.cn/blog)。 -不过,由于 ChatGLM-6B 的规模较小,目前已知其具有相当多的[**局限性**](#局限性),如事实性/数学逻辑错误,可能生成有害/有偏见内容,较弱的上下文能力,自我认知混乱,以及对英文指示生成与中文指示完全矛盾的内容。请大家在使用前了解这些问题,以免产生误解。更大的基于1300亿参数[GLM-130B](https://github.com/THUDM/GLM-130B)的ChatGLM正在内测开发中。 +不过,由于 ChatGLM-6B 的规模较小,目前已知其具有相当多的[**局限性**](#局限性),如事实性/数学逻辑错误,可能生成有害/有偏见内容,较弱的上下文能力,自我认知混乱,以及对英文指示生成与中文指示完全矛盾的内容。请大家在使用前了解这些问题,以免产生误解。更大的基于1300亿参数 [GLM-130B](https://github.com/THUDM/GLM-130B) 的ChatGLM正在内测开发中。 *Read this in [English](README_en.md).* From ee7fa65ebd1fcac780f30d8f25acb241e27304bc Mon Sep 17 00:00:00 2001 From: Shaw Date: Thu, 30 Mar 2023 11:49:53 +0800 Subject: [PATCH 012/110] Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 0bcfc06..2c6f083 100644 --- a/README.md +++ b/README.md @@ -9,7 +9,7 @@ ChatGLM-6B 是一个开源的、支持中英双语的对话语言模型,基于 [General Language Model (GLM)](https://github.com/THUDM/GLM) 架构,具有 62 亿参数。结合模型量化技术,用户可以在消费级的显卡上进行本地部署(INT4 量化级别下最低只需 6GB 显存)。 ChatGLM-6B 使用了和 ChatGPT 相似的技术,针对中文问答和对话进行了优化。经过约 1T 标识符的中英双语训练,辅以监督微调、反馈自助、人类反馈强化学习等技术的加持,62 亿参数的 ChatGLM-6B 已经能生成相当符合人类偏好的回答。更多信息请参考我们的[博客](https://chatglm.cn/blog)。 -不过,由于 ChatGLM-6B 的规模较小,目前已知其具有相当多的[**局限性**](#局限性),如事实性/数学逻辑错误,可能生成有害/有偏见内容,较弱的上下文能力,自我认知混乱,以及对英文指示生成与中文指示完全矛盾的内容。请大家在使用前了解这些问题,以免产生误解。更大的基于1300亿参数 [GLM-130B](https://github.com/THUDM/GLM-130B) 的ChatGLM正在内测开发中。 +不过,由于 ChatGLM-6B 的规模较小,目前已知其具有相当多的[**局限性**](#局限性),如事实性/数学逻辑错误,可能生成有害/有偏见内容,较弱的上下文能力,自我认知混乱,以及对英文指示生成与中文指示完全矛盾的内容。请大家在使用前了解这些问题,以免产生误解。更大的基于1300亿参数 [GLM-130B](https://github.com/THUDM/GLM-130B) 的 ChatGLM 正在内测开发中。 *Read this in [English](README_en.md).* From 968a30672ab90b6d0f3b6be9a098db567b34e06f Mon Sep 17 00:00:00 2001 From: duzx16 Date: Fri, 31 Mar 2023 10:43:55 +0800 Subject: [PATCH 013/110] Add P-Tuning v2 --- README.md | 25 ++- ptuning/README.md | 70 +++++++ ptuning/arguments.py | 217 +++++++++++++++++++++ ptuning/evaluate.sh | 20 ++ ptuning/main.py | 389 +++++++++++++++++++++++++++++++++++++ ptuning/train.sh | 26 +++ ptuning/trainer_seq2seq.py | 245 +++++++++++++++++++++++ 7 files changed, 982 insertions(+), 10 deletions(-) create mode 100644 ptuning/README.md create mode 100644 ptuning/arguments.py create mode 100644 ptuning/evaluate.sh create mode 100644 ptuning/main.py create mode 100644 ptuning/train.sh create mode 100644 ptuning/trainer_seq2seq.py diff --git a/README.md b/README.md index ec90fb8..ff4383b 100644 --- a/README.md +++ b/README.md @@ -10,20 +10,12 @@ ChatGLM-6B 使用了和 ChatGPT 相似的技术,针对中文问答和对话进 *Read this in [English](README_en.md).* ## 更新信息 +**[2023/03/31]** 增加基于 P-Tuning-v2 的微调实现,最低只需 8GB 显存即可进行模型微调。详见[模型微调](ptuning/README.md)。 + **[2023/03/23]** 增加API部署(感谢 [@LemonQu-GIT](https://github.com/LemonQu-GIT))。增加Embedding量化模型[ChatGLM-6B-INT4-QE](https://huggingface.co/THUDM/chatglm-6b-int4-qe)。增加对基于Apple Silicon的Mac上GPU加速的支持。 **[2023/03/19]** 增加流式输出接口 `stream_chat`,已更新到网页版和命令行 Demo。修复输出中的中文标点。增加量化后的模型 [ChatGLM-6B-INT4](https://huggingface.co/THUDM/chatglm-6b-int4) -## 友情链接 -以下是部分基于本仓库开发的开源项目: -* [ChatGLM-MNN](https://github.com/wangzhaode/ChatGLM-MNN): 一个基于 MNN 的 ChatGLM-6B C++ 推理实现,支持根据显存大小自动分配计算任务给 GPU 和 CPU -* [ChatGLM-Tuning](https://github.com/mymusise/ChatGLM-Tuning): 基于 LoRA 对 ChatGLM-6B 进行微调 - -以下是部分针对本项目的教程/文档: -* 
[Windows部署文档](https://github.com/ZhangErling/ChatGLM-6B/blob/main/deployment_windows.md) - -如果你有其他好的项目/教程的话,欢迎参照上述格式添加到README中并提出 [PR](https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/proposing-changes-to-your-work-with-pull-requests/creating-a-pull-request-from-a-fork). - ## 使用方式 ### 硬件需求 @@ -171,6 +163,9 @@ model = AutoModel.from_pretrained("your local path", trust_remote_code=True).hal ``` 即可使用在 Mac 上使用 GPU 加速模型推理。 +## 模型微调 +详见 [ptuning/README.md](ptuning/README.md)。 + ## ChatGLM-6B 示例 以下是一些使用 `web_demo.py` 得到的示例截图。更多 ChatGLM-6B 的可能,等待你来探索发现! @@ -259,6 +254,16 @@ model = AutoModel.from_pretrained("your local path", trust_remote_code=True).hal 本仓库的代码依照 [Apache-2.0](LICENSE) 协议开源,ChatGLM-6B 模型的权重的使用则需要遵循 [Model License](MODEL_LICENSE)。 +## 友情链接 +以下是部分基于本仓库开发的开源项目: +* [ChatGLM-MNN](https://github.com/wangzhaode/ChatGLM-MNN): 一个基于 MNN 的 ChatGLM-6B C++ 推理实现,支持根据显存大小自动分配计算任务给 GPU 和 CPU +* [ChatGLM-Tuning](https://github.com/mymusise/ChatGLM-Tuning): 基于 LoRA 对 ChatGLM-6B 进行微调 + +以下是部分针对本项目的教程/文档: +* [Windows部署文档](https://github.com/ZhangErling/ChatGLM-6B/blob/main/deployment_windows.md) + +如果你有其他好的项目/教程的话,欢迎参照上述格式添加到README中并提出 [PR](https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/proposing-changes-to-your-work-with-pull-requests/creating-a-pull-request-from-a-fork). + ## 引用 如果你觉得我们的工作有帮助的话,请考虑引用下列论文 diff --git a/ptuning/README.md b/ptuning/README.md new file mode 100644 index 0000000..1fb0ea6 --- /dev/null +++ b/ptuning/README.md @@ -0,0 +1,70 @@ +# ChatGLM-6B-PT +本仓库实现了对于 ChatGLM-6B 模型基于 [P-Tuning v2](https://github.com/THUDM/P-tuning-v2) 的微调。P-Tuning v2将需要微调的参数量减少到原来的0.1%,再通过模型量化、Gradient Checkpoint等方法,最低只需要 8GB 显存即可运行。 + +下面以 [ADGEN](https://aclanthology.org/D19-1321.pdf) (广告生成) 数据集为例介绍代码的使用方法。 + +## 软件依赖 +除 ChatGLM-6B 的依赖之外,还需要按照以下依赖 +``` +pip install rouge_chinese nltk jieba datasets +``` +## 使用方法 + +### 下载数据集 +ADGEN 数据集任务为根据输入(content)生成一段广告词(summary)。 + +```json +{ + "content": "类型#上衣*版型#宽松*版型#显瘦*图案#线条*衣样式#衬衫*衣袖型#泡泡袖*衣款式#抽绳", + "summary": "这件衬衫的款式非常的宽松,利落的线条可以很好的隐藏身材上的小缺点,穿在身上有着很好的显瘦效果。领口装饰了一个可爱的抽绳,漂亮的绳结展现出了十足的个性,配合时尚的泡泡袖型,尽显女性甜美可爱的气息。" +} +``` + +从 [Google Drive](https://drive.google.com/file/d/13_vf0xRTQsyneRKdD1bZIr93vBGOczrk/view?usp=sharing) 或者 [Tsinghua Cloud]() 下载处理好的 ADGEN数据集,将解压后的 `AdvertiseGen` 目录放到本目录下。 + +### 训练 +运行以下指令进行训练: +```shell +bash train.sh +``` +`train.sh` 中的`PRE_SEQ_LEN`和 `LR` 分别是 soft prompt 长度和训练的学习率,可以进行调节以取得最佳的效果。 + +### 推理 + +将`evaluate.sh`中的`CHECKPOINT`更改为训练时保存的checkpoint名称,运行以下指令进行模型推理和评测: +```shell +bash evaluate.sh +``` + +评测指标为中文 Rouge score 和 BLEU-4。生成的结果保存在 +`./output/adgen-chatglm-6b-pt-8-1e-2/generated_predictions.txt`。 + +### 例子 +#### 示例1 +* Input: 类型#上衣\*材质#牛仔布\*颜色#白色\*风格#简约\*图案#刺绣\*衣样式#外套\*衣款式#破洞 +* Label: 简约而不简单的牛仔外套,白色的衣身十分百搭。衣身多处有做旧破洞设计,打破单调乏味,增加一丝造型看点。衣身后背处有趣味刺绣装饰,丰富层次感,彰显别样时尚。 +* 微调前Output: 这件上衣的材质是牛仔布,颜色是白色,风格是简约,图案是刺绣,衣样式是外套,衣款式是破洞。 +* 微调后Output: 这是一款简约的牛仔外套,破洞设计,将牛仔布破洞,带来一种随意与个性。破洞的牛仔外套,展现出时尚气息,带来一种休闲感。同时,刺绣图案,让整件外套更加立体。 + +#### 示例2 + +* Input: 类型#裙\*版型#显瘦\*风格#文艺\*风格#简约\*图案#印花\*图案#撞色\*裙下摆#压褶\*裙长#连衣裙\*裙领型#圆领 +* Label: 文艺个性的印花连衣裙,藏青色底蕴,低调又大气,撞色太阳花分布整个裙身,绚丽而美好,带来时尚减龄的气质。基础款的舒适圆领,简约不失大方,勾勒精致脸庞。领后是一粒包布扣固定,穿脱十分方便。前片立体的打褶设计,搭配后片压褶的做工,增添层次和空间感,显瘦又有型。 + +* 微调前Output: 类型#裙*版型#显瘦*风格#文艺*风格#简约*图案#印花*图案#撞色*裙下摆#压褶*裙长#连衣裙*裙领型#圆领 1\. 连衣裙:简约风格,裙长为膝盖以上,裙领型为圆领。2\. 裙下摆:压褶设计,使裙摆呈现出流畅的褶皱效果。3\. 裙领型:裙领型为圆领,使穿上连衣裙后更加有型。4\. 版型:采用显瘦设计,让连衣裙看起来更加苗条。5\. 风格:文艺风格,让连衣裙更加有内涵和品味。6\. 图案:印花设计,在连衣裙上印有独特的图案。7\. 
撞色:采用撞色设计,让连衣裙在色彩上更加鲜明、富有层次感。 +* 微调后Output: 这是一款文艺范的连衣裙,以印花为元素,采用简约的印花,既能够突出文艺气质,又能够展现简约风。在印花的同时又有领子和裙摆的压褶设计,更加凸显文艺气质。简约而不会过于单调,搭配出街,穿着十分舒适。 + +## 使用自己的数据集 +修改 `train.sh` 和 `evaluate.sh` 中的 `train_file`、`validation_file`和`test_file`为你自己的json格式数据集路径,并将`prompt_column`和`response_column`改为json文件中输入文本和输出文本对应的key。 + +## 引用 + +``` +@inproceedings{liu2022p, + title={P-tuning: Prompt tuning can be comparable to fine-tuning across scales and tasks}, + author={Liu, Xiao and Ji, Kaixuan and Fu, Yicheng and Tam, Weng and Du, Zhengxiao and Yang, Zhilin and Tang, Jie}, + booktitle={Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics (Volume 2: Short Papers)}, + pages={61--68}, + year={2022} +} +``` diff --git a/ptuning/arguments.py b/ptuning/arguments.py new file mode 100644 index 0000000..1c61f97 --- /dev/null +++ b/ptuning/arguments.py @@ -0,0 +1,217 @@ +from dataclasses import dataclass, field +from typing import Optional + + +@dataclass +class ModelArguments: + """ + Arguments pertaining to which model/config/tokenizer we are going to fine-tune from. + """ + + model_name_or_path: str = field( + metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} + ) + config_name: Optional[str] = field( + default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"} + ) + tokenizer_name: Optional[str] = field( + default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} + ) + cache_dir: Optional[str] = field( + default=None, + metadata={"help": "Where to store the pretrained models downloaded from huggingface.co"}, + ) + use_fast_tokenizer: bool = field( + default=True, + metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."}, + ) + model_revision: str = field( + default="main", + metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}, + ) + use_auth_token: bool = field( + default=False, + metadata={ + "help": ( + "Will use the token generated when running `huggingface-cli login` (necessary to use this script " + "with private models)." + ) + }, + ) + resize_position_embeddings: Optional[bool] = field( + default=None, + metadata={ + "help": ( + "Whether to automatically resize the position embeddings if `max_source_length` exceeds " + "the model's position embeddings." + ) + }, + ) + quantization_bit: Optional[int] = field( + default=None + ) + pre_seq_len: Optional[int] = field( + default=None + ) + prefix_projection: bool = field( + default=False + ) + + +@dataclass +class DataTrainingArguments: + """ + Arguments pertaining to what data we are going to input our model for training and eval. 
+ """ + + lang: Optional[str] = field(default=None, metadata={"help": "Language id for summarization."}) + + dataset_name: Optional[str] = field( + default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."} + ) + dataset_config_name: Optional[str] = field( + default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} + ) + prompt_column: Optional[str] = field( + default=None, + metadata={"help": "The name of the column in the datasets containing the full texts (for summarization)."}, + ) + response_column: Optional[str] = field( + default=None, + metadata={"help": "The name of the column in the datasets containing the summaries (for summarization)."}, + ) + train_file: Optional[str] = field( + default=None, metadata={"help": "The input training data file (a jsonlines or csv file)."} + ) + validation_file: Optional[str] = field( + default=None, + metadata={ + "help": ( + "An optional input evaluation data file to evaluate the metrics (rouge) on (a jsonlines or csv file)." + ) + }, + ) + test_file: Optional[str] = field( + default=None, + metadata={ + "help": "An optional input test data file to evaluate the metrics (rouge) on (a jsonlines or csv file)." + }, + ) + overwrite_cache: bool = field( + default=False, metadata={"help": "Overwrite the cached training and evaluation sets"} + ) + preprocessing_num_workers: Optional[int] = field( + default=None, + metadata={"help": "The number of processes to use for the preprocessing."}, + ) + max_source_length: Optional[int] = field( + default=1024, + metadata={ + "help": ( + "The maximum total input sequence length after tokenization. Sequences longer " + "than this will be truncated, sequences shorter will be padded." + ) + }, + ) + max_target_length: Optional[int] = field( + default=128, + metadata={ + "help": ( + "The maximum total sequence length for target text after tokenization. Sequences longer " + "than this will be truncated, sequences shorter will be padded." + ) + }, + ) + val_max_target_length: Optional[int] = field( + default=None, + metadata={ + "help": ( + "The maximum total sequence length for validation target text after tokenization. Sequences longer " + "than this will be truncated, sequences shorter will be padded. Will default to `max_target_length`." + "This argument is also used to override the ``max_length`` param of ``model.generate``, which is used " + "during ``evaluate`` and ``predict``." + ) + }, + ) + pad_to_max_length: bool = field( + default=False, + metadata={ + "help": ( + "Whether to pad all samples to model maximum sentence length. " + "If False, will pad the samples dynamically when batching to the maximum length in the batch. More " + "efficient on GPU but very bad for TPU." + ) + }, + ) + max_train_samples: Optional[int] = field( + default=None, + metadata={ + "help": ( + "For debugging purposes or quicker training, truncate the number of training examples to this " + "value if set." + ) + }, + ) + max_eval_samples: Optional[int] = field( + default=None, + metadata={ + "help": ( + "For debugging purposes or quicker training, truncate the number of evaluation examples to this " + "value if set." + ) + }, + ) + max_predict_samples: Optional[int] = field( + default=None, + metadata={ + "help": ( + "For debugging purposes or quicker training, truncate the number of prediction examples to this " + "value if set." 
+ ) + }, + ) + num_beams: Optional[int] = field( + default=None, + metadata={ + "help": ( + "Number of beams to use for evaluation. This argument will be passed to ``model.generate``, " + "which is used during ``evaluate`` and ``predict``." + ) + }, + ) + ignore_pad_token_for_loss: bool = field( + default=True, + metadata={ + "help": "Whether to ignore the tokens corresponding to padded labels in the loss computation or not." + }, + ) + source_prefix: Optional[str] = field( + default="", metadata={"help": "A prefix to add before every source text (useful for T5 models)."} + ) + + forced_bos_token: Optional[str] = field( + default=None, + metadata={ + "help": ( + "The token to force as the first generated token after the decoder_start_token_id." + "Useful for multilingual models like mBART where the first generated token" + "needs to be the target language token (Usually it is the target language token)" + ) + }, + ) + + + + def __post_init__(self): + if self.dataset_name is None and self.train_file is None and self.validation_file is None: + raise ValueError("Need either a dataset name or a training/validation file.") + else: + if self.train_file is not None: + extension = self.train_file.split(".")[-1] + assert extension in ["csv", "json"], "`train_file` should be a csv or a json file." + if self.validation_file is not None: + extension = self.validation_file.split(".")[-1] + assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file." + if self.val_max_target_length is None: + self.val_max_target_length = self.max_target_length + diff --git a/ptuning/evaluate.sh b/ptuning/evaluate.sh new file mode 100644 index 0000000..db2a8c1 --- /dev/null +++ b/ptuning/evaluate.sh @@ -0,0 +1,20 @@ +PRE_SEQ_LEN=8 +CHECKPOINT=adgen-chatglm-6b-pt-8-1e-2 +STEP=3000 + +CUDA_VISIBLE_DEVICES=0 python3 main.py \ + --do_predict \ + --test_file AdvertiseGen/dev.json \ + --overwrite_cache \ + --prompt_column content \ + --response_column summary \ + --model_name_or_path ./output/$CHECKPOINT/checkpoint-$STEP \ + --output_dir ./output/$CHECKPOINT \ + --overwrite_output_dir \ + --max_source_length 64 \ + --max_target_length 64 \ + --per_device_eval_batch_size 1 \ + --predict_with_generate \ + --max_predict_samples 10 \ + --pre_seq_len $PRE_SEQ_LEN \ + --quantization_bit 4 diff --git a/ptuning/main.py b/ptuning/main.py new file mode 100644 index 0000000..d82fccc --- /dev/null +++ b/ptuning/main.py @@ -0,0 +1,389 @@ +#!/usr/bin/env python +# coding=utf-8 +# Copyright 2021 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Fine-tuning the library models for sequence to sequence. +""" +# You can also adapt this script on your own sequence to sequence task. Pointers for this are left as comments. 
+ +import logging +import os +import sys +import json + +import numpy as np +from datasets import load_dataset +import jieba +from rouge_chinese import Rouge +from nltk.translate.bleu_score import sentence_bleu + +import transformers +from transformers import ( + AutoConfig, + AutoModel, + AutoTokenizer, + AutoTokenizer, + DataCollatorForSeq2Seq, + HfArgumentParser, + Seq2SeqTrainingArguments, + set_seed, +) +from trainer_seq2seq import Seq2SeqTrainer + +from arguments import ModelArguments, DataTrainingArguments + +logger = logging.getLogger(__name__) + +def main(): + + parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments)) + if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): + # If we pass only one argument to the script and it's the path to a json file, + # let's parse it to get our arguments. + model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) + else: + model_args, data_args, training_args = parser.parse_args_into_dataclasses() + + # Setup logging + logging.basicConfig( + format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", + datefmt="%m/%d/%Y %H:%M:%S", + handlers=[logging.StreamHandler(sys.stdout)], + ) + + if training_args.should_log: + # The default of training_args.log_level is passive, so we set log level at info here to have that default. + transformers.utils.logging.set_verbosity_info() + + log_level = training_args.get_process_log_level() + logger.setLevel(log_level) + # datasets.utils.logging.set_verbosity(log_level) + transformers.utils.logging.set_verbosity(log_level) + transformers.utils.logging.enable_default_handler() + transformers.utils.logging.enable_explicit_format() + + # Log on each process the small summary: + logger.warning( + f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}" + + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}" + ) + logger.info(f"Training/evaluation parameters {training_args}") + + # Set seed before initializing model. 
+ set_seed(training_args.seed) + + # Load dataset + data_files = {} + if data_args.train_file is not None: + data_files["train"] = data_args.train_file + extension = data_args.train_file.split(".")[-1] + if data_args.validation_file is not None: + data_files["validation"] = data_args.validation_file + extension = data_args.validation_file.split(".")[-1] + if data_args.test_file is not None: + data_files["test"] = data_args.test_file + extension = data_args.test_file.split(".")[-1] + + raw_datasets = load_dataset( + extension, + data_files=data_files, + cache_dir=model_args.cache_dir, + use_auth_token=True if model_args.use_auth_token else None, + ) + + # Load pretrained model and tokenizer + config = AutoConfig.from_pretrained(model_args.model_name_or_path, trust_remote_code=True) + config.pre_seq_len = model_args.pre_seq_len + config.prefix_projection = model_args.prefix_projection + + tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, trust_remote_code=True) + + model = AutoModel.from_pretrained(model_args.model_name_or_path, config=config, revision=True, trust_remote_code=True) + + model = model.half() + if model_args.quantization_bit is not None: + print(f"Quantized to {model_args.quantization_bit} bit") + model = model.quantize(model_args.quantization_bit) + model.transformer.prefix_encoder.float() + + prefix = data_args.source_prefix if data_args.source_prefix is not None else "" + + # Preprocessing the datasets. + # We need to tokenize inputs and targets. + if training_args.do_train: + column_names = raw_datasets["train"].column_names + elif training_args.do_eval: + column_names = raw_datasets["validation"].column_names + elif training_args.do_predict: + column_names = raw_datasets["test"].column_names + else: + logger.info("There is nothing to do. Please pass `do_train`, `do_eval` and/or `do_predict`.") + return + + # Get the column names for input/target. + prompt_column = data_args.prompt_column + response_column = data_args.response_column + + # Temporarily set max_target_length for training. 
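+    # In preprocess_function_train below, each example is packed as
+    #   prompt_ids + [150001, 150004] + answer_ids + [150005]
+    # (ChatGLM-6B's gMASK, BOS and EOS token ids); label positions before the BOS token are
+    # set to -100, so the loss is computed only over the answer span.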
+ max_target_length = data_args.max_target_length + + def preprocess_function_eval(examples): + inputs, targets = [], [] + for i in range(len(examples[prompt_column])): + if examples[prompt_column][i] and examples[response_column][i]: + inputs.append(examples[prompt_column][i]) + targets.append(examples[response_column][i]) + + inputs = [prefix + inp for inp in inputs] + model_inputs = tokenizer(inputs, max_length=data_args.max_source_length, truncation=True) + labels = tokenizer(text_target=targets, max_length=max_target_length, truncation=True) + + if data_args.ignore_pad_token_for_loss: + labels["input_ids"] = [ + [(l if l != tokenizer.pad_token_id else -100) for l in label] for label in labels["input_ids"] + ] + model_inputs["labels"] = labels["input_ids"] + + return model_inputs + + def preprocess_function_train(examples): + max_seq_length = data_args.max_source_length + data_args.max_target_length + + model_inputs = { + "input_ids": [], + "labels": [], + } + for i in range(len(examples[prompt_column])): + if examples[prompt_column][i] and examples[response_column][i]: + prompt, answer = examples[prompt_column][i], examples[response_column][i] + prompt = prefix + prompt + a_ids = tokenizer.encode(text=prompt, add_special_tokens=False) + b_ids = tokenizer.encode(text=answer, add_special_tokens=False) + + if len(a_ids) > data_args.max_source_length - 1: + a_ids = a_ids[: data_args.max_source_length - 1] + + if len(b_ids) > data_args.max_target_length - 2: + b_ids = b_ids[: data_args.max_target_length - 2] + + input_ids = a_ids + [150001, 150004] + b_ids + [150005] + + context_length = input_ids.index(150004) + mask_position = context_length - 1 + labels = [-100] * context_length + input_ids[mask_position+1:] + + pad_len = max_seq_length - len(input_ids) + input_ids = input_ids + [tokenizer.pad_token_id] * pad_len + labels = labels + [tokenizer.pad_token_id] * pad_len + + model_inputs["input_ids"].append(input_ids) + model_inputs["labels"].append(labels) + + return model_inputs + + def print_dataset_example(example): + print("input_ids",example["input_ids"]) + print("inputs", tokenizer.decode(example["input_ids"])) + print("label_ids", example["labels"]) + print("labels", tokenizer.decode(example["labels"])) + + if training_args.do_train: + if "train" not in raw_datasets: + raise ValueError("--do_train requires a train dataset") + train_dataset = raw_datasets["train"] + if data_args.max_train_samples is not None: + max_train_samples = min(len(train_dataset), data_args.max_train_samples) + train_dataset = train_dataset.select(range(max_train_samples)) + with training_args.main_process_first(desc="train dataset map pre-processing"): + train_dataset = train_dataset.map( + preprocess_function_train, + batched=True, + num_proc=data_args.preprocessing_num_workers, + remove_columns=column_names, + load_from_cache_file=not data_args.overwrite_cache, + desc="Running tokenizer on train dataset", + ) + print_dataset_example(train_dataset[0]) + + if training_args.do_eval: + max_target_length = data_args.val_max_target_length + if "validation" not in raw_datasets: + raise ValueError("--do_eval requires a validation dataset") + eval_dataset = raw_datasets["validation"] + if data_args.max_eval_samples is not None: + max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples) + eval_dataset = eval_dataset.select(range(max_eval_samples)) + with training_args.main_process_first(desc="validation dataset map pre-processing"): + eval_dataset = eval_dataset.map( + preprocess_function_eval, + 
batched=True, + num_proc=data_args.preprocessing_num_workers, + remove_columns=column_names, + load_from_cache_file=not data_args.overwrite_cache, + desc="Running tokenizer on validation dataset", + ) + print_dataset_example(eval_dataset[0]) + + if training_args.do_predict: + max_target_length = data_args.val_max_target_length + if "test" not in raw_datasets: + raise ValueError("--do_predict requires a test dataset") + predict_dataset = raw_datasets["test"] + if data_args.max_predict_samples is not None: + max_predict_samples = min(len(predict_dataset), data_args.max_predict_samples) + predict_dataset = predict_dataset.select(range(max_predict_samples)) + with training_args.main_process_first(desc="prediction dataset map pre-processing"): + predict_dataset = predict_dataset.map( + preprocess_function_eval, + batched=True, + num_proc=data_args.preprocessing_num_workers, + remove_columns=column_names, + load_from_cache_file=not data_args.overwrite_cache, + desc="Running tokenizer on prediction dataset", + ) + print_dataset_example(predict_dataset[0]) + + # Data collator + label_pad_token_id = -100 if data_args.ignore_pad_token_for_loss else tokenizer.pad_token_id + data_collator = DataCollatorForSeq2Seq( + tokenizer, + model=model, + label_pad_token_id=label_pad_token_id, + pad_to_multiple_of=None, + ) + + # Metric + def compute_metrics(eval_preds): + preds, labels = eval_preds + if isinstance(preds, tuple): + preds = preds[0] + decoded_preds = tokenizer.batch_decode(preds, skip_special_tokens=True) + if data_args.ignore_pad_token_for_loss: + # Replace -100 in the labels as we can't decode them. + labels = np.where(labels != -100, labels, tokenizer.pad_token_id) + decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True) + + score_dict = { + "rouge-1": [], + "rouge-2": [], + "rouge-l": [], + "bleu-4": [] + } + for pred, label in zip(decoded_preds, decoded_labels): + hypothesis = list(jieba.cut(pred)) + reference = list(jieba.cut(label)) + rouge = Rouge() + scores = rouge.get_scores(' '.join(hypothesis) , ' '.join(reference)) + result = scores[0] + + for k, v in result.items(): + score_dict[k].append(round(v["f"] * 100, 4)) + bleu_score = sentence_bleu([list(label)], list(pred)) + score_dict["bleu-4"].append(round(bleu_score * 100, 4)) + + for k, v in score_dict.items(): + score_dict[k] = float(np.mean(v)) + return score_dict + + # Override the decoding parameters of Seq2SeqTrainer + training_args.generation_max_length = ( + training_args.generation_max_length + if training_args.generation_max_length is not None + else data_args.val_max_target_length + ) + training_args.generation_num_beams = ( + data_args.num_beams if data_args.num_beams is not None else training_args.generation_num_beams + ) + # Initialize our Trainer + trainer = Seq2SeqTrainer( + model=model, + args=training_args, + train_dataset=train_dataset if training_args.do_train else None, + eval_dataset=eval_dataset if training_args.do_eval else None, + tokenizer=tokenizer, + data_collator=data_collator, + compute_metrics=compute_metrics if training_args.predict_with_generate else None, + ) + + # Training + if training_args.do_train: + checkpoint = None + if training_args.resume_from_checkpoint is not None: + checkpoint = training_args.resume_from_checkpoint + # elif last_checkpoint is not None: + # checkpoint = last_checkpoint + model.gradient_checkpointing_enable() + model.enable_input_require_grads() + train_result = trainer.train(resume_from_checkpoint=checkpoint) + # trainer.save_model() # Saves the tokenizer 
too for easy upload + + metrics = train_result.metrics + max_train_samples = ( + data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset) + ) + metrics["train_samples"] = min(max_train_samples, len(train_dataset)) + + trainer.log_metrics("train", metrics) + trainer.save_metrics("train", metrics) + trainer.save_state() + + # Evaluation + results = {} + if training_args.do_eval: + logger.info("*** Evaluate ***") + metrics = trainer.evaluate(metric_key_prefix="eval", do_sample=True, top_p=0.7, max_length=512, temperature=0.95) + max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset) + metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset)) + + trainer.log_metrics("eval", metrics) + trainer.save_metrics("eval", metrics) + + if training_args.do_predict: + logger.info("*** Predict ***") + + predict_results = trainer.predict(predict_dataset, metric_key_prefix="predict", max_length=512, do_sample=True, top_p=0.7, temperature=0.95) + metrics = predict_results.metrics + max_predict_samples = ( + data_args.max_predict_samples if data_args.max_predict_samples is not None else len(predict_dataset) + ) + metrics["predict_samples"] = min(max_predict_samples, len(predict_dataset)) + + trainer.log_metrics("predict", metrics) + trainer.save_metrics("predict", metrics) + + if trainer.is_world_process_zero(): + if training_args.predict_with_generate: + predictions = tokenizer.batch_decode( + predict_results.predictions, skip_special_tokens=True, clean_up_tokenization_spaces=True + ) + predictions = [pred.strip() for pred in predictions] + labels = tokenizer.batch_decode( + predict_results.label_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True + ) + labels = [label.strip() for label in labels] + output_prediction_file = os.path.join(training_args.output_dir, "generated_predictions.txt") + with open(output_prediction_file, "w") as writer: + for p, l in zip(predictions, labels): + writer.write(json.dumps({"labels": l, "predict": p}, ensure_ascii=False)) + return results + + +def _mp_fn(index): + # For xla_spawn (TPUs) + main() + + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/ptuning/train.sh b/ptuning/train.sh new file mode 100644 index 0000000..6988596 --- /dev/null +++ b/ptuning/train.sh @@ -0,0 +1,26 @@ +PRE_SEQ_LEN=8 +LR=1e-2 + +CUDA_VISIBLE_DEVICES=0 python3 main.py \ + --do_train \ + --train_file AdvertiseGen/train.json \ + --validation_file AdvertiseGen/dev.json \ + --prompt_column content \ + --response_column summary \ + --overwrite_cache \ + --model_name_or_path /mnt/vepfs/workspace/zxdu/chatglm_6b \ + --output_dir output/adgen-chatglm-6b-pt-$PRE_SEQ_LEN-$LR-dev \ + --overwrite_output_dir \ + --max_source_length 64 \ + --max_target_length 64 \ + --per_device_train_batch_size 8 \ + --per_device_eval_batch_size 1 \ + --gradient_accumulation_steps 2 \ + --predict_with_generate \ + --max_steps 3000 \ + --logging_steps 10 \ + --save_steps 1000 \ + --learning_rate $LR \ + --pre_seq_len $PRE_SEQ_LEN \ + --quantization_bit 4 + diff --git a/ptuning/trainer_seq2seq.py b/ptuning/trainer_seq2seq.py new file mode 100644 index 0000000..0087786 --- /dev/null +++ b/ptuning/trainer_seq2seq.py @@ -0,0 +1,245 @@ +# Copyright 2020 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Any, Dict, List, Optional, Tuple, Union + +import torch +from torch import nn +from torch.utils.data import Dataset + +from transformers.deepspeed import is_deepspeed_zero3_enabled +from transformers.trainer import Trainer +from transformers.trainer_utils import PredictionOutput +from transformers.utils import logging + + +logger = logging.get_logger(__name__) + + +class Seq2SeqTrainer(Trainer): + def evaluate( + self, + eval_dataset: Optional[Dataset] = None, + ignore_keys: Optional[List[str]] = None, + metric_key_prefix: str = "eval", + **gen_kwargs + ) -> Dict[str, float]: + """ + Run evaluation and returns metrics. + + The calling script will be responsible for providing a method to compute metrics, as they are task-dependent + (pass it to the init `compute_metrics` argument). + + You can also subclass and override this method to inject custom behavior. + + Args: + eval_dataset (`Dataset`, *optional*): + Pass a dataset if you wish to override `self.eval_dataset`. If it is an [`~datasets.Dataset`], columns + not accepted by the `model.forward()` method are automatically removed. It must implement the `__len__` + method. + ignore_keys (`List[str]`, *optional*): + A list of keys in the output of your model (if it is a dictionary) that should be ignored when + gathering predictions. + metric_key_prefix (`str`, *optional*, defaults to `"eval"`): + An optional prefix to be used as the metrics key prefix. For example the metrics "bleu" will be named + "eval_bleu" if the prefix is `"eval"` (default) + max_length (`int`, *optional*): + The maximum target length to use when predicting with the generate method. + num_beams (`int`, *optional*): + Number of beams for beam search that will be used when predicting with the generate method. 1 means no + beam search. + gen_kwargs: + Additional `generate` specific kwargs. + + Returns: + A dictionary containing the evaluation loss and the potential metrics computed from the predictions. The + dictionary also contains the epoch number which comes from the training state. + """ + + gen_kwargs = gen_kwargs.copy() + if gen_kwargs.get("max_length") is None and gen_kwargs.get("max_new_tokens") is None: + gen_kwargs["max_length"] = self.args.generation_max_length + gen_kwargs["num_beams"] = ( + gen_kwargs["num_beams"] if gen_kwargs.get("num_beams") is not None else self.args.generation_num_beams + ) + self._gen_kwargs = gen_kwargs + + return super().evaluate(eval_dataset, ignore_keys=ignore_keys, metric_key_prefix=metric_key_prefix) + + def predict( + self, + test_dataset: Dataset, + ignore_keys: Optional[List[str]] = None, + metric_key_prefix: str = "test", + **gen_kwargs + ) -> PredictionOutput: + """ + Run prediction and returns predictions and potential metrics. + + Depending on the dataset and your use case, your test dataset may contain labels. In that case, this method + will also return metrics, like in `evaluate()`. + + Args: + test_dataset (`Dataset`): + Dataset to run the predictions on. If it is a [`~datasets.Dataset`], columns not accepted by the + `model.forward()` method are automatically removed. 
Has to implement the method `__len__` + ignore_keys (`List[str]`, *optional*): + A list of keys in the output of your model (if it is a dictionary) that should be ignored when + gathering predictions. + metric_key_prefix (`str`, *optional*, defaults to `"eval"`): + An optional prefix to be used as the metrics key prefix. For example the metrics "bleu" will be named + "eval_bleu" if the prefix is `"eval"` (default) + max_length (`int`, *optional*): + The maximum target length to use when predicting with the generate method. + num_beams (`int`, *optional*): + Number of beams for beam search that will be used when predicting with the generate method. 1 means no + beam search. + gen_kwargs: + Additional `generate` specific kwargs. + + + + If your predictions or labels have different sequence lengths (for instance because you're doing dynamic + padding in a token classification task) the predictions will be padded (on the right) to allow for + concatenation into one array. The padding index is -100. + + + + Returns: *NamedTuple* A namedtuple with the following keys: + + - predictions (`np.ndarray`): The predictions on `test_dataset`. + - label_ids (`np.ndarray`, *optional*): The labels (if the dataset contained some). + - metrics (`Dict[str, float]`, *optional*): The potential dictionary of metrics (if the dataset contained + labels). + """ + + gen_kwargs = gen_kwargs.copy() + if gen_kwargs.get("max_length") is None and gen_kwargs.get("max_new_tokens") is None: + gen_kwargs["max_length"] = self.args.generation_max_length + gen_kwargs["num_beams"] = ( + gen_kwargs["num_beams"] if gen_kwargs.get("num_beams") is not None else self.args.generation_num_beams + ) + self._gen_kwargs = gen_kwargs + + + return super().predict(test_dataset, ignore_keys=ignore_keys, metric_key_prefix=metric_key_prefix) + + def prediction_step( + self, + model: nn.Module, + inputs: Dict[str, Union[torch.Tensor, Any]], + prediction_loss_only: bool, + ignore_keys: Optional[List[str]] = None, + ) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]: + """ + Perform an evaluation step on `model` using `inputs`. + + Subclass and override to inject custom behavior. + + Args: + model (`nn.Module`): + The model to evaluate. + inputs (`Dict[str, Union[torch.Tensor, Any]]`): + The inputs and targets of the model. + + The dictionary will be unpacked before being fed to the model. Most models expect the targets under the + argument `labels`. Check your model's documentation for all accepted arguments. + prediction_loss_only (`bool`): + Whether or not to return the loss only. + + Return: + Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]: A tuple with the loss, logits and + labels (each being optional). 
+ """ + + if not self.args.predict_with_generate or prediction_loss_only: + return super().prediction_step( + model, inputs, prediction_loss_only=prediction_loss_only, ignore_keys=ignore_keys + ) + + has_labels = "labels" in inputs + inputs = self._prepare_inputs(inputs) + + # XXX: adapt synced_gpus for fairscale as well + gen_kwargs = self._gen_kwargs.copy() + if gen_kwargs.get("max_length") is None and gen_kwargs.get("max_new_tokens") is None: + gen_kwargs["max_length"] = self.model.config.max_length + gen_kwargs["num_beams"] = ( + gen_kwargs["num_beams"] if gen_kwargs.get("num_beams") is not None else self.model.config.num_beams + ) + default_synced_gpus = True if is_deepspeed_zero3_enabled() else False + gen_kwargs["synced_gpus"] = ( + gen_kwargs["synced_gpus"] if gen_kwargs.get("synced_gpus") is not None else default_synced_gpus + ) + + if "attention_mask" in inputs: + gen_kwargs["attention_mask"] = inputs.get("attention_mask", None) + if "global_attention_mask" in inputs: + gen_kwargs["global_attention_mask"] = inputs.get("global_attention_mask", None) + + # prepare generation inputs + # some encoder-decoder models can have varying encoder's and thus + # varying model input names + if hasattr(self.model, "encoder") and self.model.encoder.main_input_name != self.model.main_input_name: + generation_inputs = inputs[self.model.encoder.main_input_name] + else: + generation_inputs = inputs[self.model.main_input_name] + + gen_kwargs["input_ids"] = generation_inputs + generated_tokens = self.model.generate(**gen_kwargs) + generated_tokens = generated_tokens[:, generation_inputs.size()[-1]:] + + # in case the batch is shorter than max length, the output should be padded + if gen_kwargs.get("max_length") is not None and generated_tokens.shape[-1] < gen_kwargs["max_length"]: + generated_tokens = self._pad_tensors_to_max_len(generated_tokens, gen_kwargs["max_length"]) + elif gen_kwargs.get("max_new_tokens") is not None and generated_tokens.shape[-1] < ( + gen_kwargs["max_new_tokens"] + 1 + ): + generated_tokens = self._pad_tensors_to_max_len(generated_tokens, gen_kwargs["max_new_tokens"] + 1) + + loss = None + + if self.args.prediction_loss_only: + return (loss, None, None) + + if has_labels: + labels = inputs["labels"] + if gen_kwargs.get("max_length") is not None and labels.shape[-1] < gen_kwargs["max_length"]: + labels = self._pad_tensors_to_max_len(labels, gen_kwargs["max_length"]) + elif gen_kwargs.get("max_new_tokens") is not None and labels.shape[-1] < ( + gen_kwargs["max_new_tokens"] + 1 + ): + labels = self._pad_tensors_to_max_len(labels, (gen_kwargs["max_new_tokens"] + 1)) + else: + labels = None + + return (loss, generated_tokens, labels) + + def _pad_tensors_to_max_len(self, tensor, max_length): + if self.tokenizer is not None and hasattr(self.tokenizer, "pad_token_id"): + # If PAD token is not defined at least EOS token has to be defined + pad_token_id = ( + self.tokenizer.pad_token_id if self.tokenizer.pad_token_id is not None else self.tokenizer.eos_token_id + ) + else: + if self.model.config.pad_token_id is not None: + pad_token_id = self.model.config.pad_token_id + else: + raise ValueError("Pad_token_id must be set in the configuration of the model, in order to pad tensors") + + padded_tensor = pad_token_id * torch.ones( + (tensor.shape[0], max_length), dtype=tensor.dtype, device=tensor.device + ) + padded_tensor[:, : tensor.shape[-1]] = tensor + return padded_tensor From a100769153b8718288d8825c28e003e1af87077c Mon Sep 17 00:00:00 2001 From: duzx16 Date: Fri, 31 Mar 2023 
10:46:44 +0800 Subject: [PATCH 014/110] Add P-Tuning v2 --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 3d4e23b..6573e8f 100644 --- a/README.md +++ b/README.md @@ -7,7 +7,7 @@ ## 介绍 ChatGLM-6B 是一个开源的、支持中英双语的对话语言模型,基于 [General Language Model (GLM)](https://github.com/THUDM/GLM) 架构,具有 62 亿参数。结合模型量化技术,用户可以在消费级的显卡上进行本地部署(INT4 量化级别下最低只需 6GB 显存)。 -ChatGLM-6B 使用了和 ChatGPT 相似的技术,针对中文问答和对话进行了优化。经过约 1T 标识符的中英双语训练,辅以监督微调、反馈自助、人类反馈强化学习等技术的加持,62 亿参数的 ChatGLM-6B 已经能生成相当符合人类偏好的回答。更多信息请参考我们的[博客](https://chatglm.cn/blog)。 +ChatGLM-6B 使用了和 ChatGPT 相似的技术,针对中文问答和对话进行了优化。经过约 1T 标识符的中英双语训练,辅以监督微调、反馈自助、人类反馈强化学习等技术的加持,62 亿参数的 ChatGLM-6B 已经能生成相当符合人类偏好的回答。同时实现了基于P-Tuning v2的[模型微调](ptuning/README.md)(INT4量化级别下最低只需 8GB 显存)。更多信息请参考我们的[博客](https://chatglm.cn/blog)。 不过,由于 ChatGLM-6B 的规模较小,目前已知其具有相当多的[**局限性**](#局限性),如事实性/数学逻辑错误,可能生成有害/有偏见内容,较弱的上下文能力,自我认知混乱,以及对英文指示生成与中文指示完全矛盾的内容。请大家在使用前了解这些问题,以免产生误解。更大的基于1300亿参数 [GLM-130B](https://github.com/THUDM/GLM-130B) 的 ChatGLM 正在内测开发中。 From 77da04683969f3a76a03c8d92ae7ebc76732f864 Mon Sep 17 00:00:00 2001 From: duzx16 Date: Fri, 31 Mar 2023 10:49:21 +0800 Subject: [PATCH 015/110] Update model path --- ptuning/train.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ptuning/train.sh b/ptuning/train.sh index 6988596..7f9ff6f 100644 --- a/ptuning/train.sh +++ b/ptuning/train.sh @@ -8,7 +8,7 @@ CUDA_VISIBLE_DEVICES=0 python3 main.py \ --prompt_column content \ --response_column summary \ --overwrite_cache \ - --model_name_or_path /mnt/vepfs/workspace/zxdu/chatglm_6b \ + --model_name_or_path THUDM/chatglm-6b \ --output_dir output/adgen-chatglm-6b-pt-$PRE_SEQ_LEN-$LR-dev \ --overwrite_output_dir \ --max_source_length 64 \ From 971a6fbb20d650563e5096bc22351c08e6fabbed Mon Sep 17 00:00:00 2001 From: duzx16 Date: Fri, 31 Mar 2023 11:27:29 +0800 Subject: [PATCH 016/110] Updaet ADGEN link --- ptuning/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ptuning/README.md b/ptuning/README.md index 1fb0ea6..f3f365a 100644 --- a/ptuning/README.md +++ b/ptuning/README.md @@ -20,7 +20,7 @@ ADGEN 数据集任务为根据输入(content)生成一段广告词(summary } ``` -从 [Google Drive](https://drive.google.com/file/d/13_vf0xRTQsyneRKdD1bZIr93vBGOczrk/view?usp=sharing) 或者 [Tsinghua Cloud]() 下载处理好的 ADGEN数据集,将解压后的 `AdvertiseGen` 目录放到本目录下。 +从 [Google Drive](https://drive.google.com/file/d/13_vf0xRTQsyneRKdD1bZIr93vBGOczrk/view?usp=sharing) 或者 [Tsinghua Cloud](https://cloud.tsinghua.edu.cn/f/b3f119a008264b1cabd1/?dl=1) 下载处理好的 ADGEN 数据集,将解压后的 `AdvertiseGen` 目录放到本目录下。 ### 训练 运行以下指令进行训练: From d2645d881653d5605a8e20a1d4725188493d8c3b Mon Sep 17 00:00:00 2001 From: duzx16 Date: Fri, 31 Mar 2023 11:28:13 +0800 Subject: [PATCH 017/110] Update batch size --- ptuning/train.sh | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/ptuning/train.sh b/ptuning/train.sh index 7f9ff6f..c0ffce1 100644 --- a/ptuning/train.sh +++ b/ptuning/train.sh @@ -8,14 +8,14 @@ CUDA_VISIBLE_DEVICES=0 python3 main.py \ --prompt_column content \ --response_column summary \ --overwrite_cache \ - --model_name_or_path THUDM/chatglm-6b \ + --model_name_or_path /mnt/vepfs/workspace/zxdu/chatglm_6b \ --output_dir output/adgen-chatglm-6b-pt-$PRE_SEQ_LEN-$LR-dev \ --overwrite_output_dir \ --max_source_length 64 \ --max_target_length 64 \ - --per_device_train_batch_size 8 \ + --per_device_train_batch_size 1 \ --per_device_eval_batch_size 1 \ - --gradient_accumulation_steps 2 \ + --gradient_accumulation_steps 16 \ --predict_with_generate \ --max_steps 3000 \ 
--logging_steps 10 \ From 5e818065e4b58bfb7be61984207da76c502077f2 Mon Sep 17 00:00:00 2001 From: duzx16 Date: Fri, 31 Mar 2023 11:29:34 +0800 Subject: [PATCH 018/110] Update memory requirement --- README.md | 2 +- ptuning/README.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 6573e8f..2c73467 100644 --- a/README.md +++ b/README.md @@ -7,7 +7,7 @@ ## 介绍 ChatGLM-6B 是一个开源的、支持中英双语的对话语言模型,基于 [General Language Model (GLM)](https://github.com/THUDM/GLM) 架构,具有 62 亿参数。结合模型量化技术,用户可以在消费级的显卡上进行本地部署(INT4 量化级别下最低只需 6GB 显存)。 -ChatGLM-6B 使用了和 ChatGPT 相似的技术,针对中文问答和对话进行了优化。经过约 1T 标识符的中英双语训练,辅以监督微调、反馈自助、人类反馈强化学习等技术的加持,62 亿参数的 ChatGLM-6B 已经能生成相当符合人类偏好的回答。同时实现了基于P-Tuning v2的[模型微调](ptuning/README.md)(INT4量化级别下最低只需 8GB 显存)。更多信息请参考我们的[博客](https://chatglm.cn/blog)。 +ChatGLM-6B 使用了和 ChatGPT 相似的技术,针对中文问答和对话进行了优化。经过约 1T 标识符的中英双语训练,辅以监督微调、反馈自助、人类反馈强化学习等技术的加持,62 亿参数的 ChatGLM-6B 已经能生成相当符合人类偏好的回答。同时实现了基于P-Tuning v2的[模型微调](ptuning/README.md)(INT4量化级别下最低只需 7GB 显存)。更多信息请参考我们的[博客](https://chatglm.cn/blog)。 不过,由于 ChatGLM-6B 的规模较小,目前已知其具有相当多的[**局限性**](#局限性),如事实性/数学逻辑错误,可能生成有害/有偏见内容,较弱的上下文能力,自我认知混乱,以及对英文指示生成与中文指示完全矛盾的内容。请大家在使用前了解这些问题,以免产生误解。更大的基于1300亿参数 [GLM-130B](https://github.com/THUDM/GLM-130B) 的 ChatGLM 正在内测开发中。 diff --git a/ptuning/README.md b/ptuning/README.md index f3f365a..a4097f6 100644 --- a/ptuning/README.md +++ b/ptuning/README.md @@ -1,5 +1,5 @@ # ChatGLM-6B-PT -本仓库实现了对于 ChatGLM-6B 模型基于 [P-Tuning v2](https://github.com/THUDM/P-tuning-v2) 的微调。P-Tuning v2将需要微调的参数量减少到原来的0.1%,再通过模型量化、Gradient Checkpoint等方法,最低只需要 8GB 显存即可运行。 +本仓库实现了对于 ChatGLM-6B 模型基于 [P-Tuning v2](https://github.com/THUDM/P-tuning-v2) 的微调。P-Tuning v2将需要微调的参数量减少到原来的0.1%,再通过模型量化、Gradient Checkpoint等方法,最低只需要 7GB 显存即可运行。 下面以 [ADGEN](https://aclanthology.org/D19-1321.pdf) (广告生成) 数据集为例介绍代码的使用方法。 From 24e24d5d6c4f015b3d946c69518dcff10d0f758c Mon Sep 17 00:00:00 2001 From: duzx16 Date: Fri, 31 Mar 2023 11:30:36 +0800 Subject: [PATCH 019/110] Fix model path --- ptuning/train.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ptuning/train.sh b/ptuning/train.sh index c0ffce1..1d03a25 100644 --- a/ptuning/train.sh +++ b/ptuning/train.sh @@ -8,7 +8,7 @@ CUDA_VISIBLE_DEVICES=0 python3 main.py \ --prompt_column content \ --response_column summary \ --overwrite_cache \ - --model_name_or_path /mnt/vepfs/workspace/zxdu/chatglm_6b \ + --model_name_or_path THUDM/chatglm-6b \ --output_dir output/adgen-chatglm-6b-pt-$PRE_SEQ_LEN-$LR-dev \ --overwrite_output_dir \ --max_source_length 64 \ From 99875468dd387b78fb765bb3aa335c5fb7435067 Mon Sep 17 00:00:00 2001 From: Aohan Zeng Date: Fri, 31 Mar 2023 11:46:21 +0800 Subject: [PATCH 020/110] Update README.md --- ptuning/README.md | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/ptuning/README.md b/ptuning/README.md index a4097f6..14011f0 100644 --- a/ptuning/README.md +++ b/ptuning/README.md @@ -1,5 +1,5 @@ # ChatGLM-6B-PT -本仓库实现了对于 ChatGLM-6B 模型基于 [P-Tuning v2](https://github.com/THUDM/P-tuning-v2) 的微调。P-Tuning v2将需要微调的参数量减少到原来的0.1%,再通过模型量化、Gradient Checkpoint等方法,最低只需要 7GB 显存即可运行。 +本仓库实现了对于 ChatGLM-6B 模型基于 [P-Tuning v2](https://github.com/THUDM/P-tuning-v2) 的微调。P-Tuning v2 将需要微调的参数量减少到原来的 0.1%,再通过模型量化、Gradient Checkpoint 等方法,最低只需要 7GB 显存即可运行。 下面以 [ADGEN](https://aclanthology.org/D19-1321.pdf) (广告生成) 数据集为例介绍代码的使用方法。 @@ -29,9 +29,11 @@ bash train.sh ``` `train.sh` 中的`PRE_SEQ_LEN`和 `LR` 分别是 soft prompt 长度和训练的学习率,可以进行调节以取得最佳的效果。 +在默认配置 `per_device_train_batch_size=1`, `gradient_accumulation_steps=16` 下,一次训练迭代会以 1 的批处理大小进行 16 
次累加的前后向传播,等效为 16 的总批处理大小,此时最低只需 6.7G 显存。若想在同等批处理大小下提升训练效率,可在二者乘积不变的情况下,加大 `per_device_train_batch_size` 的值,但也会带来更多的显存消耗,请根据实际情况酌情调整。 + ### 推理 -将`evaluate.sh`中的`CHECKPOINT`更改为训练时保存的checkpoint名称,运行以下指令进行模型推理和评测: +将 `evaluate.sh` 中的 `CHECKPOINT` 更改为训练时保存的 checkpoint 名称,运行以下指令进行模型推理和评测: ```shell bash evaluate.sh ``` @@ -43,19 +45,19 @@ bash evaluate.sh #### 示例1 * Input: 类型#上衣\*材质#牛仔布\*颜色#白色\*风格#简约\*图案#刺绣\*衣样式#外套\*衣款式#破洞 * Label: 简约而不简单的牛仔外套,白色的衣身十分百搭。衣身多处有做旧破洞设计,打破单调乏味,增加一丝造型看点。衣身后背处有趣味刺绣装饰,丰富层次感,彰显别样时尚。 -* 微调前Output: 这件上衣的材质是牛仔布,颜色是白色,风格是简约,图案是刺绣,衣样式是外套,衣款式是破洞。 -* 微调后Output: 这是一款简约的牛仔外套,破洞设计,将牛仔布破洞,带来一种随意与个性。破洞的牛仔外套,展现出时尚气息,带来一种休闲感。同时,刺绣图案,让整件外套更加立体。 +* Output[微调前]: 这件上衣的材质是牛仔布,颜色是白色,风格是简约,图案是刺绣,衣样式是外套,衣款式是破洞。 +* Output[微调后]: 这是一款简约的牛仔外套,破洞设计,将牛仔布破洞,带来一种随意与个性。破洞的牛仔外套,展现出时尚气息,带来一种休闲感。同时,刺绣图案,让整件外套更加立体。 #### 示例2 * Input: 类型#裙\*版型#显瘦\*风格#文艺\*风格#简约\*图案#印花\*图案#撞色\*裙下摆#压褶\*裙长#连衣裙\*裙领型#圆领 * Label: 文艺个性的印花连衣裙,藏青色底蕴,低调又大气,撞色太阳花分布整个裙身,绚丽而美好,带来时尚减龄的气质。基础款的舒适圆领,简约不失大方,勾勒精致脸庞。领后是一粒包布扣固定,穿脱十分方便。前片立体的打褶设计,搭配后片压褶的做工,增添层次和空间感,显瘦又有型。 -* 微调前Output: 类型#裙*版型#显瘦*风格#文艺*风格#简约*图案#印花*图案#撞色*裙下摆#压褶*裙长#连衣裙*裙领型#圆领 1\. 连衣裙:简约风格,裙长为膝盖以上,裙领型为圆领。2\. 裙下摆:压褶设计,使裙摆呈现出流畅的褶皱效果。3\. 裙领型:裙领型为圆领,使穿上连衣裙后更加有型。4\. 版型:采用显瘦设计,让连衣裙看起来更加苗条。5\. 风格:文艺风格,让连衣裙更加有内涵和品味。6\. 图案:印花设计,在连衣裙上印有独特的图案。7\. 撞色:采用撞色设计,让连衣裙在色彩上更加鲜明、富有层次感。 -* 微调后Output: 这是一款文艺范的连衣裙,以印花为元素,采用简约的印花,既能够突出文艺气质,又能够展现简约风。在印花的同时又有领子和裙摆的压褶设计,更加凸显文艺气质。简约而不会过于单调,搭配出街,穿着十分舒适。 +* Output[微调前]: 类型#裙*版型#显瘦*风格#文艺*风格#简约*图案#印花*图案#撞色*裙下摆#压褶*裙长#连衣裙*裙领型#圆领 1\. 连衣裙:简约风格,裙长为膝盖以上,裙领型为圆领。2\. 裙下摆:压褶设计,使裙摆呈现出流畅的褶皱效果。3\. 裙领型:裙领型为圆领,使穿上连衣裙后更加有型。4\. 版型:采用显瘦设计,让连衣裙看起来更加苗条。5\. 风格:文艺风格,让连衣裙更加有内涵和品味。6\. 图案:印花设计,在连衣裙上印有独特的图案。7\. 撞色:采用撞色设计,让连衣裙在色彩上更加鲜明、富有层次感。 +* Output[微调后]: 这是一款文艺范的连衣裙,以印花为元素,采用简约的印花,既能够突出文艺气质,又能够展现简约风。在印花的同时又有领子和裙摆的压褶设计,更加凸显文艺气质。简约而不会过于单调,搭配出街,穿着十分舒适。 ## 使用自己的数据集 -修改 `train.sh` 和 `evaluate.sh` 中的 `train_file`、`validation_file`和`test_file`为你自己的json格式数据集路径,并将`prompt_column`和`response_column`改为json文件中输入文本和输出文本对应的key。 +修改 `train.sh` 和 `evaluate.sh` 中的 `train_file`、`validation_file`和`test_file`为你自己的 JSON 格式数据集路径,并将 `prompt_column` 和 `response_column` 改为 JSON 文件中输入文本和输出文本对应的 KEY。 ## 引用 From 4c923c4ed769ee0c1bcd6035c49164412c604cac Mon Sep 17 00:00:00 2001 From: Aohan Zeng Date: Fri, 31 Mar 2023 12:21:09 +0800 Subject: [PATCH 021/110] Update p-tuning-v2 --- README.md | 31 +++++++++++++++---------------- 1 file changed, 15 insertions(+), 16 deletions(-) diff --git a/README.md b/README.md index 2c73467..33e6c29 100644 --- a/README.md +++ b/README.md @@ -7,16 +7,16 @@ ## 介绍 ChatGLM-6B 是一个开源的、支持中英双语的对话语言模型,基于 [General Language Model (GLM)](https://github.com/THUDM/GLM) 架构,具有 62 亿参数。结合模型量化技术,用户可以在消费级的显卡上进行本地部署(INT4 量化级别下最低只需 6GB 显存)。 -ChatGLM-6B 使用了和 ChatGPT 相似的技术,针对中文问答和对话进行了优化。经过约 1T 标识符的中英双语训练,辅以监督微调、反馈自助、人类反馈强化学习等技术的加持,62 亿参数的 ChatGLM-6B 已经能生成相当符合人类偏好的回答。同时实现了基于P-Tuning v2的[模型微调](ptuning/README.md)(INT4量化级别下最低只需 7GB 显存)。更多信息请参考我们的[博客](https://chatglm.cn/blog)。 +ChatGLM-6B 使用了和 ChatGPT 相似的技术,针对中文问答和对话进行了优化。经过约 1T 标识符的中英双语训练,辅以监督微调、反馈自助、人类反馈强化学习等技术的加持,62 亿参数的 ChatGLM-6B 已经能生成相当符合人类偏好的回答,更多信息请参考我们的[博客](https://chatglm.cn/blog)。此外,为了方便下游开发者针对自己的应用场景定制模型,我们同时实现了基于 P-Tuning v2 的[高效参数微调方法](ptuning/README.md),INT4 量化级别下最低只需 7GB 显存即可启动微调。 -不过,由于 ChatGLM-6B 的规模较小,目前已知其具有相当多的[**局限性**](#局限性),如事实性/数学逻辑错误,可能生成有害/有偏见内容,较弱的上下文能力,自我认知混乱,以及对英文指示生成与中文指示完全矛盾的内容。请大家在使用前了解这些问题,以免产生误解。更大的基于1300亿参数 [GLM-130B](https://github.com/THUDM/GLM-130B) 的 ChatGLM 正在内测开发中。 +不过,由于 ChatGLM-6B 的规模较小,目前已知其具有相当多的[**局限性**](#局限性),如事实性/数学逻辑错误,可能生成有害/有偏见内容,较弱的上下文能力,自我认知混乱,以及对英文指示生成与中文指示完全矛盾的内容。请大家在使用前了解这些问题,以免产生误解。更大的基于 1300 亿参数 
[GLM-130B](https://github.com/THUDM/GLM-130B) 的 ChatGLM 正在内测开发中。 *Read this in [English](README_en.md).* ## 更新信息 -**[2023/03/31]** 增加基于 P-Tuning-v2 的微调实现,最低只需 8GB 显存即可进行模型微调。详见[模型微调](ptuning/README.md)。 +**[2023/03/31]** 增加基于 P-Tuning-v2 的高效参数微调实现,INT4 量化级别下最低只需 7GB 显存即可进行模型微调。详见[高效参数微调方法](ptuning/README.md)。 -**[2023/03/23]** 增加API部署(感谢 [@LemonQu-GIT](https://github.com/LemonQu-GIT))。增加Embedding量化模型[ChatGLM-6B-INT4-QE](https://huggingface.co/THUDM/chatglm-6b-int4-qe)。增加对基于Apple Silicon的Mac上GPU加速的支持。 +**[2023/03/23]** 增加 API 部署(感谢 [@LemonQu-GIT](https://github.com/LemonQu-GIT))。增加 Embedding 量化模型 [ChatGLM-6B-INT4-QE](https://huggingface.co/THUDM/chatglm-6b-int4-qe)。增加配备 Apple Silicon 芯片的 Mac 上 GPU 加速的支持。 **[2023/03/19]** 增加流式输出接口 `stream_chat`,已更新到网页版和命令行 Demo。修复输出中的中文标点。增加量化后的模型 [ChatGLM-6B-INT4](https://huggingface.co/THUDM/chatglm-6b-int4) @@ -24,12 +24,11 @@ ChatGLM-6B 使用了和 ChatGPT 相似的技术,针对中文问答和对话进 ### 硬件需求 -| **量化等级** | **最低 GPU 显存** | -| -------------- | ----------------- | -| FP16(无量化) | 13 GB | -| INT8 | 10 GB | -| INT4 | 6 GB | - +| **量化等级** | **最低 GPU 显存**(推理) | **最低 GPU 显存**(高效参数微调) | +| -------------- | ------------------------- | --------------------------------- | +| FP16(无量化) | 13 GB | 14 GB | +| INT8 | 10 GB | 11 GB | +| INT4 | 6 GB | 7 GB | ### 环境安装 使用 pip 安装依赖:`pip install -r requirements.txt`,其中 `transformers` 库版本推荐为 `4.26.1`,但理论上不低于 `4.23.1` 即可。 @@ -59,7 +58,7 @@ ChatGLM-6B 使用了和 ChatGPT 相似的技术,针对中文问答和对话进 如果这些方法无法帮助你入睡,你可以考虑咨询医生或睡眠专家,寻求进一步的建议。 ``` -完整的模型实现可以在 [Hugging Face Hub](https://huggingface.co/THUDM/chatglm-6b) 上查看。如果你从 Hugging Face Hub 上下载checkpoint的速度较慢,也可以从[这里](https://cloud.tsinghua.edu.cn/d/fb9f16d6dc8f482596c2/)手动下载。 +完整的模型实现可以在 [Hugging Face Hub](https://huggingface.co/THUDM/chatglm-6b) 上查看。如果你从 Hugging Face Hub 上下载 checkpoint 的速度较慢,也可以从[这里](https://cloud.tsinghua.edu.cn/d/fb9f16d6dc8f482596c2/)手动下载。 ### Demo @@ -94,14 +93,14 @@ python web_demo.py python cli_demo.py ``` -程序会在命令行中进行交互式的对话,在命令行中输入指示并回车即可生成回复,输入`clear`可以清空对话历史,输入`stop`终止程序。 +程序会在命令行中进行交互式的对话,在命令行中输入指示并回车即可生成回复,输入 `clear` 可以清空对话历史,输入 `stop` 终止程序。 ### API部署 -首先需要安装额外的依赖`pip install fastapi uvicorn`,然后运行仓库中的[api.py](api.py): +首先需要安装额外的依赖 `pip install fastapi uvicorn`,然后运行仓库中的 [api.py](api.py): ```shell python api.py ``` -默认部署在本地的8000端口,通过POST方法进行调用 +默认部署在本地的 8000 端口,通过 POST 方法进行调用 ```shell curl -X POST "http://127.0.0.1:8000" \ -H 'Content-Type: application/json' \ @@ -167,7 +166,7 @@ model = AutoModel.from_pretrained("your local path", trust_remote_code=True).hal ``` 即可使用在 Mac 上使用 GPU 加速模型推理。 -## 模型微调 +## 高效参数微调 详见 [ptuning/README.md](ptuning/README.md)。 ## ChatGLM-6B 示例 @@ -266,7 +265,7 @@ model = AutoModel.from_pretrained("your local path", trust_remote_code=True).hal 以下是部分针对本项目的教程/文档: * [Windows部署文档](https://github.com/ZhangErling/ChatGLM-6B/blob/main/deployment_windows.md) -如果你有其他好的项目/教程的话,欢迎参照上述格式添加到README中并提出 [PR](https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/proposing-changes-to-your-work-with-pull-requests/creating-a-pull-request-from-a-fork). 
+如果你有其他好的项目/教程的话,欢迎参照上述格式添加到 README 中并提出 [Pull Request](https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/proposing-changes-to-your-work-with-pull-requests/creating-a-pull-request-from-a-fork)。 ## 引用 From 9853cd2c9777b2e2289dc65371c430e918ff0118 Mon Sep 17 00:00:00 2001 From: Aohan Zeng Date: Fri, 31 Mar 2023 12:26:09 +0800 Subject: [PATCH 022/110] Update README.md --- ptuning/README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ptuning/README.md b/ptuning/README.md index 14011f0..a9b7f41 100644 --- a/ptuning/README.md +++ b/ptuning/README.md @@ -27,9 +27,9 @@ ADGEN 数据集任务为根据输入(content)生成一段广告词(summary ```shell bash train.sh ``` -`train.sh` 中的`PRE_SEQ_LEN`和 `LR` 分别是 soft prompt 长度和训练的学习率,可以进行调节以取得最佳的效果。 +`train.sh` 中的 `PRE_SEQ_LEN` 和 `LR` 分别是 soft prompt 长度和训练的学习率,可以进行调节以取得最佳的效果。P-Tuning-v2 方法会冻结全部的模型参数,可通过调整 `quantization_bit` 来被原始模型的量化等级,不加此选项则为 FP16 精度加载。 -在默认配置 `per_device_train_batch_size=1`, `gradient_accumulation_steps=16` 下,一次训练迭代会以 1 的批处理大小进行 16 次累加的前后向传播,等效为 16 的总批处理大小,此时最低只需 6.7G 显存。若想在同等批处理大小下提升训练效率,可在二者乘积不变的情况下,加大 `per_device_train_batch_size` 的值,但也会带来更多的显存消耗,请根据实际情况酌情调整。 +在默认配置 `quantization_bit=4`、`per_device_train_batch_size=1`、`gradient_accumulation_steps=16` 下,INT4 的模型参数被冻结,一次训练迭代会以 1 的批处理大小进行 16 次累加的前后向传播,等效为 16 的总批处理大小,此时最低只需 6.7G 显存。若想在同等批处理大小下提升训练效率,可在二者乘积不变的情况下,加大 `per_device_train_batch_size` 的值,但也会带来更多的显存消耗,请根据实际情况酌情调整。 ### 推理 From ba93cafa7d828966a2ca4c86b229afeea3381ab8 Mon Sep 17 00:00:00 2001 From: Aohan Zeng Date: Fri, 31 Mar 2023 12:29:39 +0800 Subject: [PATCH 023/110] Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 33e6c29..60ee417 100644 --- a/README.md +++ b/README.md @@ -27,7 +27,7 @@ ChatGLM-6B 使用了和 ChatGPT 相似的技术,针对中文问答和对话进 | **量化等级** | **最低 GPU 显存**(推理) | **最低 GPU 显存**(高效参数微调) | | -------------- | ------------------------- | --------------------------------- | | FP16(无量化) | 13 GB | 14 GB | -| INT8 | 10 GB | 11 GB | +| INT8 | 8 GB | 9 GB | | INT4 | 6 GB | 7 GB | ### 环境安装 From 7e84262ddc342798a04ad034fee75ad3432d75e3 Mon Sep 17 00:00:00 2001 From: Shaw Date: Fri, 31 Mar 2023 14:13:51 +0800 Subject: [PATCH 024/110] Update README.md --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 60ee417..622a316 100644 --- a/README.md +++ b/README.md @@ -14,7 +14,7 @@ ChatGLM-6B 使用了和 ChatGPT 相似的技术,针对中文问答和对话进 *Read this in [English](README_en.md).* ## 更新信息 -**[2023/03/31]** 增加基于 P-Tuning-v2 的高效参数微调实现,INT4 量化级别下最低只需 7GB 显存即可进行模型微调。详见[高效参数微调方法](ptuning/README.md)。 +**[2023/03/31]** 增加基于 [P-Tuning-v2](https://github.com/THUDM/P-tuning-v2) 的高效参数微调实现,INT4 量化级别下最低只需 7GB 显存即可进行模型微调。详见[高效参数微调方法](ptuning/README.md)。 **[2023/03/23]** 增加 API 部署(感谢 [@LemonQu-GIT](https://github.com/LemonQu-GIT))。增加 Embedding 量化模型 [ChatGLM-6B-INT4-QE](https://huggingface.co/THUDM/chatglm-6b-int4-qe)。增加配备 Apple Silicon 芯片的 Mac 上 GPU 加速的支持。 @@ -167,7 +167,7 @@ model = AutoModel.from_pretrained("your local path", trust_remote_code=True).hal 即可使用在 Mac 上使用 GPU 加速模型推理。 ## 高效参数微调 -详见 [ptuning/README.md](ptuning/README.md)。 +基于 [P-tuning v2](https://github.com/THUDM/P-tuning-v2) 的高效参数微调。具体使用方法详见 [ptuning/README.md](ptuning/README.md)。 ## ChatGLM-6B 示例 From fdc2c7f70d00b7ce6ca098934b697e31fd8be8fe Mon Sep 17 00:00:00 2001 From: Shaw Date: Fri, 31 Mar 2023 14:48:55 +0800 Subject: [PATCH 025/110] Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 
622a316..6e8b72a 100644 --- a/README.md +++ b/README.md @@ -7,7 +7,7 @@ ## 介绍 ChatGLM-6B 是一个开源的、支持中英双语的对话语言模型,基于 [General Language Model (GLM)](https://github.com/THUDM/GLM) 架构,具有 62 亿参数。结合模型量化技术,用户可以在消费级的显卡上进行本地部署(INT4 量化级别下最低只需 6GB 显存)。 -ChatGLM-6B 使用了和 ChatGPT 相似的技术,针对中文问答和对话进行了优化。经过约 1T 标识符的中英双语训练,辅以监督微调、反馈自助、人类反馈强化学习等技术的加持,62 亿参数的 ChatGLM-6B 已经能生成相当符合人类偏好的回答,更多信息请参考我们的[博客](https://chatglm.cn/blog)。此外,为了方便下游开发者针对自己的应用场景定制模型,我们同时实现了基于 P-Tuning v2 的[高效参数微调方法](ptuning/README.md),INT4 量化级别下最低只需 7GB 显存即可启动微调。 +ChatGLM-6B 使用了和 ChatGPT 相似的技术,针对中文问答和对话进行了优化。经过约 1T 标识符的中英双语训练,辅以监督微调、反馈自助、人类反馈强化学习等技术的加持,62 亿参数的 ChatGLM-6B 已经能生成相当符合人类偏好的回答,更多信息请参考我们的[博客](https://chatglm.cn/blog)。此外,为了方便下游开发者针对自己的应用场景定制模型,我们同时实现了基于 [P-Tuning v2](https://github.com/THUDM/P-tuning-v2) 的高效参数微调方法 [(使用指南)](ptuning/README.md) ,INT4 量化级别下最低只需 7GB 显存即可启动微调。 不过,由于 ChatGLM-6B 的规模较小,目前已知其具有相当多的[**局限性**](#局限性),如事实性/数学逻辑错误,可能生成有害/有偏见内容,较弱的上下文能力,自我认知混乱,以及对英文指示生成与中文指示完全矛盾的内容。请大家在使用前了解这些问题,以免产生误解。更大的基于 1300 亿参数 [GLM-130B](https://github.com/THUDM/GLM-130B) 的 ChatGLM 正在内测开发中。 From c206e7d9ad2911d1f7fb0294110b7ef361d41a58 Mon Sep 17 00:00:00 2001 From: duzx16 Date: Fri, 31 Mar 2023 15:18:21 +0800 Subject: [PATCH 026/110] Update requirements.txt --- README.md | 2 +- README_en.md | 2 +- ptuning/README.md | 2 +- requirements.txt | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index 2c73467..9e072a0 100644 --- a/README.md +++ b/README.md @@ -32,7 +32,7 @@ ChatGLM-6B 使用了和 ChatGPT 相似的技术,针对中文问答和对话进 ### 环境安装 -使用 pip 安装依赖:`pip install -r requirements.txt`,其中 `transformers` 库版本推荐为 `4.26.1`,但理论上不低于 `4.23.1` 即可。 +使用 pip 安装依赖:`pip install -r requirements.txt`,其中 `transformers` 库版本推荐为 `4.27.1`,但理论上不低于 `4.23.1` 即可。 ### 代码调用 diff --git a/README_en.md b/README_en.md index b5b4b62..d5c05bb 100644 --- a/README_en.md +++ b/README_en.md @@ -32,7 +32,7 @@ If you have other good projects, please refer to the above format to add to READ ### Environment Setup -Install the requirements with pip: `pip install -r requirements.txt`. `transformers` library version is recommended to be `4.26.1`, but theoretically any version no lower than `4.23.1` is acceptable. +Install the requirements with pip: `pip install -r requirements.txt`. `transformers` library version is recommended to be `4.27.1`, but theoretically any version no lower than `4.23.1` is acceptable. 
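Given the `4.27.1` pin above (with `4.23.1` as the theoretical floor), a small sanity check before launching the demos could look like this; it is a sketch assuming the `packaging` package is available, which pip-based installs usually provide:

```python
import transformers
from packaging import version

installed = version.parse(transformers.__version__)
# 4.27.1 is the recommended version; >= 4.23.1 should work in theory
assert installed >= version.parse("4.23.1"), f"transformers {installed} is too old"
```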
### Usage diff --git a/ptuning/README.md b/ptuning/README.md index a4097f6..4434e3a 100644 --- a/ptuning/README.md +++ b/ptuning/README.md @@ -4,7 +4,7 @@ 下面以 [ADGEN](https://aclanthology.org/D19-1321.pdf) (广告生成) 数据集为例介绍代码的使用方法。 ## 软件依赖 -除 ChatGLM-6B 的依赖之外,还需要按照以下依赖 +运行微调需要4.27.1版本的`transformers`。除 ChatGLM-6B 的依赖之外,还需要按照以下依赖 ``` pip install rouge_chinese nltk jieba datasets ``` diff --git a/requirements.txt b/requirements.txt index 2948480..00707fe 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,5 +1,5 @@ protobuf>=3.19.5,<3.20.1 -transformers==4.26.1 +transformers==4.27.1 icetk cpm_kernels torch>=1.10 From 08d880141d7450099a579e00877e3318be26da5f Mon Sep 17 00:00:00 2001 From: duzx16 Date: Fri, 31 Mar 2023 16:32:34 +0800 Subject: [PATCH 027/110] Fix revision for loading model --- ptuning/main.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ptuning/main.py b/ptuning/main.py index d82fccc..5b23234 100644 --- a/ptuning/main.py +++ b/ptuning/main.py @@ -110,7 +110,7 @@ def main(): tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, trust_remote_code=True) - model = AutoModel.from_pretrained(model_args.model_name_or_path, config=config, revision=True, trust_remote_code=True) + model = AutoModel.from_pretrained(model_args.model_name_or_path, config=config, trust_remote_code=True) model = model.half() if model_args.quantization_bit is not None: From 893706a82d5529e6a99861c31335c99b440f8842 Mon Sep 17 00:00:00 2001 From: rainatam Date: Fri, 31 Mar 2023 18:12:04 +0800 Subject: [PATCH 028/110] Update train script --- ptuning/arguments.py | 4 ++-- ptuning/train.sh | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/ptuning/arguments.py b/ptuning/arguments.py index 1c61f97..95d766f 100644 --- a/ptuning/arguments.py +++ b/ptuning/arguments.py @@ -203,8 +203,8 @@ class DataTrainingArguments: def __post_init__(self): - if self.dataset_name is None and self.train_file is None and self.validation_file is None: - raise ValueError("Need either a dataset name or a training/validation file.") + if self.dataset_name is None and self.train_file is None and self.validation_file is None and self.test_file is None: + raise ValueError("Need either a dataset name or a training/validation/test file.") else: if self.train_file is not None: extension = self.train_file.split(".")[-1] diff --git a/ptuning/train.sh b/ptuning/train.sh index 1d03a25..3189829 100644 --- a/ptuning/train.sh +++ b/ptuning/train.sh @@ -9,7 +9,7 @@ CUDA_VISIBLE_DEVICES=0 python3 main.py \ --response_column summary \ --overwrite_cache \ --model_name_or_path THUDM/chatglm-6b \ - --output_dir output/adgen-chatglm-6b-pt-$PRE_SEQ_LEN-$LR-dev \ + --output_dir output/adgen-chatglm-6b-pt-$PRE_SEQ_LEN-$LR \ --overwrite_output_dir \ --max_source_length 64 \ --max_target_length 64 \ From 73f4fe1ffe6af778796a2ed88882af5655062eb0 Mon Sep 17 00:00:00 2001 From: duzx16 Date: Fri, 31 Mar 2023 20:15:35 +0800 Subject: [PATCH 029/110] Add validation file name Use full prediction --- ptuning/evaluate.sh | 2 +- ptuning/train.sh | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/ptuning/evaluate.sh b/ptuning/evaluate.sh index db2a8c1..1217ceb 100644 --- a/ptuning/evaluate.sh +++ b/ptuning/evaluate.sh @@ -4,6 +4,7 @@ STEP=3000 CUDA_VISIBLE_DEVICES=0 python3 main.py \ --do_predict \ + --validation_file AdvertiseGen/dev.json \ --test_file AdvertiseGen/dev.json \ --overwrite_cache \ --prompt_column content \ @@ -15,6 +16,5 @@ CUDA_VISIBLE_DEVICES=0 python3 main.py \ --max_target_length 
64 \ --per_device_eval_batch_size 1 \ --predict_with_generate \ - --max_predict_samples 10 \ --pre_seq_len $PRE_SEQ_LEN \ --quantization_bit 4 diff --git a/ptuning/train.sh b/ptuning/train.sh index 1d03a25..3189829 100644 --- a/ptuning/train.sh +++ b/ptuning/train.sh @@ -9,7 +9,7 @@ CUDA_VISIBLE_DEVICES=0 python3 main.py \ --response_column summary \ --overwrite_cache \ --model_name_or_path THUDM/chatglm-6b \ - --output_dir output/adgen-chatglm-6b-pt-$PRE_SEQ_LEN-$LR-dev \ + --output_dir output/adgen-chatglm-6b-pt-$PRE_SEQ_LEN-$LR \ --overwrite_output_dir \ --max_source_length 64 \ --max_target_length 64 \ From 7436f0840f16136266e347157a45f7c74f390d15 Mon Sep 17 00:00:00 2001 From: duzx16 Date: Fri, 31 Mar 2023 22:55:36 +0800 Subject: [PATCH 030/110] Add todo --- ptuning/README.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/ptuning/README.md b/ptuning/README.md index dddae50..6ce6384 100644 --- a/ptuning/README.md +++ b/ptuning/README.md @@ -59,6 +59,10 @@ bash evaluate.sh ## 使用自己的数据集 修改 `train.sh` 和 `evaluate.sh` 中的 `train_file`、`validation_file`和`test_file`为你自己的 JSON 格式数据集路径,并将 `prompt_column` 和 `response_column` 改为 JSON 文件中输入文本和输出文本对应的 KEY。 +## TODO +* [ ] Support for chat data +* [ ] Support for full finetuning + ## 引用 ``` From 7a67ddd61f65e4326c96c7c02ee48027151abccb Mon Sep 17 00:00:00 2001 From: maybeluo Date: Sat, 1 Apr 2023 00:34:19 +0800 Subject: [PATCH 031/110] write generated result with utf-8 --- ptuning/main.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/ptuning/main.py b/ptuning/main.py index 5b23234..d1c4977 100644 --- a/ptuning/main.py +++ b/ptuning/main.py @@ -374,9 +374,10 @@ def main(): ) labels = [label.strip() for label in labels] output_prediction_file = os.path.join(training_args.output_dir, "generated_predictions.txt") - with open(output_prediction_file, "w") as writer: + with open(output_prediction_file, "w", encoding="utf-8") as writer: for p, l in zip(predictions, labels): - writer.write(json.dumps({"labels": l, "predict": p}, ensure_ascii=False)) + res = json.dumps({"labels": l, "predict": p}, ensure_ascii=False) + writer.write(f"{res}\n") return results @@ -386,4 +387,4 @@ def _mp_fn(index): if __name__ == "__main__": - main() \ No newline at end of file + main() From acd4adcb651211bcc4b01c86e92ed09de2edf413 Mon Sep 17 00:00:00 2001 From: Yam Date: Sat, 1 Apr 2023 11:17:35 +0800 Subject: [PATCH 032/110] Update README.md --- README.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/README.md b/README.md index 73ea77f..1f82c03 100644 --- a/README.md +++ b/README.md @@ -169,6 +169,8 @@ model = AutoModel.from_pretrained("your local path", trust_remote_code=True).hal ## 高效参数微调 基于 [P-tuning v2](https://github.com/THUDM/P-tuning-v2) 的高效参数微调。具体使用方法详见 [ptuning/README.md](ptuning/README.md)。 + + ## ChatGLM-6B 示例 以下是一些使用 `web_demo.py` 得到的示例截图。更多 ChatGLM-6B 的可能,等待你来探索发现! 
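The UTF-8 fix to `main.py` in the patch above writes `generated_predictions.txt` as one JSON object per line; a minimal sketch for loading those results back, where the output path is an assumed example rather than a fixed location:

```python
import json

records = []
with open("output/adgen-chatglm-6b-pt-128-2e-2/generated_predictions.txt", encoding="utf-8") as f:
    for line in f:
        records.append(json.loads(line))  # each line holds {"labels": ..., "predict": ...}

print(records[0]["predict"])
```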
@@ -261,6 +263,7 @@ model = AutoModel.from_pretrained("your local path", trust_remote_code=True).hal 以下是部分基于本仓库开发的开源项目: * [ChatGLM-MNN](https://github.com/wangzhaode/ChatGLM-MNN): 一个基于 MNN 的 ChatGLM-6B C++ 推理实现,支持根据显存大小自动分配计算任务给 GPU 和 CPU * [ChatGLM-Tuning](https://github.com/mymusise/ChatGLM-Tuning): 基于 LoRA 对 ChatGLM-6B 进行微调 +* [Humanable ChatGLM/GPT Fine-tuning | ChatGLM 微调](https://github.com/hscspring/hcgf):基于 LoRA 进行微调 以下是部分针对本项目的教程/文档: * [Windows部署文档](https://github.com/ZhangErling/ChatGLM-6B/blob/main/deployment_windows.md) From 6dd6f7c7b53604fb8eee2aa27509619752bf3133 Mon Sep 17 00:00:00 2001 From: Ashing Zheng Date: Sat, 1 Apr 2023 13:18:28 +0800 Subject: [PATCH 033/110] chore(cli_demo): strip the query input for `stop` or `clear` command Remove the query space to determine whether it is a stop command or a clear command --- cli_demo.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cli_demo.py b/cli_demo.py index 1c3ff2b..da80fff 100644 --- a/cli_demo.py +++ b/cli_demo.py @@ -31,9 +31,9 @@ def main(): print("欢迎使用 ChatGLM-6B 模型,输入内容即可进行对话,clear 清空对话历史,stop 终止程序") while True: query = input("\n用户:") - if query == "stop": + if query.strip() == "stop": break - if query == "clear": + if query.strip() == "clear": history = [] os.system(clear_command) print("欢迎使用 ChatGLM-6B 模型,输入内容即可进行对话,clear 清空对话历史,stop 终止程序") From e8aba3d3f3e77c6f95211a355f44883b1d213794 Mon Sep 17 00:00:00 2001 From: duzx16 Date: Sat, 1 Apr 2023 23:08:44 +0800 Subject: [PATCH 034/110] Add project links --- README.md | 27 +++++++++++++++------------ 1 file changed, 15 insertions(+), 12 deletions(-) diff --git a/README.md b/README.md index 1f82c03..9d5386e 100644 --- a/README.md +++ b/README.md @@ -7,7 +7,9 @@ ## 介绍 ChatGLM-6B 是一个开源的、支持中英双语的对话语言模型,基于 [General Language Model (GLM)](https://github.com/THUDM/GLM) 架构,具有 62 亿参数。结合模型量化技术,用户可以在消费级的显卡上进行本地部署(INT4 量化级别下最低只需 6GB 显存)。 -ChatGLM-6B 使用了和 ChatGPT 相似的技术,针对中文问答和对话进行了优化。经过约 1T 标识符的中英双语训练,辅以监督微调、反馈自助、人类反馈强化学习等技术的加持,62 亿参数的 ChatGLM-6B 已经能生成相当符合人类偏好的回答,更多信息请参考我们的[博客](https://chatglm.cn/blog)。此外,为了方便下游开发者针对自己的应用场景定制模型,我们同时实现了基于 [P-Tuning v2](https://github.com/THUDM/P-tuning-v2) 的高效参数微调方法 [(使用指南)](ptuning/README.md) ,INT4 量化级别下最低只需 7GB 显存即可启动微调。 +ChatGLM-6B 使用了和 ChatGPT 相似的技术,针对中文问答和对话进行了优化。经过约 1T 标识符的中英双语训练,辅以监督微调、反馈自助、人类反馈强化学习等技术的加持,62 亿参数的 ChatGLM-6B 已经能生成相当符合人类偏好的回答,更多信息请参考我们的[博客](https://chatglm.cn/blog)。 + +为了方便下游开发者针对自己的应用场景定制模型,我们同时实现了基于 [P-Tuning v2](https://github.com/THUDM/P-tuning-v2) 的高效参数微调方法 [(使用指南)](ptuning/README.md) ,INT4 量化级别下最低只需 7GB 显存即可启动微调。 不过,由于 ChatGLM-6B 的规模较小,目前已知其具有相当多的[**局限性**](#局限性),如事实性/数学逻辑错误,可能生成有害/有偏见内容,较弱的上下文能力,自我认知混乱,以及对英文指示生成与中文指示完全矛盾的内容。请大家在使用前了解这些问题,以免产生误解。更大的基于 1300 亿参数 [GLM-130B](https://github.com/THUDM/GLM-130B) 的 ChatGLM 正在内测开发中。 @@ -20,6 +22,18 @@ ChatGLM-6B 使用了和 ChatGPT 相似的技术,针对中文问答和对话进 **[2023/03/19]** 增加流式输出接口 `stream_chat`,已更新到网页版和命令行 Demo。修复输出中的中文标点。增加量化后的模型 [ChatGLM-6B-INT4](https://huggingface.co/THUDM/chatglm-6b-int4) +## 友情链接 +以下是部分基于本仓库开发的开源项目: +* [ChatGLM-MNN](https://github.com/wangzhaode/ChatGLM-MNN): 一个基于 MNN 的 ChatGLM-6B C++ 推理实现,支持根据显存大小自动分配计算任务给 GPU 和 CPU +* [ChatGLM-Tuning](https://github.com/mymusise/ChatGLM-Tuning): 基于 LoRA 对 ChatGLM-6B 进行微调。类似的项目还包括 [Humanable ChatGLM/GPT Fine-tuning | ChatGLM 微调](https://github.com/hscspring/hcgf) +* [langchain-ChatGLM](https://github.com/imClumsyPanda/langchain-ChatGLM):基于本地知识的 ChatGLM 应用,基于LangChain +* [闻达](https://github.com/l15y/wenda):大型语言模型调用平台,基于 ChatGLM-6B 实现了类 ChatPDF 功能 + +以下是部分针对本项目的教程/文档: +* 
[Windows部署文档](https://github.com/ZhangErling/ChatGLM-6B/blob/main/deployment_windows.md) + +如果你有其他好的项目/教程的话,欢迎参照上述格式添加到 README 中并提出 [Pull Request](https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/proposing-changes-to-your-work-with-pull-requests/creating-a-pull-request-from-a-fork)。 + ## 使用方式 ### 硬件需求 @@ -259,17 +273,6 @@ model = AutoModel.from_pretrained("your local path", trust_remote_code=True).hal 本仓库的代码依照 [Apache-2.0](LICENSE) 协议开源,ChatGLM-6B 模型的权重的使用则需要遵循 [Model License](MODEL_LICENSE)。 -## 友情链接 -以下是部分基于本仓库开发的开源项目: -* [ChatGLM-MNN](https://github.com/wangzhaode/ChatGLM-MNN): 一个基于 MNN 的 ChatGLM-6B C++ 推理实现,支持根据显存大小自动分配计算任务给 GPU 和 CPU -* [ChatGLM-Tuning](https://github.com/mymusise/ChatGLM-Tuning): 基于 LoRA 对 ChatGLM-6B 进行微调 -* [Humanable ChatGLM/GPT Fine-tuning | ChatGLM 微调](https://github.com/hscspring/hcgf):基于 LoRA 进行微调 - -以下是部分针对本项目的教程/文档: -* [Windows部署文档](https://github.com/ZhangErling/ChatGLM-6B/blob/main/deployment_windows.md) - -如果你有其他好的项目/教程的话,欢迎参照上述格式添加到 README 中并提出 [Pull Request](https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/proposing-changes-to-your-work-with-pull-requests/creating-a-pull-request-from-a-fork)。 - ## 引用 如果你觉得我们的工作有帮助的话,请考虑引用下列论文 From 4371f7a57285d94f6c22827418f7cebe2983c0b5 Mon Sep 17 00:00:00 2001 From: duzx16 Date: Sat, 1 Apr 2023 23:09:26 +0800 Subject: [PATCH 035/110] Add padding for evaluation data --- ptuning/main.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ptuning/main.py b/ptuning/main.py index d1c4977..1776055 100644 --- a/ptuning/main.py +++ b/ptuning/main.py @@ -147,7 +147,7 @@ def main(): targets.append(examples[response_column][i]) inputs = [prefix + inp for inp in inputs] - model_inputs = tokenizer(inputs, max_length=data_args.max_source_length, truncation=True) + model_inputs = tokenizer(inputs, max_length=data_args.max_source_length, truncation=True, padding=True) labels = tokenizer(text_target=targets, max_length=max_target_length, truncation=True) if data_args.ignore_pad_token_for_loss: From 5f8f6bac7b2ca3212ba913aa4cbd85aa2313f632 Mon Sep 17 00:00:00 2001 From: duzx16 Date: Sun, 2 Apr 2023 00:35:40 +0800 Subject: [PATCH 036/110] Change quantization instruction --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 9d5386e..a286cee 100644 --- a/README.md +++ b/README.md @@ -136,7 +136,7 @@ curl -X POST "http://127.0.0.1:8000" \ ```python # 按需修改,目前只支持 4/8 bit 量化 -model = AutoModel.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True).half().quantize(4).cuda() +model = AutoModel.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True).quantize(4).half().cuda() ``` 进行 2 至 3 轮对话后,8-bit 量化下 GPU 显存占用约为 10GB,4-bit 量化下仅需 6GB 占用。随着对话轮数的增多,对应消耗显存也随之增长,由于采用了相对位置编码,理论上 ChatGLM-6B 支持无限长的 context-length,但总长度超过 2048(训练长度)后性能会逐渐下降。 From ca43864f39eaa6a19493ebd812bff25c44c9100e Mon Sep 17 00:00:00 2001 From: duzx16 Date: Sun, 2 Apr 2023 00:35:40 +0800 Subject: [PATCH 037/110] Change quantization instruction --- README.md | 2 +- ptuning/main.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 9d5386e..a286cee 100644 --- a/README.md +++ b/README.md @@ -136,7 +136,7 @@ curl -X POST "http://127.0.0.1:8000" \ ```python # 按需修改,目前只支持 4/8 bit 量化 -model = AutoModel.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True).half().quantize(4).cuda() +model = AutoModel.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True).quantize(4).half().cuda() ``` 进行 2 至 3 轮对话后,8-bit 量化下 GPU 
显存占用约为 10GB,4-bit 量化下仅需 6GB 占用。随着对话轮数的增多,对应消耗显存也随之增长,由于采用了相对位置编码,理论上 ChatGLM-6B 支持无限长的 context-length,但总长度超过 2048(训练长度)后性能会逐渐下降。 diff --git a/ptuning/main.py b/ptuning/main.py index 1776055..020c514 100644 --- a/ptuning/main.py +++ b/ptuning/main.py @@ -112,10 +112,10 @@ def main(): model = AutoModel.from_pretrained(model_args.model_name_or_path, config=config, trust_remote_code=True) - model = model.half() if model_args.quantization_bit is not None: print(f"Quantized to {model_args.quantization_bit} bit") model = model.quantize(model_args.quantization_bit) + model = model.half() model.transformer.prefix_encoder.float() prefix = data_args.source_prefix if data_args.source_prefix is not None else "" From c508f62b7009a5d5c0b390fb0e67f297dbcd014e Mon Sep 17 00:00:00 2001 From: duzx16 Date: Sun, 2 Apr 2023 01:59:07 +0800 Subject: [PATCH 038/110] Fix position_ids in prediction --- ptuning/trainer_seq2seq.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/ptuning/trainer_seq2seq.py b/ptuning/trainer_seq2seq.py index 0087786..518daa0 100644 --- a/ptuning/trainer_seq2seq.py +++ b/ptuning/trainer_seq2seq.py @@ -185,6 +185,8 @@ class Seq2SeqTrainer(Trainer): if "attention_mask" in inputs: gen_kwargs["attention_mask"] = inputs.get("attention_mask", None) + if "position_ids" in inputs: + gen_kwargs["position_ids"] = inputs.get("position_ids", None) if "global_attention_mask" in inputs: gen_kwargs["global_attention_mask"] = inputs.get("global_attention_mask", None) From 4227999d4c61f17bacf9d09272168cbd46f5b1f5 Mon Sep 17 00:00:00 2001 From: duzx16 Date: Sun, 2 Apr 2023 02:05:03 +0800 Subject: [PATCH 039/110] No padding in colloator --- ptuning/main.py | 1 + 1 file changed, 1 insertion(+) diff --git a/ptuning/main.py b/ptuning/main.py index 020c514..cd56e43 100644 --- a/ptuning/main.py +++ b/ptuning/main.py @@ -262,6 +262,7 @@ def main(): model=model, label_pad_token_id=label_pad_token_id, pad_to_multiple_of=None, + padding=False ) # Metric From 06038d8e3f51e58ed02d67aafc2c3795b556dc2e Mon Sep 17 00:00:00 2001 From: coderabbit214 <1157237955@qq.com> Date: Mon, 3 Apr 2023 09:43:45 +0800 Subject: [PATCH 040/110] =?UTF-8?q?bibliothecarius=EF=BC=9A=E5=BF=AB?= =?UTF-8?q?=E9=80=9F=E6=9E=84=E5=BB=BA=E6=9C=8D=E5=8A=A1=E4=BB=A5=E9=9B=86?= =?UTF-8?q?=E6=88=90=E6=82=A8=E7=9A=84=E6=9C=AC=E5=9C=B0=E6=95=B0=E6=8D=AE?= =?UTF-8?q?=E5=92=8CAI=E6=A8=A1=E5=9E=8B=EF=BC=8C=E6=94=AF=E6=8C=81ChatGLM?= =?UTF-8?q?=E7=AD=89=E6=9C=AC=E5=9C=B0=E5=8C=96=E6=A8=A1=E5=9E=8B=E6=8E=A5?= =?UTF-8?q?=E5=85=A5=E3=80=82?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index a286cee..81f3fdc 100644 --- a/README.md +++ b/README.md @@ -27,6 +27,7 @@ ChatGLM-6B 使用了和 ChatGPT 相似的技术,针对中文问答和对话进 * [ChatGLM-MNN](https://github.com/wangzhaode/ChatGLM-MNN): 一个基于 MNN 的 ChatGLM-6B C++ 推理实现,支持根据显存大小自动分配计算任务给 GPU 和 CPU * [ChatGLM-Tuning](https://github.com/mymusise/ChatGLM-Tuning): 基于 LoRA 对 ChatGLM-6B 进行微调。类似的项目还包括 [Humanable ChatGLM/GPT Fine-tuning | ChatGLM 微调](https://github.com/hscspring/hcgf) * [langchain-ChatGLM](https://github.com/imClumsyPanda/langchain-ChatGLM):基于本地知识的 ChatGLM 应用,基于LangChain +* [bibliothecarius](https://github.com/coderabbit214/bibliothecarius):快速构建服务以集成您的本地数据和AI模型,支持ChatGLM等本地化模型接入。 * [闻达](https://github.com/l15y/wenda):大型语言模型调用平台,基于 ChatGLM-6B 实现了类 ChatPDF 功能 以下是部分针对本项目的教程/文档: From ed9631a96be154f689ed8b32870d858cd1673998 Mon Sep 17 00:00:00 2001 From: duzx16 Date: Mon, 3 Apr 2023 15:23:32 +0800 
Subject: [PATCH 041/110] Add deploement for ptuning model --- ptuning/README.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/ptuning/README.md b/ptuning/README.md index 6ce6384..4978dc4 100644 --- a/ptuning/README.md +++ b/ptuning/README.md @@ -56,6 +56,9 @@ bash evaluate.sh * Output[微调前]: 类型#裙*版型#显瘦*风格#文艺*风格#简约*图案#印花*图案#撞色*裙下摆#压褶*裙长#连衣裙*裙领型#圆领 1\. 连衣裙:简约风格,裙长为膝盖以上,裙领型为圆领。2\. 裙下摆:压褶设计,使裙摆呈现出流畅的褶皱效果。3\. 裙领型:裙领型为圆领,使穿上连衣裙后更加有型。4\. 版型:采用显瘦设计,让连衣裙看起来更加苗条。5\. 风格:文艺风格,让连衣裙更加有内涵和品味。6\. 图案:印花设计,在连衣裙上印有独特的图案。7\. 撞色:采用撞色设计,让连衣裙在色彩上更加鲜明、富有层次感。 * Output[微调后]: 这是一款文艺范的连衣裙,以印花为元素,采用简约的印花,既能够突出文艺气质,又能够展现简约风。在印花的同时又有领子和裙摆的压褶设计,更加凸显文艺气质。简约而不会过于单调,搭配出街,穿着十分舒适。 +## 模型部署 +将对应的demo或代码中的`THUDM/chatglm-6b`换成经过 P-Tuning 微调之后 checkpoint 的地址(在示例中为 `./output/adgen-chatglm-6b-pt-8-1e-2/checkpoint-3000`)。注意,目前的微调还不支持多轮数据,所以只有对话第一轮的回复是经过微调的。 + ## 使用自己的数据集 修改 `train.sh` 和 `evaluate.sh` 中的 `train_file`、`validation_file`和`test_file`为你自己的 JSON 格式数据集路径,并将 `prompt_column` 和 `response_column` 改为 JSON 文件中输入文本和输出文本对应的 KEY。 From 5de005540878b057be34601084a5803dcec08d01 Mon Sep 17 00:00:00 2001 From: Qingsong Lv Date: Mon, 3 Apr 2023 11:27:28 +0000 Subject: [PATCH 042/110] fix finetune pad bug and add sat readme --- README.md | 1 + ptuning/main.py | 2 ++ 2 files changed, 3 insertions(+) diff --git a/README.md b/README.md index 81f3fdc..d75ede6 100644 --- a/README.md +++ b/README.md @@ -24,6 +24,7 @@ ChatGLM-6B 使用了和 ChatGPT 相似的技术,针对中文问答和对话进 ## 友情链接 以下是部分基于本仓库开发的开源项目: +* [SwissArmyTransformer](https://github.com/THUDM/SwissArmyTransformer): 一个Transformer统一编程框架,ChatGLM-6B已经在SAT中进行实现并可以进行P-tuning微调。 * [ChatGLM-MNN](https://github.com/wangzhaode/ChatGLM-MNN): 一个基于 MNN 的 ChatGLM-6B C++ 推理实现,支持根据显存大小自动分配计算任务给 GPU 和 CPU * [ChatGLM-Tuning](https://github.com/mymusise/ChatGLM-Tuning): 基于 LoRA 对 ChatGLM-6B 进行微调。类似的项目还包括 [Humanable ChatGLM/GPT Fine-tuning | ChatGLM 微调](https://github.com/hscspring/hcgf) * [langchain-ChatGLM](https://github.com/imClumsyPanda/langchain-ChatGLM):基于本地知识的 ChatGLM 应用,基于LangChain diff --git a/ptuning/main.py b/ptuning/main.py index cd56e43..112c9ca 100644 --- a/ptuning/main.py +++ b/ptuning/main.py @@ -187,6 +187,8 @@ def main(): pad_len = max_seq_length - len(input_ids) input_ids = input_ids + [tokenizer.pad_token_id] * pad_len labels = labels + [tokenizer.pad_token_id] * pad_len + if data_args.ignore_pad_token_for_loss: + labels = [(l if l != tokenizer.pad_token_id else -100) for l in labels] model_inputs["input_ids"].append(input_ids) model_inputs["labels"].append(labels) From 6148d6d6ac41a416846df1ebfa9b1341a1f69859 Mon Sep 17 00:00:00 2001 From: tuteng0915 Date: Mon, 3 Apr 2023 23:11:31 +0800 Subject: [PATCH 043/110] add web_demo3 --- .gitignore | 133 +++++++++++++++++++++++++++++++++++++++++++++++ requirements.txt | 1 + web_demo3.py | 69 ++++++++++++++++++++++++ 3 files changed, 203 insertions(+) create mode 100644 .gitignore create mode 100644 web_demo3.py diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..c3dd476 --- /dev/null +++ b/.gitignore @@ -0,0 +1,133 @@ +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +pip-wheel-metadata/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST +history/ + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. 
+*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +*.py,cover +.hypothesis/ +.pytest_cache/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 +db.sqlite3-journal + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# IPython +profile_default/ +ipython_config.py + +# pyenv +.python-version + +# pipenv +# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. +# However, in case of collaboration, if having platform-specific dependencies or dependencies +# having no cross-platform support, pipenv may install dependencies that don't work, or not +# install all needed dependencies. +#Pipfile.lock + +# PEP 582; used by e.g. github.com/David-OConnor/pyflow +__pypackages__/ + +# Celery stuff +celerybeat-schedule +celerybeat.pid + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ + +# Mac system file +model/ \ No newline at end of file diff --git a/requirements.txt b/requirements.txt index 00707fe..072d12c 100644 --- a/requirements.txt +++ b/requirements.txt @@ -4,3 +4,4 @@ icetk cpm_kernels torch>=1.10 gradio +mdtex2html \ No newline at end of file diff --git a/web_demo3.py b/web_demo3.py new file mode 100644 index 0000000..d6a62ec --- /dev/null +++ b/web_demo3.py @@ -0,0 +1,69 @@ +from transformers import AutoModel, AutoTokenizer +import gradio as gr +from typing import TYPE_CHECKING, Any, Callable, Dict, List, Tuple, Type +import mdtex2html + +tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) +model = AutoModel.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True).quantize(8).half().cuda() +model = model.eval() + +# MAX_TURNS = 20 +# MAX_BOXES = MAX_TURNS * 2 + +"""Override Chatbot.postprocess""" +def postprocess(self, y): + if y is None: + return [] + for i, (message, response) in enumerate(y): + y[i] = ( + None if message is None else mdtex2html.convert((message)), + None if response is None else mdtex2html.convert(response), + ) + return y +gr.Chatbot.postprocess = postprocess + + +def predict(input, chatbot, max_length, top_p, temperature, history): + chatbot.append((input, "")) + for response, history in model.stream_chat(tokenizer, input, history, max_length=max_length, top_p=top_p, + temperature=temperature): + chatbot[-1] = (input, response) + yield chatbot, history + +def reset_user_input(): + return gr.update(value='') + + +def reset_state(): + return [], [] + +with gr.Blocks() as demo: + gr.HTML("""
<h1 align="center">ChatGLM</h1>
""") + + with gr.Row(): + with gr.Column(scale=4): + chatbot = gr.Chatbot() + with gr.Row(): + with gr.Column(scale=12): + user_input = gr.Textbox(show_label=False, placeholder="Input...").style( + container=False) + with gr.Column(min_width=32, scale=1): + submitBtn = gr.Button("Submit", variant="primary") + with gr.Column(scale=1): + emptyBtn = gr.Button("Clear History") + max_length = gr.Slider(0, 4096, value=2048, step=1.0, label="Maximum length", interactive=True) + top_p = gr.Slider(0, 1, value=0.7, step=0.01, label="Top P", interactive=True) + temperature = gr.Slider(0, 1, value=0.95, step=0.01, label="Temperature", interactive=True) + + history = gr.State([]) + + user_input.submit(predict, [user_input, chatbot, max_length, top_p, temperature, history], [chatbot, history], show_progress=True) + user_input.submit(reset_user_input, [], [user_input]) + + submitBtn.click(predict, [user_input, chatbot, max_length, top_p, temperature, history], [chatbot, history], show_progress=True) + submitBtn.click(reset_user_input, [], [user_input]) + + emptyBtn.click(reset_state, outputs=[chatbot, history], show_progress=True) + + +demo.queue().launch(share=False, inbrowser=True) From ec069419becceac2d69a07149c07b9fc564e19db Mon Sep 17 00:00:00 2001 From: duzx16 Date: Mon, 3 Apr 2023 23:29:04 +0800 Subject: [PATCH 044/110] Add another web demo with Gradio --- web_demo3.py | 40 ++++++++++++++++++++++------------------ 1 file changed, 22 insertions(+), 18 deletions(-) diff --git a/web_demo3.py b/web_demo3.py index d6a62ec..203ba1f 100644 --- a/web_demo3.py +++ b/web_demo3.py @@ -4,22 +4,23 @@ from typing import TYPE_CHECKING, Any, Callable, Dict, List, Tuple, Type import mdtex2html tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) -model = AutoModel.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True).quantize(8).half().cuda() +model = AutoModel.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True).half().cuda() model = model.eval() -# MAX_TURNS = 20 -# MAX_BOXES = MAX_TURNS * 2 - """Override Chatbot.postprocess""" + + def postprocess(self, y): - if y is None: - return [] - for i, (message, response) in enumerate(y): - y[i] = ( - None if message is None else mdtex2html.convert((message)), - None if response is None else mdtex2html.convert(response), - ) - return y + if y is None: + return [] + for i, (message, response) in enumerate(y): + y[i] = ( + None if message is None else mdtex2html.convert((message)), + None if response is None else mdtex2html.convert(response), + ) + return y + + gr.Chatbot.postprocess = postprocess @@ -27,9 +28,10 @@ def predict(input, chatbot, max_length, top_p, temperature, history): chatbot.append((input, "")) for response, history in model.stream_chat(tokenizer, input, history, max_length=max_length, top_p=top_p, temperature=temperature): - chatbot[-1] = (input, response) + chatbot[-1] = (input, response) yield chatbot, history + def reset_user_input(): return gr.update(value='') @@ -37,6 +39,7 @@ def reset_user_input(): def reset_state(): return [], [] + with gr.Blocks() as demo: gr.HTML("""
<h1 align="center">ChatGLM</h1>
""") @@ -54,16 +57,17 @@ with gr.Blocks() as demo: max_length = gr.Slider(0, 4096, value=2048, step=1.0, label="Maximum length", interactive=True) top_p = gr.Slider(0, 1, value=0.7, step=0.01, label="Top P", interactive=True) temperature = gr.Slider(0, 1, value=0.95, step=0.01, label="Temperature", interactive=True) - + history = gr.State([]) - user_input.submit(predict, [user_input, chatbot, max_length, top_p, temperature, history], [chatbot, history], show_progress=True) + user_input.submit(predict, [user_input, chatbot, max_length, top_p, temperature, history], [chatbot, history], + show_progress=True) user_input.submit(reset_user_input, [], [user_input]) - submitBtn.click(predict, [user_input, chatbot, max_length, top_p, temperature, history], [chatbot, history], show_progress=True) + submitBtn.click(predict, [user_input, chatbot, max_length, top_p, temperature, history], [chatbot, history], + show_progress=True) submitBtn.click(reset_user_input, [], [user_input]) emptyBtn.click(reset_state, outputs=[chatbot, history], show_progress=True) - -demo.queue().launch(share=False, inbrowser=True) +demo.queue().launch(share=True, inbrowser=True) From 119caa15ef98de6faf3c66e82fa900f9b21b505c Mon Sep 17 00:00:00 2001 From: tuteng0915 Date: Mon, 3 Apr 2023 23:31:30 +0800 Subject: [PATCH 045/110] add parse_text --- web_demo3.py | 37 +++++++++++++++++++++++++++++++++++-- 1 file changed, 35 insertions(+), 2 deletions(-) diff --git a/web_demo3.py b/web_demo3.py index d6a62ec..80ffce9 100644 --- a/web_demo3.py +++ b/web_demo3.py @@ -23,11 +23,44 @@ def postprocess(self, y): gr.Chatbot.postprocess = postprocess +def parse_text(text): + """revise from https://github.com/GaiZhenbiao/ChuanhuChatGPT/""" + lines = text.split("\n") + lines = [line for line in lines if line != ""] + count = 0 + for i, line in enumerate(lines): + if "```" in line: + count += 1 + items = line.split('`') + if count % 2 == 1: + lines[i] = f'
<pre><code class="language-{items[-1]}">'
+            else:
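                # note (added for clarity): `count` tallies ``` fences, so an odd value
                # opens a code block (rewritten to <pre><code ...> above) and an even
                # value closes one, emitting the matching </code></pre> below.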
+                lines[i] = f'
<br></code></pre>'
+        else:
+            if i > 0:
+                if count % 2 == 1:
+                    line = line.replace("`", "\`")
+                    line = line.replace("<", "&lt;")
+                    line = line.replace(">", "&gt;")
+                    line = line.replace(" ", "&nbsp;")
+                    line = line.replace("*", "&ast;")
+                    line = line.replace("_", "&lowbar;")
+                    line = line.replace("-", "&#45;")
+                    line = line.replace(".", "&#46;")
+                    line = line.replace("!", "&#33;")
+                    line = line.replace("(", "&#40;")
+                    line = line.replace(")", "&#41;")
+                    line = line.replace("$", "&#36;")
+                lines[i] = "<br>
"+line + text = "".join(lines) + return text + + def predict(input, chatbot, max_length, top_p, temperature, history): - chatbot.append((input, "")) + chatbot.append((parse_text(input), "")) for response, history in model.stream_chat(tokenizer, input, history, max_length=max_length, top_p=top_p, temperature=temperature): - chatbot[-1] = (input, response) + chatbot[-1] = (parse_text(input), parse_text(response)) yield chatbot, history def reset_user_input(): From d21f891a76e9df2da7d3e0f6e5c5d28ef1dde337 Mon Sep 17 00:00:00 2001 From: tuteng0915 Date: Mon, 3 Apr 2023 23:36:18 +0800 Subject: [PATCH 046/110] add parse_text --- web_demo3.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/web_demo3.py b/web_demo3.py index 80ffce9..ad5ba11 100644 --- a/web_demo3.py +++ b/web_demo3.py @@ -4,7 +4,7 @@ from typing import TYPE_CHECKING, Any, Callable, Dict, List, Tuple, Type import mdtex2html tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) -model = AutoModel.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True).quantize(8).half().cuda() +model = AutoModel.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True).half().cuda() model = model.eval() # MAX_TURNS = 20 @@ -24,7 +24,7 @@ gr.Chatbot.postprocess = postprocess def parse_text(text): - """revise from https://github.com/GaiZhenbiao/ChuanhuChatGPT/""" + """copy from https://github.com/GaiZhenbiao/ChuanhuChatGPT/""" lines = text.split("\n") lines = [line for line in lines if line != ""] count = 0 From f505f9a5e3b82b7db51f7715d64942cca8b5d7b6 Mon Sep 17 00:00:00 2001 From: Dun Liang Date: Tue, 4 Apr 2023 13:40:21 +0800 Subject: [PATCH 047/110] add jittorLLMs support link --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index d75ede6..6d58ffe 100644 --- a/README.md +++ b/README.md @@ -30,6 +30,7 @@ ChatGLM-6B 使用了和 ChatGPT 相似的技术,针对中文问答和对话进 * [langchain-ChatGLM](https://github.com/imClumsyPanda/langchain-ChatGLM):基于本地知识的 ChatGLM 应用,基于LangChain * [bibliothecarius](https://github.com/coderabbit214/bibliothecarius):快速构建服务以集成您的本地数据和AI模型,支持ChatGLM等本地化模型接入。 * [闻达](https://github.com/l15y/wenda):大型语言模型调用平台,基于 ChatGLM-6B 实现了类 ChatPDF 功能 +* [JittorLLMs](https://github.com/Jittor/JittorLLMs)最低3G显存或者没有显卡都可运行 ChatGLM-6B FP16, 支持Linux、windows、Mac部署 以下是部分针对本项目的教程/文档: * [Windows部署文档](https://github.com/ZhangErling/ChatGLM-6B/blob/main/deployment_windows.md) From 5cd99594841f971bbe79556b6597f99877d800f1 Mon Sep 17 00:00:00 2001 From: Dun Liang Date: Tue, 4 Apr 2023 13:41:17 +0800 Subject: [PATCH 048/110] fix typo --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 6d58ffe..c63913c 100644 --- a/README.md +++ b/README.md @@ -30,7 +30,7 @@ ChatGLM-6B 使用了和 ChatGPT 相似的技术,针对中文问答和对话进 * [langchain-ChatGLM](https://github.com/imClumsyPanda/langchain-ChatGLM):基于本地知识的 ChatGLM 应用,基于LangChain * [bibliothecarius](https://github.com/coderabbit214/bibliothecarius):快速构建服务以集成您的本地数据和AI模型,支持ChatGLM等本地化模型接入。 * [闻达](https://github.com/l15y/wenda):大型语言模型调用平台,基于 ChatGLM-6B 实现了类 ChatPDF 功能 -* [JittorLLMs](https://github.com/Jittor/JittorLLMs)最低3G显存或者没有显卡都可运行 ChatGLM-6B FP16, 支持Linux、windows、Mac部署 +* [JittorLLMs](https://github.com/Jittor/JittorLLMs):最低3G显存或者没有显卡都可运行 ChatGLM-6B FP16, 支持Linux、windows、Mac部署 以下是部分针对本项目的教程/文档: * [Windows部署文档](https://github.com/ZhangErling/ChatGLM-6B/blob/main/deployment_windows.md) From a9fc0184446fba7f4f27addf519fea0b371df83a Mon Sep 17 00:00:00 2001 From: rainatam Date: Thu, 6 Apr 2023 15:16:36 +0800 
Subject: [PATCH 049/110] Update evaluation results and bleu score function --- ptuning/README.md | 39 ++++++++++++++++++++++++++++++++++++++- ptuning/main.py | 4 ++-- 2 files changed, 40 insertions(+), 3 deletions(-) diff --git a/ptuning/README.md b/ptuning/README.md index 4978dc4..ca1fc73 100644 --- a/ptuning/README.md +++ b/ptuning/README.md @@ -52,10 +52,46 @@ bash evaluate.sh * Input: 类型#裙\*版型#显瘦\*风格#文艺\*风格#简约\*图案#印花\*图案#撞色\*裙下摆#压褶\*裙长#连衣裙\*裙领型#圆领 * Label: 文艺个性的印花连衣裙,藏青色底蕴,低调又大气,撞色太阳花分布整个裙身,绚丽而美好,带来时尚减龄的气质。基础款的舒适圆领,简约不失大方,勾勒精致脸庞。领后是一粒包布扣固定,穿脱十分方便。前片立体的打褶设计,搭配后片压褶的做工,增添层次和空间感,显瘦又有型。 - * Output[微调前]: 类型#裙*版型#显瘦*风格#文艺*风格#简约*图案#印花*图案#撞色*裙下摆#压褶*裙长#连衣裙*裙领型#圆领 1\. 连衣裙:简约风格,裙长为膝盖以上,裙领型为圆领。2\. 裙下摆:压褶设计,使裙摆呈现出流畅的褶皱效果。3\. 裙领型:裙领型为圆领,使穿上连衣裙后更加有型。4\. 版型:采用显瘦设计,让连衣裙看起来更加苗条。5\. 风格:文艺风格,让连衣裙更加有内涵和品味。6\. 图案:印花设计,在连衣裙上印有独特的图案。7\. 撞色:采用撞色设计,让连衣裙在色彩上更加鲜明、富有层次感。 * Output[微调后]: 这是一款文艺范的连衣裙,以印花为元素,采用简约的印花,既能够突出文艺气质,又能够展现简约风。在印花的同时又有领子和裙摆的压褶设计,更加凸显文艺气质。简约而不会过于单调,搭配出街,穿着十分舒适。 +### 评估结果 + +| | P-tuning v2 | LoRA | +| ------- | ----------- | ----- | +| BLEU-4 | 7.71 | 6.13 | +| Rouge-1 | 31.35 | 28.36 | +| Rouge-2 | 7.19 | 4.38 | +| Rouge-l | 25.17 | 17.54 | + +#### 实验设置 + + ``` +max_source_length=64 +max_target_length=64 +per_device_train_batch_size=1 +gradient_accumulation_steps=16 +max_steps=3000 + ``` + +##### P-tuning v2 + +``` +pre_seq_len=128 +learning_rate=2e-2 +quantization_bit=4 +``` + +##### LoRA + +``` +learning_rate=5e-4 +``` + +实现采用的是 [simple_thu_chatglm6b](https://github.com/yuanzhoulvpi2017/zero_nlp/tree/main/simple_thu_chatglm6b) + + + ## 模型部署 将对应的demo或代码中的`THUDM/chatglm-6b`换成经过 P-Tuning 微调之后 checkpoint 的地址(在示例中为 `./output/adgen-chatglm-6b-pt-8-1e-2/checkpoint-3000`)。注意,目前的微调还不支持多轮数据,所以只有对话第一轮的回复是经过微调的。 @@ -77,3 +113,4 @@ bash evaluate.sh year={2022} } ``` + diff --git a/ptuning/main.py b/ptuning/main.py index 112c9ca..fbf3924 100644 --- a/ptuning/main.py +++ b/ptuning/main.py @@ -27,7 +27,7 @@ import numpy as np from datasets import load_dataset import jieba from rouge_chinese import Rouge -from nltk.translate.bleu_score import sentence_bleu +from nltk.translate.bleu_score import sentence_bleu, SmoothingFunction import transformers from transformers import ( @@ -293,7 +293,7 @@ def main(): for k, v in result.items(): score_dict[k].append(round(v["f"] * 100, 4)) - bleu_score = sentence_bleu([list(label)], list(pred)) + bleu_score = sentence_bleu([list(label)], list(pred), smoothing_function=SmoothingFunction().method3) score_dict["bleu-4"].append(round(bleu_score * 100, 4)) for k, v in score_dict.items(): From cc4be399ff1a88f5459b3b8793b83c4372409517 Mon Sep 17 00:00:00 2001 From: duzx16 Date: Thu, 6 Apr 2023 16:58:40 +0800 Subject: [PATCH 050/110] Update web demo3 --- web_demo3.py | 22 ++++++++-------------- 1 file changed, 8 insertions(+), 14 deletions(-) diff --git a/web_demo3.py b/web_demo3.py index 7c0777c..0e39968 100644 --- a/web_demo3.py +++ b/web_demo3.py @@ -1,10 +1,9 @@ from transformers import AutoModel, AutoTokenizer import gradio as gr -from typing import TYPE_CHECKING, Any, Callable, Dict, List, Tuple, Type import mdtex2html -tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) -model = AutoModel.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True).half().cuda() +tokenizer = AutoTokenizer.from_pretrained("/mnt/vepfs/workspace/zxdu/chatglm_6b", trust_remote_code=True) +model = AutoModel.from_pretrained("/mnt/vepfs/workspace/zxdu/chatglm_6b", trust_remote_code=True).half().cuda() model = model.eval() """Override Chatbot.postprocess""" @@ 
-77,15 +76,14 @@ def reset_state(): with gr.Blocks() as demo: gr.HTML("""
<h1 align="center">ChatGLM</h1>
""") + chatbot = gr.Chatbot() with gr.Row(): with gr.Column(scale=4): - chatbot = gr.Chatbot() - with gr.Row(): - with gr.Column(scale=12): - user_input = gr.Textbox(show_label=False, placeholder="Input...").style( - container=False) - with gr.Column(min_width=32, scale=1): - submitBtn = gr.Button("Submit", variant="primary") + with gr.Column(scale=12): + user_input = gr.Textbox(show_label=False, placeholder="Input...", lines=10).style( + container=False) + with gr.Column(min_width=32, scale=1): + submitBtn = gr.Button("Submit", variant="primary") with gr.Column(scale=1): emptyBtn = gr.Button("Clear History") max_length = gr.Slider(0, 4096, value=2048, step=1.0, label="Maximum length", interactive=True) @@ -94,10 +92,6 @@ with gr.Blocks() as demo: history = gr.State([]) - user_input.submit(predict, [user_input, chatbot, max_length, top_p, temperature, history], [chatbot, history], - show_progress=True) - user_input.submit(reset_user_input, [], [user_input]) - submitBtn.click(predict, [user_input, chatbot, max_length, top_p, temperature, history], [chatbot, history], show_progress=True) submitBtn.click(reset_user_input, [], [user_input]) From 40d83f32feb6fcbec54ab8c8479a4830378edb3e Mon Sep 17 00:00:00 2001 From: duzx16 Date: Thu, 6 Apr 2023 17:00:51 +0800 Subject: [PATCH 051/110] Update model path --- web_demo3.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/web_demo3.py b/web_demo3.py index 0e39968..df7f983 100644 --- a/web_demo3.py +++ b/web_demo3.py @@ -2,8 +2,8 @@ from transformers import AutoModel, AutoTokenizer import gradio as gr import mdtex2html -tokenizer = AutoTokenizer.from_pretrained("/mnt/vepfs/workspace/zxdu/chatglm_6b", trust_remote_code=True) -model = AutoModel.from_pretrained("/mnt/vepfs/workspace/zxdu/chatglm_6b", trust_remote_code=True).half().cuda() +tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) +model = AutoModel.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True).half().cuda() model = model.eval() """Override Chatbot.postprocess""" From 28335463394983ddfc7b554f8fd3ee894a7b98b5 Mon Sep 17 00:00:00 2001 From: duzx16 Date: Thu, 6 Apr 2023 17:01:24 +0800 Subject: [PATCH 052/110] Use chatbot web demo --- web_demo.py | 104 +++++++++++++++++++++++++++++++++++++----------- web_demo3.py | 101 ---------------------------------------------- web_demo_old.py | 45 +++++++++++++++++++++ 3 files changed, 125 insertions(+), 125 deletions(-) delete mode 100644 web_demo3.py create mode 100644 web_demo_old.py diff --git a/web_demo.py b/web_demo.py index 88a6dc8..df7f983 100644 --- a/web_demo.py +++ b/web_demo.py @@ -1,45 +1,101 @@ from transformers import AutoModel, AutoTokenizer import gradio as gr +import mdtex2html tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) model = AutoModel.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True).half().cuda() model = model.eval() -MAX_TURNS = 20 -MAX_BOXES = MAX_TURNS * 2 +"""Override Chatbot.postprocess""" -def predict(input, max_length, top_p, temperature, history=None): - if history is None: - history = [] +def postprocess(self, y): + if y is None: + return [] + for i, (message, response) in enumerate(y): + y[i] = ( + None if message is None else mdtex2html.convert((message)), + None if response is None else mdtex2html.convert(response), + ) + return y + + +gr.Chatbot.postprocess = postprocess + + +def parse_text(text): + """copy from https://github.com/GaiZhenbiao/ChuanhuChatGPT/""" + lines = text.split("\n") + lines = [line 
for line in lines if line != ""]
+    count = 0
+    for i, line in enumerate(lines):
+        if "```" in line:
+            count += 1
+            items = line.split('`')
+            if count % 2 == 1:
+                lines[i] = f'<pre><code class="language-{items[-1]}">'
+            else:
+                lines[i] = f'<br></code></pre>'
+        else:
+            if i > 0:
+                if count % 2 == 1:
+                    line = line.replace("`", "\`")
+                    line = line.replace("<", "&lt;")
+                    line = line.replace(">", "&gt;")
+                    line = line.replace(" ", "&nbsp;")
+                    line = line.replace("*", "&ast;")
+                    line = line.replace("_", "&lowbar;")
+                    line = line.replace("-", "&#45;")
+                    line = line.replace(".", "&#46;")
+                    line = line.replace("!", "&#33;")
+                    line = line.replace("(", "&#40;")
+                    line = line.replace(")", "&#41;")
+                    line = line.replace("$", "&#36;")
+                lines[i] = "<br>
"+line + text = "".join(lines) + return text + + +def predict(input, chatbot, max_length, top_p, temperature, history): + chatbot.append((parse_text(input), "")) for response, history in model.stream_chat(tokenizer, input, history, max_length=max_length, top_p=top_p, temperature=temperature): - updates = [] - for query, response in history: - updates.append(gr.update(visible=True, value="用户:" + query)) - updates.append(gr.update(visible=True, value="ChatGLM-6B:" + response)) - if len(updates) < MAX_BOXES: - updates = updates + [gr.Textbox.update(visible=False)] * (MAX_BOXES - len(updates)) - yield [history] + updates + chatbot[-1] = (parse_text(input), parse_text(response)) + + yield chatbot, history + + +def reset_user_input(): + return gr.update(value='') + + +def reset_state(): + return [], [] with gr.Blocks() as demo: - state = gr.State([]) - text_boxes = [] - for i in range(MAX_BOXES): - if i % 2 == 0: - text_boxes.append(gr.Markdown(visible=False, label="提问:")) - else: - text_boxes.append(gr.Markdown(visible=False, label="回复:")) + gr.HTML("""

<h1 align="center">ChatGLM</h1>

""") + chatbot = gr.Chatbot() with gr.Row(): with gr.Column(scale=4): - txt = gr.Textbox(show_label=False, placeholder="Enter text and press enter", lines=11).style( - container=False) + with gr.Column(scale=12): + user_input = gr.Textbox(show_label=False, placeholder="Input...", lines=10).style( + container=False) + with gr.Column(min_width=32, scale=1): + submitBtn = gr.Button("Submit", variant="primary") with gr.Column(scale=1): + emptyBtn = gr.Button("Clear History") max_length = gr.Slider(0, 4096, value=2048, step=1.0, label="Maximum length", interactive=True) top_p = gr.Slider(0, 1, value=0.7, step=0.01, label="Top P", interactive=True) temperature = gr.Slider(0, 1, value=0.95, step=0.01, label="Temperature", interactive=True) - button = gr.Button("Generate") - button.click(predict, [txt, max_length, top_p, temperature, state], [state] + text_boxes) -demo.queue().launch(share=False, inbrowser=True) + + history = gr.State([]) + + submitBtn.click(predict, [user_input, chatbot, max_length, top_p, temperature, history], [chatbot, history], + show_progress=True) + submitBtn.click(reset_user_input, [], [user_input]) + + emptyBtn.click(reset_state, outputs=[chatbot, history], show_progress=True) + +demo.queue().launch(share=True, inbrowser=True) diff --git a/web_demo3.py b/web_demo3.py deleted file mode 100644 index df7f983..0000000 --- a/web_demo3.py +++ /dev/null @@ -1,101 +0,0 @@ -from transformers import AutoModel, AutoTokenizer -import gradio as gr -import mdtex2html - -tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) -model = AutoModel.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True).half().cuda() -model = model.eval() - -"""Override Chatbot.postprocess""" - - -def postprocess(self, y): - if y is None: - return [] - for i, (message, response) in enumerate(y): - y[i] = ( - None if message is None else mdtex2html.convert((message)), - None if response is None else mdtex2html.convert(response), - ) - return y - - -gr.Chatbot.postprocess = postprocess - - -def parse_text(text): - """copy from https://github.com/GaiZhenbiao/ChuanhuChatGPT/""" - lines = text.split("\n") - lines = [line for line in lines if line != ""] - count = 0 - for i, line in enumerate(lines): - if "```" in line: - count += 1 - items = line.split('`') - if count % 2 == 1: - lines[i] = f'
<pre><code class="language-{items[-1]}">'
-            else:
-                lines[i] = f'<br></code></pre>'
-        else:
-            if i > 0:
-                if count % 2 == 1:
-                    line = line.replace("`", "\`")
-                    line = line.replace("<", "&lt;")
-                    line = line.replace(">", "&gt;")
-                    line = line.replace(" ", "&nbsp;")
-                    line = line.replace("*", "&ast;")
-                    line = line.replace("_", "&lowbar;")
-                    line = line.replace("-", "&#45;")
-                    line = line.replace(".", "&#46;")
-                    line = line.replace("!", "&#33;")
-                    line = line.replace("(", "&#40;")
-                    line = line.replace(")", "&#41;")
-                    line = line.replace("$", "&#36;")
-                lines[i] = "<br>
"+line - text = "".join(lines) - return text - - -def predict(input, chatbot, max_length, top_p, temperature, history): - chatbot.append((parse_text(input), "")) - for response, history in model.stream_chat(tokenizer, input, history, max_length=max_length, top_p=top_p, - temperature=temperature): - chatbot[-1] = (parse_text(input), parse_text(response)) - - yield chatbot, history - - -def reset_user_input(): - return gr.update(value='') - - -def reset_state(): - return [], [] - - -with gr.Blocks() as demo: - gr.HTML("""

<h1 align="center">ChatGLM</h1>

""") - - chatbot = gr.Chatbot() - with gr.Row(): - with gr.Column(scale=4): - with gr.Column(scale=12): - user_input = gr.Textbox(show_label=False, placeholder="Input...", lines=10).style( - container=False) - with gr.Column(min_width=32, scale=1): - submitBtn = gr.Button("Submit", variant="primary") - with gr.Column(scale=1): - emptyBtn = gr.Button("Clear History") - max_length = gr.Slider(0, 4096, value=2048, step=1.0, label="Maximum length", interactive=True) - top_p = gr.Slider(0, 1, value=0.7, step=0.01, label="Top P", interactive=True) - temperature = gr.Slider(0, 1, value=0.95, step=0.01, label="Temperature", interactive=True) - - history = gr.State([]) - - submitBtn.click(predict, [user_input, chatbot, max_length, top_p, temperature, history], [chatbot, history], - show_progress=True) - submitBtn.click(reset_user_input, [], [user_input]) - - emptyBtn.click(reset_state, outputs=[chatbot, history], show_progress=True) - -demo.queue().launch(share=True, inbrowser=True) diff --git a/web_demo_old.py b/web_demo_old.py new file mode 100644 index 0000000..88a6dc8 --- /dev/null +++ b/web_demo_old.py @@ -0,0 +1,45 @@ +from transformers import AutoModel, AutoTokenizer +import gradio as gr + +tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) +model = AutoModel.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True).half().cuda() +model = model.eval() + +MAX_TURNS = 20 +MAX_BOXES = MAX_TURNS * 2 + + +def predict(input, max_length, top_p, temperature, history=None): + if history is None: + history = [] + for response, history in model.stream_chat(tokenizer, input, history, max_length=max_length, top_p=top_p, + temperature=temperature): + updates = [] + for query, response in history: + updates.append(gr.update(visible=True, value="用户:" + query)) + updates.append(gr.update(visible=True, value="ChatGLM-6B:" + response)) + if len(updates) < MAX_BOXES: + updates = updates + [gr.Textbox.update(visible=False)] * (MAX_BOXES - len(updates)) + yield [history] + updates + + +with gr.Blocks() as demo: + state = gr.State([]) + text_boxes = [] + for i in range(MAX_BOXES): + if i % 2 == 0: + text_boxes.append(gr.Markdown(visible=False, label="提问:")) + else: + text_boxes.append(gr.Markdown(visible=False, label="回复:")) + + with gr.Row(): + with gr.Column(scale=4): + txt = gr.Textbox(show_label=False, placeholder="Enter text and press enter", lines=11).style( + container=False) + with gr.Column(scale=1): + max_length = gr.Slider(0, 4096, value=2048, step=1.0, label="Maximum length", interactive=True) + top_p = gr.Slider(0, 1, value=0.7, step=0.01, label="Top P", interactive=True) + temperature = gr.Slider(0, 1, value=0.95, step=0.01, label="Temperature", interactive=True) + button = gr.Button("Generate") + button.click(predict, [txt, max_length, top_p, temperature, state], [state] + text_boxes) +demo.queue().launch(share=False, inbrowser=True) From 7131d29f2d49ed984dba8aeeb88b5a16a83b07ab Mon Sep 17 00:00:00 2001 From: duzx16 Date: Thu, 6 Apr 2023 17:51:20 +0800 Subject: [PATCH 053/110] Add English readme --- README_en.md | 4 ++++ ptuning/README.md | 2 ++ 2 files changed, 6 insertions(+) diff --git a/README_en.md b/README_en.md index d5c05bb..da2b8dc 100644 --- a/README_en.md +++ b/README_en.md @@ -9,6 +9,8 @@ ChatGLM-6B uses technology similar to ChatGPT, optimized for Chinese QA and dial Try the [online demo](https://huggingface.co/spaces/ysharma/ChatGLM-6b_Gradio_Streaming) on Huggingface Spaces. 
## Update +**[2023/03/31]** Added a parameter-efficient tuning implementation based on [P-Tuning-v2](https://github.com/THUDM/P-tuning-v2). The minimum INT4 quantization level only needs 7GB GPU memory is enough for model tuning. See [Parameter-efficient tuning method](ptuning/README.md) for details. + **[2023/03/23]** Add API deployment, thanks to [@LemonQu-GIT](https://github.com/LemonQu-GIT). Add embedding-quantized model [ChatGLM-6B-INT4-QE](https://huggingface.co/THUDM/chatglm-6b-int4-qe). Add support for GPU inference on Mac with Apple Silicon. **[2023/03/19]** Add streaming output function `stream_chat`, already applied in web and CLI demo. Fix Chinese punctuations in output. Add quantized model [ChatGLM-6B-INT4](https://huggingface.co/THUDM/chatglm-6b-int4). @@ -168,6 +170,8 @@ model = AutoModel.from_pretrained("your local path", trust_remote_code=True).hal ``` Then you can use GPU-accelerated model inference on Mac. +## Parameter-efficient Tuning +Parameter-efficient tuning based on [P-tuning v2](https://github.com/THUDM/P-tuning-v2). See [ptuning/README.md](ptuning/README.md) for details on how to use it. ## ChatGLM-6B Examples diff --git a/ptuning/README.md b/ptuning/README.md index ca1fc73..11ee326 100644 --- a/ptuning/README.md +++ b/ptuning/README.md @@ -3,6 +3,8 @@ 下面以 [ADGEN](https://aclanthology.org/D19-1321.pdf) (广告生成) 数据集为例介绍代码的使用方法。 +*Read this in [English](README_en.md).* + ## 软件依赖 运行微调需要4.27.1版本的`transformers`。除 ChatGLM-6B 的依赖之外,还需要按照以下依赖 ``` From 66e641d572d612e905ca5e16b1ebdab029eb6910 Mon Sep 17 00:00:00 2001 From: duzx16 Date: Thu, 6 Apr 2023 17:54:46 +0800 Subject: [PATCH 054/110] Add English readme --- .idea/ChatGLM-6B.iml | 7 +++++++ 1 file changed, 7 insertions(+) create mode 100644 .idea/ChatGLM-6B.iml diff --git a/.idea/ChatGLM-6B.iml b/.idea/ChatGLM-6B.iml new file mode 100644 index 0000000..ec63674 --- /dev/null +++ b/.idea/ChatGLM-6B.iml @@ -0,0 +1,7 @@ + + + + + \ No newline at end of file From e79e4f2859321406272c322ea9909fb5395f285d Mon Sep 17 00:00:00 2001 From: duzx16 Date: Thu, 6 Apr 2023 17:55:27 +0800 Subject: [PATCH 055/110] Revert "Add English readme" This reverts commit 66e641d572d612e905ca5e16b1ebdab029eb6910. --- .idea/ChatGLM-6B.iml | 7 ------- 1 file changed, 7 deletions(-) delete mode 100644 .idea/ChatGLM-6B.iml diff --git a/.idea/ChatGLM-6B.iml b/.idea/ChatGLM-6B.iml deleted file mode 100644 index ec63674..0000000 --- a/.idea/ChatGLM-6B.iml +++ /dev/null @@ -1,7 +0,0 @@ - - - - - \ No newline at end of file From 6792ca6805dcae1fbf83c2eb33a7ffc3a96b243a Mon Sep 17 00:00:00 2001 From: duzx16 Date: Thu, 6 Apr 2023 17:55:31 +0800 Subject: [PATCH 056/110] Add English readme --- ptuning/README_en.md | 115 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 115 insertions(+) create mode 100644 ptuning/README_en.md diff --git a/ptuning/README_en.md b/ptuning/README_en.md new file mode 100644 index 0000000..9282da3 --- /dev/null +++ b/ptuning/README_en.md @@ -0,0 +1,115 @@ +# ChatGLM-6B-PT +This repository implements tuning of the ChatGLM-6B model based on [P-Tuning v2](https://github.com/THUDM/P-tuning-v2). P-Tuning v2 reduces the amount of parameters that need to be optimized to 0.1% of the full fine-tuning, and then through model quantization, Gradient Checkpoint and other methods, it only needs a minimum of 7GB of video memory to run. + +The following uses the [ADGEN](https://aclanthology.org/D19-1321.pdf) (advertising generation) dataset as an example to introduce how to use the code. 
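As a rough sanity check on the 0.1% figure above, one can count how many parameters actually receive gradients once the prefix encoder is enabled. This is a minimal sketch, not part of the patch set: it assumes the ChatGLM-6B remote code freezes the backbone and adds a trainable `PrefixEncoder` whenever `pre_seq_len` is set in the config; `pre_seq_len=128` is only an illustrative value (match it to `PRE_SEQ_LEN` in `train.sh`):

```python
from transformers import AutoConfig, AutoModel

# Load the model in P-tuning v2 mode and compare trainable vs. total parameters.
config = AutoConfig.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True)
config.pre_seq_len = 128         # illustrative soft prompt length, see lead-in
config.prefix_projection = False  # assumption: attribute defined by the remote ChatGLM config

model = AutoModel.from_pretrained("THUDM/chatglm-6b", config=config,
                                  trust_remote_code=True)

trainable = sum(p.numel() for p in model.parameters() if p.requires_grad)
total = sum(p.numel() for p in model.parameters())
print(f"trainable: {trainable:,} / {total:,} ({trainable / total:.3%})")
```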
+ +## Software dependencies +Running p-tuning requires version 4.27.1 of `transformers`. In addition to the dependencies of ChatGLM-6B, the following dependencies are required +``` +pip install rouge_chinese nltk jieba datasets +``` +## Instructions + +### Download the dataset +The task of the ADGEN dataset is to generate an advertisement word (summary) based on the input (content). + +```json +{ + "content": "类型#上衣*版型#宽松*版型#显瘦*图案#线条*衣样式#衬衫*衣袖型#泡泡袖*衣款式#抽绳", + "summary": "这件衬衫的款式非常的宽松,利落的线条可以很好的隐藏身材上的小缺点,穿在身上有着很好的显瘦效果。领口装饰了一个可爱的抽绳,漂亮的绳结展现出了十足的个性,配合时尚的泡泡袖型,尽显女性甜美可爱的气息。" +} +``` + +From [Google Drive](https://drive.google.com/file/d/13_vf0xRTQsyneRKdD1bZIr93vBGOczrk/view?usp=sharing) or [Tsinghua Cloud](https://cloud.tsinghua.edu.cn/f/b3f119a008264b1cabd1/?dl=1) Download the processed ADGEN dataset, and put the decompressed `AdvertiseGen` directory into this directory. + +### Training +Run the following commands for training: +```shell +bash train.sh +``` +`PRE_SEQ_LEN` and `LR` in `train.sh` are soft prompt length and training learning rate respectively, which can be adjusted to achieve the best results. The P-Tuning-v2 method will freeze all model parameters, and the quantization level of the original model can be adjusted by adjusting `quantization_bit`. If this option is not added, it will be loaded with FP16 precision. + +Under the default configuration of `per_device_train_batch_size=1`, `gradient_accumulation_steps=16`, the model parameters of INT4 are frozen, and a training iteration will perform 16 cumulative forward and backward propagations with a batch size of 1, which is equivalent to the total batch size of 16, and only 6.7G GPU memory is required at this time with `quantization_bit=4`. If you want to improve the training efficiency under the same batch size, you can increase the value of `per_device_train_batch_size` while keeping the product of the two unchanged, but it will also bring more GPU memory consumption, please adjust it according to the actual situation. + +### Inference + +Change `CHECKPOINT` in `evaluate.sh` to the checkpoint name saved during training, and run the following commands for model inference and evaluation: +```shell +bash evaluate.sh +``` + +The evaluation indicators are Chinese Rouge score and BLEU-4. The generated results are saved in +`./output/adgen-chatglm-6b-pt-8-1e-2/generated_predictions.txt`. + +### Example +#### Example 1 +* Input: 类型#上衣\*材质#牛仔布\*颜色#白色\*风格#简约\*图案#刺绣\*衣样式#外套\*衣款式#破洞 +* Label: 简约而不简单的牛仔外套,白色的衣身十分百搭。衣身多处有做旧破洞设计,打破单调乏味,增加一丝造型看点。衣身后背处有趣味刺绣装饰,丰富层次感,彰显别样时尚。 +* Output[微调前]: 这件上衣的材质是牛仔布,颜色是白色,风格是简约,图案是刺绣,衣样式是外套,衣款式是破洞。 +* Output[微调后]: 这是一款简约的牛仔外套,破洞设计,将牛仔布破洞,带来一种随意与个性。破洞的牛仔外套,展现出时尚气息,带来一种休闲感。同时,刺绣图案,让整件外套更加立体。 + +#### Example 2 + +* Input: 类型#裙\*版型#显瘦\*风格#文艺\*风格#简约\*图案#印花\*图案#撞色\*裙下摆#压褶\*裙长#连衣裙\*裙领型#圆领 +* Label: 文艺个性的印花连衣裙,藏青色底蕴,低调又大气,撞色太阳花分布整个裙身,绚丽而美好,带来时尚减龄的气质。基础款的舒适圆领,简约不失大方,勾勒精致脸庞。领后是一粒包布扣固定,穿脱十分方便。前片立体的打褶设计,搭配后片压褶的做工,增添层次和空间感,显瘦又有型。 +* Output[微调前]: 类型#裙*版型#显瘦*风格#文艺*风格#简约*图案#印花*图案#撞色*裙下摆#压褶*裙长#连衣裙*裙领型#圆领 1\. 连衣裙:简约风格,裙长为膝盖以上,裙领型为圆领。2\. 裙下摆:压褶设计,使裙摆呈现出流畅的褶皱效果。3\. 裙领型:裙领型为圆领,使穿上连衣裙后更加有型。4\. 版型:采用显瘦设计,让连衣裙看起来更加苗条。5\. 风格:文艺风格,让连衣裙更加有内涵和品味。6\. 图案:印花设计,在连衣裙上印有独特的图案。7\. 
撞色:采用撞色设计,让连衣裙在色彩上更加鲜明、富有层次感。 +* Output[微调后]: 这是一款文艺范的连衣裙,以印花为元素,采用简约的印花,既能够突出文艺气质,又能够展现简约风。在印花的同时又有领子和裙摆的压褶设计,更加凸显文艺气质。简约而不会过于单调,搭配出街,穿着十分舒适。 + +### evaluation result + +| | P-tuning v2 | LoRA | +| ------- | ----------- | ----- | +| BLEU-4 | 7.71 | 6.13 | +| Rouge-1 | 31.35 | 28.36 | +| Rouge-2 | 7.19 | 4.38 | +| Rouge-l | 25.17 | 17.54 | + +#### Experiment Settings + + ``` +max_source_length=64 +max_target_length=64 +per_device_train_batch_size=1 +gradient_accumulation_steps=16 +max_steps=3000 + ``` + +##### P-tuning v2 + +``` +pre_seq_len=128 +learning_rate=2e-2 +quantization_bit=4 +``` + +##### LoRA + +``` +learning_rate=5e-4 +``` + +The implementation uses [simple_thu_chatglm6b](https://github.com/yuanzhoulvpi2017/zero_nlp/tree/main/simple_thu_chatglm6b) + + + +## Model Deployment +Replace `THUDM/chatglm-6b` in the corresponding demo or code with the path of the checkpoint after P-Tuning(in the example, `./output/adgen-chatglm-6b-pt-8-1e-2/ checkpoint-3000`). Note that the current fine-tuning does not support multiple rounds of data, so only the responses from the first round of the conversation are fine-tuned. + +## Use your own dataset +Modify `train_file`, `validation_file` and `test_file` in `train.sh` and `evaluate.sh` to your own JSON format dataset paths, and change `prompt_column` and `response_column` to the keys in the JSON file corresponding to input text and output text. + +## TODO +* [ ] Support for chat data +* [ ] Support for full finetuning + +## quoting + +``` +@inproceedings{liu2022p, + title={P-tuning: Prompt tuning can be comparable to fine-tuning across scales and tasks}, + author={Liu, Xiao and Ji, Kaixuan and Fu, Yicheng and Tam, Weng and Du, Zhengxiao and Yang, Zhilin and Tang, Jie}, + booktitle={Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics (Volume 2: Short Papers)}, + pages={61--68}, + year={2022} +} +``` \ No newline at end of file From 8a809d4ab712eb61c1ae070452b61f7ab8e4164e Mon Sep 17 00:00:00 2001 From: duzx16 Date: Thu, 6 Apr 2023 19:28:07 +0800 Subject: [PATCH 057/110] Drop icetk dependency --- requirements.txt | 1 - 1 file changed, 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 072d12c..4788707 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,6 +1,5 @@ protobuf>=3.19.5,<3.20.1 transformers==4.27.1 -icetk cpm_kernels torch>=1.10 gradio From 5865924cc61b780d4fdc6c8ec499b38ce87f9280 Mon Sep 17 00:00:00 2001 From: rainatam Date: Thu, 6 Apr 2023 20:21:29 +0800 Subject: [PATCH 058/110] Add training for chat data --- ptuning/README.md | 69 ++++++++++++++++++++++++++++++++++++++----- ptuning/arguments.py | 4 +++ ptuning/main.py | 26 ++++++++++++++-- ptuning/train_chat.sh | 27 +++++++++++++++++ 4 files changed, 117 insertions(+), 9 deletions(-) create mode 100644 ptuning/train_chat.sh diff --git a/ptuning/README.md b/ptuning/README.md index ca1fc73..c9497ad 100644 --- a/ptuning/README.md +++ b/ptuning/README.md @@ -57,12 +57,15 @@ bash evaluate.sh ### 评估结果 -| | P-tuning v2 | LoRA | -| ------- | ----------- | ----- | -| BLEU-4 | 7.71 | 6.13 | -| Rouge-1 | 31.35 | 28.36 | -| Rouge-2 | 7.19 | 4.38 | -| Rouge-l | 25.17 | 17.54 | +| | P-tuning v2 | LoRA | +| ------------- | ----------- | ----- | +| BLEU-4 | 7.78 | 6.13 | +| Rouge-1 | 31.34 | 28.36 | +| Rouge-2 | 7.34 | 4.38 | +| Rouge-l | 25.26 | 17.54 | +| Training Loss | 3.8016 | 3.36 | + + #### 实验设置 @@ -98,8 +101,60 @@ learning_rate=5e-4 ## 使用自己的数据集 修改 `train.sh` 和 `evaluate.sh` 中的 `train_file`、`validation_file`和`test_file`为你自己的 JSON 
格式数据集路径,并将 `prompt_column` 和 `response_column` 改为 JSON 文件中输入文本和输出文本对应的 KEY。 +## 对话数据集 + +如需要使用多轮对话数据对模型进行微调,可以提供聊天历史,例如 + +```json +{ + "prompt": "是的。上下水管都好的", + "response": "那就要检查线路了,一般风扇继电器是由电脑控制吸合的,如果电路存在断路,或者电脑坏了的话会出现继电器不吸合的情况!", + "history": [ + [ + "长城h3风扇不转。继电器好的。保险丝好的传感器新的风扇也新的这是为什么。就是继电器缺一个信号线", + "用电脑能读数据流吗?水温多少" + ], + [ + "95", + "上下水管温差怎么样啊?空气是不是都排干净了呢?" + ] + ] +} +``` + +训练时需要指定 `--history_column` 为数据中聊天历史的 key(在此例子中是 `history`),将自动把聊天历史拼接,例如: + +- Input + + ``` + [Round 0] + 问:长城h3风扇不转。继电器好的。保险丝好的传感器新的风扇也新的这是为什么。就是继电器缺一个信号线 + 答:用电脑能读数据流吗?水温多少 + [Round 1] + 问:95 + 答:上下水管温差怎么样啊?空气是不是都排干净了呢? + [Round 2] + 问:是的。上下水管都好的 + ``` + +- Label + + ``` + 那就要检查线路了,一般风扇继电器是由电脑控制吸合的,如果电路存在断路,或者电脑坏了的话会出现继电器不吸合的情况! + ``` + +要注意超过输入长度 `max_source_length` 的内容会被截。 + +可以参考以下指令: + +```shell +bash train_chat.sh +``` + + + ## TODO -* [ ] Support for chat data +* [x] Support for chat data * [ ] Support for full finetuning ## 引用 diff --git a/ptuning/arguments.py b/ptuning/arguments.py index 95d766f..f9310da 100644 --- a/ptuning/arguments.py +++ b/ptuning/arguments.py @@ -80,6 +80,10 @@ class DataTrainingArguments: default=None, metadata={"help": "The name of the column in the datasets containing the summaries (for summarization)."}, ) + history_column: Optional[str] = field( + default=None, + metadata={"help": "The name of the column in the datasets containing the history of chat."}, + ) train_file: Optional[str] = field( default=None, metadata={"help": "The input training data file (a jsonlines or csv file)."} ) diff --git a/ptuning/main.py b/ptuning/main.py index fbf3924..a7837ac 100644 --- a/ptuning/main.py +++ b/ptuning/main.py @@ -135,6 +135,7 @@ def main(): # Get the column names for input/target. prompt_column = data_args.prompt_column response_column = data_args.response_column + history_column = data_args.history_column # Temporarily set max_target_length for training. 
max_target_length = data_args.max_target_length @@ -143,7 +144,16 @@ def main(): inputs, targets = [], [] for i in range(len(examples[prompt_column])): if examples[prompt_column][i] and examples[response_column][i]: - inputs.append(examples[prompt_column][i]) + query = examples[prompt_column][i] + if history_column is None or len(examples[history_column][i]) == 0: + prompt = query + else: + prompt = "" + history = examples[history_column][i] + for i, (old_query, response) in enumerate(history): + prompt += "[Round {}]\n问:{}\n答:{}\n".format(i, old_query, response) + prompt += "[Round {}]\n问:{}\n答:".format(len(history), query) + inputs.append(prompt) targets.append(examples[response_column][i]) inputs = [prefix + inp for inp in inputs] @@ -167,7 +177,17 @@ def main(): } for i in range(len(examples[prompt_column])): if examples[prompt_column][i] and examples[response_column][i]: - prompt, answer = examples[prompt_column][i], examples[response_column][i] + query, answer = examples[prompt_column][i], examples[response_column][i] + + if history_column is None: + prompt = query + else: + prompt = "" + history = examples[history_column][i] + for i, (old_query, response) in enumerate(history): + prompt += "[Round {}]\n问:{}\n答:{}\n".format(i, old_query, response) + prompt += "[Round {}]\n问:{}\n答:".format(len(history), query) + prompt = prefix + prompt a_ids = tokenizer.encode(text=prompt, add_special_tokens=False) b_ids = tokenizer.encode(text=answer, add_special_tokens=False) @@ -218,6 +238,8 @@ def main(): desc="Running tokenizer on train dataset", ) print_dataset_example(train_dataset[0]) + print_dataset_example(train_dataset[2]) + exit() if training_args.do_eval: max_target_length = data_args.val_max_target_length diff --git a/ptuning/train_chat.sh b/ptuning/train_chat.sh new file mode 100644 index 0000000..b0f5cdc --- /dev/null +++ b/ptuning/train_chat.sh @@ -0,0 +1,27 @@ +PRE_SEQ_LEN=8 +LR=1e-2 + +CUDA_VISIBLE_DEVICES=0 python3 main.py \ + --do_train \ + --train_file $CHAT_TRAIN_DATA \ + --validation_file $CHAT_VAL_DATA \ + --prompt_column prompt \ + --response_column response \ + --history_column history \ + --overwrite_cache \ + --model_name_or_path THUDM/chatglm-6b \ + --output_dir $CHECKPOINT_NAME \ + --overwrite_output_dir \ + --max_source_length 256 \ + --max_target_length 256 \ + --per_device_train_batch_size 1 \ + --per_device_eval_batch_size 1 \ + --gradient_accumulation_steps 16 \ + --predict_with_generate \ + --max_steps 3000 \ + --logging_steps 10 \ + --save_steps 1000 \ + --learning_rate $LR \ + --pre_seq_len $PRE_SEQ_LEN \ + --quantization_bit 4 + From 1cbe2d19814332a8a75b821f7ad4b58e21a4b115 Mon Sep 17 00:00:00 2001 From: rainatam Date: Thu, 6 Apr 2023 20:22:56 +0800 Subject: [PATCH 059/110] Remove logging --- ptuning/main.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/ptuning/main.py b/ptuning/main.py index a7837ac..1078f9e 100644 --- a/ptuning/main.py +++ b/ptuning/main.py @@ -238,8 +238,6 @@ def main(): desc="Running tokenizer on train dataset", ) print_dataset_example(train_dataset[0]) - print_dataset_example(train_dataset[2]) - exit() if training_args.do_eval: max_target_length = data_args.val_max_target_length From ed79244725e2da2e3e6bb6ee5ef5e203898dddaf Mon Sep 17 00:00:00 2001 From: rainatam Date: Thu, 6 Apr 2023 20:46:26 +0800 Subject: [PATCH 060/110] Update README --- ptuning/README.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/ptuning/README.md b/ptuning/README.md index 10a909d..522735e 100644 --- a/ptuning/README.md +++ 
b/ptuning/README.md @@ -61,10 +61,10 @@ bash evaluate.sh | | P-tuning v2 | LoRA | | ------------- | ----------- | ----- | -| BLEU-4 | 7.78 | 6.13 | -| Rouge-1 | 31.34 | 28.36 | -| Rouge-2 | 7.34 | 4.38 | -| Rouge-l | 25.26 | 17.54 | +| BLEU-4 | 7.78 | 6.25 | +| Rouge-1 | 31.34 | 28.58 | +| Rouge-2 | 7.34 | 4.42 | +| Rouge-l | 25.26 | 17.56 | | Training Loss | 3.8016 | 3.36 | From a1ecafd91f6fc9b8762846de79ee04454579ce4c Mon Sep 17 00:00:00 2001 From: rainatam Date: Thu, 6 Apr 2023 20:47:32 +0800 Subject: [PATCH 061/110] Update README --- ptuning/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ptuning/README.md b/ptuning/README.md index 522735e..bb3a222 100644 --- a/ptuning/README.md +++ b/ptuning/README.md @@ -65,7 +65,7 @@ bash evaluate.sh | Rouge-1 | 31.34 | 28.58 | | Rouge-2 | 7.34 | 4.42 | | Rouge-l | 25.26 | 17.56 | -| Training Loss | 3.8016 | 3.36 | +| Training Loss | 3.80 | 3.36 | From 0cf3d08841b55e1d60ad91cf1283bca2321c6313 Mon Sep 17 00:00:00 2001 From: rainatam Date: Thu, 6 Apr 2023 22:30:30 +0800 Subject: [PATCH 062/110] Update README --- ptuning/README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/ptuning/README.md b/ptuning/README.md index bb3a222..753d895 100644 --- a/ptuning/README.md +++ b/ptuning/README.md @@ -137,6 +137,7 @@ learning_rate=5e-4 答:上下水管温差怎么样啊?空气是不是都排干净了呢? [Round 2] 问:是的。上下水管都好的 + 答: ``` - Label From ea682a6f51a43381578d08f0a4cea995c4135cec Mon Sep 17 00:00:00 2001 From: duzx16 Date: Thu, 6 Apr 2023 22:42:20 +0800 Subject: [PATCH 063/110] Update default hyperparameters Remove hardcode token id --- ptuning/evaluate.sh | 4 ++-- ptuning/main.py | 6 +++--- ptuning/train.sh | 4 ++-- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/ptuning/evaluate.sh b/ptuning/evaluate.sh index 1217ceb..120a8c2 100644 --- a/ptuning/evaluate.sh +++ b/ptuning/evaluate.sh @@ -1,5 +1,5 @@ -PRE_SEQ_LEN=8 -CHECKPOINT=adgen-chatglm-6b-pt-8-1e-2 +PRE_SEQ_LEN=128 +CHECKPOINT=adgen-chatglm-6b-pt-128-2e-2 STEP=3000 CUDA_VISIBLE_DEVICES=0 python3 main.py \ diff --git a/ptuning/main.py b/ptuning/main.py index 1078f9e..e34e95e 100644 --- a/ptuning/main.py +++ b/ptuning/main.py @@ -167,7 +167,7 @@ def main(): model_inputs["labels"] = labels["input_ids"] return model_inputs - + def preprocess_function_train(examples): max_seq_length = data_args.max_source_length + data_args.max_target_length @@ -198,9 +198,9 @@ def main(): if len(b_ids) > data_args.max_target_length - 2: b_ids = b_ids[: data_args.max_target_length - 2] - input_ids = a_ids + [150001, 150004] + b_ids + [150005] + input_ids = tokenizer.build_inputs_with_special_tokens(a_ids, b_ids) - context_length = input_ids.index(150004) + context_length = input_ids.index(tokenizer.bos_token_id) mask_position = context_length - 1 labels = [-100] * context_length + input_ids[mask_position+1:] diff --git a/ptuning/train.sh b/ptuning/train.sh index 3189829..efc9a16 100644 --- a/ptuning/train.sh +++ b/ptuning/train.sh @@ -1,5 +1,5 @@ -PRE_SEQ_LEN=8 -LR=1e-2 +PRE_SEQ_LEN=128 +LR=2e-2 CUDA_VISIBLE_DEVICES=0 python3 main.py \ --do_train \ From b65142b5e54e52b27c1c1269e1b4abd83efcce45 Mon Sep 17 00:00:00 2001 From: duzx16 Date: Thu, 6 Apr 2023 23:13:53 +0800 Subject: [PATCH 064/110] Add update --- README.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/README.md b/README.md index c63913c..b86d18f 100644 --- a/README.md +++ b/README.md @@ -16,6 +16,8 @@ ChatGLM-6B 使用了和 ChatGPT 相似的技术,针对中文问答和对话进 *Read this in [English](README_en.md).* ## 更新信息 +**[2023/04/06]** 优化web demo的界面。移除embedding中的image 
token以减小显存占用(需要更新模型文件`pytorch_model-00001-of-00008.bin`和`pytorch_model-00008-of-00008.bin`)。 + **[2023/03/31]** 增加基于 [P-Tuning-v2](https://github.com/THUDM/P-tuning-v2) 的高效参数微调实现,INT4 量化级别下最低只需 7GB 显存即可进行模型微调。详见[高效参数微调方法](ptuning/README.md)。 **[2023/03/23]** 增加 API 部署(感谢 [@LemonQu-GIT](https://github.com/LemonQu-GIT))。增加 Embedding 量化模型 [ChatGLM-6B-INT4-QE](https://huggingface.co/THUDM/chatglm-6b-int4-qe)。增加配备 Apple Silicon 芯片的 Mac 上 GPU 加速的支持。 From ef9becd129345c18ef73dd439c3e3de09c1c23cb Mon Sep 17 00:00:00 2001 From: duzx16 Date: Fri, 7 Apr 2023 01:03:31 +0800 Subject: [PATCH 065/110] Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index b86d18f..4c24142 100644 --- a/README.md +++ b/README.md @@ -16,7 +16,7 @@ ChatGLM-6B 使用了和 ChatGPT 相似的技术,针对中文问答和对话进 *Read this in [English](README_en.md).* ## 更新信息 -**[2023/04/06]** 优化web demo的界面。移除embedding中的image token以减小显存占用(需要更新模型文件`pytorch_model-00001-of-00008.bin`和`pytorch_model-00008-of-00008.bin`)。 +**[2023/04/06]** 优化web demo的界面。移除embedding中的image token以减小显存占用(需要更新模型文件`pytorch_model-00001-of-00008.bin`和`pytorch_model-00008-of-00008.bin`)。去掉了对 `icetk` 的依赖(需要更新模型文件`ice_text.model`)。 **[2023/03/31]** 增加基于 [P-Tuning-v2](https://github.com/THUDM/P-tuning-v2) 的高效参数微调实现,INT4 量化级别下最低只需 7GB 显存即可进行模型微调。详见[高效参数微调方法](ptuning/README.md)。 From 5ed85db84f67c82f8806fc963792974d172b0f26 Mon Sep 17 00:00:00 2001 From: duzx16 Date: Fri, 7 Apr 2023 01:11:44 +0800 Subject: [PATCH 066/110] Update README.md --- README.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/README.md b/README.md index 4c24142..35a7d3f 100644 --- a/README.md +++ b/README.md @@ -16,6 +16,8 @@ ChatGLM-6B 使用了和 ChatGPT 相似的技术,针对中文问答和对话进 *Read this in [English](README_en.md).* ## 更新信息 +**如果你遇到了任何问题,请先尝试从HF Repo 或 Tsinghua Cloud 重新下载模型文件**。 + **[2023/04/06]** 优化web demo的界面。移除embedding中的image token以减小显存占用(需要更新模型文件`pytorch_model-00001-of-00008.bin`和`pytorch_model-00008-of-00008.bin`)。去掉了对 `icetk` 的依赖(需要更新模型文件`ice_text.model`)。 **[2023/03/31]** 增加基于 [P-Tuning-v2](https://github.com/THUDM/P-tuning-v2) 的高效参数微调实现,INT4 量化级别下最低只需 7GB 显存即可进行模型微调。详见[高效参数微调方法](ptuning/README.md)。 From 1777c155578dd22f86a74943a89982cc02469177 Mon Sep 17 00:00:00 2001 From: duzx16 Date: Fri, 7 Apr 2023 01:13:18 +0800 Subject: [PATCH 067/110] Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 35a7d3f..391126e 100644 --- a/README.md +++ b/README.md @@ -16,7 +16,7 @@ ChatGLM-6B 使用了和 ChatGPT 相似的技术,针对中文问答和对话进 *Read this in [English](README_en.md).* ## 更新信息 -**如果你遇到了任何问题,请先尝试从HF Repo 或 Tsinghua Cloud 重新下载模型文件**。 +**如果你遇到了任何问题并且是从本地加载模型的,请先尝试从 [HF Repo](https://huggingface.co/THUDM/chatglm-6b) 或 [Tsinghua Cloud](https://cloud.tsinghua.edu.cn/d/fb9f16d6dc8f482596c2/) 重新下载模型文件**。 **[2023/04/06]** 优化web demo的界面。移除embedding中的image token以减小显存占用(需要更新模型文件`pytorch_model-00001-of-00008.bin`和`pytorch_model-00008-of-00008.bin`)。去掉了对 `icetk` 的依赖(需要更新模型文件`ice_text.model`)。 From 6426660124373b622b28d0efb817d49464df1808 Mon Sep 17 00:00:00 2001 From: duzx16 Date: Fri, 7 Apr 2023 01:17:11 +0800 Subject: [PATCH 068/110] add sentencepiece in requirements.txt --- requirements.txt | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 4788707..214d68f 100644 --- a/requirements.txt +++ b/requirements.txt @@ -3,4 +3,5 @@ transformers==4.27.1 cpm_kernels torch>=1.10 gradio -mdtex2html \ No newline at end of file +mdtex2html +sentencepiece \ No newline at end 
of file From eb22e3ad6a55d9834102eee077b4a98ca393724f Mon Sep 17 00:00:00 2001 From: LogCong Date: Fri, 7 Apr 2023 10:25:33 +0800 Subject: [PATCH 069/110] update readme --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index 391126e..5f9689c 100644 --- a/README.md +++ b/README.md @@ -35,6 +35,7 @@ ChatGLM-6B 使用了和 ChatGPT 相似的技术,针对中文问答和对话进 * [bibliothecarius](https://github.com/coderabbit214/bibliothecarius):快速构建服务以集成您的本地数据和AI模型,支持ChatGLM等本地化模型接入。 * [闻达](https://github.com/l15y/wenda):大型语言模型调用平台,基于 ChatGLM-6B 实现了类 ChatPDF 功能 * [JittorLLMs](https://github.com/Jittor/JittorLLMs):最低3G显存或者没有显卡都可运行 ChatGLM-6B FP16, 支持Linux、windows、Mac部署 +* [ChatGLM-Finetuning](https://github.com/liucongg/ChatGLM-Finetuning):基于ChatGLM-6B模型,进行下游具体任务微调,涉及Freeze、Lora、P-tuning等,并进行实验效果对比。 以下是部分针对本项目的教程/文档: * [Windows部署文档](https://github.com/ZhangErling/ChatGLM-6B/blob/main/deployment_windows.md) From c9f68cf39a63660e831310675d7a8f8ba934fbd2 Mon Sep 17 00:00:00 2001 From: Bluzy Date: Fri, 7 Apr 2023 11:19:43 +0800 Subject: [PATCH 070/110] update web_demo --- web_demo.py | 2 +- web_demo2.py | 16 +++++++++++++--- 2 files changed, 14 insertions(+), 4 deletions(-) diff --git a/web_demo.py b/web_demo.py index 88a6dc8..522a4bd 100644 --- a/web_demo.py +++ b/web_demo.py @@ -42,4 +42,4 @@ with gr.Blocks() as demo: temperature = gr.Slider(0, 1, value=0.95, step=0.01, label="Temperature", interactive=True) button = gr.Button("Generate") button.click(predict, [txt, max_length, top_p, temperature, state], [state] + text_boxes) -demo.queue().launch(share=False, inbrowser=True) +demo.queue().launch(server_port=6006, server_name='0.0.0.0', share=False, inbrowser=True) diff --git a/web_demo2.py b/web_demo2.py index 4e1f0e4..226682e 100644 --- a/web_demo2.py +++ b/web_demo2.py @@ -21,7 +21,7 @@ MAX_TURNS = 20 MAX_BOXES = MAX_TURNS * 2 -def predict(input, history=None): +def predict(input, max_length, top_p, temperature, history=None): tokenizer, model = get_model() if history is None: history = [] @@ -35,7 +35,8 @@ def predict(input, history=None): message(input, avatar_style="big-smile", key=str(len(history)) + "_user") st.write("AI正在回复:") with st.empty(): - for response, history in model.stream_chat(tokenizer, input, history): + for response, history in model.stream_chat(tokenizer, input, history, max_length=max_length, top_p=top_p, + temperature=temperature): query, response = history[-1] st.write(response) @@ -49,6 +50,15 @@ prompt_text = st.text_area(label="用户命令输入", height = 100, placeholder="请在这儿输入您的命令") +max_length = st.sidebar.slider( + 'max_length', 0, 4096, 2048, step=1 +) +top_p = st.sidebar.slider( + 'top_p', 0.0, 1.0, 0.6, step=0.01 +) +temperature = st.sidebar.slider( + 'temperature', 0.0, 1.0, 0.95, step=0.01 +) if 'state' not in st.session_state: st.session_state['state'] = [] @@ -56,4 +66,4 @@ if 'state' not in st.session_state: if st.button("发送", key="predict"): with st.spinner("AI正在思考,请稍等........"): # text generation - st.session_state["state"] = predict(prompt_text, st.session_state["state"]) + st.session_state["state"] = predict(prompt_text, max_length, top_p, temperature, st.session_state["state"]) \ No newline at end of file From d694a0087efaff3cabbf94fbf9b5ab12ec49d888 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Vinlic=E7=A7=91=E6=8A=80?= Date: Fri, 7 Apr 2023 14:27:02 +0800 Subject: [PATCH 071/110] =?UTF-8?q?=E9=94=99=E5=88=AB=E5=AD=97=E4=BF=AE?= =?UTF-8?q?=E6=AD=A3?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- ptuning/README.md 
| 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ptuning/README.md b/ptuning/README.md index 753d895..8ec1d6d 100644 --- a/ptuning/README.md +++ b/ptuning/README.md @@ -6,7 +6,7 @@ *Read this in [English](README_en.md).* ## 软件依赖 -运行微调需要4.27.1版本的`transformers`。除 ChatGLM-6B 的依赖之外,还需要按照以下依赖 +运行微调需要4.27.1版本的`transformers`。除 ChatGLM-6B 的依赖之外,还需要安装以下依赖 ``` pip install rouge_chinese nltk jieba datasets ``` From 0cf46b2348f3d36af2508f2db02e86f1daf7e3ca Mon Sep 17 00:00:00 2001 From: duzx16 Date: Fri, 7 Apr 2023 16:43:43 +0800 Subject: [PATCH 072/110] Remove version for protobuf --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 214d68f..6accb30 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,4 +1,4 @@ -protobuf>=3.19.5,<3.20.1 +protobuf transformers==4.27.1 cpm_kernels torch>=1.10 From 5caab29c9fc37ffb55e709c17144bb4b95cc87c1 Mon Sep 17 00:00:00 2001 From: songxxzp Date: Sat, 8 Apr 2023 00:24:39 +0800 Subject: [PATCH 073/110] Update README.md --- README.md | 2 ++ README_en.md | 2 ++ 2 files changed, 4 insertions(+) diff --git a/README.md b/README.md index 5f9689c..a5e02dd 100644 --- a/README.md +++ b/README.md @@ -55,6 +55,8 @@ ChatGLM-6B 使用了和 ChatGPT 相似的技术,针对中文问答和对话进 使用 pip 安装依赖:`pip install -r requirements.txt`,其中 `transformers` 库版本推荐为 `4.27.1`,但理论上不低于 `4.23.1` 即可。 +此外,如果需要在 cpu 上运行量化后的模型,还需要安装 `gcc` 与 `openmp`。多数 Linux 发行版默认已安装。对于 Windows ,可在安装 [TDM-GCC](https://jmeubank.github.io/tdm-gcc/) 时勾选 `openmp`。 Windows 测试环境 `gcc` 版本为 `TDM-GCC 10.3.0`, Linux 为 `gcc 11.3.0`。 + ### 代码调用 可以通过如下代码调用 ChatGLM-6B 模型来生成对话: diff --git a/README_en.md b/README_en.md index da2b8dc..1a56c39 100644 --- a/README_en.md +++ b/README_en.md @@ -36,6 +36,8 @@ If you have other good projects, please refer to the above format to add to READ Install the requirements with pip: `pip install -r requirements.txt`. `transformers` library version is recommended to be `4.27.1`, but theoretically any version no lower than `4.23.1` is acceptable. +In addition, if you need to run the quantified model on the CPU, you also need to install `gcc` and `openmp`. Most Linux distributions are installed by default. For Windows, you can check `openmp` when installing [TDM-GCC](https://jmeubank.github.io/tdm-gcc/). On Windows testing environment, the `gcc` version is `TDM-GCC 10.3.0`, and on Linux is `gcc 11.3.0`. 
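For reference, CPU-only use of the quantized checkpoint looks like the following minimal sketch, assuming the INT4 model from the Hub; `gcc` and `openmp` matter here because the quantization kernels are compiled when the model is loaded:

```python
from transformers import AutoModel, AutoTokenizer

# CPU inference with the INT4-quantized checkpoint (no CUDA required).
tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True)
model = AutoModel.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True).float()
model = model.eval()

response, history = model.chat(tokenizer, "你好", history=[])
print(response)
```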
+ ### Usage Generate dialogue with the following code From 5811badee6a04bca31123547766634b4090b32ee Mon Sep 17 00:00:00 2001 From: duzx16 Date: Sun, 9 Apr 2023 15:22:27 +0800 Subject: [PATCH 074/110] Add thanks --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index a5e02dd..28a9553 100644 --- a/README.md +++ b/README.md @@ -18,7 +18,7 @@ ChatGLM-6B 使用了和 ChatGPT 相似的技术,针对中文问答和对话进 ## 更新信息 **如果你遇到了任何问题并且是从本地加载模型的,请先尝试从 [HF Repo](https://huggingface.co/THUDM/chatglm-6b) 或 [Tsinghua Cloud](https://cloud.tsinghua.edu.cn/d/fb9f16d6dc8f482596c2/) 重新下载模型文件**。 -**[2023/04/06]** 优化web demo的界面。移除embedding中的image token以减小显存占用(需要更新模型文件`pytorch_model-00001-of-00008.bin`和`pytorch_model-00008-of-00008.bin`)。去掉了对 `icetk` 的依赖(需要更新模型文件`ice_text.model`)。 +**[2023/04/06]** 优化web demo的界面(感谢 [@tuteng0915](https://github.com/tuteng0915))。移除embedding中的image token以减小显存占用(需要更新模型文件`pytorch_model-00001-of-00008.bin`和`pytorch_model-00008-of-00008.bin`,感谢 [@silverriver](https://github.com/silverriver) 提出的想法)。去掉了对 `icetk` 的依赖(需要更新模型文件`ice_text.model`)。 **[2023/03/31]** 增加基于 [P-Tuning-v2](https://github.com/THUDM/P-tuning-v2) 的高效参数微调实现,INT4 量化级别下最低只需 7GB 显存即可进行模型微调。详见[高效参数微调方法](ptuning/README.md)。 From 5111a422a48240fa53b99c75572de136c9740ced Mon Sep 17 00:00:00 2001 From: yanqiangmiffy <1185918903@qq.com> Date: Sun, 9 Apr 2023 16:02:26 +0800 Subject: [PATCH 075/110] Update README.md --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index 28a9553..56059b8 100644 --- a/README.md +++ b/README.md @@ -36,6 +36,7 @@ ChatGLM-6B 使用了和 ChatGPT 相似的技术,针对中文问答和对话进 * [闻达](https://github.com/l15y/wenda):大型语言模型调用平台,基于 ChatGLM-6B 实现了类 ChatPDF 功能 * [JittorLLMs](https://github.com/Jittor/JittorLLMs):最低3G显存或者没有显卡都可运行 ChatGLM-6B FP16, 支持Linux、windows、Mac部署 * [ChatGLM-Finetuning](https://github.com/liucongg/ChatGLM-Finetuning):基于ChatGLM-6B模型,进行下游具体任务微调,涉及Freeze、Lora、P-tuning等,并进行实验效果对比。 +* [InstructGLM](https://github.com/yanqiangmiffy/InstructGLM):基于ChatGLM-6B进行指令学习,汇总开源中英文指令数据,基于Lora进行指令数据微调,开放了Alpaca、Belle微调后的Lora权重,修复web_demo重复问题 以下是部分针对本项目的教程/文档: * [Windows部署文档](https://github.com/ZhangErling/ChatGLM-6B/blob/main/deployment_windows.md) From f1407bec5f190dfa7d9855d512485a6fecd8bbfe Mon Sep 17 00:00:00 2001 From: duzx16 Date: Sun, 9 Apr 2023 20:41:18 +0800 Subject: [PATCH 076/110] Set share=False --- web_demo.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/web_demo.py b/web_demo.py index df7f983..97ea622 100644 --- a/web_demo.py +++ b/web_demo.py @@ -98,4 +98,4 @@ with gr.Blocks() as demo: emptyBtn.click(reset_state, outputs=[chatbot, history], show_progress=True) -demo.queue().launch(share=True, inbrowser=True) +demo.queue().launch(share=False, inbrowser=True) From 0362542f56ee8b38da2ee092d7a9c0e4ae85176d Mon Sep 17 00:00:00 2001 From: duzx16 Date: Sun, 9 Apr 2023 21:00:54 +0800 Subject: [PATCH 077/110] Update README --- README.md | 69 +++++++++++++++++++++++++------------------------------ 1 file changed, 31 insertions(+), 38 deletions(-) diff --git a/README.md b/README.md index 56059b8..5c25575 100644 --- a/README.md +++ b/README.md @@ -15,31 +15,8 @@ ChatGLM-6B 使用了和 ChatGPT 相似的技术,针对中文问答和对话进 *Read this in [English](README_en.md).* -## 更新信息 -**如果你遇到了任何问题并且是从本地加载模型的,请先尝试从 [HF Repo](https://huggingface.co/THUDM/chatglm-6b) 或 [Tsinghua Cloud](https://cloud.tsinghua.edu.cn/d/fb9f16d6dc8f482596c2/) 重新下载模型文件**。 - -**[2023/04/06]** 优化web demo的界面(感谢 [@tuteng0915](https://github.com/tuteng0915))。移除embedding中的image 
token以减小显存占用(需要更新模型文件`pytorch_model-00001-of-00008.bin`和`pytorch_model-00008-of-00008.bin`,感谢 [@silverriver](https://github.com/silverriver) 提出的想法)。去掉了对 `icetk` 的依赖(需要更新模型文件`ice_text.model`)。 - -**[2023/03/31]** 增加基于 [P-Tuning-v2](https://github.com/THUDM/P-tuning-v2) 的高效参数微调实现,INT4 量化级别下最低只需 7GB 显存即可进行模型微调。详见[高效参数微调方法](ptuning/README.md)。 - -**[2023/03/23]** 增加 API 部署(感谢 [@LemonQu-GIT](https://github.com/LemonQu-GIT))。增加 Embedding 量化模型 [ChatGLM-6B-INT4-QE](https://huggingface.co/THUDM/chatglm-6b-int4-qe)。增加配备 Apple Silicon 芯片的 Mac 上 GPU 加速的支持。 - -**[2023/03/19]** 增加流式输出接口 `stream_chat`,已更新到网页版和命令行 Demo。修复输出中的中文标点。增加量化后的模型 [ChatGLM-6B-INT4](https://huggingface.co/THUDM/chatglm-6b-int4) - ## 友情链接 -以下是部分基于本仓库开发的开源项目: -* [SwissArmyTransformer](https://github.com/THUDM/SwissArmyTransformer): 一个Transformer统一编程框架,ChatGLM-6B已经在SAT中进行实现并可以进行P-tuning微调。 -* [ChatGLM-MNN](https://github.com/wangzhaode/ChatGLM-MNN): 一个基于 MNN 的 ChatGLM-6B C++ 推理实现,支持根据显存大小自动分配计算任务给 GPU 和 CPU -* [ChatGLM-Tuning](https://github.com/mymusise/ChatGLM-Tuning): 基于 LoRA 对 ChatGLM-6B 进行微调。类似的项目还包括 [Humanable ChatGLM/GPT Fine-tuning | ChatGLM 微调](https://github.com/hscspring/hcgf) -* [langchain-ChatGLM](https://github.com/imClumsyPanda/langchain-ChatGLM):基于本地知识的 ChatGLM 应用,基于LangChain -* [bibliothecarius](https://github.com/coderabbit214/bibliothecarius):快速构建服务以集成您的本地数据和AI模型,支持ChatGLM等本地化模型接入。 -* [闻达](https://github.com/l15y/wenda):大型语言模型调用平台,基于 ChatGLM-6B 实现了类 ChatPDF 功能 -* [JittorLLMs](https://github.com/Jittor/JittorLLMs):最低3G显存或者没有显卡都可运行 ChatGLM-6B FP16, 支持Linux、windows、Mac部署 -* [ChatGLM-Finetuning](https://github.com/liucongg/ChatGLM-Finetuning):基于ChatGLM-6B模型,进行下游具体任务微调,涉及Freeze、Lora、P-tuning等,并进行实验效果对比。 -* [InstructGLM](https://github.com/yanqiangmiffy/InstructGLM):基于ChatGLM-6B进行指令学习,汇总开源中英文指令数据,基于Lora进行指令数据微调,开放了Alpaca、Belle微调后的Lora权重,修复web_demo重复问题 - -以下是部分针对本项目的教程/文档: -* [Windows部署文档](https://github.com/ZhangErling/ChatGLM-6B/blob/main/deployment_windows.md) +部分基于本仓库开发的开源项目参见 [PROJECT.md](PROJECT.md) 如果你有其他好的项目/教程的话,欢迎参照上述格式添加到 README 中并提出 [Pull Request](https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/proposing-changes-to-your-work-with-pull-requests/creating-a-pull-request-from-a-fork)。 @@ -83,9 +60,23 @@ ChatGLM-6B 使用了和 ChatGPT 相似的技术,针对中文问答和对话进 如果这些方法无法帮助你入睡,你可以考虑咨询医生或睡眠专家,寻求进一步的建议。 ``` -完整的模型实现可以在 [Hugging Face Hub](https://huggingface.co/THUDM/chatglm-6b) 上查看。如果你从 Hugging Face Hub 上下载 checkpoint 的速度较慢,也可以从[这里](https://cloud.tsinghua.edu.cn/d/fb9f16d6dc8f482596c2/)手动下载。 +### 从本地加载模型 +以上代码会由 `transformers` 自动下载模型实现和参数。完整的模型实现可以在 [Hugging Face Hub](https://huggingface.co/THUDM/chatglm-6b)。如果你的网络环境较差,下载模型参数可能会花费较长时间甚至失败。此时可以先将模型下载到本地,然后从本地加载。 + +从 Hugging Face Hub 下载模型需要先[安装Git LFS](https://docs.github.com/zh/repositories/working-with-files/managing-large-files/installing-git-large-file-storage),然后运行 +```Shell +git clone https://huggingface.co/THUDM/chatglm-6b +``` + +如果你从 Hugging Face Hub 上下载 checkpoint 的速度较慢,可以只下载模型实现 +```Shell +GIT_LFS_SKIP_SMUDGE=1 git clone https://huggingface.co/THUDM/chatglm-6b +``` +然后从[这里](https://cloud.tsinghua.edu.cn/d/fb9f16d6dc8f482596c2/)手动下载模型参数文件,并将下载的文件替换到本地的 `chatglm-6b` 目录下。 -### Demo +将模型下载到本地之后,将以上代码中的 `THUDM/chatglm-6b` 替换为你本地的 `chatglm-6b` 文件夹的路径,即可从本地加载模型。 + +## Demo & API 我们提供了一个基于 [Gradio](https://gradio.app) 的网页版 Demo 和一个命令行 Demo。使用时首先需要下载本仓库: @@ -154,38 +145,33 @@ model = AutoModel.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True).qu 模型量化会带来一定的性能损失,经过测试,ChatGLM-6B 在 4-bit 量化下仍然能够进行自然流畅的生成。使用 [GPT-Q](https://arxiv.org/abs/2210.17323) 
等量化方案可以进一步压缩量化精度/提升相同量化精度下的模型性能,欢迎大家提出对应的 Pull Request。 -**[2023/03/19]** 量化过程需要在内存中首先加载 FP16 格式的模型,消耗大概 13GB 的内存。如果你的内存不足的话,可以直接加载量化后的模型,仅需大概 5.2GB 的内存: +量化过程需要在内存中首先加载 FP16 格式的模型,消耗大概 13GB 的内存。如果你的内存不足的话,可以直接加载量化后的模型,仅需大概 5.2GB 的内存: ```python model = AutoModel.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True).half().cuda() ``` -**[2023/03/24]** 我们进一步提供了对Embedding量化后的模型,模型参数仅占用4.3 GB显存: +我们进一步提供了对Embedding量化后的模型,模型参数仅占用4.3 GB显存: ```python model = AutoModel.from_pretrained("THUDM/chatglm-6b-int4-qe", trust_remote_code=True).half().cuda() ``` - - ### CPU 部署 如果你没有 GPU 硬件的话,也可以在 CPU 上进行推理,但是推理速度会更慢。使用方法如下(需要大概 32GB 内存) ```python model = AutoModel.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True).float() ``` -**[2023/03/19]** 如果你的内存不足,可以直接加载量化后的模型: +如果你的内存不足,可以直接加载量化后的模型: ```python model = AutoModel.from_pretrained("THUDM/chatglm-6b-int4",trust_remote_code=True).float() ``` -如果遇到了报错 `Could not find module 'nvcuda.dll'` 或者 `RuntimeError: Unknown platform: darwin` (MacOS) 的话请参考这个[Issue](https://github.com/THUDM/ChatGLM-6B/issues/6#issuecomment-1470060041). +如果遇到了报错 `Could not find module 'nvcuda.dll'` 或者 `RuntimeError: Unknown platform: darwin` (MacOS) ,请[从本地加载模型](README.md#从本地加载模型) ### Mac 上的 GPU 加速 -对于搭载了Apple Silicon的Mac(以及MacBook),可以使用 MPS 后端来在 GPU 上运行 ChatGLM-6B。首先需要参考 Apple 的 [官方说明](https://developer.apple.com/metal/pytorch) 安装 PyTorch-Nightly。然后将模型仓库 clone 到本地(需要先[安装Git LFS](https://docs.github.com/zh/repositories/working-with-files/managing-large-files/installing-git-large-file-storage)) -```shell -git lfs install -git clone https://huggingface.co/THUDM/chatglm-6b -``` -将代码中的模型加载改为从本地加载,并使用 mps 后端 +对于搭载了Apple Silicon的Mac(以及MacBook),可以使用 MPS 后端来在 GPU 上运行 ChatGLM-6B。需要参考 Apple 的 [官方说明](https://developer.apple.com/metal/pytorch) 安装 PyTorch-Nightly。 + +目前在 MacOS 上只支持[从本地加载模型](README.md#从本地加载模型)。将代码中的模型加载改为从本地加载,并使用 mps 后端 ```python model = AutoModel.from_pretrained("your local path", trust_remote_code=True).half().to('mps') ``` @@ -194,7 +180,14 @@ model = AutoModel.from_pretrained("your local path", trust_remote_code=True).hal ## 高效参数微调 基于 [P-tuning v2](https://github.com/THUDM/P-tuning-v2) 的高效参数微调。具体使用方法详见 [ptuning/README.md](ptuning/README.md)。 +## 更新信息 +**[2023/04/06]** 优化web demo的界面(感谢 [@tuteng0915](https://github.com/tuteng0915))。移除embedding中的image token以减小显存占用(需要更新模型文件`pytorch_model-00001-of-00008.bin`和`pytorch_model-00008-of-00008.bin`,感谢 [@silverriver](https://github.com/silverriver) 提出的想法)。去掉了对 `icetk` 的依赖(需要更新模型文件`ice_text.model`)。 + +**[2023/03/31]** 增加基于 [P-Tuning-v2](https://github.com/THUDM/P-tuning-v2) 的高效参数微调实现,INT4 量化级别下最低只需 7GB 显存即可进行模型微调。详见[高效参数微调方法](ptuning/README.md)。 +**[2023/03/23]** 增加 API 部署(感谢 [@LemonQu-GIT](https://github.com/LemonQu-GIT))。增加 Embedding 量化模型 [ChatGLM-6B-INT4-QE](https://huggingface.co/THUDM/chatglm-6b-int4-qe)。增加配备 Apple Silicon 芯片的 Mac 上 GPU 加速的支持。 + +**[2023/03/19]** 增加流式输出接口 `stream_chat`,已更新到网页版和命令行 Demo。修复输出中的中文标点。增加量化后的模型 [ChatGLM-6B-INT4](https://huggingface.co/THUDM/chatglm-6b-int4) ## ChatGLM-6B 示例 From 4478546058fceac3f599d307c988ce5cbf115ad3 Mon Sep 17 00:00:00 2001 From: duzx16 Date: Mon, 10 Apr 2023 10:08:46 +0800 Subject: [PATCH 078/110] Add PROJECT.md --- PROJECT.md | 15 +++++++++++++++ 1 file changed, 15 insertions(+) create mode 100644 PROJECT.md diff --git a/PROJECT.md b/PROJECT.md new file mode 100644 index 0000000..c529b1e --- /dev/null +++ b/PROJECT.md @@ -0,0 +1,15 @@ +# 友情链接 + +以下是部分基于本仓库开发的开源项目: +* [SwissArmyTransformer](https://github.com/THUDM/SwissArmyTransformer): 
一个Transformer统一编程框架,ChatGLM-6B已经在SAT中进行实现并可以进行P-tuning微调。 +* [ChatGLM-MNN](https://github.com/wangzhaode/ChatGLM-MNN): 一个基于 MNN 的 ChatGLM-6B C++ 推理实现,支持根据显存大小自动分配计算任务给 GPU 和 CPU +* [ChatGLM-Tuning](https://github.com/mymusise/ChatGLM-Tuning): 基于 LoRA 对 ChatGLM-6B 进行微调。类似的项目还包括 [Humanable ChatGLM/GPT Fine-tuning | ChatGLM 微调](https://github.com/hscspring/hcgf) +* [langchain-ChatGLM](https://github.com/imClumsyPanda/langchain-ChatGLM):基于本地知识的 ChatGLM 应用,基于LangChain +* [bibliothecarius](https://github.com/coderabbit214/bibliothecarius):快速构建服务以集成您的本地数据和AI模型,支持ChatGLM等本地化模型接入。 +* [闻达](https://github.com/l15y/wenda):大型语言模型调用平台,基于 ChatGLM-6B 实现了类 ChatPDF 功能 +* [JittorLLMs](https://github.com/Jittor/JittorLLMs):最低3G显存或者没有显卡都可运行 ChatGLM-6B FP16, 支持Linux、windows、Mac部署 +* [ChatGLM-Finetuning](https://github.com/liucongg/ChatGLM-Finetuning):基于ChatGLM-6B模型,进行下游具体任务微调,涉及Freeze、Lora、P-tuning等,并进行实验效果对比。 +* [InstructGLM](https://github.com/yanqiangmiffy/InstructGLM):基于ChatGLM-6B进行指令学习,汇总开源中英文指令数据,基于Lora进行指令数据微调,开放了Alpaca、Belle微调后的Lora权重,修复web_demo重复问题 + +以下是部分针对本项目的教程/文档: +* [Windows部署文档](https://github.com/ZhangErling/ChatGLM-6B/blob/main/deployment_windows.md) \ No newline at end of file From cbb9f44e30944971da5110747dd22618003d6793 Mon Sep 17 00:00:00 2001 From: rainatam Date: Mon, 10 Apr 2023 17:26:17 +0800 Subject: [PATCH 079/110] Save PrefixEncoder params only --- ptuning/README.md | 12 + ptuning/evaluate.sh | 3 +- ptuning/main.py | 22 +- ptuning/trainer.py | 3824 ++++++++++++++++++++++++++++++++++++ ptuning/trainer_seq2seq.py | 2 +- 5 files changed, 3858 insertions(+), 5 deletions(-) create mode 100644 ptuning/trainer.py diff --git a/ptuning/README.md b/ptuning/README.md index 8ec1d6d..f192718 100644 --- a/ptuning/README.md +++ b/ptuning/README.md @@ -39,6 +39,18 @@ bash train.sh ```shell bash evaluate.sh ``` +**[2023/04/10更新]** 在 P-tuning v2 训练时模型只保存 PrefixEncoder 部分的参数,在推理时需要同时载入原 ChatGLM-6B 模型以及 PrefixEncoder 的 Checkpoint,因此需要指定参数(已更新 `evaluate.sh`) : + +```shell +--model_name_or_path THUDM/chatglm-6b +--ptuning_checkpoint $CHECKPOINT_PATH +``` + +仍然兼容旧版全参保存的 Checkpoint,只需要跟之前一样设定 `model_name_or_path`: + +```shell +--model_name_or_path $CHECKPOINT_PATH +``` 评测指标为中文 Rouge score 和 BLEU-4。生成的结果保存在 `./output/adgen-chatglm-6b-pt-8-1e-2/generated_predictions.txt`。 diff --git a/ptuning/evaluate.sh b/ptuning/evaluate.sh index 120a8c2..ab85536 100644 --- a/ptuning/evaluate.sh +++ b/ptuning/evaluate.sh @@ -9,7 +9,8 @@ CUDA_VISIBLE_DEVICES=0 python3 main.py \ --overwrite_cache \ --prompt_column content \ --response_column summary \ - --model_name_or_path ./output/$CHECKPOINT/checkpoint-$STEP \ + --model_name_or_path THUDM/chatglm-6b \ + --ptuning_checkpoint ./output/$CHECKPOINT/checkpoint-$STEP \ --output_dir ./output/$CHECKPOINT \ --overwrite_output_dir \ --max_source_length 64 \ diff --git a/ptuning/main.py b/ptuning/main.py index e34e95e..ecce8c2 100644 --- a/ptuning/main.py +++ b/ptuning/main.py @@ -28,6 +28,7 @@ from datasets import load_dataset import jieba from rouge_chinese import Rouge from nltk.translate.bleu_score import sentence_bleu, SmoothingFunction +import torch import transformers from transformers import ( @@ -110,13 +111,28 @@ def main(): tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, trust_remote_code=True) - model = AutoModel.from_pretrained(model_args.model_name_or_path, config=config, trust_remote_code=True) + if model_args.ptuning_checkpoint is not None: + # Evaluation + # Loading extra state dict of prefix encoder + model = 
AutoModel.from_pretrained(model_args.model_name_or_path, config=config, trust_remote_code=True) + prefix_state_dict = torch.load(os.path.join(model_args.ptuning_checkpoint, "pytorch_model.bin")) + new_prefix_state_dict = {} + for k, v in prefix_state_dict.items(): + new_prefix_state_dict[k[len("transformer.prefix_encoder."):]] = v + model.transformer.prefix_encoder.load_state_dict(new_prefix_state_dict) + else: + model = AutoModel.from_pretrained(model_args.model_name_or_path, config=config, trust_remote_code=True) if model_args.quantization_bit is not None: print(f"Quantized to {model_args.quantization_bit} bit") model = model.quantize(model_args.quantization_bit) - model = model.half() - model.transformer.prefix_encoder.float() + if model_args.pre_seq_len is not None: + # P-tuning v2 + model = model.half() + model.transformer.prefix_encoder.float() + else: + # Finetune + model = model.float() prefix = data_args.source_prefix if data_args.source_prefix is not None else "" diff --git a/ptuning/trainer.py b/ptuning/trainer.py new file mode 100644 index 0000000..c49944f --- /dev/null +++ b/ptuning/trainer.py @@ -0,0 +1,3824 @@ +# coding=utf-8 +# Copyright 2020-present the HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +The Trainer class, to easily train a 🤗 Transformers from scratch or finetune it on a new task. 
+""" + +import contextlib +import functools +import glob +import inspect +import math +import os +import random +import re +import shutil +import sys +import time +import warnings +from collections.abc import Mapping +from distutils.util import strtobool +from pathlib import Path +from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple, Union + +from tqdm.auto import tqdm + + +# Integrations must be imported before ML frameworks: +# isort: off +from transformers.integrations import ( + default_hp_search_backend, + get_reporting_integration_callbacks, + hp_params, + is_fairscale_available, + is_optuna_available, + is_ray_tune_available, + is_sigopt_available, + is_wandb_available, + run_hp_search_optuna, + run_hp_search_ray, + run_hp_search_sigopt, + run_hp_search_wandb, +) + +# isort: on + +import numpy as np +import torch +import torch.distributed as dist +from huggingface_hub import Repository, create_repo +from packaging import version +from torch import nn +from torch.utils.data import DataLoader, Dataset, RandomSampler, SequentialSampler +from torch.utils.data.distributed import DistributedSampler + +from transformers import __version__ +from transformers.configuration_utils import PretrainedConfig +from transformers.data.data_collator import DataCollator, DataCollatorWithPadding, default_data_collator +from transformers.debug_utils import DebugOption, DebugUnderflowOverflow +from transformers.deepspeed import deepspeed_init, is_deepspeed_zero3_enabled +from transformers.dependency_versions_check import dep_version_check +from transformers.modelcard import TrainingSummary +from transformers.modeling_utils import PreTrainedModel, load_sharded_checkpoint, unwrap_model +from transformers.models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES, MODEL_MAPPING_NAMES +from transformers.optimization import Adafactor, get_scheduler +from transformers.pytorch_utils import ALL_LAYERNORM_LAYERS, is_torch_greater_or_equal_than_1_10, is_torch_less_than_1_11 +from transformers.tokenization_utils_base import PreTrainedTokenizerBase +from transformers.trainer_callback import ( + CallbackHandler, + DefaultFlowCallback, + PrinterCallback, + ProgressCallback, + TrainerCallback, + TrainerControl, + TrainerState, +) +from transformers.trainer_pt_utils import ( + DistributedLengthGroupedSampler, + DistributedSamplerWithLoop, + DistributedTensorGatherer, + IterableDatasetShard, + LabelSmoother, + LengthGroupedSampler, + SequentialDistributedSampler, + ShardSampler, + distributed_broadcast_scalars, + distributed_concat, + find_batch_size, + get_module_class_from_name, + get_parameter_names, + nested_concat, + nested_detach, + nested_numpify, + nested_truncate, + nested_xla_mesh_reduce, + reissue_pt_warnings, +) +from transformers.trainer_utils import ( + PREFIX_CHECKPOINT_DIR, + BestRun, + EvalLoopOutput, + EvalPrediction, + FSDPOption, + HPSearchBackend, + HubStrategy, + IntervalStrategy, + PredictionOutput, + RemoveColumnsCollator, + ShardedDDPOption, + TrainerMemoryTracker, + TrainOutput, + default_compute_objective, + default_hp_space, + denumpify_detensorize, + enable_full_determinism, + find_executable_batch_size, + get_last_checkpoint, + has_length, + number_of_arguments, + seed_worker, + set_seed, + speed_metrics, +) +from transformers.training_args import OptimizerNames, ParallelMode, TrainingArguments +from transformers.utils import ( + CONFIG_NAME, + WEIGHTS_INDEX_NAME, + WEIGHTS_NAME, + can_return_loss, + find_labels, + get_full_repo_name, + is_accelerate_available, 
+ is_apex_available, + is_datasets_available, + is_in_notebook, + is_ipex_available, + is_sagemaker_dp_enabled, + is_sagemaker_mp_enabled, + is_torch_compile_available, + is_torch_neuroncore_available, + is_torch_tpu_available, + logging, +) +from transformers.utils.generic import ContextManagers + + +_is_native_cpu_amp_available = is_torch_greater_or_equal_than_1_10 + +DEFAULT_CALLBACKS = [DefaultFlowCallback] +DEFAULT_PROGRESS_CALLBACK = ProgressCallback + +if is_in_notebook(): + from transformers.utils.notebook import NotebookProgressCallback + + DEFAULT_PROGRESS_CALLBACK = NotebookProgressCallback + +if is_apex_available(): + from apex import amp + +if is_datasets_available(): + import datasets + +if is_torch_tpu_available(check_device=False): + import torch_xla.core.xla_model as xm + import torch_xla.debug.metrics as met + import torch_xla.distributed.parallel_loader as pl + +if is_fairscale_available(): + dep_version_check("fairscale") + import fairscale + from fairscale.nn.data_parallel import FullyShardedDataParallel as FullyShardedDDP + from fairscale.nn.data_parallel import ShardedDataParallel as ShardedDDP + from fairscale.nn.wrap import auto_wrap + from fairscale.optim import OSS + from fairscale.optim.grad_scaler import ShardedGradScaler + + +if is_sagemaker_mp_enabled(): + import smdistributed.modelparallel.torch as smp + from smdistributed.modelparallel import __version__ as SMP_VERSION + + IS_SAGEMAKER_MP_POST_1_10 = version.parse(SMP_VERSION) >= version.parse("1.10") + + from transformers.trainer_pt_utils import smp_forward_backward, smp_forward_only, smp_gather, smp_nested_concat +else: + IS_SAGEMAKER_MP_POST_1_10 = False + + +skip_first_batches = None +if is_accelerate_available(): + from accelerate import __version__ as accelerate_version + + if version.parse(accelerate_version) >= version.parse("0.16"): + from accelerate import skip_first_batches + + +if TYPE_CHECKING: + import optuna + +logger = logging.get_logger(__name__) + + +# Name of the files used for checkpointing +TRAINING_ARGS_NAME = "training_args.bin" +TRAINER_STATE_NAME = "trainer_state.json" +OPTIMIZER_NAME = "optimizer.pt" +SCHEDULER_NAME = "scheduler.pt" +SCALER_NAME = "scaler.pt" + + +class Trainer: + """ + Trainer is a simple but feature-complete training and eval loop for PyTorch, optimized for 🤗 Transformers. + + Args: + model ([`PreTrainedModel`] or `torch.nn.Module`, *optional*): + The model to train, evaluate or use for predictions. If not provided, a `model_init` must be passed. + + + + [`Trainer`] is optimized to work with the [`PreTrainedModel`] provided by the library. You can still use + your own models defined as `torch.nn.Module` as long as they work the same way as the 🤗 Transformers + models. + + + + args ([`TrainingArguments`], *optional*): + The arguments to tweak for training. Will default to a basic instance of [`TrainingArguments`] with the + `output_dir` set to a directory named *tmp_trainer* in the current directory if not provided. + data_collator (`DataCollator`, *optional*): + The function to use to form a batch from a list of elements of `train_dataset` or `eval_dataset`. Will + default to [`default_data_collator`] if no `tokenizer` is provided, an instance of + [`DataCollatorWithPadding`] otherwise. + train_dataset (`torch.utils.data.Dataset` or `torch.utils.data.IterableDataset`, *optional*): + The dataset to use for training. If it is a [`~datasets.Dataset`], columns not accepted by the + `model.forward()` method are automatically removed. 
+
+            Note that if it's a `torch.utils.data.IterableDataset` with some randomization and you are training in a
+            distributed fashion, your iterable dataset should either use an internal attribute `generator` that is a
+            `torch.Generator` for the randomization that must be identical on all processes (and the Trainer will
+            manually set the seed of this `generator` at each epoch) or have a `set_epoch()` method that internally
+            sets the seed of the RNGs used.
+        eval_dataset (Union[`torch.utils.data.Dataset`, Dict[str, `torch.utils.data.Dataset`]], *optional*):
+            The dataset to use for evaluation. If it is a [`~datasets.Dataset`], columns not accepted by the
+            `model.forward()` method are automatically removed. If it is a dictionary, it will evaluate on each
+            dataset, prepending the dictionary key to the metric name.
+        tokenizer ([`PreTrainedTokenizerBase`], *optional*):
+            The tokenizer used to preprocess the data. If provided, will be used to automatically pad the inputs to the
+            maximum length when batching inputs, and it will be saved along the model to make it easier to rerun an
+            interrupted training or reuse the fine-tuned model.
+        model_init (`Callable[[], PreTrainedModel]`, *optional*):
+            A function that instantiates the model to be used. If provided, each call to [`~Trainer.train`] will start
+            from a new instance of the model as given by this function.
+
+            The function may have zero arguments, or a single one containing the optuna/Ray Tune/SigOpt trial object,
+            to be able to choose different architectures according to hyperparameters (such as layer count, sizes of
+            inner layers, dropout probabilities, etc.).
+        compute_metrics (`Callable[[EvalPrediction], Dict]`, *optional*):
+            The function that will be used to compute metrics at evaluation. Must take an [`EvalPrediction`] and
+            return a dictionary mapping metric names to metric values.
+        callbacks (List of [`TrainerCallback`], *optional*):
+            A list of callbacks to customize the training loop. Will add those to the list of default callbacks
+            detailed [here](callback).
+
+            If you want to remove one of the default callbacks used, use the [`Trainer.remove_callback`] method.
+        optimizers (`Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR]`, *optional*): A tuple
+            containing the optimizer and the scheduler to use. Will default to an instance of [`AdamW`] on your model
+            and a scheduler given by [`get_linear_schedule_with_warmup`] controlled by `args`.
+        preprocess_logits_for_metrics (`Callable[[torch.Tensor, torch.Tensor], torch.Tensor]`, *optional*):
+            A function that preprocesses the logits right before caching them at each evaluation step. Must take two
+            tensors, the logits and the labels, and return the logits once processed as desired. The modifications made
+            by this function will be reflected in the predictions received by `compute_metrics`.
+
+            Note that the labels (second parameter) will be `None` if the dataset does not have them.
+
+    Important attributes:
+
+        - **model** -- Always points to the core model. If using a transformers model, it will be a [`PreTrainedModel`]
+          subclass.
+        - **model_wrapped** -- Always points to the most external model in case one or more other modules wrap the
+          original model. This is the model that should be used for the forward pass. For example, under `DeepSpeed`,
+          the inner model is wrapped in `DeepSpeed` and then again in `torch.nn.DistributedDataParallel`. If the inner
+          model hasn't been wrapped, then `self.model_wrapped` is the same as `self.model`.
+        - **is_model_parallel** -- Whether or not a model has been switched to a model parallel mode (different from
+          data parallelism, this means some of the model layers are split on different GPUs).
+        - **place_model_on_device** -- Whether or not to automatically place the model on the device - it will be set
+          to `False` if model parallel or deepspeed is used, or if the default
+          `TrainingArguments.place_model_on_device` is overridden to return `False`.
+        - **is_in_train** -- Whether or not a model is currently running `train` (e.g. when `evaluate` is called while
+          in `train`).
+
+    """
+
+    from transformers.trainer_pt_utils import _get_learning_rate, log_metrics, metrics_format, save_metrics, save_state
+
+    def __init__(
+        self,
+        model: Union[PreTrainedModel, nn.Module] = None,
+        args: TrainingArguments = None,
+        data_collator: Optional[DataCollator] = None,
+        train_dataset: Optional[Dataset] = None,
+        eval_dataset: Optional[Union[Dataset, Dict[str, Dataset]]] = None,
+        tokenizer: Optional[PreTrainedTokenizerBase] = None,
+        model_init: Optional[Callable[[], PreTrainedModel]] = None,
+        compute_metrics: Optional[Callable[[EvalPrediction], Dict]] = None,
+        callbacks: Optional[List[TrainerCallback]] = None,
+        optimizers: Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR] = (None, None),
+        preprocess_logits_for_metrics: Optional[Callable[[torch.Tensor, torch.Tensor], torch.Tensor]] = None,
+    ):
+        if args is None:
+            output_dir = "tmp_trainer"
+            logger.info(f"No `TrainingArguments` passed, using `output_dir={output_dir}`.")
+            args = TrainingArguments(output_dir=output_dir)
+        self.args = args
+        # Seed must be set before instantiating the model when using `model_init`.
+        enable_full_determinism(self.args.seed) if self.args.full_determinism else set_seed(self.args.seed)
+        self.hp_name = None
+        self.deepspeed = None
+        self.is_in_train = False
+
+        # memory metrics - must set up as early as possible
+        self._memory_tracker = TrainerMemoryTracker(self.args.skip_memory_metrics)
+        self._memory_tracker.start()
+
+        # set the correct log level depending on the node
+        log_level = args.get_process_log_level()
+        logging.set_verbosity(log_level)
+
+        # force device and distributed setup init explicitly
+        args._setup_devices
+
+        if model is None:
+            if model_init is not None:
+                self.model_init = model_init
+                model = self.call_model_init()
+            else:
+                raise RuntimeError("`Trainer` requires either a `model` or `model_init` argument")
+        else:
+            if model_init is not None:
+                warnings.warn(
+                    "`Trainer` requires either a `model` or `model_init` argument, but not both. `model_init` will"
+                    " overwrite your model when calling the `train` method. This will become a fatal error in the next"
+                    " release.",
+                    FutureWarning,
+                )
+            self.model_init = model_init
+
+        if model.__class__.__name__ in MODEL_MAPPING_NAMES:
+            raise ValueError(
+                f"The model you have picked ({model.__class__.__name__}) cannot be used as is for training: it only "
+                "computes hidden states and does not accept any labels. You should choose a model with a head "
+                "suitable for your task like any of the `AutoModelForXxx` listed at "
+                "https://huggingface.co/docs/transformers/model_doc/auto."
+            )
+
+        if hasattr(model, "is_parallelizable") and model.is_parallelizable and model.model_parallel:
+            self.is_model_parallel = True
+        else:
+            self.is_model_parallel = False
+
+        # At this stage the model is already loaded
+        if getattr(model, "is_loaded_in_8bit", False):
+            if getattr(model, "_is_int8_training_enabled", False):
+                logger.info(
+                    "The model is loaded in 8-bit precision. To train this model you need to add additional modules"
+                    " inside the model such as adapters using the `peft` library and freeze the model weights. Please"
+                    " check the examples in https://github.com/huggingface/peft for more details."
+                )
+            else:
+                raise ValueError(
+                    "The model you want to train is loaded in 8-bit precision. If you want to fine-tune an 8-bit"
+                    " model, please make sure that you have installed `bitsandbytes>=0.37.0`."
+                )
+
+        # Setup Sharded DDP training
+        self.sharded_ddp = None
+        if len(args.sharded_ddp) > 0:
+            if args.deepspeed:
+                raise ValueError(
+                    "Using --sharded_ddp xxx together with --deepspeed is not possible, deactivate one of those flags."
+                )
+            if len(args.fsdp) > 0:
+                raise ValueError(
+                    "Using --sharded_ddp xxx together with --fsdp is not possible, deactivate one of those flags."
+                )
+
+            if args.local_rank == -1:
+                raise ValueError("Using sharded DDP only works in distributed training.")
+            elif not is_fairscale_available():
+                raise ImportError("Sharded DDP training requires fairscale: `pip install fairscale`.")
+            elif ShardedDDPOption.SIMPLE not in args.sharded_ddp and FullyShardedDDP is None:
+                raise ImportError(
+                    "Sharded DDP in a mode other than simple training requires fairscale version >= 0.3, found "
+                    f"{fairscale.__version__}. Upgrade your fairscale library: `pip install --upgrade fairscale`."
+                )
+            elif ShardedDDPOption.SIMPLE in args.sharded_ddp:
+                self.sharded_ddp = ShardedDDPOption.SIMPLE
+            elif ShardedDDPOption.ZERO_DP_2 in args.sharded_ddp:
+                self.sharded_ddp = ShardedDDPOption.ZERO_DP_2
+            elif ShardedDDPOption.ZERO_DP_3 in args.sharded_ddp:
+                self.sharded_ddp = ShardedDDPOption.ZERO_DP_3
+
+        self.fsdp = None
+        if len(args.fsdp) > 0:
+            if args.deepspeed:
+                raise ValueError(
+                    "Using --fsdp xxx together with --deepspeed is not possible, deactivate one of those flags."
+                )
+            if not args.fsdp_config["xla"] and args.local_rank == -1:
+                raise ValueError("Using fsdp only works in distributed training.")
+
+            # dep_version_check("torch>=1.12.0")
+            # Would have to update setup.py with torch>=1.12.0,
+            # which isn't ideal given that it would force people not using FSDP to also use torch>=1.12.0;
+            # below is the current alternative.
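+            # For illustration only: `base_version` drops pre-release and local segments,
+            # so a nightly build such as "1.12.0a0+gitabc" (a hypothetical version string)
+            # still satisfies the bound even though the raw parse would not:
+            #
+            #     from packaging import version
+            #     v = version.parse("1.12.0a0+gitabc")
+            #     assert v < version.parse("1.12.0")  # pre-releases compare below the release
+            #     assert version.parse(v.base_version) >= version.parse("1.12.0")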
+            if version.parse(version.parse(torch.__version__).base_version) < version.parse("1.12.0"):
+                raise ValueError("FSDP requires PyTorch >= 1.12.0")
+
+            from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch, ShardingStrategy
+
+            if FSDPOption.FULL_SHARD in args.fsdp:
+                self.fsdp = ShardingStrategy.FULL_SHARD
+            elif FSDPOption.SHARD_GRAD_OP in args.fsdp:
+                self.fsdp = ShardingStrategy.SHARD_GRAD_OP
+            elif FSDPOption.NO_SHARD in args.fsdp:
+                self.fsdp = ShardingStrategy.NO_SHARD
+
+            self.backward_prefetch = BackwardPrefetch.BACKWARD_PRE
+            if "backward_prefetch" in self.args.fsdp_config and "backward_post" in self.args.fsdp_config.get(
+                "backward_prefetch", []
+            ):
+                self.backward_prefetch = BackwardPrefetch.BACKWARD_POST
+
+            self.forward_prefetch = False
+            if self.args.fsdp_config.get("forward_prefetch", False):
+                self.forward_prefetch = True
+
+            self.limit_all_gathers = False
+            if self.args.fsdp_config.get("limit_all_gathers", False):
+                self.limit_all_gathers = True
+
+        # one place to sort out whether to place the model on device or not
+        # postpone switching model to cuda when:
+        # 1. MP - since we are trying to fit a much bigger than 1 gpu model
+        # 2. fp16-enabled DeepSpeed loads the model in half the size and it doesn't need .to() anyway,
+        # and we only use deepspeed for training at the moment
+        # 3. full bf16 or fp16 eval - since the model needs to be cast to the right dtype first
+        # 4. Sharded DDP - same as MP
+        # 5. FSDP - same as MP
+        self.place_model_on_device = args.place_model_on_device
+        if (
+            self.is_model_parallel
+            or args.deepspeed
+            or ((args.fp16_full_eval or args.bf16_full_eval) and not args.do_train)
+            or (self.sharded_ddp in [ShardedDDPOption.ZERO_DP_2, ShardedDDPOption.ZERO_DP_3])
+            or (self.fsdp is not None)
+        ):
+            self.place_model_on_device = False
+
+        default_collator = default_data_collator if tokenizer is None else DataCollatorWithPadding(tokenizer)
+        self.data_collator = data_collator if data_collator is not None else default_collator
+        self.train_dataset = train_dataset
+        self.eval_dataset = eval_dataset
+        self.tokenizer = tokenizer
+
+        if self.place_model_on_device and not getattr(model, "is_loaded_in_8bit", False):
+            self._move_model_to_device(model, args.device)
+
+        # Force n_gpu to 1 to avoid DataParallel as MP will manage the GPUs
+        if self.is_model_parallel:
+            self.args._n_gpu = 1
+
+        # later use `self.model is self.model_wrapped` to check if it's wrapped or not
+        self.model_wrapped = model
+        self.model = model
+
+        self.compute_metrics = compute_metrics
+        self.preprocess_logits_for_metrics = preprocess_logits_for_metrics
+        self.optimizer, self.lr_scheduler = optimizers
+        if model_init is not None and (self.optimizer is not None or self.lr_scheduler is not None):
+            raise RuntimeError(
+                "Passing a `model_init` is incompatible with providing the `optimizers` argument. "
+                "You should subclass `Trainer` and override the `create_optimizer_and_scheduler` method."
+            )
+        if is_torch_tpu_available() and self.optimizer is not None:
+            for param in self.model.parameters():
+                model_device = param.device
+                break
+            for param_group in self.optimizer.param_groups:
+                if len(param_group["params"]) > 0:
+                    optimizer_device = param_group["params"][0].device
+                    break
+            if model_device != optimizer_device:
+                raise ValueError(
+                    "The model and the optimizer parameters are not on the same device, which probably means you"
+                    " created an optimizer around your model **before** putting on the device and passing it to the"
+                    " `Trainer`. 
Make sure the lines `import torch_xla.core.xla_model as xm` and"
+                    " `model.to(xm.xla_device())` are performed before the optimizer creation in your script."
+                )
+        if ((self.sharded_ddp is not None) or args.deepspeed or (self.fsdp is not None)) and (
+            self.optimizer is not None or self.lr_scheduler is not None
+        ):
+            raise RuntimeError(
+                "Passing `optimizers` is not allowed if Fairscale, Deepspeed or PyTorch FSDP is enabled. "
+                "You should subclass `Trainer` and override the `create_optimizer_and_scheduler` method."
+            )
+        default_callbacks = DEFAULT_CALLBACKS + get_reporting_integration_callbacks(self.args.report_to)
+        callbacks = default_callbacks if callbacks is None else default_callbacks + callbacks
+        self.callback_handler = CallbackHandler(
+            callbacks, self.model, self.tokenizer, self.optimizer, self.lr_scheduler
+        )
+        self.add_callback(PrinterCallback if self.args.disable_tqdm else DEFAULT_PROGRESS_CALLBACK)
+
+        # Will be set to True by `self._setup_loggers()` on first call to `self.log()`.
+        self._loggers_initialized = False
+
+        # Create clone of distant repo and output directory if needed
+        if self.args.push_to_hub:
+            self.init_git_repo(at_init=True)
+            # In case of pull, we need to make sure every process has the latest.
+            if is_torch_tpu_available():
+                xm.rendezvous("init git repo")
+            elif args.local_rank != -1:
+                dist.barrier()
+
+        if self.args.should_save:
+            os.makedirs(self.args.output_dir, exist_ok=True)
+
+        if not callable(self.data_collator) and callable(getattr(self.data_collator, "collate_batch", None)):
+            raise ValueError("The `data_collator` should be a simple callable (function, class with `__call__`).")
+
+        if args.max_steps > 0:
+            logger.info("max_steps is given, it will override any value given in num_train_epochs")
+
+        if train_dataset is not None and not has_length(train_dataset) and args.max_steps <= 0:
+            raise ValueError("train_dataset does not implement __len__, max_steps has to be specified")
+
+        if (
+            train_dataset is not None
+            and isinstance(train_dataset, torch.utils.data.IterableDataset)
+            and args.group_by_length
+        ):
+            raise ValueError("the `--group_by_length` option is only available for `Dataset`, not `IterableDataset`")
+
+        self._signature_columns = None
+
+        # Mixed precision setup
+        self.use_apex = False
+        self.use_cuda_amp = False
+        self.use_cpu_amp = False
+
+        # Mixed precision setup for SageMaker Model Parallel
+        if is_sagemaker_mp_enabled():
+            # BF16 + model parallelism in SageMaker: currently not supported, raise an error
+            if args.bf16:
+                raise ValueError("SageMaker Model Parallelism does not support BF16 yet. Please use FP16 instead.")
+
+            if IS_SAGEMAKER_MP_POST_1_10:
+                # When there's a mismatch between SMP config and trainer argument, use SMP config as truth
+                if args.fp16 != smp.state.cfg.fp16:
+                    logger.warning(
+                        f"FP16 provided in SM_HP_MP_PARAMETERS is {smp.state.cfg.fp16}, "
+                        f"but FP16 provided in trainer argument is {args.fp16}; "
+                        f"setting to {smp.state.cfg.fp16}"
+                    )
+                    args.fp16 = smp.state.cfg.fp16
+            else:
+                # smp < 1.10 does not support fp16 in trainer.
+                if hasattr(smp.state.cfg, "fp16"):
+                    logger.warning(
+                        f"FP16 provided in SM_HP_MP_PARAMETERS is {smp.state.cfg.fp16}, "
+                        "but SageMaker Model Parallelism < 1.10 does not support FP16 in trainer."
+                    )
+
+        if args.fp16 or args.bf16:
+            if args.half_precision_backend == "auto":
+                if args.device == torch.device("cpu"):
+                    if args.fp16:
+                        raise ValueError("Tried to use `fp16` but it is not supported on CPU")
+                    elif _is_native_cpu_amp_available:
+                        args.half_precision_backend = "cpu_amp"
+                    else:
+                        raise ValueError("Tried to use CPU AMP but native CPU AMP is not available")
+                else:
+                    args.half_precision_backend = "cuda_amp"
+
+            logger.info(f"Using {args.half_precision_backend} half precision backend")
+
+        self.do_grad_scaling = False
+        if (args.fp16 or args.bf16) and not (args.deepspeed or is_sagemaker_mp_enabled() or is_torch_tpu_available()):
+            # deepspeed and SageMaker Model Parallel manage their own half precision
+            if args.half_precision_backend == "cuda_amp":
+                self.use_cuda_amp = True
+                self.amp_dtype = torch.float16 if args.fp16 else torch.bfloat16
+                # bf16 does not need grad scaling
+                self.do_grad_scaling = self.amp_dtype == torch.float16
+                if self.do_grad_scaling:
+                    if self.sharded_ddp is not None:
+                        self.scaler = ShardedGradScaler()
+                    elif self.fsdp is not None:
+                        from torch.distributed.fsdp.sharded_grad_scaler import (
+                            ShardedGradScaler as FSDPShardedGradScaler,
+                        )
+
+                        self.scaler = FSDPShardedGradScaler()
+                    elif is_torch_tpu_available():
+                        from torch_xla.amp import GradScaler
+
+                        self.scaler = GradScaler()
+                    else:
+                        self.scaler = torch.cuda.amp.GradScaler()
+            elif args.half_precision_backend == "cpu_amp":
+                self.use_cpu_amp = True
+                self.amp_dtype = torch.bfloat16
+            else:
+                if not is_apex_available():
+                    raise ImportError(
+                        "Using FP16 with APEX but APEX is not installed, please refer to"
+                        " https://www.github.com/nvidia/apex."
+                    )
+                self.use_apex = True
+
+        # FP16 + model parallelism in SageMaker: gradient clipping does not work for now so we raise a helpful error.
+        if (
+            is_sagemaker_mp_enabled()
+            and self.use_cuda_amp
+            and args.max_grad_norm is not None
+            and args.max_grad_norm > 0
+        ):
+            raise ValueError(
+                "SageMaker Model Parallelism in mixed precision mode does not support gradient clipping yet. Pass "
+                "along 'max_grad_norm': 0 in your hyperparameters."
+            )
+
+        # Label smoothing
+        if self.args.label_smoothing_factor != 0:
+            self.label_smoother = LabelSmoother(epsilon=self.args.label_smoothing_factor)
+        else:
+            self.label_smoother = None
+
+        self.state = TrainerState(
+            is_local_process_zero=self.is_local_process_zero(),
+            is_world_process_zero=self.is_world_process_zero(),
+        )
+
+        self.control = TrainerControl()
+        # Internal variable to count flos in each process, will be accumulated in `self.state.total_flos` then
+        # returned to 0 every time flos need to be logged
+        self.current_flos = 0
+        self.hp_search_backend = None
+        self.use_tune_checkpoints = False
+        default_label_names = find_labels(self.model.__class__)
+        self.label_names = default_label_names if self.args.label_names is None else self.args.label_names
+        self.can_return_loss = can_return_loss(self.model.__class__)
+        self.control = self.callback_handler.on_init_end(self.args, self.state, self.control)
+
+        # Internal variables to keep track of the original batch size
+        self._train_batch_size = args.train_batch_size
+
+        # very last
+        self._memory_tracker.stop_and_update_metrics()
+
+        # torch.compile
+        if args.torch_compile and not is_torch_compile_available():
+            raise RuntimeError("Using torch.compile requires PyTorch 2.0 or higher.")
+
+    def add_callback(self, callback):
+        """
+        Add a callback to the current list of [`~transformers.TrainerCallback`]. 
+
+        Args:
+            callback (`type` or [`~transformers.TrainerCallback`]):
+                A [`~transformers.TrainerCallback`] class or an instance of a [`~transformers.TrainerCallback`]. In the
+                first case, will instantiate a member of that class.
+        """
+        self.callback_handler.add_callback(callback)
+
+    def pop_callback(self, callback):
+        """
+        Remove a callback from the current list of [`~transformers.TrainerCallback`] and return it.
+
+        If the callback is not found, returns `None` (and no error is raised).
+
+        Args:
+            callback (`type` or [`~transformers.TrainerCallback`]):
+                A [`~transformers.TrainerCallback`] class or an instance of a [`~transformers.TrainerCallback`]. In the
+                first case, will pop the first member of that class found in the list of callbacks.
+
+        Returns:
+            [`~transformers.TrainerCallback`]: The callback removed, if found.
+        """
+        return self.callback_handler.pop_callback(callback)
+
+    def remove_callback(self, callback):
+        """
+        Remove a callback from the current list of [`~transformers.TrainerCallback`].
+
+        Args:
+            callback (`type` or [`~transformers.TrainerCallback`]):
+                A [`~transformers.TrainerCallback`] class or an instance of a [`~transformers.TrainerCallback`]. In the
+                first case, will remove the first member of that class found in the list of callbacks.
+        """
+        self.callback_handler.remove_callback(callback)
+
+    def _move_model_to_device(self, model, device):
+        model = model.to(device)
+        # Moving a model to an XLA device disconnects the tied weights, so we have to retie them.
+        if self.args.parallel_mode == ParallelMode.TPU and hasattr(model, "tie_weights"):
+            model.tie_weights()
+
+    def _set_signature_columns_if_needed(self):
+        if self._signature_columns is None:
+            # Inspect model forward signature to keep only the arguments it accepts.
+            signature = inspect.signature(self.model.forward)
+            self._signature_columns = list(signature.parameters.keys())
+            # Labels may be named label or label_ids, the default data collator handles that.
+            self._signature_columns += list(set(["label", "label_ids"] + self.label_names))
+
+    def _remove_unused_columns(self, dataset: "datasets.Dataset", description: Optional[str] = None):
+        if not self.args.remove_unused_columns:
+            return dataset
+        self._set_signature_columns_if_needed()
+        signature_columns = self._signature_columns
+
+        ignored_columns = list(set(dataset.column_names) - set(signature_columns))
+        if len(ignored_columns) > 0:
+            dset_description = "" if description is None else f"in the {description} set"
+            logger.info(
+                f"The following columns {dset_description} don't have a corresponding argument in "
+                f"`{self.model.__class__.__name__}.forward` and have been ignored: {', '.join(ignored_columns)}."
+                f" If {', '.join(ignored_columns)} are not expected by `{self.model.__class__.__name__}.forward`,"
+                " you can safely ignore this message."
+ ) + + columns = [k for k in signature_columns if k in dataset.column_names] + + if version.parse(datasets.__version__) < version.parse("1.4.0"): + dataset.set_format( + type=dataset.format["type"], columns=columns, format_kwargs=dataset.format["format_kwargs"] + ) + return dataset + else: + return dataset.remove_columns(ignored_columns) + + def _get_collator_with_removed_columns( + self, data_collator: Callable, description: Optional[str] = None + ) -> Callable: + """Wrap the data collator in a callable removing unused columns.""" + if not self.args.remove_unused_columns: + return data_collator + self._set_signature_columns_if_needed() + signature_columns = self._signature_columns + + remove_columns_collator = RemoveColumnsCollator( + data_collator=data_collator, + signature_columns=signature_columns, + logger=logger, + description=description, + model_name=self.model.__class__.__name__, + ) + return remove_columns_collator + + def _get_train_sampler(self) -> Optional[torch.utils.data.Sampler]: + if self.train_dataset is None or not has_length(self.train_dataset): + return None + + generator = None + if self.args.world_size <= 1: + generator = torch.Generator() + # for backwards compatibility, we generate a seed here (which is sampled from a generator seeded with + # `args.seed`) if data_seed isn't provided. + # Further on in this method, we default to `args.seed` instead. + if self.args.data_seed is None: + seed = int(torch.empty((), dtype=torch.int64).random_().item()) + else: + seed = self.args.data_seed + generator.manual_seed(seed) + + seed = self.args.data_seed if self.args.data_seed is not None else self.args.seed + + # Build the sampler. + if self.args.group_by_length: + if is_datasets_available() and isinstance(self.train_dataset, datasets.Dataset): + lengths = ( + self.train_dataset[self.args.length_column_name] + if self.args.length_column_name in self.train_dataset.column_names + else None + ) + else: + lengths = None + model_input_name = self.tokenizer.model_input_names[0] if self.tokenizer is not None else None + if self.args.world_size <= 1: + return LengthGroupedSampler( + self.args.train_batch_size * self.args.gradient_accumulation_steps, + dataset=self.train_dataset, + lengths=lengths, + model_input_name=model_input_name, + generator=generator, + ) + else: + return DistributedLengthGroupedSampler( + self.args.train_batch_size * self.args.gradient_accumulation_steps, + dataset=self.train_dataset, + num_replicas=self.args.world_size, + rank=self.args.process_index, + lengths=lengths, + model_input_name=model_input_name, + seed=seed, + ) + + else: + if self.args.world_size <= 1: + return RandomSampler(self.train_dataset, generator=generator) + elif ( + self.args.parallel_mode in [ParallelMode.TPU, ParallelMode.SAGEMAKER_MODEL_PARALLEL] + and not self.args.dataloader_drop_last + ): + # Use a loop for TPUs when drop_last is False to have all batches have the same size. + return DistributedSamplerWithLoop( + self.train_dataset, + batch_size=self.args.per_device_train_batch_size, + num_replicas=self.args.world_size, + rank=self.args.process_index, + seed=seed, + ) + else: + return DistributedSampler( + self.train_dataset, + num_replicas=self.args.world_size, + rank=self.args.process_index, + seed=seed, + ) + + def get_train_dataloader(self) -> DataLoader: + """ + Returns the training [`~torch.utils.data.DataLoader`]. + + Will use no sampler if `train_dataset` does not implement `__len__`, a random sampler (adapted to distributed + training if necessary) otherwise. 
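+
+        A minimal usage sketch (`trainer` stands for any already-constructed instance of this class):
+
+            train_dl = trainer.get_train_dataloader()
+            batch = next(iter(train_dl))  # one batch, collated by `self.data_collator`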
+ + Subclass and override this method if you want to inject some custom behavior. + """ + if self.train_dataset is None: + raise ValueError("Trainer: training requires a train_dataset.") + + train_dataset = self.train_dataset + data_collator = self.data_collator + if is_datasets_available() and isinstance(train_dataset, datasets.Dataset): + train_dataset = self._remove_unused_columns(train_dataset, description="training") + else: + data_collator = self._get_collator_with_removed_columns(data_collator, description="training") + + if isinstance(train_dataset, torch.utils.data.IterableDataset): + if self.args.world_size > 1: + train_dataset = IterableDatasetShard( + train_dataset, + batch_size=self._train_batch_size, + drop_last=self.args.dataloader_drop_last, + num_processes=self.args.world_size, + process_index=self.args.process_index, + ) + + return DataLoader( + train_dataset, + batch_size=self._train_batch_size, + collate_fn=data_collator, + num_workers=self.args.dataloader_num_workers, + pin_memory=self.args.dataloader_pin_memory, + ) + + train_sampler = self._get_train_sampler() + + return DataLoader( + train_dataset, + batch_size=self._train_batch_size, + sampler=train_sampler, + collate_fn=data_collator, + drop_last=self.args.dataloader_drop_last, + num_workers=self.args.dataloader_num_workers, + pin_memory=self.args.dataloader_pin_memory, + worker_init_fn=seed_worker, + ) + + def _get_eval_sampler(self, eval_dataset: Dataset) -> Optional[torch.utils.data.Sampler]: + # Deprecated code + if self.args.use_legacy_prediction_loop: + if is_torch_tpu_available(): + return SequentialDistributedSampler( + eval_dataset, num_replicas=xm.xrt_world_size(), rank=xm.get_ordinal() + ) + elif is_sagemaker_mp_enabled(): + return SequentialDistributedSampler( + eval_dataset, + num_replicas=smp.dp_size(), + rank=smp.dp_rank(), + batch_size=self.args.per_device_eval_batch_size, + ) + elif self.args.local_rank != -1: + return SequentialDistributedSampler(eval_dataset) + else: + return SequentialSampler(eval_dataset) + + if self.args.world_size <= 1: + return SequentialSampler(eval_dataset) + else: + return ShardSampler( + eval_dataset, + batch_size=self.args.per_device_eval_batch_size, + num_processes=self.args.world_size, + process_index=self.args.process_index, + ) + + def get_eval_dataloader(self, eval_dataset: Optional[Dataset] = None) -> DataLoader: + """ + Returns the evaluation [`~torch.utils.data.DataLoader`]. + + Subclass and override this method if you want to inject some custom behavior. + + Args: + eval_dataset (`torch.utils.data.Dataset`, *optional*): + If provided, will override `self.eval_dataset`. If it is a [`~datasets.Dataset`], columns not accepted + by the `model.forward()` method are automatically removed. It must implement `__len__`. 
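+
+        A minimal usage sketch (`my_other_dataset` stands for any sized dataset you supply):
+
+            eval_dl = trainer.get_eval_dataloader()                   # uses `self.eval_dataset`
+            other_dl = trainer.get_eval_dataloader(my_other_dataset)  # overrides it for this call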
+ """ + if eval_dataset is None and self.eval_dataset is None: + raise ValueError("Trainer: evaluation requires an eval_dataset.") + eval_dataset = eval_dataset if eval_dataset is not None else self.eval_dataset + data_collator = self.data_collator + + if is_datasets_available() and isinstance(eval_dataset, datasets.Dataset): + eval_dataset = self._remove_unused_columns(eval_dataset, description="evaluation") + else: + data_collator = self._get_collator_with_removed_columns(data_collator, description="evaluation") + + if isinstance(eval_dataset, torch.utils.data.IterableDataset): + if self.args.world_size > 1: + eval_dataset = IterableDatasetShard( + eval_dataset, + batch_size=self.args.per_device_eval_batch_size, + drop_last=self.args.dataloader_drop_last, + num_processes=self.args.world_size, + process_index=self.args.process_index, + ) + return DataLoader( + eval_dataset, + batch_size=self.args.eval_batch_size, + collate_fn=data_collator, + num_workers=self.args.dataloader_num_workers, + pin_memory=self.args.dataloader_pin_memory, + ) + + eval_sampler = self._get_eval_sampler(eval_dataset) + + return DataLoader( + eval_dataset, + sampler=eval_sampler, + batch_size=self.args.eval_batch_size, + collate_fn=data_collator, + drop_last=self.args.dataloader_drop_last, + num_workers=self.args.dataloader_num_workers, + pin_memory=self.args.dataloader_pin_memory, + ) + + def get_test_dataloader(self, test_dataset: Dataset) -> DataLoader: + """ + Returns the test [`~torch.utils.data.DataLoader`]. + + Subclass and override this method if you want to inject some custom behavior. + + Args: + test_dataset (`torch.utils.data.Dataset`, *optional*): + The test dataset to use. If it is a [`~datasets.Dataset`], columns not accepted by the + `model.forward()` method are automatically removed. It must implement `__len__`. + """ + data_collator = self.data_collator + + if is_datasets_available() and isinstance(test_dataset, datasets.Dataset): + test_dataset = self._remove_unused_columns(test_dataset, description="test") + else: + data_collator = self._get_collator_with_removed_columns(data_collator, description="test") + + if isinstance(test_dataset, torch.utils.data.IterableDataset): + if self.args.world_size > 1: + test_dataset = IterableDatasetShard( + test_dataset, + batch_size=self.args.eval_batch_size, + drop_last=self.args.dataloader_drop_last, + num_processes=self.args.world_size, + process_index=self.args.process_index, + ) + return DataLoader( + test_dataset, + batch_size=self.args.eval_batch_size, + collate_fn=data_collator, + num_workers=self.args.dataloader_num_workers, + pin_memory=self.args.dataloader_pin_memory, + ) + + test_sampler = self._get_eval_sampler(test_dataset) + + # We use the same batch_size as for eval. + return DataLoader( + test_dataset, + sampler=test_sampler, + batch_size=self.args.eval_batch_size, + collate_fn=data_collator, + drop_last=self.args.dataloader_drop_last, + num_workers=self.args.dataloader_num_workers, + pin_memory=self.args.dataloader_pin_memory, + ) + + def create_optimizer_and_scheduler(self, num_training_steps: int): + """ + Setup the optimizer and the learning rate scheduler. + + We provide a reasonable default that works well. If you want to use something else, you can pass a tuple in the + Trainer's init through `optimizers`, or subclass and override this method (or `create_optimizer` and/or + `create_scheduler`) in a subclass. 
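+
+        A sketch of the subclassing route (the class name below is illustrative):
+
+            class MyTrainer(Trainer):
+                def create_optimizer_and_scheduler(self, num_training_steps: int):
+                    self.create_optimizer()
+                    self.create_scheduler(num_training_steps=num_training_steps, optimizer=self.optimizer)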
+ """ + self.create_optimizer() + if IS_SAGEMAKER_MP_POST_1_10 and smp.state.cfg.fp16: + # If smp >= 1.10 and fp16 is enabled, we unwrap the optimizer + optimizer = self.optimizer.optimizer + else: + optimizer = self.optimizer + self.create_scheduler(num_training_steps=num_training_steps, optimizer=optimizer) + + def create_optimizer(self): + """ + Setup the optimizer. + + We provide a reasonable default that works well. If you want to use something else, you can pass a tuple in the + Trainer's init through `optimizers`, or subclass and override this method in a subclass. + """ + opt_model = self.model_wrapped if is_sagemaker_mp_enabled() else self.model + + if self.optimizer is None: + decay_parameters = get_parameter_names(opt_model, ALL_LAYERNORM_LAYERS) + decay_parameters = [name for name in decay_parameters if "bias" not in name] + optimizer_grouped_parameters = [ + { + "params": [ + p for n, p in opt_model.named_parameters() if (n in decay_parameters and p.requires_grad) + ], + "weight_decay": self.args.weight_decay, + }, + { + "params": [ + p for n, p in opt_model.named_parameters() if (n not in decay_parameters and p.requires_grad) + ], + "weight_decay": 0.0, + }, + ] + + optimizer_cls, optimizer_kwargs = Trainer.get_optimizer_cls_and_kwargs(self.args) + + if self.sharded_ddp == ShardedDDPOption.SIMPLE: + self.optimizer = OSS( + params=optimizer_grouped_parameters, + optim=optimizer_cls, + **optimizer_kwargs, + ) + else: + self.optimizer = optimizer_cls(optimizer_grouped_parameters, **optimizer_kwargs) + if optimizer_cls.__name__ == "Adam8bit": + import bitsandbytes + + manager = bitsandbytes.optim.GlobalOptimManager.get_instance() + + skipped = 0 + for module in opt_model.modules(): + if isinstance(module, nn.Embedding): + skipped += sum({p.data_ptr(): p.numel() for p in module.parameters()}.values()) + print(f"skipped {module}: {skipped/2**20}M params") + manager.register_module_override(module, "weight", {"optim_bits": 32}) + logger.debug(f"bitsandbytes: will optimize {module} in fp32") + print(f"skipped: {skipped/2**20}M params") + + if is_sagemaker_mp_enabled(): + self.optimizer = smp.DistributedOptimizer(self.optimizer) + + return self.optimizer + + @staticmethod + def get_optimizer_cls_and_kwargs(args: TrainingArguments) -> Tuple[Any, Any]: + """ + Returns the optimizer class and optimizer parameters based on the training arguments. + + Args: + args (`transformers.training_args.TrainingArguments`): + The training arguments for the training session. 
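+
+        A quick sketch (the argument values are illustrative, not defaults):
+
+            args = TrainingArguments(output_dir="tmp", optim="adamw_torch", learning_rate=1e-4)
+            optimizer_cls, optimizer_kwargs = Trainer.get_optimizer_cls_and_kwargs(args)
+            # optimizer_cls is now torch.optim.AdamW and optimizer_kwargs includes {"lr": 1e-4}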
+ + """ + + # parse args.optim_args + optim_args = {} + if args.optim_args: + for mapping in args.optim_args.replace(" ", "").split(","): + key, value = mapping.split("=") + optim_args[key] = value + + optimizer_kwargs = {"lr": args.learning_rate} + + adam_kwargs = { + "betas": (args.adam_beta1, args.adam_beta2), + "eps": args.adam_epsilon, + } + if args.optim == OptimizerNames.ADAFACTOR: + optimizer_cls = Adafactor + optimizer_kwargs.update({"scale_parameter": False, "relative_step": False}) + elif args.optim == OptimizerNames.ADAMW_HF: + from transformers.optimization import AdamW + + optimizer_cls = AdamW + optimizer_kwargs.update(adam_kwargs) + elif args.optim in [OptimizerNames.ADAMW_TORCH, OptimizerNames.ADAMW_TORCH_FUSED]: + from torch.optim import AdamW + + optimizer_cls = AdamW + optimizer_kwargs.update(adam_kwargs) + if args.optim == OptimizerNames.ADAMW_TORCH_FUSED: + optimizer_kwargs.update({"fused": True}) + elif args.optim == OptimizerNames.ADAMW_TORCH_XLA: + try: + from torch_xla.amp.syncfree import AdamW + + optimizer_cls = AdamW + optimizer_kwargs.update(adam_kwargs) + except ImportError: + raise ValueError("Trainer failed to import syncfree AdamW from torch_xla.") + elif args.optim == OptimizerNames.ADAMW_APEX_FUSED: + try: + from apex.optimizers import FusedAdam + + optimizer_cls = FusedAdam + optimizer_kwargs.update(adam_kwargs) + except ImportError: + raise ValueError("Trainer tried to instantiate apex FusedAdam but apex is not installed!") + elif args.optim == OptimizerNames.ADAMW_BNB: + try: + from bitsandbytes.optim import Adam8bit + + optimizer_cls = Adam8bit + optimizer_kwargs.update(adam_kwargs) + except ImportError: + raise ValueError("Trainer tried to instantiate bnb Adam8bit but bnb is not installed!") + elif args.optim == OptimizerNames.ADAMW_ANYPRECISION: + try: + from torchdistx.optimizers import AnyPrecisionAdamW + + optimizer_cls = AnyPrecisionAdamW + optimizer_kwargs.update(adam_kwargs) + + # TODO Change dtypes back to M=FP32, Var = BF16, Kahan = False once they can be cast together in torchdistx. + optimizer_kwargs.update( + { + "use_kahan_summation": strtobool(optim_args.get("use_kahan_summation", "False")), + "momentum_dtype": getattr(torch, optim_args.get("momentum_dtype", "float32")), + "variance_dtype": getattr(torch, optim_args.get("variance_dtype", "float32")), + "compensation_buffer_dtype": getattr( + torch, optim_args.get("compensation_buffer_dtype", "bfloat16") + ), + } + ) + except ImportError: + raise ValueError("Please install https://github.com/pytorch/torchdistx") + elif args.optim == OptimizerNames.SGD: + optimizer_cls = torch.optim.SGD + elif args.optim == OptimizerNames.ADAGRAD: + optimizer_cls = torch.optim.Adagrad + else: + raise ValueError(f"Trainer cannot instantiate unsupported optimizer: {args.optim}") + return optimizer_cls, optimizer_kwargs + + def create_scheduler(self, num_training_steps: int, optimizer: torch.optim.Optimizer = None): + """ + Setup the scheduler. The optimizer of the trainer must have been set up either before this method is called or + passed as an argument. + + Args: + num_training_steps (int): The number of training steps to do. 
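+            optimizer (`torch.optim.Optimizer`, *optional*):
+                The optimizer whose learning rate will be scheduled. Defaults to `self.optimizer` when not passed.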
+ """ + if self.lr_scheduler is None: + self.lr_scheduler = get_scheduler( + self.args.lr_scheduler_type, + optimizer=self.optimizer if optimizer is None else optimizer, + num_warmup_steps=self.args.get_warmup_steps(num_training_steps), + num_training_steps=num_training_steps, + ) + return self.lr_scheduler + + def num_examples(self, dataloader: DataLoader) -> int: + """ + Helper to get number of samples in a [`~torch.utils.data.DataLoader`] by accessing its dataset. When + dataloader.dataset does not exist or has no length, estimates as best it can + """ + try: + dataset = dataloader.dataset + # Special case for IterableDatasetShard, we need to dig deeper + if isinstance(dataset, IterableDatasetShard): + return len(dataloader.dataset.dataset) + return len(dataloader.dataset) + except (NameError, AttributeError, TypeError): # no dataset or length, estimate by length of dataloader + return len(dataloader) * self.args.per_device_train_batch_size + + def _hp_search_setup(self, trial: Union["optuna.Trial", Dict[str, Any]]): + """HP search setup code""" + self._trial = trial + + if self.hp_search_backend is None or trial is None: + return + if self.hp_search_backend == HPSearchBackend.OPTUNA: + params = self.hp_space(trial) + elif self.hp_search_backend == HPSearchBackend.RAY: + params = trial + params.pop("wandb", None) + elif self.hp_search_backend == HPSearchBackend.SIGOPT: + params = {k: int(v) if isinstance(v, str) else v for k, v in trial.assignments.items()} + elif self.hp_search_backend == HPSearchBackend.WANDB: + params = trial + + for key, value in params.items(): + if not hasattr(self.args, key): + logger.warning( + f"Trying to set {key} in the hyperparameter search but there is no corresponding field in" + " `TrainingArguments`." + ) + continue + old_attr = getattr(self.args, key, None) + # Casting value to the proper type + if old_attr is not None: + value = type(old_attr)(value) + setattr(self.args, key, value) + if self.hp_search_backend == HPSearchBackend.OPTUNA: + logger.info(f"Trial: {trial.params}") + if self.hp_search_backend == HPSearchBackend.SIGOPT: + logger.info(f"SigOpt Assignments: {trial.assignments}") + if self.hp_search_backend == HPSearchBackend.WANDB: + logger.info(f"W&B Sweep parameters: {trial}") + if self.args.deepspeed: + # Rebuild the deepspeed config to reflect the updated training parameters + from transformers.deepspeed import HfTrainerDeepSpeedConfig + + self.args.hf_deepspeed_config = HfTrainerDeepSpeedConfig(self.args.deepspeed) + self.args.hf_deepspeed_config.trainer_config_process(self.args) + + def _report_to_hp_search(self, trial: Union["optuna.Trial", Dict[str, Any]], step: int, metrics: Dict[str, float]): + if self.hp_search_backend is None or trial is None: + return + self.objective = self.compute_objective(metrics.copy()) + if self.hp_search_backend == HPSearchBackend.OPTUNA: + import optuna + + trial.report(self.objective, step) + if trial.should_prune(): + self.callback_handler.on_train_end(self.args, self.state, self.control) + raise optuna.TrialPruned() + elif self.hp_search_backend == HPSearchBackend.RAY: + from ray import tune + + if self.control.should_save: + self._tune_save_checkpoint() + tune.report(objective=self.objective, **metrics) + + def _tune_save_checkpoint(self): + from ray import tune + + if not self.use_tune_checkpoints: + return + with tune.checkpoint_dir(step=self.state.global_step) as checkpoint_dir: + output_dir = os.path.join(checkpoint_dir, f"{PREFIX_CHECKPOINT_DIR}-{self.state.global_step}") + 
self.save_model(output_dir, _internal_call=True)
+            if self.args.should_save:
+                self.state.save_to_json(os.path.join(output_dir, TRAINER_STATE_NAME))
+                torch.save(self.optimizer.state_dict(), os.path.join(output_dir, OPTIMIZER_NAME))
+                torch.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, SCHEDULER_NAME))
+
+    def call_model_init(self, trial=None):
+        model_init_argcount = number_of_arguments(self.model_init)
+        if model_init_argcount == 0:
+            model = self.model_init()
+        elif model_init_argcount == 1:
+            model = self.model_init(trial)
+        else:
+            raise RuntimeError("model_init should have 0 or 1 argument.")
+
+        if model is None:
+            raise RuntimeError("model_init should not return None.")
+
+        return model
+
+    def torch_jit_model_eval(self, model, dataloader, training=False):
+        if not training:
+            if dataloader is None:
+                logger.warning("Failed to use PyTorch JIT mode: the current dataloader is None.")
+                return model
+            example_batch = next(iter(dataloader))
+            example_batch = self._prepare_inputs(example_batch)
+            try:
+                jit_model = model.eval()
+                with ContextManagers([self.autocast_smart_context_manager(cache_enabled=False), torch.no_grad()]):
+                    if version.parse(version.parse(torch.__version__).base_version) >= version.parse("1.14.0"):
+                        if isinstance(example_batch, dict):
+                            jit_model = torch.jit.trace(jit_model, example_kwarg_inputs=example_batch, strict=False)
+                        else:
+                            jit_model = torch.jit.trace(
+                                jit_model,
+                                example_kwarg_inputs={key: example_batch[key] for key in example_batch},
+                                strict=False,
+                            )
+                    else:
+                        jit_inputs = []
+                        for key in example_batch:
+                            example_tensor = torch.ones_like(example_batch[key])
+                            jit_inputs.append(example_tensor)
+                        jit_inputs = tuple(jit_inputs)
+                        jit_model = torch.jit.trace(jit_model, jit_inputs, strict=False)
+                jit_model = torch.jit.freeze(jit_model)
+                with torch.no_grad():
+                    jit_model(**example_batch)
+                    jit_model(**example_batch)
+                model = jit_model
+                self.use_cpu_amp = False
+                self.use_cuda_amp = False
+            except (RuntimeError, TypeError, ValueError, NameError, IndexError) as e:
+                logger.warning(f"Failed to use PyTorch JIT mode due to: {e}.")
+
+        return model
+
+    def ipex_optimize_model(self, model, training=False, dtype=torch.float32):
+        if not is_ipex_available():
+            raise ImportError(
+                "Using IPEX but IPEX is not installed or IPEX's version does not match current PyTorch, please refer"
+                " to https://github.com/intel/intel-extension-for-pytorch."
+            )
+
+        import intel_extension_for_pytorch as ipex
+
+        if not training:
+            model.eval()
+            dtype = torch.bfloat16 if not self.is_in_train and self.args.bf16_full_eval else dtype
+            # conv_bn_folding is disabled as it fails in symbolic tracing, resulting in ipex warnings
+            model = ipex.optimize(model, dtype=dtype, level="O1", conv_bn_folding=False, inplace=not self.is_in_train)
+        else:
+            if not model.training:
+                model.train()
+            model, self.optimizer = ipex.optimize(
+                model, dtype=dtype, optimizer=self.optimizer, inplace=True, level="O1"
+            )
+
+        return model
+
+    def _wrap_model(self, model, training=True, dataloader=None):
+        if self.args.torch_compile:
+            model = torch.compile(model, backend=self.args.torch_compile_backend, mode=self.args.torch_compile_mode)
+
+        if self.args.use_ipex:
+            dtype = torch.bfloat16 if self.use_cpu_amp else torch.float32
+            model = self.ipex_optimize_model(model, training, dtype=dtype)
+
+        if is_sagemaker_mp_enabled():
+            # Wrapping the base model twice in a DistributedModel will raise an error.
+ if isinstance(self.model_wrapped, smp.model.DistributedModel): + return self.model_wrapped + return smp.DistributedModel(model, backward_passes_per_step=self.args.gradient_accumulation_steps) + + # already initialized its own DDP and AMP + if self.deepspeed: + return self.deepspeed + + # train/eval could be run multiple-times - if already wrapped, don't re-wrap it again + if unwrap_model(model) is not model: + return model + + # Mixed precision training with apex (torch < 1.6) + if self.use_apex and training: + model, self.optimizer = amp.initialize(model, self.optimizer, opt_level=self.args.fp16_opt_level) + + # Multi-gpu training (should be after apex fp16 initialization) + if self.args.n_gpu > 1: + model = nn.DataParallel(model) + + if self.args.jit_mode_eval: + start_time = time.time() + model = self.torch_jit_model_eval(model, dataloader, training) + self.jit_compilation_time = round(time.time() - start_time, 4) + + # Note: in torch.distributed mode, there's no point in wrapping the model + # inside a DistributedDataParallel as we'll be under `no_grad` anyways. + if not training: + return model + + # Distributed training (should be after apex fp16 initialization) + if self.sharded_ddp is not None: + # Sharded DDP! + if self.sharded_ddp == ShardedDDPOption.SIMPLE: + model = ShardedDDP(model, self.optimizer) + else: + mixed_precision = self.args.fp16 or self.args.bf16 + cpu_offload = ShardedDDPOption.OFFLOAD in self.args.sharded_ddp + zero_3 = self.sharded_ddp == ShardedDDPOption.ZERO_DP_3 + # XXX: Breaking the self.model convention but I see no way around it for now. + if ShardedDDPOption.AUTO_WRAP in self.args.sharded_ddp: + model = auto_wrap(model) + self.model = model = FullyShardedDDP( + model, + mixed_precision=mixed_precision, + reshard_after_forward=zero_3, + cpu_offload=cpu_offload, + ).to(self.args.device) + # Distributed training using PyTorch FSDP + elif self.fsdp is not None: + if not self.args.fsdp_config["xla"]: + # PyTorch FSDP! 
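+                # Illustrative shape of the configuration consumed below; the keys come
+                # from the surrounding code, the values are made-up examples:
+                #
+                #     args.fsdp = ["full_shard", "auto_wrap"]
+                #     args.fsdp_config = {
+                #         "xla": False,
+                #         "fsdp_min_num_params": 0,
+                #         "fsdp_transformer_layer_cls_to_wrap": ["GLMBlock"],  # hypothetical layer class
+                #     }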
+                from torch.distributed.fsdp.fully_sharded_data_parallel import CPUOffload, MixedPrecision
+                from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
+                from torch.distributed.fsdp.wrap import size_based_auto_wrap_policy, transformer_auto_wrap_policy
+
+                if FSDPOption.OFFLOAD in self.args.fsdp:
+                    cpu_offload = CPUOffload(offload_params=True)
+                else:
+                    cpu_offload = CPUOffload(offload_params=False)
+
+                auto_wrap_policy = None
+
+                if FSDPOption.AUTO_WRAP in self.args.fsdp:
+                    if self.args.fsdp_config["fsdp_min_num_params"] > 0:
+                        auto_wrap_policy = functools.partial(
+                            size_based_auto_wrap_policy, min_num_params=self.args.fsdp_config["fsdp_min_num_params"]
+                        )
+                    elif self.args.fsdp_config.get("fsdp_transformer_layer_cls_to_wrap", None) is not None:
+                        transformer_cls_to_wrap = set()
+                        for layer_class in self.args.fsdp_config["fsdp_transformer_layer_cls_to_wrap"]:
+                            transformer_cls = get_module_class_from_name(model, layer_class)
+                            if transformer_cls is None:
+                                raise ValueError("Could not find the transformer layer class to wrap in the model.")
+                            else:
+                                transformer_cls_to_wrap.add(transformer_cls)
+                        auto_wrap_policy = functools.partial(
+                            transformer_auto_wrap_policy,
+                            # Transformer layer class to wrap
+                            transformer_layer_cls=transformer_cls_to_wrap,
+                        )
+                mixed_precision_policy = None
+                dtype = None
+                if self.args.fp16:
+                    dtype = torch.float16
+                elif self.args.bf16:
+                    dtype = torch.bfloat16
+                if dtype is not None:
+                    mixed_precision_policy = MixedPrecision(param_dtype=dtype, reduce_dtype=dtype, buffer_dtype=dtype)
+                if type(model) != FSDP:
+                    # XXX: Breaking the self.model convention but I see no way around it for now.
+                    self.model = model = FSDP(
+                        model,
+                        sharding_strategy=self.fsdp,
+                        cpu_offload=cpu_offload,
+                        auto_wrap_policy=auto_wrap_policy,
+                        mixed_precision=mixed_precision_policy,
+                        device_id=self.args.device,
+                        backward_prefetch=self.backward_prefetch,
+                        forward_prefetch=self.forward_prefetch,
+                        limit_all_gathers=self.limit_all_gathers,
+                    )
+            else:
+                try:
+                    from torch_xla.distributed.fsdp import XlaFullyShardedDataParallel as FSDP
+                    from torch_xla.distributed.fsdp import checkpoint_module
+                    from torch_xla.distributed.fsdp.wrap import (
+                        size_based_auto_wrap_policy,
+                        transformer_auto_wrap_policy,
+                    )
+                except ImportError:
+                    raise ImportError("Missing XLA FSDP related module; please make sure to use torch-xla >= 2.0.")
+                auto_wrap_policy = None
+                auto_wrapper_callable = None
+                if self.args.fsdp_config["fsdp_min_num_params"] > 0:
+                    auto_wrap_policy = functools.partial(
+                        size_based_auto_wrap_policy, min_num_params=self.args.fsdp_config["fsdp_min_num_params"]
+                    )
+                elif self.args.fsdp_config.get("fsdp_transformer_layer_cls_to_wrap", None) is not None:
+                    transformer_cls_to_wrap = set()
+                    for layer_class in self.args.fsdp_config["fsdp_transformer_layer_cls_to_wrap"]:
+                        transformer_cls = get_module_class_from_name(model, layer_class)
+                        if transformer_cls is None:
+                            raise ValueError("Could not find the transformer layer class to wrap in the model.")
+                        else:
+                            transformer_cls_to_wrap.add(transformer_cls)
+                    auto_wrap_policy = functools.partial(
+                        transformer_auto_wrap_policy,
+                        # Transformer layer class to wrap
+                        transformer_layer_cls=transformer_cls_to_wrap,
+                    )
+                fsdp_kwargs = self.args.xla_fsdp_config
+                if self.args.fsdp_config["xla_fsdp_grad_ckpt"]:
+                    # Apply gradient checkpointing to auto-wrapped sub-modules if specified
+                    def auto_wrapper_callable(m, *args, **kwargs):
+                        return FSDP(checkpoint_module(m), *args, **kwargs)
+
+                # Wrap the base model 
with an outer FSDP wrapper
+                self.model = model = FSDP(
+                    model,
+                    auto_wrap_policy=auto_wrap_policy,
+                    auto_wrapper_callable=auto_wrapper_callable,
+                    **fsdp_kwargs,
+                )
+
+                # Patch `xm.optimizer_step` so it does not reduce gradients in this case,
+                # as FSDP does not need gradient reduction over sharded parameters.
+                def patched_optimizer_step(optimizer, barrier=False, optimizer_args={}):
+                    loss = optimizer.step(**optimizer_args)
+                    if barrier:
+                        xm.mark_step()
+                    return loss
+
+                xm.optimizer_step = patched_optimizer_step
+        elif is_sagemaker_dp_enabled():
+            model = nn.parallel.DistributedDataParallel(
+                model, device_ids=[int(os.getenv("SMDATAPARALLEL_LOCAL_RANK"))]
+            )
+        elif self.args.local_rank != -1:
+            kwargs = {}
+            if self.args.ddp_find_unused_parameters is not None:
+                kwargs["find_unused_parameters"] = self.args.ddp_find_unused_parameters
+            elif isinstance(model, PreTrainedModel):
+                # find_unused_parameters breaks checkpointing as per
+                # https://github.com/huggingface/transformers/pull/4659#issuecomment-643356021
+                kwargs["find_unused_parameters"] = not model.is_gradient_checkpointing
+            else:
+                kwargs["find_unused_parameters"] = True
+
+            if self.args.ddp_bucket_cap_mb is not None:
+                kwargs["bucket_cap_mb"] = self.args.ddp_bucket_cap_mb
+            if is_torch_neuroncore_available():
+                return model
+            model = nn.parallel.DistributedDataParallel(
+                model,
+                device_ids=[self.args.local_rank] if self.args._n_gpu != 0 else None,
+                output_device=self.args.local_rank if self.args._n_gpu != 0 else None,
+                **kwargs,
+            )
+
+        return model
+
+    def train(
+        self,
+        resume_from_checkpoint: Optional[Union[str, bool]] = None,
+        trial: Union["optuna.Trial", Dict[str, Any]] = None,
+        ignore_keys_for_eval: Optional[List[str]] = None,
+        **kwargs,
+    ):
+        """
+        Main training entry point.
+
+        Args:
+            resume_from_checkpoint (`str` or `bool`, *optional*):
+                If a `str`, local path to a saved checkpoint as saved by a previous instance of [`Trainer`]. If a
+                `bool` and equals `True`, load the last checkpoint in *args.output_dir* as saved by a previous instance
+                of [`Trainer`]. If present, training will resume from the model/optimizer/scheduler states loaded here.
+            trial (`optuna.Trial` or `Dict[str, Any]`, *optional*):
+                The trial run or the hyperparameter dictionary for hyperparameter search.
+            ignore_keys_for_eval (`List[str]`, *optional*):
+                A list of keys in the output of your model (if it is a dictionary) that should be ignored when
+                gathering predictions for evaluation during the training.
+            kwargs:
+                Additional keyword arguments used to hide deprecated arguments.
+        """
+        if resume_from_checkpoint is False:
+            resume_from_checkpoint = None
+
+        # memory metrics - must set up as early as possible
+        self._memory_tracker.start()
+
+        args = self.args
+
+        self.is_in_train = True
+
+        # do_train is not a reliable argument, as it might not be set and .train() still called, so
+        # the following is a workaround:
+        if (args.fp16_full_eval or args.bf16_full_eval) and not args.do_train:
+            self._move_model_to_device(self.model, args.device)
+
+        if "model_path" in kwargs:
+            resume_from_checkpoint = kwargs.pop("model_path")
+            warnings.warn(
+                "`model_path` is deprecated and will be removed in a future version. Use `resume_from_checkpoint` "
+                "instead.",
+                FutureWarning,
+            )
+        if len(kwargs) > 0:
+            raise TypeError(f"train() got unexpected keyword arguments: {', '.join(list(kwargs.keys()))}.")
+        # This might change the seed so needs to run first. 
+ self._hp_search_setup(trial) + self._train_batch_size = self.args.train_batch_size + + # Model re-init + model_reloaded = False + if self.model_init is not None: + # Seed must be set before instantiating the model when using model_init. + enable_full_determinism(self.args.seed) if self.args.full_determinism else set_seed(self.args.seed) + self.model = self.call_model_init(trial) + model_reloaded = True + # Reinitializes optimizer and scheduler + self.optimizer, self.lr_scheduler = None, None + + # Load potential model checkpoint + if isinstance(resume_from_checkpoint, bool) and resume_from_checkpoint: + resume_from_checkpoint = get_last_checkpoint(args.output_dir) + if resume_from_checkpoint is None: + raise ValueError(f"No valid checkpoint found in output directory ({args.output_dir})") + + if resume_from_checkpoint is not None and not is_sagemaker_mp_enabled() and args.deepspeed is None: + self._load_from_checkpoint(resume_from_checkpoint) + + # If model was re-initialized, put it on the right device and update self.model_wrapped + if model_reloaded: + if self.place_model_on_device: + self._move_model_to_device(self.model, args.device) + self.model_wrapped = self.model + + inner_training_loop = find_executable_batch_size( + self._inner_training_loop, self._train_batch_size, args.auto_find_batch_size + ) + return inner_training_loop( + args=args, + resume_from_checkpoint=resume_from_checkpoint, + trial=trial, + ignore_keys_for_eval=ignore_keys_for_eval, + ) + + def _inner_training_loop( + self, batch_size=None, args=None, resume_from_checkpoint=None, trial=None, ignore_keys_for_eval=None + ): + self._train_batch_size = batch_size + # Data loader and number of training steps + train_dataloader = self.get_train_dataloader() + + # Setting up training control variables: + # number of training epochs: num_train_epochs + # number of training steps per epoch: num_update_steps_per_epoch + # total number of training steps to execute: max_steps + total_train_batch_size = args.train_batch_size * args.gradient_accumulation_steps * args.world_size + + len_dataloader = None + if has_length(train_dataloader): + len_dataloader = len(train_dataloader) + num_update_steps_per_epoch = len_dataloader // args.gradient_accumulation_steps + num_update_steps_per_epoch = max(num_update_steps_per_epoch, 1) + num_examples = self.num_examples(train_dataloader) + if args.max_steps > 0: + max_steps = args.max_steps + num_train_epochs = args.max_steps // num_update_steps_per_epoch + int( + args.max_steps % num_update_steps_per_epoch > 0 + ) + # May be slightly incorrect if the last batch in the training dataloader has a smaller size but it's + # the best we can do. + num_train_samples = args.max_steps * total_train_batch_size + else: + max_steps = math.ceil(args.num_train_epochs * num_update_steps_per_epoch) + num_train_epochs = math.ceil(args.num_train_epochs) + num_train_samples = self.num_examples(train_dataloader) * args.num_train_epochs + elif args.max_steps > 0: # Rely on max_steps when dataloader does not have a working size + max_steps = args.max_steps + # Setting a very large number of epochs so we go as many times as necessary over the iterator. 
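+ # Worked example for the sized-dataloader branch above, with illustrative
+ # numbers: len(train_dataloader)=1000, gradient_accumulation_steps=4,
+ # num_train_epochs=3, max_steps unset:
+ #
+ #   num_update_steps_per_epoch = 1000 // 4 = 250
+ #   max_steps = ceil(3 * 250) = 750   # total optimizer updates
+ #
+ # The sizeless branch below instead trusts max_steps alone and lets the
+ # iterator run over as many epochs as needed.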
+ num_train_epochs = sys.maxsize
+ num_update_steps_per_epoch = max_steps
+ num_examples = total_train_batch_size * args.max_steps
+ num_train_samples = args.max_steps * total_train_batch_size
+ else:
+ raise ValueError(
+ "args.max_steps must be set to a positive value if dataloader does not have a length, was"
+ f" {args.max_steps}"
+ )
+
+ if DebugOption.UNDERFLOW_OVERFLOW in self.args.debug:
+ if self.args.n_gpu > 1:
+ # nn.DataParallel(model) replicates the model; new variables and module
+ # references registered here no longer work on the other gpus, breaking the module
+ raise ValueError(
+ "Currently --debug underflow_overflow is not supported under DP. Please use DDP"
+ " (torch.distributed.launch)."
+ )
+ else:
+ debug_overflow = DebugUnderflowOverflow(self.model)  # noqa
+
+ delay_optimizer_creation = (
+ self.sharded_ddp is not None
+ and self.sharded_ddp != ShardedDDPOption.SIMPLE
+ or is_sagemaker_mp_enabled()
+ or self.fsdp is not None
+ )
+ if args.deepspeed:
+ deepspeed_engine, optimizer, lr_scheduler = deepspeed_init(
+ self, num_training_steps=max_steps, resume_from_checkpoint=resume_from_checkpoint
+ )
+ self.model = deepspeed_engine.module
+ self.model_wrapped = deepspeed_engine
+ self.deepspeed = deepspeed_engine
+ self.optimizer = optimizer
+ self.lr_scheduler = lr_scheduler
+ elif not delay_optimizer_creation:
+ self.create_optimizer_and_scheduler(num_training_steps=max_steps)
+
+ self.state = TrainerState()
+ self.state.is_hyper_param_search = trial is not None
+
+ # Activate gradient checkpointing if needed
+ if args.gradient_checkpointing:
+ self.model.gradient_checkpointing_enable()
+
+ model = self._wrap_model(self.model_wrapped)
+
+ if is_sagemaker_mp_enabled() and resume_from_checkpoint is not None:
+ self._load_from_checkpoint(resume_from_checkpoint, model)
+
+ # for the rest of this function `model` is the outside model, whether it was wrapped or not
+ if model is not self.model:
+ self.model_wrapped = model
+
+ if delay_optimizer_creation:
+ self.create_optimizer_and_scheduler(num_training_steps=max_steps)
+
+ # Check if saved optimizer or scheduler states exist
+ self._load_optimizer_and_scheduler(resume_from_checkpoint)
+
+ # important: at this point:
+ # self.model is the Transformers Model
+ # self.model_wrapped is DDP(Transformers Model), Deepspeed(Transformers Model), etc.
+
+ # Train!
+ logger.info("***** Running training *****")
+ logger.info(f" Num examples = {num_examples}")
+ logger.info(f" Num Epochs = {num_train_epochs}")
+ logger.info(f" Instantaneous batch size per device = {args.per_device_train_batch_size}")
+ logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_train_batch_size}")
+ logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}")
+ logger.info(f" Total optimization steps = {max_steps}")
+ logger.info(
+ f" Number of trainable parameters = {sum(p.numel() for p in model.parameters() if p.requires_grad)}"
+ )
+
+ self.state.epoch = 0
+ start_time = time.time()
+ epochs_trained = 0
+ steps_trained_in_current_epoch = 0
+ steps_trained_progress_bar = None
+
+ # Check if continuing training from a checkpoint
+ if resume_from_checkpoint is not None and os.path.isfile(
+ os.path.join(resume_from_checkpoint, TRAINER_STATE_NAME)
+ ):
+ self.state = TrainerState.load_from_json(os.path.join(resume_from_checkpoint, TRAINER_STATE_NAME))
+ epochs_trained = self.state.global_step // num_update_steps_per_epoch
+ if not args.ignore_data_skip:
+ steps_trained_in_current_epoch = self.state.global_step % (num_update_steps_per_epoch)
+ steps_trained_in_current_epoch *= args.gradient_accumulation_steps
+ else:
+ steps_trained_in_current_epoch = 0
+
+ logger.info(" Continuing training from checkpoint, will skip to saved global_step")
+ logger.info(f" Continuing training from epoch {epochs_trained}")
+ logger.info(f" Continuing training from global step {self.state.global_step}")
+ if not args.ignore_data_skip:
+ if skip_first_batches is None:
+ logger.info(
+ f" Will skip the first {epochs_trained} epochs then the first"
+ f" {steps_trained_in_current_epoch} batches in the first epoch. If this takes a lot of time,"
+ " you can install the latest version of Accelerate with `pip install -U accelerate`. You can"
+ " also add the `--ignore_data_skip` flag to your launch command, but you will resume the"
+ " training on data already seen by your model."
+ )
+ else:
+ logger.info(
+ f" Will skip the first {epochs_trained} epochs then the first"
+ f" {steps_trained_in_current_epoch} batches in the first epoch."
+ )
+ if self.is_local_process_zero() and not args.disable_tqdm and skip_first_batches is None:
+ steps_trained_progress_bar = tqdm(total=steps_trained_in_current_epoch)
+ steps_trained_progress_bar.set_description("Skipping the first batches")
+
+ # Update the references
+ self.callback_handler.model = self.model
+ self.callback_handler.optimizer = self.optimizer
+ self.callback_handler.lr_scheduler = self.lr_scheduler
+ self.callback_handler.train_dataloader = train_dataloader
+ if self.hp_name is not None and self._trial is not None:
+ # use self._trial because the SigOpt/Optuna hpo only call `_hp_search_setup(trial)` instead of passing
+ # the trial parameter to train() when using DDP.
+ self.state.trial_name = self.hp_name(self._trial)
+ if trial is not None:
+ assignments = trial.assignments if self.hp_search_backend == HPSearchBackend.SIGOPT else trial
+ self.state.trial_params = hp_params(assignments)
+ else:
+ self.state.trial_params = None
+ # This should be the same if the state has been saved but in case the training arguments changed, it's safer
+ # to set this after the load.
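+ # Sketch of the checkpoint layout these state fields come from, assuming the
+ # default file names (directory is illustrative):
+ #
+ #   out/checkpoint-500/trainer_state.json   # TrainerState: global_step, epoch, log_history, ...
+ #   out/checkpoint-500/optimizer.pt         # optimizer state
+ #   out/checkpoint-500/scheduler.pt         # lr scheduler state
+ #   out/checkpoint-500/rng_state.pth        # python/numpy/torch RNG snapshots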
+ self.state.max_steps = max_steps
+ self.state.num_train_epochs = num_train_epochs
+ self.state.is_local_process_zero = self.is_local_process_zero()
+ self.state.is_world_process_zero = self.is_world_process_zero()
+
+ # tr_loss is a tensor to avoid synchronization of TPUs through .item()
+ tr_loss = torch.tensor(0.0).to(args.device)
+ # _total_loss_scalar is updated every time .item() has to be called on tr_loss and stores the sum of all losses
+ self._total_loss_scalar = 0.0
+ self._globalstep_last_logged = self.state.global_step
+ model.zero_grad()
+
+ self.control = self.callback_handler.on_train_begin(args, self.state, self.control)
+
+ # Skip the first epochs_trained epochs to get the random state of the dataloader at the right point.
+ if not args.ignore_data_skip:
+ for epoch in range(epochs_trained):
+ is_random_sampler = hasattr(train_dataloader, "sampler") and isinstance(
+ train_dataloader.sampler, RandomSampler
+ )
+ if is_torch_less_than_1_11 or not is_random_sampler:
+ # We just need to begin an iteration to create the randomization of the sampler.
+ # That was before PyTorch 1.11 however...
+ for _ in train_dataloader:
+ break
+ else:
+ # Otherwise we need to iterate through the whole sampler, because a random
+ # operation is added at the very end!
+ _ = list(train_dataloader.sampler)
+
+ total_batched_samples = 0
+ for epoch in range(epochs_trained, num_train_epochs):
+ if isinstance(train_dataloader, DataLoader) and isinstance(train_dataloader.sampler, DistributedSampler):
+ train_dataloader.sampler.set_epoch(epoch)
+ elif hasattr(train_dataloader, "dataset") and isinstance(train_dataloader.dataset, IterableDatasetShard):
+ train_dataloader.dataset.set_epoch(epoch)
+
+ if is_torch_tpu_available():
+ parallel_loader = pl.ParallelLoader(train_dataloader, [args.device]).per_device_loader(args.device)
+ epoch_iterator = parallel_loader
+ else:
+ epoch_iterator = train_dataloader
+
+ # Reset the past mems state at the beginning of each epoch if necessary.
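+ # `past_index` targets models that return past hidden states (for example
+ # Transformer-XL style mems) at a fixed position in their output tuple; a
+ # sketch of the round trip, assuming past_index=2:
+ #
+ #   outputs = model(**inputs)     # (loss, logits, mems, ...)
+ #   self._past = outputs[2]       # cached after each step (see compute_loss)
+ #   inputs["mems"] = self._past   # fed back on the next step (see _prepare_inputs)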
+ if args.past_index >= 0: + self._past = None + + steps_in_epoch = ( + len(epoch_iterator) + if len_dataloader is not None + else args.max_steps * args.gradient_accumulation_steps + ) + self.control = self.callback_handler.on_epoch_begin(args, self.state, self.control) + + if epoch == epochs_trained and resume_from_checkpoint is not None and steps_trained_in_current_epoch == 0: + self._load_rng_state(resume_from_checkpoint) + + rng_to_sync = False + steps_skipped = 0 + if skip_first_batches is not None and steps_trained_in_current_epoch > 0: + epoch_iterator = skip_first_batches(epoch_iterator, steps_trained_in_current_epoch) + steps_skipped = steps_trained_in_current_epoch + steps_trained_in_current_epoch = 0 + rng_to_sync = True + + step = -1 + for step, inputs in enumerate(epoch_iterator): + total_batched_samples += 1 + if rng_to_sync: + self._load_rng_state(resume_from_checkpoint) + rng_to_sync = False + + # Skip past any already trained steps if resuming training + if steps_trained_in_current_epoch > 0: + steps_trained_in_current_epoch -= 1 + if steps_trained_progress_bar is not None: + steps_trained_progress_bar.update(1) + if steps_trained_in_current_epoch == 0: + self._load_rng_state(resume_from_checkpoint) + continue + elif steps_trained_progress_bar is not None: + steps_trained_progress_bar.close() + steps_trained_progress_bar = None + + if step % args.gradient_accumulation_steps == 0: + self.control = self.callback_handler.on_step_begin(args, self.state, self.control) + + if ( + (total_batched_samples % args.gradient_accumulation_steps != 0) + and args.local_rank != -1 + and args._no_sync_in_gradient_accumulation + ): + # Avoid unnecessary DDP synchronization since there will be no backward pass on this example. + with model.no_sync(): + tr_loss_step = self.training_step(model, inputs) + else: + tr_loss_step = self.training_step(model, inputs) + + if ( + args.logging_nan_inf_filter + and not is_torch_tpu_available() + and (torch.isnan(tr_loss_step) or torch.isinf(tr_loss_step)) + ): + # if loss is nan or inf simply add the average of previous logged losses + tr_loss += tr_loss / (1 + self.state.global_step - self._globalstep_last_logged) + else: + tr_loss += tr_loss_step + + self.current_flos += float(self.floating_point_ops(inputs)) + + # Optimizer step for deepspeed must be called on every step regardless of the value of gradient_accumulation_steps + if self.deepspeed: + self.deepspeed.step() + + if total_batched_samples % args.gradient_accumulation_steps == 0 or ( + # last step in epoch but step is always smaller than gradient_accumulation_steps + steps_in_epoch <= args.gradient_accumulation_steps + and (step + 1) == steps_in_epoch + ): + # Gradient clipping + if args.max_grad_norm is not None and args.max_grad_norm > 0 and not self.deepspeed: + # deepspeed does its own clipping + + if self.do_grad_scaling: + # Reduce gradients first for XLA + if is_torch_tpu_available(): + gradients = xm._fetch_gradients(self.optimizer) + xm.all_reduce("sum", gradients, scale=1.0 / xm.xrt_world_size()) + # AMP: gradients need unscaling + self.scaler.unscale_(self.optimizer) + + if is_sagemaker_mp_enabled() and args.fp16: + self.optimizer.clip_master_grads(args.max_grad_norm) + elif hasattr(self.optimizer, "clip_grad_norm"): + # Some optimizers (like the sharded optimizer) have a specific way to do gradient clipping + self.optimizer.clip_grad_norm(args.max_grad_norm) + elif hasattr(model, "clip_grad_norm_"): + # Some models (like FullyShardedDDP) have a specific way to do gradient clipping + 
model.clip_grad_norm_(args.max_grad_norm)
+ else:
+ # Revert to normal clipping otherwise, handling Apex or full precision
+ nn.utils.clip_grad_norm_(
+ amp.master_params(self.optimizer) if self.use_apex else model.parameters(),
+ args.max_grad_norm,
+ )
+
+ # Optimizer step
+ optimizer_was_run = True
+ if self.deepspeed:
+ pass  # called outside the loop
+ elif is_torch_tpu_available():
+ if self.do_grad_scaling:
+ self.scaler.step(self.optimizer)
+ self.scaler.update()
+ else:
+ xm.optimizer_step(self.optimizer)
+ elif self.do_grad_scaling:
+ scale_before = self.scaler.get_scale()
+ self.scaler.step(self.optimizer)
+ self.scaler.update()
+ scale_after = self.scaler.get_scale()
+ optimizer_was_run = scale_before <= scale_after
+ else:
+ self.optimizer.step()
+
+ if optimizer_was_run and not self.deepspeed:
+ self.lr_scheduler.step()
+
+ model.zero_grad()
+ self.state.global_step += 1
+ self.state.epoch = epoch + (step + 1 + steps_skipped) / steps_in_epoch
+ self.control = self.callback_handler.on_step_end(args, self.state, self.control)
+
+ self._maybe_log_save_evaluate(tr_loss, model, trial, epoch, ignore_keys_for_eval)
+ else:
+ self.control = self.callback_handler.on_substep_end(args, self.state, self.control)
+
+ if self.control.should_epoch_stop or self.control.should_training_stop:
+ break
+ if step < 0:
+ logger.warning(
+ "There does not seem to be a single sample in your epoch_iterator, stopping training at step"
+ f" {self.state.global_step}! This is expected if you're using an IterableDataset and set"
+ f" num_steps ({max_steps}) higher than the number of available samples."
+ )
+ self.control.should_training_stop = True
+
+ self.control = self.callback_handler.on_epoch_end(args, self.state, self.control)
+ self._maybe_log_save_evaluate(tr_loss, model, trial, epoch, ignore_keys_for_eval)
+
+ if DebugOption.TPU_METRICS_DEBUG in self.args.debug:
+ if is_torch_tpu_available():
+ # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
+ xm.master_print(met.metrics_report())
+ else:
+ logger.warning(
+ "You enabled PyTorch/XLA debug metrics but you don't have a TPU "
+ "configured. Check your training configuration if this is unexpected."
+ )
+ if self.control.should_training_stop:
+ break
+
+ if args.past_index and hasattr(self, "_past"):
+ # Clean the state at the end of training
+ delattr(self, "_past")
+
+ logger.info("\n\nTraining completed. Do not forget to share your model on huggingface.co/models =)\n\n")
+ if args.load_best_model_at_end and self.state.best_model_checkpoint is not None:
+ # Wait for everyone to get here so we are sure the model has been saved by process 0.
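+ # The barrier below is picked per backend; a sketch of the equivalent
+ # bare-PyTorch pattern (illustrative):
+ #
+ #   import torch.distributed as dist
+ #   if dist.is_initialized():
+ #       dist.barrier()   # every rank blocks until rank 0 has written the files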
+ if is_torch_tpu_available(): + xm.rendezvous("load_best_model_at_end") + elif args.local_rank != -1: + dist.barrier() + elif is_sagemaker_mp_enabled(): + smp.barrier() + + self._load_best_model() + + # add remaining tr_loss + self._total_loss_scalar += tr_loss.item() + train_loss = self._total_loss_scalar / self.state.global_step + + metrics = speed_metrics("train", start_time, num_samples=num_train_samples, num_steps=self.state.max_steps) + self.store_flos() + metrics["total_flos"] = self.state.total_flos + metrics["train_loss"] = train_loss + + self.is_in_train = False + + self._memory_tracker.stop_and_update_metrics(metrics) + + self.log(metrics) + + run_dir = self._get_output_dir(trial) + checkpoints_sorted = self._sorted_checkpoints(use_mtime=False, output_dir=run_dir) + + # Delete the last checkpoint when save_total_limit=1 if it's different from the best checkpoint and process allowed to save. + if self.args.should_save and self.state.best_model_checkpoint is not None and self.args.save_total_limit == 1: + for checkpoint in checkpoints_sorted: + if checkpoint != self.state.best_model_checkpoint: + logger.info(f"Deleting older checkpoint [{checkpoint}] due to args.save_total_limit") + shutil.rmtree(checkpoint) + + self.control = self.callback_handler.on_train_end(args, self.state, self.control) + + return TrainOutput(self.state.global_step, train_loss, metrics) + + def _get_output_dir(self, trial): + if self.hp_search_backend is not None and trial is not None: + if self.hp_search_backend == HPSearchBackend.OPTUNA: + run_id = trial.number + elif self.hp_search_backend == HPSearchBackend.RAY: + from ray import tune + + run_id = tune.get_trial_id() + elif self.hp_search_backend == HPSearchBackend.SIGOPT: + run_id = trial.id + elif self.hp_search_backend == HPSearchBackend.WANDB: + import wandb + + run_id = wandb.run.id + run_name = self.hp_name(trial) if self.hp_name is not None else f"run-{run_id}" + run_dir = os.path.join(self.args.output_dir, run_name) + else: + run_dir = self.args.output_dir + return run_dir + + def _load_from_checkpoint(self, resume_from_checkpoint, model=None): + if model is None: + model = self.model + + if not os.path.isfile(os.path.join(resume_from_checkpoint, WEIGHTS_NAME)) and not os.path.isfile( + os.path.join(resume_from_checkpoint, WEIGHTS_INDEX_NAME) + ): + raise ValueError(f"Can't find a valid checkpoint at {resume_from_checkpoint}") + + logger.info(f"Loading model from {resume_from_checkpoint}.") + + if os.path.isfile(os.path.join(resume_from_checkpoint, CONFIG_NAME)): + config = PretrainedConfig.from_json_file(os.path.join(resume_from_checkpoint, CONFIG_NAME)) + checkpoint_version = config.transformers_version + if checkpoint_version is not None and checkpoint_version != __version__: + logger.warning( + f"You are resuming training from a checkpoint trained with {checkpoint_version} of " + f"Transformers but your current version is {__version__}. This is not recommended and could " + "yield to errors or unwanted behaviors." + ) + + if os.path.isfile(os.path.join(resume_from_checkpoint, WEIGHTS_NAME)): + # If the model is on the GPU, it still works! + if is_sagemaker_mp_enabled(): + if os.path.isfile(os.path.join(resume_from_checkpoint, "user_content.pt")): + # If the 'user_content.pt' file exists, load with the new smp api. + # Checkpoint must have been saved with the new smp api. 
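+ # Version-marker sketch: smp >= 1.10 writes a `user_content.pt` file next to
+ # the weights when saving, so its presence selects the new load path
+ # (layout illustrative):
+ #
+ #   checkpoint-500/pytorch_model.bin
+ #   checkpoint-500/user_content.pt   # only written by smp >= 1.10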
+ smp.resume_from_checkpoint(
+ path=resume_from_checkpoint, tag=WEIGHTS_NAME, partial=False, load_optimizer=False
+ )
+ else:
+ # If the 'user_content.pt' file does NOT exist, load with the old smp api.
+ # Checkpoint must have been saved with the old smp api.
+ if hasattr(self.args, "fp16") and self.args.fp16 is True:
+ logger.warning(
+ "Enabling FP16 and loading from an smp < 1.10 checkpoint together is not supported."
+ )
+ state_dict = torch.load(os.path.join(resume_from_checkpoint, WEIGHTS_NAME), map_location="cpu")
+ # Required for smp to not auto-translate state_dict from hf to smp (is already smp).
+ state_dict["_smp_is_partial"] = False
+ load_result = model.load_state_dict(state_dict, strict=True)
+ # release memory
+ del state_dict
+ else:
+ # We load the model state dict on the CPU to avoid an OOM error.
+ state_dict = torch.load(os.path.join(resume_from_checkpoint, WEIGHTS_NAME), map_location="cpu")
+ # workaround for FSDP bug https://github.com/pytorch/pytorch/issues/82963
+ # which takes *args instead of **kwargs
+ load_result = model.load_state_dict(state_dict, False)
+ # release memory
+ del state_dict
+ self._issue_warnings_after_load(load_result)
+ else:
+ # We load the sharded checkpoint
+ load_result = load_sharded_checkpoint(model, resume_from_checkpoint, strict=is_sagemaker_mp_enabled())
+ if not is_sagemaker_mp_enabled():
+ self._issue_warnings_after_load(load_result)
+
+ def _load_best_model(self):
+ logger.info(f"Loading best model from {self.state.best_model_checkpoint} (score: {self.state.best_metric}).")
+ best_model_path = os.path.join(self.state.best_model_checkpoint, WEIGHTS_NAME)
+ model = self.model_wrapped if is_sagemaker_mp_enabled() else self.model
+ if os.path.exists(best_model_path):
+ if self.deepspeed:
+ if self.model_wrapped is not None:
+ # this removes the pre-hooks from the previous engine
+ self.model_wrapped.destroy()
+ self.model_wrapped = None
+
+ # temp hack until Deepspeed fixes the problem with resume from an existing engine that did some stepping
+ deepspeed_engine, optimizer, lr_scheduler = deepspeed_init(
+ self,
+ num_training_steps=self.args.max_steps,
+ resume_from_checkpoint=self.state.best_model_checkpoint,
+ )
+ self.model = deepspeed_engine.module
+ self.model_wrapped = deepspeed_engine
+ self.deepspeed = deepspeed_engine
+ self.optimizer = optimizer
+ self.lr_scheduler = lr_scheduler
+ else:
+ if is_sagemaker_mp_enabled():
+ if os.path.isfile(os.path.join(self.state.best_model_checkpoint, "user_content.pt")):
+ # If the 'user_content.pt' file exists, load with the new smp api.
+ # Checkpoint must have been saved with the new smp api.
+ smp.resume_from_checkpoint(
+ path=self.state.best_model_checkpoint,
+ tag=WEIGHTS_NAME,
+ partial=False,
+ load_optimizer=False,
+ )
+ else:
+ # If the 'user_content.pt' file does NOT exist, load with the old smp api.
+ # Checkpoint must have been saved with the old smp api.
+ state_dict = torch.load(best_model_path, map_location="cpu")
+ state_dict["_smp_is_partial"] = False
+ load_result = model.load_state_dict(state_dict, strict=True)
+ else:
+ # We load the model state dict on the CPU to avoid an OOM error.
+ state_dict = torch.load(best_model_path, map_location="cpu")
+ # If the model is on the GPU, it still works!
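+ # Sketch of why map_location="cpu" helps, with illustrative sizes: a 6B
+ # parameter fp16 model holds roughly 12 GB of weights, so loading the
+ # checkpoint straight onto the GPU would briefly keep two copies there, while
+ #
+ #   state_dict = torch.load(path, map_location="cpu")
+ #   model.load_state_dict(state_dict, False)
+ #
+ # stages the second copy in host RAM instead.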
+ # workaround for FSDP bug https://github.com/pytorch/pytorch/issues/82963 + # which takes *args instead of **kwargs + load_result = model.load_state_dict(state_dict, False) + if not is_sagemaker_mp_enabled(): + self._issue_warnings_after_load(load_result) + elif os.path.exists(os.path.join(self.state.best_model_checkpoint, WEIGHTS_INDEX_NAME)): + load_result = load_sharded_checkpoint( + model, self.state.best_model_checkpoint, strict=is_sagemaker_mp_enabled() + ) + if not is_sagemaker_mp_enabled(): + self._issue_warnings_after_load(load_result) + else: + logger.warning( + f"Could not locate the best model at {best_model_path}, if you are running a distributed training " + "on multiple nodes, you should activate `--save_on_each_node`." + ) + + def _issue_warnings_after_load(self, load_result): + if len(load_result.missing_keys) != 0: + if self.model._keys_to_ignore_on_save is not None and set(load_result.missing_keys) == set( + self.model._keys_to_ignore_on_save + ): + self.model.tie_weights() + else: + logger.warning(f"There were missing keys in the checkpoint model loaded: {load_result.missing_keys}.") + if len(load_result.unexpected_keys) != 0: + logger.warning( + f"There were unexpected keys in the checkpoint model loaded: {load_result.unexpected_keys}." + ) + + def _maybe_log_save_evaluate(self, tr_loss, model, trial, epoch, ignore_keys_for_eval): + if self.control.should_log: + if is_torch_tpu_available(): + xm.mark_step() + + logs: Dict[str, float] = {} + + # all_gather + mean() to get average loss over all processes + tr_loss_scalar = self._nested_gather(tr_loss).mean().item() + + # reset tr_loss to zero + tr_loss -= tr_loss + + logs["loss"] = round(tr_loss_scalar / (self.state.global_step - self._globalstep_last_logged), 4) + logs["learning_rate"] = self._get_learning_rate() + + self._total_loss_scalar += tr_loss_scalar + self._globalstep_last_logged = self.state.global_step + self.store_flos() + + self.log(logs) + + metrics = None + if self.control.should_evaluate: + if isinstance(self.eval_dataset, dict): + for eval_dataset_name, eval_dataset in self.eval_dataset.items(): + metrics = self.evaluate( + eval_dataset=eval_dataset, + ignore_keys=ignore_keys_for_eval, + metric_key_prefix=f"eval_{eval_dataset_name}", + ) + else: + metrics = self.evaluate(ignore_keys=ignore_keys_for_eval) + self._report_to_hp_search(trial, self.state.global_step, metrics) + + if self.control.should_save: + self._save_checkpoint(model, trial, metrics=metrics) + self.control = self.callback_handler.on_save(self.args, self.state, self.control) + + def _load_rng_state(self, checkpoint): + # Load RNG states from `checkpoint` + if checkpoint is None: + return + + if self.args.world_size > 1: + process_index = self.args.process_index + rng_file = os.path.join(checkpoint, f"rng_state_{process_index}.pth") + if not os.path.isfile(rng_file): + logger.info( + f"Didn't find an RNG file for process {process_index}, if you are resuming a training that " + "wasn't launched in a distributed fashion, reproducibility is not guaranteed." + ) + return + else: + rng_file = os.path.join(checkpoint, "rng_state.pth") + if not os.path.isfile(rng_file): + logger.info( + "Didn't find an RNG file, if you are resuming a training that was launched in a distributed " + "fashion, reproducibility is not guaranteed." 
+ ) + return + + checkpoint_rng_state = torch.load(rng_file) + random.setstate(checkpoint_rng_state["python"]) + np.random.set_state(checkpoint_rng_state["numpy"]) + torch.random.set_rng_state(checkpoint_rng_state["cpu"]) + if torch.cuda.is_available(): + if self.args.local_rank != -1: + torch.cuda.random.set_rng_state(checkpoint_rng_state["cuda"]) + else: + try: + torch.cuda.random.set_rng_state_all(checkpoint_rng_state["cuda"]) + except Exception as e: + logger.info( + f"Didn't manage to set back the RNG states of the GPU because of the following error:\n {e}" + "\nThis won't yield the same results as if the training had not been interrupted." + ) + if is_torch_tpu_available(): + xm.set_rng_state(checkpoint_rng_state["xla"]) + + def _save_checkpoint(self, model, trial, metrics=None): + # In all cases, including ddp/dp/deepspeed, self.model is always a reference to the model we + # want to save except FullyShardedDDP. + # assert unwrap_model(model) is self.model, "internal model should be a reference to self.model" + + # Save model checkpoint + checkpoint_folder = f"{PREFIX_CHECKPOINT_DIR}-{self.state.global_step}" + + if self.hp_search_backend is None and trial is None: + self.store_flos() + + run_dir = self._get_output_dir(trial=trial) + output_dir = os.path.join(run_dir, checkpoint_folder) + self.save_model(output_dir, _internal_call=True) + if self.deepspeed: + # under zero3 model file itself doesn't get saved since it's bogus! Unless deepspeed + # config `stage3_gather_16bit_weights_on_model_save` is True + self.deepspeed.save_checkpoint(output_dir) + + # Save optimizer and scheduler + if self.sharded_ddp == ShardedDDPOption.SIMPLE: + self.optimizer.consolidate_state_dict() + + if is_torch_tpu_available(): + xm.rendezvous("saving_optimizer_states") + xm.save(self.optimizer.state_dict(), os.path.join(output_dir, OPTIMIZER_NAME)) + with warnings.catch_warnings(record=True) as caught_warnings: + xm.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, SCHEDULER_NAME)) + reissue_pt_warnings(caught_warnings) + elif is_sagemaker_mp_enabled(): + opt_state_dict = self.optimizer.local_state_dict(gather_if_shard=False) + smp.barrier() + if smp.rdp_rank() == 0 or smp.state.cfg.shard_optimizer_state: + smp.save( + opt_state_dict, + os.path.join(output_dir, OPTIMIZER_NAME), + partial=True, + v3=smp.state.cfg.shard_optimizer_state, + ) + if self.args.should_save: + with warnings.catch_warnings(record=True) as caught_warnings: + torch.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, SCHEDULER_NAME)) + reissue_pt_warnings(caught_warnings) + if self.do_grad_scaling: + torch.save(self.scaler.state_dict(), os.path.join(output_dir, SCALER_NAME)) + elif self.args.should_save and not self.deepspeed: + # deepspeed.save_checkpoint above saves model/optim/sched + torch.save(self.optimizer.state_dict(), os.path.join(output_dir, OPTIMIZER_NAME)) + with warnings.catch_warnings(record=True) as caught_warnings: + torch.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, SCHEDULER_NAME)) + reissue_pt_warnings(caught_warnings) + if self.do_grad_scaling: + torch.save(self.scaler.state_dict(), os.path.join(output_dir, SCALER_NAME)) + + # Determine the new best metric / best model checkpoint + if metrics is not None and self.args.metric_for_best_model is not None: + metric_to_check = self.args.metric_for_best_model + if not metric_to_check.startswith("eval_"): + metric_to_check = f"eval_{metric_to_check}" + metric_value = metrics[metric_to_check] + + operator = np.greater if 
self.args.greater_is_better else np.less + if ( + self.state.best_metric is None + or self.state.best_model_checkpoint is None + or operator(metric_value, self.state.best_metric) + ): + self.state.best_metric = metric_value + self.state.best_model_checkpoint = output_dir + + # Save the Trainer state + if self.args.should_save: + self.state.save_to_json(os.path.join(output_dir, TRAINER_STATE_NAME)) + + # Save RNG state in non-distributed training + rng_states = { + "python": random.getstate(), + "numpy": np.random.get_state(), + "cpu": torch.random.get_rng_state(), + } + if torch.cuda.is_available(): + if self.args.local_rank == -1: + # In non distributed, we save the global CUDA RNG state (will take care of DataParallel) + rng_states["cuda"] = torch.cuda.random.get_rng_state_all() + else: + rng_states["cuda"] = torch.cuda.random.get_rng_state() + + if is_torch_tpu_available(): + rng_states["xla"] = xm.get_rng_state() + + # A process can arrive here before the process 0 has a chance to save the model, in which case output_dir may + # not yet exist. + os.makedirs(output_dir, exist_ok=True) + + if self.args.world_size <= 1: + torch.save(rng_states, os.path.join(output_dir, "rng_state.pth")) + else: + torch.save(rng_states, os.path.join(output_dir, f"rng_state_{self.args.process_index}.pth")) + + if self.args.push_to_hub: + self._push_from_checkpoint(output_dir) + + # Maybe delete some older checkpoints. + if self.args.should_save: + self._rotate_checkpoints(use_mtime=True, output_dir=run_dir) + + def _load_optimizer_and_scheduler(self, checkpoint): + """If optimizer and scheduler states exist, load them.""" + if checkpoint is None: + return + + if self.deepspeed: + # deepspeed loads optimizer/lr_scheduler together with the model in deepspeed_init + return + + checkpoint_file_exists = ( + glob.glob(os.path.join(checkpoint, OPTIMIZER_NAME) + "_*") + if is_sagemaker_mp_enabled() + else os.path.isfile(os.path.join(checkpoint, OPTIMIZER_NAME)) + ) + if checkpoint_file_exists and os.path.isfile(os.path.join(checkpoint, SCHEDULER_NAME)): + # Load in optimizer and scheduler states + if is_torch_tpu_available(): + # On TPU we have to take some extra precautions to properly load the states on the right device. 
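+ # Sketch of the TPU precaution below: the states are first materialized on
+ # CPU and then moved with xm.send_cpu_data_to_device, rather than loaded
+ # directly onto the XLA device (illustrative):
+ #
+ #   state = torch.load(path, map_location="cpu")
+ #   xm.send_cpu_data_to_device(state, self.args.device)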
+ optimizer_state = torch.load(os.path.join(checkpoint, OPTIMIZER_NAME), map_location="cpu")
+ with warnings.catch_warnings(record=True) as caught_warnings:
+ lr_scheduler_state = torch.load(os.path.join(checkpoint, SCHEDULER_NAME), map_location="cpu")
+ reissue_pt_warnings(caught_warnings)
+
+ xm.send_cpu_data_to_device(optimizer_state, self.args.device)
+ xm.send_cpu_data_to_device(lr_scheduler_state, self.args.device)
+
+ self.optimizer.load_state_dict(optimizer_state)
+ self.lr_scheduler.load_state_dict(lr_scheduler_state)
+ else:
+ map_location = "cpu" if is_sagemaker_mp_enabled() else self.args.device
+ if is_sagemaker_mp_enabled():
+ if os.path.isfile(os.path.join(checkpoint, "user_content.pt")):
+ # Optimizer checkpoint was saved with smp >= 1.10
+ def opt_load_hook(mod, opt):
+ opt.load_state_dict(smp.load(os.path.join(checkpoint, OPTIMIZER_NAME), partial=True))
+
+ else:
+ # Optimizer checkpoint was saved with smp < 1.10
+ def opt_load_hook(mod, opt):
+ if IS_SAGEMAKER_MP_POST_1_10:
+ opt.load_state_dict(
+ smp.load(os.path.join(checkpoint, OPTIMIZER_NAME), partial=True, back_compat=True)
+ )
+ else:
+ opt.load_state_dict(smp.load(os.path.join(checkpoint, OPTIMIZER_NAME), partial=True))
+
+ self.model_wrapped.register_post_step_hook(opt_load_hook)
+ else:
+ self.optimizer.load_state_dict(
+ torch.load(os.path.join(checkpoint, OPTIMIZER_NAME), map_location=map_location)
+ )
+ with warnings.catch_warnings(record=True) as caught_warnings:
+ self.lr_scheduler.load_state_dict(torch.load(os.path.join(checkpoint, SCHEDULER_NAME)))
+ reissue_pt_warnings(caught_warnings)
+ if self.do_grad_scaling and os.path.isfile(os.path.join(checkpoint, SCALER_NAME)):
+ self.scaler.load_state_dict(torch.load(os.path.join(checkpoint, SCALER_NAME)))
+
+ def hyperparameter_search(
+ self,
+ hp_space: Optional[Callable[["optuna.Trial"], Dict[str, float]]] = None,
+ compute_objective: Optional[Callable[[Dict[str, float]], float]] = None,
+ n_trials: int = 20,
+ direction: str = "minimize",
+ backend: Optional[Union[str, HPSearchBackend]] = None,
+ hp_name: Optional[Callable[["optuna.Trial"], str]] = None,
+ **kwargs,
+ ) -> BestRun:
+ """
+ Launch a hyperparameter search using `optuna`, `Ray Tune`, or `SigOpt`. The optimized quantity is determined
+ by `compute_objective`, which defaults to a function returning the evaluation loss when no metric is provided,
+ the sum of all metrics otherwise.
+
+
+
+ To use this method, you need to have provided a `model_init` when initializing your [`Trainer`]: we need to
+ reinitialize the model at each new run. This is incompatible with the `optimizers` argument, so you need to
+ subclass [`Trainer`] and override the method [`~Trainer.create_optimizer_and_scheduler`] for custom
+ optimizer/scheduler.
+
+
+
+ Args:
+ hp_space (`Callable[["optuna.Trial"], Dict[str, float]]`, *optional*):
+ A function that defines the hyperparameter search space. Will default to
+ [`~trainer_utils.default_hp_space_optuna`] or [`~trainer_utils.default_hp_space_ray`] or
+ [`~trainer_utils.default_hp_space_sigopt`] depending on your backend.
+ compute_objective (`Callable[[Dict[str, float]], float]`, *optional*):
+ A function computing the objective to minimize or maximize from the metrics returned by the `evaluate`
+ method. Will default to [`~trainer_utils.default_compute_objective`].
+ n_trials (`int`, *optional*, defaults to 20):
+ The number of trial runs to test.
+ direction (`str`, *optional*, defaults to `"minimize"`):
+ Whether to optimize for greater or lower objective values. Can be `"minimize"` or `"maximize"`; pick
+ `"minimize"` when optimizing the validation loss, `"maximize"` when optimizing one or several metrics.
+ backend (`str` or [`~training_utils.HPSearchBackend`], *optional*):
+ The backend to use for hyperparameter search. Will default to optuna or Ray Tune or SigOpt, depending
+ on which one is installed. If all are installed, will default to optuna.
+ hp_name (`Callable[["optuna.Trial"], str]`, *optional*):
+ A function that defines the trial/run name. Will default to None.
+ kwargs (`Dict[str, Any]`, *optional*):
+ Additional keyword arguments passed along to `optuna.create_study` or `ray.tune.run`. For more
+ information see:
+
+ - the documentation of
+ [optuna.create_study](https://optuna.readthedocs.io/en/stable/reference/generated/optuna.study.create_study.html)
+ - the documentation of [tune.run](https://docs.ray.io/en/latest/tune/api_docs/execution.html#tune-run)
+ - the documentation of [sigopt](https://app.sigopt.com/docs/endpoints/experiments/create)
+
+ Returns:
+ [`trainer_utils.BestRun`]: All the information about the best run. Experiment summary can be found in
+ `run_summary` attribute for Ray backend.
+ """
+ if backend is None:
+ backend = default_hp_search_backend()
+ if backend is None:
+ raise RuntimeError(
+ "At least one of optuna or ray should be installed. "
+ "To install optuna run `pip install optuna`. "
+ "To install ray run `pip install ray[tune]`. "
+ "To install sigopt run `pip install sigopt`."
+ )
+ backend = HPSearchBackend(backend)
+ if backend == HPSearchBackend.OPTUNA and not is_optuna_available():
+ raise RuntimeError("You picked the optuna backend, but it is not installed. Use `pip install optuna`.")
+ if backend == HPSearchBackend.RAY and not is_ray_tune_available():
+ raise RuntimeError(
+ "You picked the Ray Tune backend, but it is not installed. Use `pip install 'ray[tune]'`."
+ )
+ if backend == HPSearchBackend.SIGOPT and not is_sigopt_available():
+ raise RuntimeError("You picked the sigopt backend, but it is not installed. Use `pip install sigopt`.")
+ if backend == HPSearchBackend.WANDB and not is_wandb_available():
+ raise RuntimeError("You picked the wandb backend, but it is not installed. Use `pip install wandb`.")
+ self.hp_search_backend = backend
+ if self.model_init is None:
+ raise RuntimeError(
+ "To use hyperparameter search, you need to pass your model through a model_init function."
+ )
+
+ self.hp_space = default_hp_space[backend] if hp_space is None else hp_space
+ self.hp_name = hp_name
+ self.compute_objective = default_compute_objective if compute_objective is None else compute_objective
+
+ backend_dict = {
+ HPSearchBackend.OPTUNA: run_hp_search_optuna,
+ HPSearchBackend.RAY: run_hp_search_ray,
+ HPSearchBackend.SIGOPT: run_hp_search_sigopt,
+ HPSearchBackend.WANDB: run_hp_search_wandb,
+ }
+ best_run = backend_dict[backend](self, n_trials, direction, **kwargs)
+
+ self.hp_search_backend = None
+ return best_run
+
+ def log(self, logs: Dict[str, float]) -> None:
+ """
+ Log `logs` on the various objects watching training.
+
+ Subclass and override this method to inject custom behavior.
+
+ Args:
+ logs (`Dict[str, float]`):
+ The values to log.
+ """ + if self.state.epoch is not None: + logs["epoch"] = round(self.state.epoch, 2) + + output = {**logs, **{"step": self.state.global_step}} + self.state.log_history.append(output) + self.control = self.callback_handler.on_log(self.args, self.state, self.control, logs) + + def _prepare_input(self, data: Union[torch.Tensor, Any]) -> Union[torch.Tensor, Any]: + """ + Prepares one `data` before feeding it to the model, be it a tensor or a nested list/dictionary of tensors. + """ + if isinstance(data, Mapping): + return type(data)({k: self._prepare_input(v) for k, v in data.items()}) + elif isinstance(data, (tuple, list)): + return type(data)(self._prepare_input(v) for v in data) + elif isinstance(data, torch.Tensor): + kwargs = {"device": self.args.device} + if self.deepspeed and (torch.is_floating_point(data) or torch.is_complex(data)): + # NLP models inputs are int/uint and those get adjusted to the right dtype of the + # embedding. Other models such as wav2vec2's inputs are already float and thus + # may need special handling to match the dtypes of the model + kwargs.update({"dtype": self.args.hf_deepspeed_config.dtype()}) + return data.to(**kwargs) + return data + + def _prepare_inputs(self, inputs: Dict[str, Union[torch.Tensor, Any]]) -> Dict[str, Union[torch.Tensor, Any]]: + """ + Prepare `inputs` before feeding them to the model, converting them to tensors if they are not already and + handling potential state. + """ + inputs = self._prepare_input(inputs) + if len(inputs) == 0: + raise ValueError( + "The batch received was empty, your model won't be able to train on it. Double-check that your " + f"training dataset contains keys expected by the model: {','.join(self._signature_columns)}." + ) + if self.args.past_index >= 0 and self._past is not None: + inputs["mems"] = self._past + + return inputs + + def compute_loss_context_manager(self): + """ + A helper wrapper to group together context managers. + """ + return self.autocast_smart_context_manager() + + def autocast_smart_context_manager(self, cache_enabled: Optional[bool] = True): + """ + A helper wrapper that creates an appropriate context manager for `autocast` while feeding it the desired + arguments, depending on the situation. + """ + if self.use_cuda_amp or self.use_cpu_amp: + if is_torch_greater_or_equal_than_1_10: + ctx_manager = ( + torch.cpu.amp.autocast(cache_enabled=cache_enabled, dtype=self.amp_dtype) + if self.use_cpu_amp + else torch.cuda.amp.autocast(cache_enabled=cache_enabled, dtype=self.amp_dtype) + ) + else: + ctx_manager = torch.cuda.amp.autocast() + else: + ctx_manager = contextlib.nullcontext() if sys.version_info >= (3, 7) else contextlib.suppress() + + return ctx_manager + + def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor: + """ + Perform a training step on a batch of inputs. + + Subclass and override to inject custom behavior. + + Args: + model (`nn.Module`): + The model to train. + inputs (`Dict[str, Union[torch.Tensor, Any]]`): + The inputs and targets of the model. + + The dictionary will be unpacked before being fed to the model. Most models expect the targets under the + argument `labels`. Check your model's documentation for all accepted arguments. + + Return: + `torch.Tensor`: The tensor with training loss on this batch. 
+ """ + model.train() + inputs = self._prepare_inputs(inputs) + + if is_sagemaker_mp_enabled(): + loss_mb = smp_forward_backward(model, inputs, self.args.gradient_accumulation_steps) + return loss_mb.reduce_mean().detach().to(self.args.device) + + with self.compute_loss_context_manager(): + loss = self.compute_loss(model, inputs) + + if self.args.n_gpu > 1: + loss = loss.mean() # mean() to average on multi-gpu parallel training + + if self.args.gradient_accumulation_steps > 1 and not self.deepspeed: + # deepspeed handles loss scaling by gradient_accumulation_steps in its `backward` + loss = loss / self.args.gradient_accumulation_steps + + if self.do_grad_scaling: + self.scaler.scale(loss).backward() + elif self.use_apex: + with amp.scale_loss(loss, self.optimizer) as scaled_loss: + scaled_loss.backward() + elif self.deepspeed: + # loss gets scaled under gradient_accumulation_steps in deepspeed + loss = self.deepspeed.backward(loss) + else: + loss.backward() + + return loss.detach() + + def compute_loss(self, model, inputs, return_outputs=False): + """ + How the loss is computed by Trainer. By default, all models return the loss in the first element. + + Subclass and override for custom behavior. + """ + if self.label_smoother is not None and "labels" in inputs: + labels = inputs.pop("labels") + else: + labels = None + outputs = model(**inputs) + # Save past state if it exists + # TODO: this needs to be fixed and made cleaner later. + if self.args.past_index >= 0: + self._past = outputs[self.args.past_index] + + if labels is not None: + if unwrap_model(model)._get_name() in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES.values(): + loss = self.label_smoother(outputs, labels, shift_labels=True) + else: + loss = self.label_smoother(outputs, labels) + else: + if isinstance(outputs, dict) and "loss" not in outputs: + raise ValueError( + "The model did not return a loss from the inputs, only the following keys: " + f"{','.join(outputs.keys())}. For reference, the inputs it received are {','.join(inputs.keys())}." + ) + # We don't use .loss here since the model may return tuples instead of ModelOutput. + loss = outputs["loss"] if isinstance(outputs, dict) else outputs[0] + + return (loss, outputs) if return_outputs else loss + + def is_local_process_zero(self) -> bool: + """ + Whether or not this process is the local (e.g., on one machine if training in a distributed fashion on several + machines) main process. + """ + return self.args.local_process_index == 0 + + def is_world_process_zero(self) -> bool: + """ + Whether or not this process is the global main process (when training in a distributed fashion on several + machines, this is only going to be `True` for one process). + """ + # Special case for SageMaker ModelParallel since there process_index is dp_process_index, not the global + # process index. + if is_sagemaker_mp_enabled(): + return smp.rank() == 0 + else: + return self.args.process_index == 0 + + def save_model(self, output_dir: Optional[str] = None, _internal_call: bool = False): + """ + Will save the model, so you can reload it using `from_pretrained()`. + + Will only save from the main process. + """ + + if output_dir is None: + output_dir = self.args.output_dir + + if is_torch_tpu_available(): + self._save_tpu(output_dir) + elif is_sagemaker_mp_enabled(): + # Calling the state_dict needs to be done on the wrapped model and on all processes. 
+ os.makedirs(output_dir, exist_ok=True)
+ state_dict = self.model_wrapped.state_dict()
+ if self.args.should_save:
+ self._save(output_dir, state_dict=state_dict)
+ if IS_SAGEMAKER_MP_POST_1_10:
+ # 'user_content.pt' indicates model state_dict saved with smp >= 1.10
+ Path(os.path.join(output_dir, "user_content.pt")).touch()
+ elif (
+ ShardedDDPOption.ZERO_DP_2 in self.args.sharded_ddp
+ or ShardedDDPOption.ZERO_DP_3 in self.args.sharded_ddp
+ or self.fsdp is not None
+ ):
+ state_dict = self.model.state_dict()
+
+ if self.args.should_save:
+ self._save(output_dir, state_dict=state_dict)
+ elif self.deepspeed:
+ # this takes care of everything as long as we aren't under zero3
+ if self.args.should_save:
+ self._save(output_dir)
+
+ if is_deepspeed_zero3_enabled():
+ # It's too complicated to try to override different places where the weights dump gets
+ # saved, so since under zero3 the file is bogus, simply delete it. The user should
+ # either use the deepspeed checkpoint to resume, or use zero_to_fp32.py (stored in the
+ # checkpoint) to recover the full weights.
+ if self.args.should_save:
+ file = os.path.join(output_dir, WEIGHTS_NAME)
+ if os.path.isfile(file):
+ # logger.info(f"deepspeed zero3: removing {file}, see zero_to_fp32.py to recover weights")
+ os.remove(file)
+
+ # now save the real model if stage3_gather_16bit_weights_on_model_save=True
+ # if false it will not be saved.
+ # This must be called on all ranks
+ if not self.deepspeed.save_16bit_model(output_dir, WEIGHTS_NAME):
+ logger.warning(
+ "deepspeed.save_16bit_model didn't save the model, since"
+ " stage3_gather_16bit_weights_on_model_save=false. Saving the full checkpoint instead, use"
+ " zero_to_fp32.py to recover weights"
+ )
+ self.deepspeed.save_checkpoint(output_dir)
+
+ elif self.args.should_save:
+ self._save(output_dir)
+
+ # Push to the Hub when `save_model` is called by the user.
+ if self.args.push_to_hub and not _internal_call:
+ self.push_to_hub(commit_message="Model save")
+
+ def _save_tpu(self, output_dir: Optional[str] = None):
+ output_dir = output_dir if output_dir is not None else self.args.output_dir
+ logger.info(f"Saving model checkpoint to {output_dir}")
+
+ if xm.is_master_ordinal():
+ os.makedirs(output_dir, exist_ok=True)
+ torch.save(self.args, os.path.join(output_dir, TRAINING_ARGS_NAME))
+
+ # Save a trained model and configuration using `save_pretrained()`.
+ # They can then be reloaded using `from_pretrained()`
+ xm.rendezvous("saving_checkpoint")
+ if not isinstance(self.model, PreTrainedModel):
+ if isinstance(unwrap_model(self.model), PreTrainedModel):
+ unwrap_model(self.model).save_pretrained(
+ output_dir,
+ is_main_process=self.args.should_save,
+ state_dict=self.model.state_dict(),
+ save_function=xm.save,
+ )
+ else:
+ logger.info("Trainer.model is not a `PreTrainedModel`, only saving its state dict.")
+ state_dict = self.model.state_dict()
+ xm.save(state_dict, os.path.join(output_dir, WEIGHTS_NAME))
+ else:
+ self.model.save_pretrained(output_dir, is_main_process=self.args.should_save, save_function=xm.save)
+ if self.tokenizer is not None and self.args.should_save:
+ self.tokenizer.save_pretrained(output_dir)
+
+ def _save(self, output_dir: Optional[str] = None, state_dict=None):
+ # If we are executing this function, we are the process zero, so we don't check for that.
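+ # Output layout sketch for `_save`, assuming the default file names
+ # (directory illustrative):
+ #
+ #   out/pytorch_model.bin    # weights (WEIGHTS_NAME)
+ #   out/config.json          # written by save_pretrained for PreTrainedModel
+ #   out/training_args.bin    # TRAINING_ARGS_NAME, saved at the end below
+ #   out/tokenizer files      # if a tokenizer was passed to the Trainer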
+ output_dir = output_dir if output_dir is not None else self.args.output_dir
+ os.makedirs(output_dir, exist_ok=True)
+ logger.info(f"Saving model checkpoint to {output_dir}")
+ # Save a trained model and configuration using `save_pretrained()`.
+ # They can then be reloaded using `from_pretrained()`
+ if not isinstance(self.model, PreTrainedModel):
+ if isinstance(unwrap_model(self.model), PreTrainedModel):
+ if state_dict is None:
+ state_dict = self.model.state_dict()
+ unwrap_model(self.model).save_pretrained(output_dir, state_dict=state_dict)
+ else:
+ logger.info("Trainer.model is not a `PreTrainedModel`, only saving its state dict.")
+ if state_dict is None:
+ state_dict = self.model.state_dict()
+ torch.save(state_dict, os.path.join(output_dir, WEIGHTS_NAME))
+ else:
+ # state_dict = self.model.state_dict()
+ # filtered_state_dict = {}
+ # for k, v in self.model.named_parameters():
+ #     if v.requires_grad:
+ #         filtered_state_dict[k] = state_dict[k]
+ # print(filtered_state_dict.keys())
+ self.model.save_pretrained(output_dir, state_dict=state_dict)
+ if self.tokenizer is not None:
+ self.tokenizer.save_pretrained(output_dir)
+
+ # Good practice: save your training arguments together with the trained model
+ torch.save(self.args, os.path.join(output_dir, TRAINING_ARGS_NAME))
+
+ def store_flos(self):
+ # Storing the number of floating-point operations that went into the model
+ if self.args.local_rank != -1:
+ self.state.total_flos += (
+ distributed_broadcast_scalars([self.current_flos], device=self.args.device).sum().item()
+ )
+ self.current_flos = 0
+ else:
+ self.state.total_flos += self.current_flos
+ self.current_flos = 0
+
+ def _sorted_checkpoints(
+ self, output_dir=None, checkpoint_prefix=PREFIX_CHECKPOINT_DIR, use_mtime=False
+ ) -> List[str]:
+ ordering_and_checkpoint_path = []
+
+ glob_checkpoints = [str(x) for x in Path(output_dir).glob(f"{checkpoint_prefix}-*") if os.path.isdir(x)]
+
+ for path in glob_checkpoints:
+ if use_mtime:
+ ordering_and_checkpoint_path.append((os.path.getmtime(path), path))
+ else:
+ regex_match = re.match(f".*{checkpoint_prefix}-([0-9]+)", path)
+ if regex_match is not None and regex_match.groups() is not None:
+ ordering_and_checkpoint_path.append((int(regex_match.groups()[0]), path))
+
+ checkpoints_sorted = sorted(ordering_and_checkpoint_path)
+ checkpoints_sorted = [checkpoint[1] for checkpoint in checkpoints_sorted]
+ # Make sure we don't delete the best model.
+ if self.state.best_model_checkpoint is not None:
+ best_model_index = checkpoints_sorted.index(str(Path(self.state.best_model_checkpoint)))
+ for i in range(best_model_index, len(checkpoints_sorted) - 2):
+ checkpoints_sorted[i], checkpoints_sorted[i + 1] = checkpoints_sorted[i + 1], checkpoints_sorted[i]
+ return checkpoints_sorted
+
+ def _rotate_checkpoints(self, use_mtime=False, output_dir=None) -> None:
+ if self.args.save_total_limit is None or self.args.save_total_limit <= 0:
+ return
+
+ # Check if we should delete older checkpoint(s)
+ checkpoints_sorted = self._sorted_checkpoints(use_mtime=use_mtime, output_dir=output_dir)
+ if len(checkpoints_sorted) <= self.args.save_total_limit:
+ return
+
+ # If save_total_limit=1 with load_best_model_at_end=True, we could end up deleting the last checkpoint, which
+ # we don't do to allow resuming.
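+ # Worked example for the rotation below, assuming save_total_limit=1 and two
+ # checkpoints on disk (paths illustrative):
+ #
+ #   checkpoints_sorted = ["out/checkpoint-500",   # current best
+ #                         "out/checkpoint-750"]   # most recent
+ #   save_total_limit   = 2                        # bumped from 1 below
+ #   number_to_delete   = max(0, 2 - 2) = 0        # both survive, resume stays possible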
+ save_total_limit = self.args.save_total_limit
+ if (
+ self.state.best_model_checkpoint is not None
+ and self.args.save_total_limit == 1
+ and checkpoints_sorted[-1] != self.state.best_model_checkpoint
+ ):
+ save_total_limit = 2
+
+ number_of_checkpoints_to_delete = max(0, len(checkpoints_sorted) - save_total_limit)
+ checkpoints_to_be_deleted = checkpoints_sorted[:number_of_checkpoints_to_delete]
+ for checkpoint in checkpoints_to_be_deleted:
+ logger.info(f"Deleting older checkpoint [{checkpoint}] due to args.save_total_limit")
+ shutil.rmtree(checkpoint, ignore_errors=True)
+
+ def evaluate(
+ self,
+ eval_dataset: Optional[Dataset] = None,
+ ignore_keys: Optional[List[str]] = None,
+ metric_key_prefix: str = "eval",
+ ) -> Dict[str, float]:
+ """
+ Run evaluation and return metrics.
+
+ The calling script will be responsible for providing a method to compute metrics, as they are task-dependent
+ (pass it to the init `compute_metrics` argument).
+
+ You can also subclass and override this method to inject custom behavior.
+
+ Args:
+ eval_dataset (`Dataset`, *optional*):
+ Pass a dataset if you wish to override `self.eval_dataset`. If it is a [`~datasets.Dataset`], columns
+ not accepted by the `model.forward()` method are automatically removed. It must implement the `__len__`
+ method.
+ ignore_keys (`List[str]`, *optional*):
+ A list of keys in the output of your model (if it is a dictionary) that should be ignored when
+ gathering predictions.
+ metric_key_prefix (`str`, *optional*, defaults to `"eval"`):
+ An optional prefix to be used as the metrics key prefix. For example the metrics "bleu" will be named
+ "eval_bleu" if the prefix is "eval" (default).
+
+ Returns:
+ A dictionary containing the evaluation loss and the potential metrics computed from the predictions. The
+ dictionary also contains the epoch number which comes from the training state.
+ """
+ # memory metrics - must set up as early as possible
+ self._memory_tracker.start()
+
+ eval_dataloader = self.get_eval_dataloader(eval_dataset)
+ start_time = time.time()
+
+ eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
+ output = eval_loop(
+ eval_dataloader,
+ description="Evaluation",
+ # No point gathering the predictions if there are no metrics, otherwise we defer to
+ # self.args.prediction_loss_only
+ prediction_loss_only=True if self.compute_metrics is None else None,
+ ignore_keys=ignore_keys,
+ metric_key_prefix=metric_key_prefix,
+ )
+
+ total_batch_size = self.args.eval_batch_size * self.args.world_size
+ if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
+ start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
+ output.metrics.update(
+ speed_metrics(
+ metric_key_prefix,
+ start_time,
+ num_samples=output.num_samples,
+ num_steps=math.ceil(output.num_samples / total_batch_size),
+ )
+ )
+
+ self.log(output.metrics)
+
+ if DebugOption.TPU_METRICS_DEBUG in self.args.debug:
+ # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
+ xm.master_print(met.metrics_report())
+
+ self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, output.metrics)
+
+ self._memory_tracker.stop_and_update_metrics(output.metrics)
+
+ return output.metrics
+
+ def predict(
+ self, test_dataset: Dataset, ignore_keys: Optional[List[str]] = None, metric_key_prefix: str = "test"
+ ) -> PredictionOutput:
+ """
+ Run prediction and return predictions and potential metrics.
+
+ Depending on the dataset and your use case, your test dataset may contain labels. In that case, this method
+ will also return metrics, like in `evaluate()`.
+
+ Args:
+ test_dataset (`Dataset`):
+ Dataset to run the predictions on. If it is a `datasets.Dataset`, columns not accepted by the
+ `model.forward()` method are automatically removed. Has to implement the method `__len__`
+ ignore_keys (`List[str]`, *optional*):
+ A list of keys in the output of your model (if it is a dictionary) that should be ignored when
+ gathering predictions.
+ metric_key_prefix (`str`, *optional*, defaults to `"test"`):
+ An optional prefix to be used as the metrics key prefix. For example the metrics "bleu" will be named
+ "test_bleu" if the prefix is "test" (default).
+
+
+
+ If your predictions or labels have different sequence length (for instance because you're doing dynamic padding
+ in a token classification task) the predictions will be padded (on the right) to allow for concatenation into
+ one array. The padding index is -100.
+
+
+
+ Returns: *NamedTuple* A namedtuple with the following keys:
+
+ - predictions (`np.ndarray`): The predictions on `test_dataset`.
+ - label_ids (`np.ndarray`, *optional*): The labels (if the dataset contained some).
+ - metrics (`Dict[str, float]`, *optional*): The potential dictionary of metrics (if the dataset contained
+ labels).
+ """
+ # memory metrics - must set up as early as possible
+ self._memory_tracker.start()
+
+ test_dataloader = self.get_test_dataloader(test_dataset)
+ start_time = time.time()
+
+ eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
+ output = eval_loop(
+ test_dataloader, description="Prediction", ignore_keys=ignore_keys, metric_key_prefix=metric_key_prefix
+ )
+ total_batch_size = self.args.eval_batch_size * self.args.world_size
+ if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
+ start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
+ output.metrics.update(
+ speed_metrics(
+ metric_key_prefix,
+ start_time,
+ num_samples=output.num_samples,
+ num_steps=math.ceil(output.num_samples / total_batch_size),
+ )
+ )
+
+ self.control = self.callback_handler.on_predict(self.args, self.state, self.control, output.metrics)
+ self._memory_tracker.stop_and_update_metrics(output.metrics)
+
+ return PredictionOutput(predictions=output.predictions, label_ids=output.label_ids, metrics=output.metrics)
+
+ def evaluation_loop(
+ self,
+ dataloader: DataLoader,
+ description: str,
+ prediction_loss_only: Optional[bool] = None,
+ ignore_keys: Optional[List[str]] = None,
+ metric_key_prefix: str = "eval",
+ ) -> EvalLoopOutput:
+ """
+ Prediction/evaluation loop, shared by `Trainer.evaluate()` and `Trainer.predict()`.
+
+ Works both with or without labels.
+ """ + args = self.args + + prediction_loss_only = prediction_loss_only if prediction_loss_only is not None else args.prediction_loss_only + + # if eval is called w/o train init deepspeed here + if args.deepspeed and not self.deepspeed: + # XXX: eval doesn't have `resume_from_checkpoint` arg but we should be able to do eval + # from the checkpoint eventually + deepspeed_engine, _, _ = deepspeed_init( + self, num_training_steps=0, resume_from_checkpoint=None, inference=True + ) + self.model = deepspeed_engine.module + self.model_wrapped = deepspeed_engine + self.deepspeed = deepspeed_engine + + model = self._wrap_model(self.model, training=False, dataloader=dataloader) + + # if full fp16 or bf16 eval is wanted and this ``evaluation`` or ``predict`` isn't called + # while ``train`` is running, cast it to the right dtype first and then put on device + if not self.is_in_train: + if args.fp16_full_eval: + model = model.to(dtype=torch.float16, device=args.device) + elif args.bf16_full_eval: + model = model.to(dtype=torch.bfloat16, device=args.device) + + batch_size = self.args.eval_batch_size + + logger.info(f"***** Running {description} *****") + if has_length(dataloader): + logger.info(f" Num examples = {self.num_examples(dataloader)}") + else: + logger.info(" Num examples: Unknown") + logger.info(f" Batch size = {batch_size}") + + model.eval() + + self.callback_handler.eval_dataloader = dataloader + # Do this before wrapping. + eval_dataset = getattr(dataloader, "dataset", None) + + if is_torch_tpu_available(): + dataloader = pl.ParallelLoader(dataloader, [args.device]).per_device_loader(args.device) + + if args.past_index >= 0: + self._past = None + + # Initialize containers + # losses/preds/labels on GPU/TPU (accumulated for eval_accumulation_steps) + losses_host = None + preds_host = None + labels_host = None + inputs_host = None + + # losses/preds/labels on CPU (final containers) + all_losses = None + all_preds = None + all_labels = None + all_inputs = None + # Will be useful when we have an iterable dataset so don't know its length. + + observed_num_examples = 0 + # Main evaluation loop + for step, inputs in enumerate(dataloader): + # Update the observed num examples + observed_batch_size = find_batch_size(inputs) + if observed_batch_size is not None: + observed_num_examples += observed_batch_size + # For batch samplers, batch_size is not known by the dataloader in advance. 
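+            # In that case we fall back below to the batch size observed on the first batch.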
+ if batch_size is None: + batch_size = observed_batch_size + + # Prediction step + loss, logits, labels = self.prediction_step(model, inputs, prediction_loss_only, ignore_keys=ignore_keys) + inputs_decode = self._prepare_input(inputs["input_ids"]) if args.include_inputs_for_metrics else None + + if is_torch_tpu_available(): + xm.mark_step() + + # Update containers on host + if loss is not None: + losses = self._nested_gather(loss.repeat(batch_size)) + losses_host = losses if losses_host is None else torch.cat((losses_host, losses), dim=0) + if labels is not None: + labels = self._pad_across_processes(labels) + labels = self._nested_gather(labels) + labels_host = labels if labels_host is None else nested_concat(labels_host, labels, padding_index=-100) + if inputs_decode is not None: + inputs_decode = self._pad_across_processes(inputs_decode) + inputs_decode = self._nested_gather(inputs_decode) + inputs_host = ( + inputs_decode + if inputs_host is None + else nested_concat(inputs_host, inputs_decode, padding_index=-100) + ) + if logits is not None: + logits = self._pad_across_processes(logits) + logits = self._nested_gather(logits) + if self.preprocess_logits_for_metrics is not None: + logits = self.preprocess_logits_for_metrics(logits, labels) + preds_host = logits if preds_host is None else nested_concat(preds_host, logits, padding_index=-100) + self.control = self.callback_handler.on_prediction_step(args, self.state, self.control) + + # Gather all tensors and put them back on the CPU if we have done enough accumulation steps. + if args.eval_accumulation_steps is not None and (step + 1) % args.eval_accumulation_steps == 0: + if losses_host is not None: + losses = nested_numpify(losses_host) + all_losses = losses if all_losses is None else np.concatenate((all_losses, losses), axis=0) + if preds_host is not None: + logits = nested_numpify(preds_host) + all_preds = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100) + if inputs_host is not None: + inputs_decode = nested_numpify(inputs_host) + all_inputs = ( + inputs_decode + if all_inputs is None + else nested_concat(all_inputs, inputs_decode, padding_index=-100) + ) + if labels_host is not None: + labels = nested_numpify(labels_host) + all_labels = ( + labels if all_labels is None else nested_concat(all_labels, labels, padding_index=-100) + ) + + # Set back to None to begin a new accumulation + losses_host, preds_host, inputs_host, labels_host = None, None, None, None + + if args.past_index and hasattr(self, "_past"): + # Clean the state at the end of the evaluation loop + delattr(self, "_past") + + # Gather all remaining tensors and put them back on the CPU + if losses_host is not None: + losses = nested_numpify(losses_host) + all_losses = losses if all_losses is None else np.concatenate((all_losses, losses), axis=0) + if preds_host is not None: + logits = nested_numpify(preds_host) + all_preds = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100) + if inputs_host is not None: + inputs_decode = nested_numpify(inputs_host) + all_inputs = ( + inputs_decode if all_inputs is None else nested_concat(all_inputs, inputs_decode, padding_index=-100) + ) + if labels_host is not None: + labels = nested_numpify(labels_host) + all_labels = labels if all_labels is None else nested_concat(all_labels, labels, padding_index=-100) + + # Number of samples + if has_length(eval_dataset): + num_samples = len(eval_dataset) + # The instance check is weird and does not actually check for the type, 
but whether the dataset has the right + # methods. Therefore we need to make sure it also has the attribute. + elif isinstance(eval_dataset, IterableDatasetShard) and getattr(eval_dataset, "num_examples", 0) > 0: + num_samples = eval_dataset.num_examples + else: + if has_length(dataloader): + num_samples = self.num_examples(dataloader) + else: # both len(dataloader.dataset) and len(dataloader) fail + num_samples = observed_num_examples + if num_samples == 0 and observed_num_examples > 0: + num_samples = observed_num_examples + + # Number of losses has been rounded to a multiple of batch_size and in a distributed training, the number of + # samplers has been rounded to a multiple of batch_size, so we truncate. + if all_losses is not None: + all_losses = all_losses[:num_samples] + if all_preds is not None: + all_preds = nested_truncate(all_preds, num_samples) + if all_labels is not None: + all_labels = nested_truncate(all_labels, num_samples) + if all_inputs is not None: + all_inputs = nested_truncate(all_inputs, num_samples) + + # Metrics! + if self.compute_metrics is not None and all_preds is not None and all_labels is not None: + if args.include_inputs_for_metrics: + metrics = self.compute_metrics( + EvalPrediction(predictions=all_preds, label_ids=all_labels, inputs=all_inputs) + ) + else: + metrics = self.compute_metrics(EvalPrediction(predictions=all_preds, label_ids=all_labels)) + else: + metrics = {} + + # To be JSON-serializable, we need to remove numpy types or zero-d tensors + metrics = denumpify_detensorize(metrics) + + if all_losses is not None: + metrics[f"{metric_key_prefix}_loss"] = all_losses.mean().item() + if hasattr(self, "jit_compilation_time"): + metrics[f"{metric_key_prefix}_jit_compilation_time"] = self.jit_compilation_time + + # Prefix all keys with metric_key_prefix + '_' + for key in list(metrics.keys()): + if not key.startswith(f"{metric_key_prefix}_"): + metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key) + + return EvalLoopOutput(predictions=all_preds, label_ids=all_labels, metrics=metrics, num_samples=num_samples) + + def _nested_gather(self, tensors, name=None): + """ + Gather value of `tensors` (tensor or list/tuple of nested tensors) and convert them to numpy before + concatenating them to `gathered` + """ + if tensors is None: + return + if is_torch_tpu_available(): + if name is None: + name = "nested_gather" + tensors = nested_xla_mesh_reduce(tensors, name) + elif is_sagemaker_mp_enabled(): + tensors = smp_gather(tensors) + elif self.args.local_rank != -1: + tensors = distributed_concat(tensors) + return tensors + + # Copied from Accelerate. + def _pad_across_processes(self, tensor, pad_index=-100): + """ + Recursively pad the tensors in a nested list/tuple/dictionary of tensors from all devices to the same size so + they can safely be gathered. + """ + if isinstance(tensor, (list, tuple)): + return type(tensor)(self._pad_across_processes(t, pad_index=pad_index) for t in tensor) + elif isinstance(tensor, dict): + return type(tensor)({k: self._pad_across_processes(v, pad_index=pad_index) for k, v in tensor.items()}) + elif not isinstance(tensor, torch.Tensor): + raise TypeError( + f"Can't pad the values of type {type(tensor)}, only of nested list/tuple/dicts of tensors." 
+            )
+
+        if len(tensor.shape) < 2:
+            return tensor
+        # Gather all sizes
+        size = torch.tensor(tensor.shape, device=tensor.device)[None]
+        sizes = self._nested_gather(size).cpu()
+
+        max_size = max(s[1] for s in sizes)
+        # When extracting XLA graphs for compilation, max_size is 0,
+        # so use inequality to avoid errors.
+        if tensor.shape[1] >= max_size:
+            return tensor
+
+        # Then pad to the maximum size
+        old_size = tensor.shape
+        new_size = list(old_size)
+        new_size[1] = max_size
+        new_tensor = tensor.new_zeros(tuple(new_size)) + pad_index
+        new_tensor[:, : old_size[1]] = tensor
+        return new_tensor
+
+    def prediction_step(
+        self,
+        model: nn.Module,
+        inputs: Dict[str, Union[torch.Tensor, Any]],
+        prediction_loss_only: bool,
+        ignore_keys: Optional[List[str]] = None,
+    ) -> Tuple[Optional[torch.Tensor], Optional[torch.Tensor], Optional[torch.Tensor]]:
+        """
+        Perform an evaluation step on `model` using `inputs`.
+
+        Subclass and override to inject custom behavior.
+
+        Args:
+            model (`nn.Module`):
+                The model to evaluate.
+            inputs (`Dict[str, Union[torch.Tensor, Any]]`):
+                The inputs and targets of the model.
+
+                The dictionary will be unpacked before being fed to the model. Most models expect the targets under the
+                argument `labels`. Check your model's documentation for all accepted arguments.
+            prediction_loss_only (`bool`):
+                Whether or not to return the loss only.
+            ignore_keys (`List[str]`, *optional*):
+                A list of keys in the output of your model (if it is a dictionary) that should be ignored when
+                gathering predictions.
+
+        Returns:
+            Tuple[Optional[torch.Tensor], Optional[torch.Tensor], Optional[torch.Tensor]]: A tuple with the loss,
+            logits and labels (each being optional).
+        """
+        has_labels = False if len(self.label_names) == 0 else all(inputs.get(k) is not None for k in self.label_names)
+        # For CLIP-like models capable of returning loss values.
+        # If `return_loss` is not specified or is `None` in `inputs`, we check if the default value of `return_loss`
+        # is `True` in `model.forward`.
+        return_loss = inputs.get("return_loss", None)
+        if return_loss is None:
+            return_loss = self.can_return_loss
+        loss_without_labels = True if len(self.label_names) == 0 and return_loss else False
+
+        inputs = self._prepare_inputs(inputs)
+        if ignore_keys is None:
+            if hasattr(self.model, "config"):
+                ignore_keys = getattr(self.model.config, "keys_to_ignore_at_inference", [])
+            else:
+                ignore_keys = []
+
+        # labels may be popped when computing the loss (label smoothing for instance) so we grab them first.
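+        # Grabbing them now (and detaching them via `nested_detach` below) keeps the
+        # labels available even if `compute_loss` later removes them from `inputs`.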
+ if has_labels or loss_without_labels: + labels = nested_detach(tuple(inputs.get(name) for name in self.label_names)) + if len(labels) == 1: + labels = labels[0] + else: + labels = None + + with torch.no_grad(): + if is_sagemaker_mp_enabled(): + raw_outputs = smp_forward_only(model, inputs) + if has_labels or loss_without_labels: + if isinstance(raw_outputs, dict): + loss_mb = raw_outputs["loss"] + logits_mb = tuple(v for k, v in raw_outputs.items() if k not in ignore_keys + ["loss"]) + else: + loss_mb = raw_outputs[0] + logits_mb = raw_outputs[1:] + + loss = loss_mb.reduce_mean().detach().cpu() + logits = smp_nested_concat(logits_mb) + else: + loss = None + if isinstance(raw_outputs, dict): + logits_mb = tuple(v for k, v in raw_outputs.items() if k not in ignore_keys) + else: + logits_mb = raw_outputs + logits = smp_nested_concat(logits_mb) + else: + if has_labels or loss_without_labels: + with self.compute_loss_context_manager(): + loss, outputs = self.compute_loss(model, inputs, return_outputs=True) + loss = loss.mean().detach() + + if isinstance(outputs, dict): + logits = tuple(v for k, v in outputs.items() if k not in ignore_keys + ["loss"]) + else: + logits = outputs[1:] + else: + loss = None + with self.compute_loss_context_manager(): + outputs = model(**inputs) + if isinstance(outputs, dict): + logits = tuple(v for k, v in outputs.items() if k not in ignore_keys) + else: + logits = outputs + # TODO: this needs to be fixed and made cleaner later. + if self.args.past_index >= 0: + self._past = outputs[self.args.past_index - 1] + + if prediction_loss_only: + return (loss, None, None) + + logits = nested_detach(logits) + if len(logits) == 1: + logits = logits[0] + + return (loss, logits, labels) + + def floating_point_ops(self, inputs: Dict[str, Union[torch.Tensor, Any]]): + """ + For models that inherit from [`PreTrainedModel`], uses that method to compute the number of floating point + operations for every backward + forward pass. If using another model, either implement such a method in the + model or subclass and override this method. + + Args: + inputs (`Dict[str, Union[torch.Tensor, Any]]`): + The inputs and targets of the model. + + Returns: + `int`: The number of floating-point operations. + """ + if hasattr(self.model, "floating_point_ops"): + return self.model.floating_point_ops(inputs) + else: + return 0 + + def init_git_repo(self, at_init: bool = False): + """ + Initializes a git repo in `self.args.hub_model_id`. + + Args: + at_init (`bool`, *optional*, defaults to `False`): + Whether this function is called before any training or not. If `self.args.overwrite_output_dir` is + `True` and `at_init` is `True`, the path to the repo (which is `self.args.output_dir`) might be wiped + out. + """ + if not self.is_world_process_zero(): + return + if self.args.hub_model_id is None: + repo_name = Path(self.args.output_dir).absolute().name + else: + repo_name = self.args.hub_model_id + if "/" not in repo_name: + repo_name = get_full_repo_name(repo_name, token=self.args.hub_token) + + # Make sure the repo exists. 
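+        # (`exist_ok=True` below makes this call a no-op when the repo already exists.)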
+ create_repo(repo_name, token=self.args.hub_token, private=self.args.hub_private_repo, exist_ok=True) + try: + self.repo = Repository(self.args.output_dir, clone_from=repo_name, token=self.args.hub_token) + except EnvironmentError: + if self.args.overwrite_output_dir and at_init: + # Try again after wiping output_dir + shutil.rmtree(self.args.output_dir) + self.repo = Repository(self.args.output_dir, clone_from=repo_name, token=self.args.hub_token) + else: + raise + + self.repo.git_pull() + + # By default, ignore the checkpoint folders + if ( + not os.path.exists(os.path.join(self.args.output_dir, ".gitignore")) + and self.args.hub_strategy != HubStrategy.ALL_CHECKPOINTS + ): + with open(os.path.join(self.args.output_dir, ".gitignore"), "w", encoding="utf-8") as writer: + writer.writelines(["checkpoint-*/"]) + + # Add "*.sagemaker" to .gitignore if using SageMaker + if os.environ.get("SM_TRAINING_ENV"): + self._add_sm_patterns_to_gitignore() + + self.push_in_progress = None + + def create_model_card( + self, + language: Optional[str] = None, + license: Optional[str] = None, + tags: Union[str, List[str], None] = None, + model_name: Optional[str] = None, + finetuned_from: Optional[str] = None, + tasks: Union[str, List[str], None] = None, + dataset_tags: Union[str, List[str], None] = None, + dataset: Union[str, List[str], None] = None, + dataset_args: Union[str, List[str], None] = None, + ): + """ + Creates a draft of a model card using the information available to the `Trainer`. + + Args: + language (`str`, *optional*): + The language of the model (if applicable) + license (`str`, *optional*): + The license of the model. Will default to the license of the pretrained model used, if the original + model given to the `Trainer` comes from a repo on the Hub. + tags (`str` or `List[str]`, *optional*): + Some tags to be included in the metadata of the model card. + model_name (`str`, *optional*): + The name of the model. + finetuned_from (`str`, *optional*): + The name of the model used to fine-tune this one (if applicable). Will default to the name of the repo + of the original model given to the `Trainer` (if it comes from the Hub). + tasks (`str` or `List[str]`, *optional*): + One or several task identifiers, to be included in the metadata of the model card. + dataset_tags (`str` or `List[str]`, *optional*): + One or several dataset tags, to be included in the metadata of the model card. + dataset (`str` or `List[str]`, *optional*): + One or several dataset identifiers, to be included in the metadata of the model card. + dataset_args (`str` or `List[str]`, *optional*): + One or several dataset arguments, to be included in the metadata of the model card. + """ + if not self.is_world_process_zero(): + return + + training_summary = TrainingSummary.from_trainer( + self, + language=language, + license=license, + tags=tags, + model_name=model_name, + finetuned_from=finetuned_from, + tasks=tasks, + dataset_tags=dataset_tags, + dataset=dataset, + dataset_args=dataset_args, + ) + model_card = training_summary.to_model_card() + with open(os.path.join(self.args.output_dir, "README.md"), "w") as f: + f.write(model_card) + + def _push_from_checkpoint(self, checkpoint_folder): + # Only push from one node. + if not self.is_world_process_zero() or self.args.hub_strategy == HubStrategy.END: + return + # If we haven't finished the last push, we don't do this one. 
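+        # (Starting another push here would race with the one still in flight.)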
+ if self.push_in_progress is not None and not self.push_in_progress.is_done: + return + + output_dir = self.args.output_dir + # To avoid a new synchronization of all model weights, we just copy the file from the checkpoint folder + modeling_files = [CONFIG_NAME, WEIGHTS_NAME] + for modeling_file in modeling_files: + if os.path.isfile(os.path.join(checkpoint_folder, modeling_file)): + shutil.copy(os.path.join(checkpoint_folder, modeling_file), os.path.join(output_dir, modeling_file)) + # Saving the tokenizer is fast and we don't know how many files it may have spawned, so we resave it to be sure. + if self.tokenizer is not None: + self.tokenizer.save_pretrained(output_dir) + # Same for the training arguments + torch.save(self.args, os.path.join(output_dir, TRAINING_ARGS_NAME)) + + try: + if self.args.hub_strategy == HubStrategy.CHECKPOINT: + # Temporarily move the checkpoint just saved for the push + tmp_checkpoint = os.path.join(output_dir, "last-checkpoint") + # We have to remove the "last-checkpoint" dir if it exists, otherwise the checkpoint is moved as a + # subfolder. + if os.path.isdir(tmp_checkpoint): + shutil.rmtree(tmp_checkpoint) + shutil.move(checkpoint_folder, tmp_checkpoint) + + if self.args.save_strategy == IntervalStrategy.STEPS: + commit_message = f"Training in progress, step {self.state.global_step}" + else: + commit_message = f"Training in progress, epoch {int(self.state.epoch)}" + _, self.push_in_progress = self.repo.push_to_hub( + commit_message=commit_message, blocking=False, auto_lfs_prune=True + ) + finally: + if self.args.hub_strategy == HubStrategy.CHECKPOINT: + # Move back the checkpoint to its place + shutil.move(tmp_checkpoint, checkpoint_folder) + + def push_to_hub(self, commit_message: Optional[str] = "End of training", blocking: bool = True, **kwargs) -> str: + """ + Upload *self.model* and *self.tokenizer* to the 🤗 model hub on the repo *self.args.hub_model_id*. + + Parameters: + commit_message (`str`, *optional*, defaults to `"End of training"`): + Message to commit while pushing. + blocking (`bool`, *optional*, defaults to `True`): + Whether the function should return only when the `git push` has finished. + kwargs: + Additional keyword arguments passed along to [`~Trainer.create_model_card`]. + + Returns: + The url of the commit of your model in the given repository if `blocking=False`, a tuple with the url of + the commit and an object to track the progress of the commit if `blocking=True` + """ + # If a user calls manually `push_to_hub` with `self.args.push_to_hub = False`, we try to create the repo but + # it might fail. + if not hasattr(self, "repo"): + self.init_git_repo() + + model_name = kwargs.pop("model_name", None) + if model_name is None and self.args.should_save: + if self.args.hub_model_id is None: + model_name = Path(self.args.output_dir).name + else: + model_name = self.args.hub_model_id.split("/")[-1] + + # Needs to be executed on all processes for TPU training, but will only save on the processed determined by + # self.args.should_save. + self.save_model(_internal_call=True) + + # Only push from one node. + if not self.is_world_process_zero(): + return + + # Cancel any async push in progress if blocking=True. The commits will all be pushed together. 
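+        # (The commits are still in the local clone, so they are picked up by the
+        # blocking push below.)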
+        if blocking and self.push_in_progress is not None and not self.push_in_progress.is_done:
+            self.push_in_progress._process.kill()
+            self.push_in_progress = None
+
+        git_head_commit_url = self.repo.push_to_hub(
+            commit_message=commit_message, blocking=blocking, auto_lfs_prune=True
+        )
+        # push the model card separately to be independent from the rest of the model
+        if self.args.should_save:
+            self.create_model_card(model_name=model_name, **kwargs)
+            try:
+                self.repo.push_to_hub(
+                    commit_message="update model card README.md", blocking=blocking, auto_lfs_prune=True
+                )
+            except EnvironmentError as exc:
+                logger.error(f"Error pushing update to the model card. Please read logs and retry.\n{exc}")
+
+        return git_head_commit_url
+
+    #
+    # Deprecated code
+    #
+
+    def prediction_loop(
+        self,
+        dataloader: DataLoader,
+        description: str,
+        prediction_loss_only: Optional[bool] = None,
+        ignore_keys: Optional[List[str]] = None,
+        metric_key_prefix: str = "eval",
+    ) -> EvalLoopOutput:
+        """
+        Prediction/evaluation loop, shared by `Trainer.evaluate()` and `Trainer.predict()`.
+
+        Works both with and without labels.
+        """
+        args = self.args
+
+        if not has_length(dataloader):
+            raise ValueError("dataloader must implement a working __len__")
+
+        prediction_loss_only = prediction_loss_only if prediction_loss_only is not None else args.prediction_loss_only
+
+        # if eval is called w/o train init deepspeed here
+        if args.deepspeed and not self.deepspeed:
+            # XXX: eval doesn't have `resume_from_checkpoint` arg but we should be able to do eval
+            # from the checkpoint eventually
+            deepspeed_engine, _, _ = deepspeed_init(self, num_training_steps=0, resume_from_checkpoint=None)
+            self.model = deepspeed_engine.module
+            self.model_wrapped = deepspeed_engine
+            self.deepspeed = deepspeed_engine
+            # XXX: we don't need optim/sched for inference, but this needs to be sorted out, since
+            # for example the Z3-optimizer is a must for zero3 to work even for inference - what we
+            # don't need is the deepspeed basic optimizer which is self.optimizer.optimizer
+            deepspeed_engine.optimizer.optimizer = None
+            deepspeed_engine.lr_scheduler = None
+
+        model = self._wrap_model(self.model, training=False, dataloader=dataloader)
+
+        # if full fp16 or bf16 eval is wanted and this ``evaluation`` or ``predict`` isn't called
+        # while ``train`` is running, cast it to the right dtype first and then put on device
+        if not self.is_in_train:
+            if args.fp16_full_eval:
+                model = model.to(dtype=torch.float16, device=args.device)
+            elif args.bf16_full_eval:
+                model = model.to(dtype=torch.bfloat16, device=args.device)
+
+        batch_size = dataloader.batch_size
+        num_examples = self.num_examples(dataloader)
+        logger.info(f"***** Running {description} *****")
+        logger.info(f"  Num examples = {num_examples}")
+        logger.info(f"  Batch size = {batch_size}")
+        losses_host: torch.Tensor = None
+        preds_host: Union[torch.Tensor, List[torch.Tensor]] = None
+        labels_host: Union[torch.Tensor, List[torch.Tensor]] = None
+        inputs_host: Union[torch.Tensor, List[torch.Tensor]] = None
+
+        world_size = max(1, args.world_size)
+
+        eval_losses_gatherer = DistributedTensorGatherer(world_size, num_examples, make_multiple_of=batch_size)
+        if not prediction_loss_only:
+            # The actual number of eval_samples can be greater than num_examples in distributed settings (when we pass
+            # a batch size to the sampler)
+            make_multiple_of = None
+            if hasattr(dataloader, "sampler") and isinstance(dataloader.sampler, SequentialDistributedSampler):
+                make_multiple_of = 
dataloader.sampler.batch_size + preds_gatherer = DistributedTensorGatherer(world_size, num_examples, make_multiple_of=make_multiple_of) + labels_gatherer = DistributedTensorGatherer(world_size, num_examples, make_multiple_of=make_multiple_of) + inputs_gatherer = DistributedTensorGatherer(world_size, num_examples, make_multiple_of=make_multiple_of) + + model.eval() + + if is_torch_tpu_available(): + dataloader = pl.ParallelLoader(dataloader, [args.device]).per_device_loader(args.device) + + if args.past_index >= 0: + self._past = None + + self.callback_handler.eval_dataloader = dataloader + + for step, inputs in enumerate(dataloader): + loss, logits, labels = self.prediction_step(model, inputs, prediction_loss_only, ignore_keys=ignore_keys) + inputs_decode = self._prepare_input(inputs["input_ids"]) if args.include_inputs_for_metrics else None + + if loss is not None: + losses = loss.repeat(batch_size) + losses_host = losses if losses_host is None else torch.cat((losses_host, losses), dim=0) + if logits is not None: + preds_host = logits if preds_host is None else nested_concat(preds_host, logits, padding_index=-100) + if labels is not None: + labels_host = labels if labels_host is None else nested_concat(labels_host, labels, padding_index=-100) + if inputs_decode is not None: + inputs_host = ( + inputs_decode + if inputs_host is None + else nested_concat(inputs_host, inputs_decode, padding_index=-100) + ) + self.control = self.callback_handler.on_prediction_step(args, self.state, self.control) + + # Gather all tensors and put them back on the CPU if we have done enough accumulation steps. + if args.eval_accumulation_steps is not None and (step + 1) % args.eval_accumulation_steps == 0: + eval_losses_gatherer.add_arrays(self._gather_and_numpify(losses_host, "eval_losses")) + if not prediction_loss_only: + preds_gatherer.add_arrays(self._gather_and_numpify(preds_host, "eval_preds")) + labels_gatherer.add_arrays(self._gather_and_numpify(labels_host, "eval_label_ids")) + inputs_gatherer.add_arrays(self._gather_and_numpify(inputs_host, "eval_inputs_ids")) + + # Set back to None to begin a new accumulation + losses_host, preds_host, labels_host, inputs_host = None, None, None, None + + if args.past_index and hasattr(self, "_past"): + # Clean the state at the end of the evaluation loop + delattr(self, "_past") + + # Gather all remaining tensors and put them back on the CPU + eval_losses_gatherer.add_arrays(self._gather_and_numpify(losses_host, "eval_losses")) + if not prediction_loss_only: + preds_gatherer.add_arrays(self._gather_and_numpify(preds_host, "eval_preds")) + labels_gatherer.add_arrays(self._gather_and_numpify(labels_host, "eval_label_ids")) + inputs_gatherer.add_arrays(self._gather_and_numpify(inputs_host, "eval_inputs_ids")) + + eval_loss = eval_losses_gatherer.finalize() + preds = preds_gatherer.finalize() if not prediction_loss_only else None + label_ids = labels_gatherer.finalize() if not prediction_loss_only else None + inputs_ids = inputs_gatherer.finalize() if not prediction_loss_only else None + + if self.compute_metrics is not None and preds is not None and label_ids is not None: + if args.include_inputs_for_metrics: + metrics = self.compute_metrics( + EvalPrediction(predictions=preds, label_ids=label_ids, inputs=inputs_ids) + ) + else: + metrics = self.compute_metrics(EvalPrediction(predictions=preds, label_ids=label_ids)) + else: + metrics = {} + + # To be JSON-serializable, we need to remove numpy types or zero-d tensors + metrics = denumpify_detensorize(metrics) + + if 
eval_loss is not None: + metrics[f"{metric_key_prefix}_loss"] = eval_loss.mean().item() + + # Prefix all keys with metric_key_prefix + '_' + for key in list(metrics.keys()): + if not key.startswith(f"{metric_key_prefix}_"): + metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key) + + return EvalLoopOutput(predictions=preds, label_ids=label_ids, metrics=metrics, num_samples=num_examples) + + def _gather_and_numpify(self, tensors, name): + """ + Gather value of `tensors` (tensor or list/tuple of nested tensors) and convert them to numpy before + concatenating them to `gathered` + """ + if tensors is None: + return + if is_torch_tpu_available(): + tensors = nested_xla_mesh_reduce(tensors, name) + elif is_sagemaker_mp_enabled(): + tensors = smp_gather(tensors) + elif self.args.local_rank != -1: + tensors = distributed_concat(tensors) + + return nested_numpify(tensors) + + def _add_sm_patterns_to_gitignore(self) -> None: + """Add SageMaker Checkpointing patterns to .gitignore file.""" + # Make sure we only do this on the main process + if not self.is_world_process_zero(): + return + + patterns = ["*.sagemaker-uploading", "*.sagemaker-uploaded"] + + # Get current .gitignore content + if os.path.exists(os.path.join(self.repo.local_dir, ".gitignore")): + with open(os.path.join(self.repo.local_dir, ".gitignore"), "r") as f: + current_content = f.read() + else: + current_content = "" + + # Add the patterns to .gitignore + content = current_content + for pattern in patterns: + if pattern not in content: + if content.endswith("\n"): + content += pattern + else: + content += f"\n{pattern}" + + # Write the .gitignore file if it has changed + if content != current_content: + with open(os.path.join(self.repo.local_dir, ".gitignore"), "w") as f: + logger.debug(f"Writing .gitignore file. 
Content: {content}") + f.write(content) + + self.repo.git_add(".gitignore") + + # avoid race condition with git status + time.sleep(0.5) + + if not self.repo.is_repo_clean(): + self.repo.git_commit("Add *.sagemaker patterns to .gitignore.") + self.repo.git_push() diff --git a/ptuning/trainer_seq2seq.py b/ptuning/trainer_seq2seq.py index 518daa0..19d5cf1 100644 --- a/ptuning/trainer_seq2seq.py +++ b/ptuning/trainer_seq2seq.py @@ -19,7 +19,7 @@ from torch import nn from torch.utils.data import Dataset from transformers.deepspeed import is_deepspeed_zero3_enabled -from transformers.trainer import Trainer +from trainer import Trainer from transformers.trainer_utils import PredictionOutput from transformers.utils import logging From 47a5ec121eb2c092a6347b503eb9f2540595a963 Mon Sep 17 00:00:00 2001 From: rainatam Date: Mon, 10 Apr 2023 17:28:27 +0800 Subject: [PATCH 080/110] Add deepspeed finetuning scripts --- ptuning/arguments.py | 3 +++ ptuning/deepspeed.json | 21 +++++++++++++++++++++ ptuning/ds_train_finetune.sh | 29 +++++++++++++++++++++++++++++ 3 files changed, 53 insertions(+) create mode 100644 ptuning/deepspeed.json create mode 100644 ptuning/ds_train_finetune.sh diff --git a/ptuning/arguments.py b/ptuning/arguments.py index f9310da..fda1f35 100644 --- a/ptuning/arguments.py +++ b/ptuning/arguments.py @@ -11,6 +11,9 @@ class ModelArguments: model_name_or_path: str = field( metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} ) + ptuning_checkpoint: str = field( + default=None, metadata={"help": "Path to p-tuning v2 checkpoints"} + ) config_name: Optional[str] = field( default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"} ) diff --git a/ptuning/deepspeed.json b/ptuning/deepspeed.json new file mode 100644 index 0000000..8e45509 --- /dev/null +++ b/ptuning/deepspeed.json @@ -0,0 +1,21 @@ +{ + "train_micro_batch_size_per_gpu": "auto", + "zero_allow_untested_optimizer": true, + "fp16": { + "enabled": "auto", + "loss_scale": 0, + "initial_scale_power": 16, + "loss_scale_window": 1000, + "hysteresis": 2, + "min_loss_scale": 1 + }, + "zero_optimization": { + "stage": 2, + "allgather_partitions": true, + "allgather_bucket_size": 5e8, + "overlap_comm": false, + "reduce_scatter": true, + "reduce_bucket_size": 5e8, + "contiguous_gradients" : true + } +} \ No newline at end of file diff --git a/ptuning/ds_train_finetune.sh b/ptuning/ds_train_finetune.sh new file mode 100644 index 0000000..d768d80 --- /dev/null +++ b/ptuning/ds_train_finetune.sh @@ -0,0 +1,29 @@ + +LR=1e-4 + +MASTER_PORT=$(shuf -n 1 -i 10000-65535) +MASTER_PORT=50003 + +deepspeed --num_gpus=3 --master_port $MASTER_PORT main.py \ + --deepspeed deepspeed.json \ + --do_train \ + --train_file AdvertiseGen/train.json \ + --test_file AdvertiseGen/dev.json \ + --prompt_column content \ + --response_column summary \ + --overwrite_cache \ + --model_name_or_path THUDM/chatglm-6b \ + --output_dir ./output/adgen-chatglm-6b-ft-$LR \ + --overwrite_output_dir \ + --max_source_length 64 \ + --max_target_length 64 \ + --per_device_train_batch_size 4 \ + --per_device_eval_batch_size 1 \ + --gradient_accumulation_steps 4 \ + --predict_with_generate \ + --max_steps 5000 \ + --logging_steps 10 \ + --save_steps 1000 \ + --learning_rate $LR \ + --fp16 + From 2a5250ffcb358023fb362761ad13b75ea72e9c0a Mon Sep 17 00:00:00 2001 From: rainatam Date: Mon, 10 Apr 2023 18:32:40 +0800 Subject: [PATCH 081/110] Update trainer --- ptuning/trainer.py | 11 +++++------ 1 file changed, 5 
insertions(+), 6 deletions(-) diff --git a/ptuning/trainer.py b/ptuning/trainer.py index c49944f..bbaa9db 100644 --- a/ptuning/trainer.py +++ b/ptuning/trainer.py @@ -2825,12 +2825,11 @@ class Trainer: state_dict = self.model.state_dict() torch.save(state_dict, os.path.join(output_dir, WEIGHTS_NAME)) else: - # state_dict = self.model.state_dict() - # filtered_state_dict = {} - # for k, v in self.model.named_parameters(): - # if v.requires_grad: - # filtered_state_dict[k] = state_dict[k] - # print(filtered_state_dict.keys()) + state_dict = self.model.state_dict() + filtered_state_dict = {} + for k, v in self.model.named_parameters(): + if v.requires_grad: + filtered_state_dict[k] = state_dict[k] self.model.save_pretrained(output_dir, state_dict=state_dict) if self.tokenizer is not None: self.tokenizer.save_pretrained(output_dir) From 2073ac75d42f4aef6018e5f40c52fc0dac940e48 Mon Sep 17 00:00:00 2001 From: rainatam Date: Mon, 10 Apr 2023 18:43:17 +0800 Subject: [PATCH 082/110] Update README --- ptuning/README.md | 41 ++++++++++++++++++++++++++++++++--------- 1 file changed, 32 insertions(+), 9 deletions(-) diff --git a/ptuning/README.md b/ptuning/README.md index f192718..d7d1d07 100644 --- a/ptuning/README.md +++ b/ptuning/README.md @@ -25,6 +25,9 @@ ADGEN 数据集任务为根据输入(content)生成一段广告词(summary 从 [Google Drive](https://drive.google.com/file/d/13_vf0xRTQsyneRKdD1bZIr93vBGOczrk/view?usp=sharing) 或者 [Tsinghua Cloud](https://cloud.tsinghua.edu.cn/f/b3f119a008264b1cabd1/?dl=1) 下载处理好的 ADGEN 数据集,将解压后的 `AdvertiseGen` 目录放到本目录下。 ### 训练 + +#### P-tuning v2 + 运行以下指令进行训练: ```shell bash train.sh @@ -33,6 +36,14 @@ bash train.sh 在默认配置 `quantization_bit=4`、`per_device_train_batch_size=1`、`gradient_accumulation_steps=16` 下,INT4 的模型参数被冻结,一次训练迭代会以 1 的批处理大小进行 16 次累加的前后向传播,等效为 16 的总批处理大小,此时最低只需 6.7G 显存。若想在同等批处理大小下提升训练效率,可在二者乘积不变的情况下,加大 `per_device_train_batch_size` 的值,但也会带来更多的显存消耗,请根据实际情况酌情调整。 +#### Finetune + +需要安装 [Deepspeed](https://github.com/microsoft/DeepSpeed)。如果需要进行全参数的 Finetune,可以运行以下指令(如果需要多卡运行,也可以参考): + +``` +bash ds_train_finetune.sh +``` + ### 推理 将 `evaluate.sh` 中的 `CHECKPOINT` 更改为训练时保存的 checkpoint 名称,运行以下指令进行模型推理和评测: @@ -71,13 +82,13 @@ bash evaluate.sh ### 评估结果 -| | P-tuning v2 | LoRA | -| ------------- | ----------- | ----- | -| BLEU-4 | 7.78 | 6.25 | -| Rouge-1 | 31.34 | 28.58 | -| Rouge-2 | 7.34 | 4.42 | -| Rouge-l | 25.26 | 17.56 | -| Training Loss | 3.80 | 3.36 | +| | P-tuning v2 | LoRA | Finetune | +| ------------- | ----------- | ----- | ------------- | +| BLEU-4 | 7.78 | 6.25 | 7.92 | +| Rouge-1 | 31.34 | 28.58 | 30.97 | +| Rouge-2 | 7.34 | 4.42 | 7.16 | +| Rouge-l | 25.26 | 17.56 | 25.04 | +| Training Loss | 3.80 | 3.36 | 10.34 | @@ -86,8 +97,6 @@ bash evaluate.sh ``` max_source_length=64 max_target_length=64 -per_device_train_batch_size=1 -gradient_accumulation_steps=16 max_steps=3000 ``` @@ -97,16 +106,30 @@ max_steps=3000 pre_seq_len=128 learning_rate=2e-2 quantization_bit=4 +per_device_train_batch_size=1 +gradient_accumulation_steps=16 ``` ##### LoRA ``` learning_rate=5e-4 +per_device_train_batch_size=1 +gradient_accumulation_steps=16 ``` 实现采用的是 [simple_thu_chatglm6b](https://github.com/yuanzhoulvpi2017/zero_nlp/tree/main/simple_thu_chatglm6b) +##### Finetune + +``` +learning_rate=1e-4 +fp16 +num_gpus=3 +per_device_train_batch_size=4 +gradient_accumulation_steps=4 +``` + ## 模型部署 From 173ccd8d27d593372a9fe34c2815be1aa580fc53 Mon Sep 17 00:00:00 2001 From: rainatam Date: Mon, 10 Apr 2023 19:40:21 +0800 Subject: [PATCH 083/110] Update trainer --- ptuning/trainer.py | 2 +- 1 file changed, 1 
insertion(+), 1 deletion(-) diff --git a/ptuning/trainer.py b/ptuning/trainer.py index bbaa9db..5a9a27b 100644 --- a/ptuning/trainer.py +++ b/ptuning/trainer.py @@ -2830,7 +2830,7 @@ class Trainer: for k, v in self.model.named_parameters(): if v.requires_grad: filtered_state_dict[k] = state_dict[k] - self.model.save_pretrained(output_dir, state_dict=state_dict) + self.model.save_pretrained(output_dir, state_dict=filtered_state_dict) if self.tokenizer is not None: self.tokenizer.save_pretrained(output_dir) From ec5f258de9ec5baba87220ee2a21c811634a170c Mon Sep 17 00:00:00 2001 From: rainatam Date: Mon, 10 Apr 2023 22:31:08 +0800 Subject: [PATCH 084/110] Update evaluation results --- ptuning/README.md | 45 ++++++++++++++++++------------------ ptuning/ds_train_finetune.sh | 4 ++-- 2 files changed, 25 insertions(+), 24 deletions(-) diff --git a/ptuning/README.md b/ptuning/README.md index d7d1d07..ec94a13 100644 --- a/ptuning/README.md +++ b/ptuning/README.md @@ -38,9 +38,9 @@ bash train.sh #### Finetune -需要安装 [Deepspeed](https://github.com/microsoft/DeepSpeed)。如果需要进行全参数的 Finetune,可以运行以下指令(如果需要多卡运行,也可以参考): +如果需要进行全参数的 Finetune,需要安装 [Deepspeed](https://github.com/microsoft/DeepSpeed),然后运行以下指令: -``` +```shell bash ds_train_finetune.sh ``` @@ -50,7 +50,7 @@ bash ds_train_finetune.sh ```shell bash evaluate.sh ``` -**[2023/04/10更新]** 在 P-tuning v2 训练时模型只保存 PrefixEncoder 部分的参数,在推理时需要同时载入原 ChatGLM-6B 模型以及 PrefixEncoder 的 Checkpoint,因此需要指定参数(已更新 `evaluate.sh`) : +**[2023/04/10更新]** 在 P-tuning v2 训练时模型只保存 PrefixEncoder 部分的参数,所以在推理时需要同时加载原 ChatGLM-6B 模型以及 PrefixEncoder 的权重,因此需要指定参数(已更新 `evaluate.sh`) : ```shell --model_name_or_path THUDM/chatglm-6b @@ -82,13 +82,13 @@ bash evaluate.sh ### 评估结果 -| | P-tuning v2 | LoRA | Finetune | +| | Finetune | P-tuning v2 | LoRA | | ------------- | ----------- | ----- | ------------- | -| BLEU-4 | 7.78 | 6.25 | 7.92 | -| Rouge-1 | 31.34 | 28.58 | 30.97 | -| Rouge-2 | 7.34 | 4.42 | 7.16 | -| Rouge-l | 25.26 | 17.56 | 25.04 | -| Training Loss | 3.80 | 3.36 | 10.34 | +| BLEU-4 | 8.01 | 8.10 | | +| Rouge-1 | 31.23 | 31.12 | | +| Rouge-2 | 7.36 | 7.11 | | +| Rouge-l | 25.08 | 24.97 | | +| Training Loss | 3.00 | 3.74 | 3.319 | @@ -106,28 +106,28 @@ max_steps=3000 pre_seq_len=128 learning_rate=2e-2 quantization_bit=4 -per_device_train_batch_size=1 -gradient_accumulation_steps=16 +per_device_train_batch_size=16 +gradient_accumulation_steps=1 ``` -##### LoRA +##### Finetune ``` -learning_rate=5e-4 -per_device_train_batch_size=1 -gradient_accumulation_steps=16 +learning_rate=1e-4 +fp16 +num_gpus=4 +per_device_train_batch_size=4 +gradient_accumulation_steps=1 ``` -实现采用的是 [simple_thu_chatglm6b](https://github.com/yuanzhoulvpi2017/zero_nlp/tree/main/simple_thu_chatglm6b) +##### LoRA -##### Finetune +实现采用的是 [simple_thu_chatglm6b](https://github.com/yuanzhoulvpi2017/zero_nlp/tree/main/simple_thu_chatglm6b) ``` -learning_rate=1e-4 -fp16 -num_gpus=3 -per_device_train_batch_size=4 -gradient_accumulation_steps=4 +learning_rate=5e-4 +per_device_train_batch_size=1 +gradient_accumulation_steps=16 ``` @@ -207,3 +207,4 @@ bash train_chat.sh } ``` + diff --git a/ptuning/ds_train_finetune.sh b/ptuning/ds_train_finetune.sh index d768d80..92a69ee 100644 --- a/ptuning/ds_train_finetune.sh +++ b/ptuning/ds_train_finetune.sh @@ -4,7 +4,7 @@ LR=1e-4 MASTER_PORT=$(shuf -n 1 -i 10000-65535) MASTER_PORT=50003 -deepspeed --num_gpus=3 --master_port $MASTER_PORT main.py \ +deepspeed --num_gpus=4 --master_port $MASTER_PORT main.py \ --deepspeed deepspeed.json \ --do_train \ --train_file 
AdvertiseGen/train.json \ @@ -19,7 +19,7 @@ deepspeed --num_gpus=3 --master_port $MASTER_PORT main.py \ --max_target_length 64 \ --per_device_train_batch_size 4 \ --per_device_eval_batch_size 1 \ - --gradient_accumulation_steps 4 \ + --gradient_accumulation_steps 1 \ --predict_with_generate \ --max_steps 5000 \ --logging_steps 10 \ From 166a6e70f13d462305fcae91beee8998b2c49028 Mon Sep 17 00:00:00 2001 From: rainatam Date: Tue, 11 Apr 2023 01:04:03 +0800 Subject: [PATCH 085/110] Update LoRA evaluation results --- ptuning/README.md | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/ptuning/README.md b/ptuning/README.md index ec94a13..321d701 100644 --- a/ptuning/README.md +++ b/ptuning/README.md @@ -84,10 +84,10 @@ bash evaluate.sh | | Finetune | P-tuning v2 | LoRA | | ------------- | ----------- | ----- | ------------- | -| BLEU-4 | 8.01 | 8.10 | | -| Rouge-1 | 31.23 | 31.12 | | -| Rouge-2 | 7.36 | 7.11 | | -| Rouge-l | 25.08 | 24.97 | | +| BLEU-4 | 8.01 | 8.10 | 7.62 | +| Rouge-1 | 31.23 | 31.12 | 30.60 | +| Rouge-2 | 7.36 | 7.11 | 6.96 | +| Rouge-l | 25.08 | 24.97 | 24.80 | | Training Loss | 3.00 | 3.74 | 3.319 | @@ -193,7 +193,7 @@ bash train_chat.sh ## TODO * [x] Support for chat data -* [ ] Support for full finetuning +* [x] Support for full finetuning ## 引用 From 75aa887c2044e2ca4e8313076db8d011d870b29d Mon Sep 17 00:00:00 2001 From: rainatam Date: Tue, 11 Apr 2023 01:06:11 +0800 Subject: [PATCH 086/110] Update LoRA evaluation results --- ptuning/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ptuning/README.md b/ptuning/README.md index 321d701..7bbf05f 100644 --- a/ptuning/README.md +++ b/ptuning/README.md @@ -88,7 +88,7 @@ bash evaluate.sh | Rouge-1 | 31.23 | 31.12 | 30.60 | | Rouge-2 | 7.36 | 7.11 | 6.96 | | Rouge-l | 25.08 | 24.97 | 24.80 | -| Training Loss | 3.00 | 3.74 | 3.319 | +| Training Loss | 3.00 | 3.74 | 3.32 | From 0c2806fea82683349194e21996dd6b3acc3c265b Mon Sep 17 00:00:00 2001 From: rainatam Date: Tue, 11 Apr 2023 13:48:54 +0800 Subject: [PATCH 087/110] Fix typo --- ptuning/README.md | 4 ++-- ptuning/ds_train_finetune.sh | 1 - 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/ptuning/README.md b/ptuning/README.md index 7bbf05f..bd8d335 100644 --- a/ptuning/README.md +++ b/ptuning/README.md @@ -126,8 +126,8 @@ gradient_accumulation_steps=1 ``` learning_rate=5e-4 -per_device_train_batch_size=1 -gradient_accumulation_steps=16 +per_device_train_batch_size=16 +gradient_accumulation_steps=1 ``` diff --git a/ptuning/ds_train_finetune.sh b/ptuning/ds_train_finetune.sh index 92a69ee..531a800 100644 --- a/ptuning/ds_train_finetune.sh +++ b/ptuning/ds_train_finetune.sh @@ -2,7 +2,6 @@ LR=1e-4 MASTER_PORT=$(shuf -n 1 -i 10000-65535) -MASTER_PORT=50003 deepspeed --num_gpus=4 --master_port $MASTER_PORT main.py \ --deepspeed deepspeed.json \ From 1d87dac585c8fafd708db16860b628928ec5a821 Mon Sep 17 00:00:00 2001 From: nczkevin Date: Tue, 11 Apr 2023 17:36:02 +0800 Subject: [PATCH 088/110] docs: Update PROJECT --- PROJECT.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/PROJECT.md b/PROJECT.md index c529b1e..59f34ff 100644 --- a/PROJECT.md +++ b/PROJECT.md @@ -10,6 +10,7 @@ * [JittorLLMs](https://github.com/Jittor/JittorLLMs):最低3G显存或者没有显卡都可运行 ChatGLM-6B FP16, 支持Linux、windows、Mac部署 * [ChatGLM-Finetuning](https://github.com/liucongg/ChatGLM-Finetuning):基于ChatGLM-6B模型,进行下游具体任务微调,涉及Freeze、Lora、P-tuning等,并进行实验效果对比。 * 
[InstructGLM](https://github.com/yanqiangmiffy/InstructGLM):基于ChatGLM-6B进行指令学习,汇总开源中英文指令数据,基于Lora进行指令数据微调,开放了Alpaca、Belle微调后的Lora权重,修复web_demo重复问题 +* [ChatGLM-web](https://github.com/NCZkevin/chatglm-web):基于FastAPI和Vue3搭建的ChatGLM演示网站(支持chatglm流式输出、前端调整模型参数、上下文选择、保存图片、知识库问答等功能) 以下是部分针对本项目的教程/文档: -* [Windows部署文档](https://github.com/ZhangErling/ChatGLM-6B/blob/main/deployment_windows.md) \ No newline at end of file +* [Windows部署文档](https://github.com/ZhangErling/ChatGLM-6B/blob/main/deployment_windows.md) From 79e4d8ba8a199b977a91138edeff963ee34c9912 Mon Sep 17 00:00:00 2001 From: duzx16 Date: Wed, 12 Apr 2023 09:49:14 +0800 Subject: [PATCH 089/110] Remove todo --- ptuning/README.md | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/ptuning/README.md b/ptuning/README.md index 7bbf05f..f6f90e6 100644 --- a/ptuning/README.md +++ b/ptuning/README.md @@ -133,7 +133,7 @@ gradient_accumulation_steps=16 ## 模型部署 -将对应的demo或代码中的`THUDM/chatglm-6b`换成经过 P-Tuning 微调之后 checkpoint 的地址(在示例中为 `./output/adgen-chatglm-6b-pt-8-1e-2/checkpoint-3000`)。注意,目前的微调还不支持多轮数据,所以只有对话第一轮的回复是经过微调的。 +将对应的demo或代码中的`THUDM/chatglm-6b`换成经过 P-Tuning 微调之后 checkpoint 的地址(在示例中为 `./output/adgen-chatglm-6b-pt-8-1e-2/checkpoint-3000`)。 ## 使用自己的数据集 修改 `train.sh` 和 `evaluate.sh` 中的 `train_file`、`validation_file`和`test_file`为你自己的 JSON 格式数据集路径,并将 `prompt_column` 和 `response_column` 改为 JSON 文件中输入文本和输出文本对应的 KEY。 @@ -189,12 +189,6 @@ gradient_accumulation_steps=16 bash train_chat.sh ``` - - -## TODO -* [x] Support for chat data -* [x] Support for full finetuning - ## 引用 ``` From 3736c1ae98d4d501ecb7a86347faf2f4c210d0a1 Mon Sep 17 00:00:00 2001 From: rainatam Date: Wed, 12 Apr 2023 12:28:25 +0800 Subject: [PATCH 090/110] Update README --- ptuning/README.md | 23 ++++++++++++++++++++++- 1 file changed, 22 insertions(+), 1 deletion(-) diff --git a/ptuning/README.md b/ptuning/README.md index 8bb42fd..5c41771 100644 --- a/ptuning/README.md +++ b/ptuning/README.md @@ -133,7 +133,27 @@ gradient_accumulation_steps=1 ## 模型部署 -将对应的demo或代码中的`THUDM/chatglm-6b`换成经过 P-Tuning 微调之后 checkpoint 的地址(在示例中为 `./output/adgen-chatglm-6b-pt-8-1e-2/checkpoint-3000`)。 +```python +import os +import torch +from transformers import AutoConfig, AutoModel, AutoTokenizer + +# Load model and tokenizer of ChatGLM-6B +config = AutoConfig.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True, pre_seq_len=128) +tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) +model = AutoModel.from_pretrained("THUDM/chatglm-6b", config=config, trust_remote_code=True).half().cuda() + +# Load PrefixEncoder +prefix_state_dict = torch.load(os.path.join(CHECKPOINT_PATH, "pytorch_model.bin")) +new_prefix_state_dict = {} +for k, v in prefix_state_dict.items(): + new_prefix_state_dict[k[len("transformer.prefix_encoder."):]] = v +model.transformer.prefix_encoder.load_state_dict(new_prefix_state_dict) + +model = model.eval() + +response, history = model.chat(tokenizer, "你好", history=[]) +``` ## 使用自己的数据集 修改 `train.sh` 和 `evaluate.sh` 中的 `train_file`、`validation_file`和`test_file`为你自己的 JSON 格式数据集路径,并将 `prompt_column` 和 `response_column` 改为 JSON 文件中输入文本和输出文本对应的 KEY。 @@ -202,3 +222,4 @@ bash train_chat.sh ``` + From 1a368afd267f67a2d0d18e611827deb15a7544b1 Mon Sep 17 00:00:00 2001 From: rainatam Date: Wed, 12 Apr 2023 16:43:34 +0800 Subject: [PATCH 091/110] Update README --- ptuning/README.md | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/ptuning/README.md b/ptuning/README.md index 5c41771..9f4d0a0 100644 --- 
a/ptuning/README.md +++ b/ptuning/README.md @@ -141,7 +141,7 @@ from transformers import AutoConfig, AutoModel, AutoTokenizer # Load model and tokenizer of ChatGLM-6B config = AutoConfig.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True, pre_seq_len=128) tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) -model = AutoModel.from_pretrained("THUDM/chatglm-6b", config=config, trust_remote_code=True).half().cuda() +model = AutoModel.from_pretrained("THUDM/chatglm-6b", config=config, trust_remote_code=True) # Load PrefixEncoder prefix_state_dict = torch.load(os.path.join(CHECKPOINT_PATH, "pytorch_model.bin")) @@ -150,6 +150,10 @@ for k, v in prefix_state_dict.items(): new_prefix_state_dict[k[len("transformer.prefix_encoder."):]] = v model.transformer.prefix_encoder.load_state_dict(new_prefix_state_dict) +print(f"Quantized to 4 bit") +model = model.quantize(4) +model = model.half().cuda() +model.transformer.prefix_encoder.float() model = model.eval() response, history = model.chat(tokenizer, "你好", history=[]) From a1d9dcc5178a3ad7d738d92db89f0dc94556c9bb Mon Sep 17 00:00:00 2001 From: rainatam Date: Wed, 12 Apr 2023 21:11:29 +0800 Subject: [PATCH 092/110] Update README --- ptuning/README.md | 24 ++++++++++++++++++++---- 1 file changed, 20 insertions(+), 4 deletions(-) diff --git a/ptuning/README.md b/ptuning/README.md index 9f4d0a0..a86db16 100644 --- a/ptuning/README.md +++ b/ptuning/README.md @@ -133,23 +133,39 @@ gradient_accumulation_steps=1 ## 模型部署 +首先载入Tokenizer: + ```python import os import torch from transformers import AutoConfig, AutoModel, AutoTokenizer -# Load model and tokenizer of ChatGLM-6B -config = AutoConfig.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True, pre_seq_len=128) +# 载入Tokenizer tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) -model = AutoModel.from_pretrained("THUDM/chatglm-6b", config=config, trust_remote_code=True) +``` -# Load PrefixEncoder +(1) 如果需要加载的是新 Checkpoint(只包含 PrefixEncoder 参数): + +```python +config = AutoConfig.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True, pre_seq_len=128) +model = AutoModel.from_pretrained("THUDM/chatglm-6b", config=config, trust_remote_code=True) prefix_state_dict = torch.load(os.path.join(CHECKPOINT_PATH, "pytorch_model.bin")) new_prefix_state_dict = {} for k, v in prefix_state_dict.items(): new_prefix_state_dict[k[len("transformer.prefix_encoder."):]] = v model.transformer.prefix_encoder.load_state_dict(new_prefix_state_dict) +``` + +(2) 如果需要加载的是旧 Checkpoint(包含 ChatGLM-6B 以及 PrefixEncoder 参数),则直接加载整个 Checkpoint: +```python +config = AutoConfig.from_pretrained(CHECKPOINT_PATH, trust_remote_code=True, pre_seq_len=128) +model = AutoModel.from_pretrained(CHECKPOINT_PATH, config=config, trust_remote_code=True) +``` + +再进行量化即可使用: + +```python print(f"Quantized to 4 bit") model = model.quantize(4) model = model.half().cuda() From da626f8b232884b3848eb5a892d58937a8211cfd Mon Sep 17 00:00:00 2001 From: duzx16 Date: Wed, 12 Apr 2023 22:42:36 +0800 Subject: [PATCH 093/110] Add instruction for pre_seq_len --- ptuning/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ptuning/README.md b/ptuning/README.md index a86db16..ab91468 100644 --- a/ptuning/README.md +++ b/ptuning/README.md @@ -155,11 +155,11 @@ for k, v in prefix_state_dict.items(): new_prefix_state_dict[k[len("transformer.prefix_encoder."):]] = v model.transformer.prefix_encoder.load_state_dict(new_prefix_state_dict) ``` +注意你可能需要将 `pre_seq_len` 改成你训练时的实际值。 
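+(示例中的 `CHECKPOINT_PATH` 为训练时保存的 checkpoint 路径,例如 `./output/adgen-chatglm-6b-pt-8-1e-2/checkpoint-3000`。)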
(2) 如果需要加载的是旧 Checkpoint(包含 ChatGLM-6B 以及 PrefixEncoder 参数),则直接加载整个 Checkpoint: ```python -config = AutoConfig.from_pretrained(CHECKPOINT_PATH, trust_remote_code=True, pre_seq_len=128) model = AutoModel.from_pretrained(CHECKPOINT_PATH, config=config, trust_remote_code=True) ``` From f06df225dd3779f2b7015590de2e7a3661733d2d Mon Sep 17 00:00:00 2001 From: duzx16 Date: Wed, 12 Apr 2023 23:34:44 +0800 Subject: [PATCH 094/110] Fix turn_idx --- ptuning/main.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ptuning/main.py b/ptuning/main.py index ecce8c2..b027e9e 100644 --- a/ptuning/main.py +++ b/ptuning/main.py @@ -200,8 +200,8 @@ def main(): else: prompt = "" history = examples[history_column][i] - for i, (old_query, response) in enumerate(history): - prompt += "[Round {}]\n问:{}\n答:{}\n".format(i, old_query, response) + for turn_idx, (old_query, response) in enumerate(history): + prompt += "[Round {}]\n问:{}\n答:{}\n".format(turn_idx, old_query, response) prompt += "[Round {}]\n问:{}\n答:".format(len(history), query) prompt = prefix + prompt From e570c166fe642c04152b130d4987d611eb24f10c Mon Sep 17 00:00:00 2001 From: initialencounter <2911583893@qq.com> Date: Wed, 12 Apr 2023 23:41:05 +0800 Subject: [PATCH 095/110] add a chatbot --- glm-bot/.gitignore | 14 + glm-bot/README.md | 76 ++ glm-bot/fastapi.py | 82 ++ glm-bot/flask.py | 48 + glm-bot/glm-bot/package.json | 33 + glm-bot/glm-bot/readme.md | 5 + glm-bot/index.ts | 38 + glm-bot/package-lock.json | 1749 ++++++++++++++++++++++++++++++++++ glm-bot/package.json | 20 + 9 files changed, 2065 insertions(+) create mode 100644 glm-bot/.gitignore create mode 100644 glm-bot/README.md create mode 100644 glm-bot/fastapi.py create mode 100644 glm-bot/flask.py create mode 100644 glm-bot/glm-bot/package.json create mode 100644 glm-bot/glm-bot/readme.md create mode 100644 glm-bot/index.ts create mode 100644 glm-bot/package-lock.json create mode 100644 glm-bot/package.json diff --git a/glm-bot/.gitignore b/glm-bot/.gitignore new file mode 100644 index 0000000..72df230 --- /dev/null +++ b/glm-bot/.gitignore @@ -0,0 +1,14 @@ +node_modules +npm-debug.log +yarn-debug.log +yarn-error.log +tsconfig.tsbuildinfo + +.eslintcache +.DS_Store +.idea +.vscode +*.suo +*.ntvs* +*.njsproj +*.sln diff --git a/glm-bot/README.md b/glm-bot/README.md new file mode 100644 index 0000000..d9817f6 --- /dev/null +++ b/glm-bot/README.md @@ -0,0 +1,76 @@ +# glm-bot + +基于koishi框架的qq聊天机器人 + + +## 环境依赖 + +* nodejs14以上版本 +* gocqhttp + +## 使用方法 +* 1.启动接口 +``` +python fastapi.py +``` + +如果启动的是flask.py +则需要在index.ts文件中将 +``` +// 启用glm-bot +ctx.plugin(glm_bot,{ + type: 'fastapi', + myServerUrl: 'http://wx.blockelite.cn:10269/chatglm', + publicUrl: 'http://127.0.0.1:10269/chat', + send_glmmtg_response: true, + prefix: '', + defaultText: '', + output: 'quote' +}) +``` + +修改为 +``` +// 启用glm-bot +ctx.plugin(glm_bot,{ + type: 'flaskapi', + myServerUrl: 'http://wx.blockelite.cn:10269/chatglm', + publicUrl: 'http://127.0.0.1:10269/chat', + send_glmmtg_response: true, + prefix: '', + defaultText: '', + output: 'quote' +}) +``` + +* 2.启动[go-cqhttp](https://github.com/Mrs4s/go-cqhttp)并开启正向ws服务 + +* 2-1配置onebot +将index.ts中的 +``` +endpoint: 'ws://127.0.0.1:32333' +``` +修改为go-cqhttp的正向ws服务地址 + +* 3.安装[koishi](https://koishi.chat)依赖 + +``` +cd glm-bot && npm i +``` + + +* 4.启动机器人 +``` +node -r esbuild-register . 
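+# 如果提示找不到 esbuild-register,可尝试先执行 npm i esbuild-register(此处为假设的补救步骤)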
+``` + +## 感谢 +* [koishi](https://koishi.chat) + + +* [go-cqhttp](https://github.com/Mrs4s/go-cqhttp) + + +* [glm-bot](https://github.com/wochenlong/glm-bot) + +* [t4wefan](https://github.com/t4wefan/ChatGLM-6B-with-flask-api) \ No newline at end of file diff --git a/glm-bot/fastapi.py b/glm-bot/fastapi.py new file mode 100644 index 0000000..54163fe --- /dev/null +++ b/glm-bot/fastapi.py @@ -0,0 +1,82 @@ +from fastapi import FastAPI +from pydantic import BaseModel +import uvicorn +import json +from transformers import AutoModel, AutoTokenizer +from typing import List,Tuple + + +max_length = 4096 +# 根据id获取上下文信息 +def get_history(id: str) -> List[Tuple[str,str]] or None: + if id in sessions.keys(): + length = len(json.dumps(sessions[id],indent=2)) + if length>max_length: + sessions[id] = [] + return None + if sessions[id] == []: + return None + return sessions[id] + else: + sessions[id] = [] + return None + +# 根据id清空上下文 + + +def clear(id: str) -> str: + sessions[id] = [] + return '已重置' + + + + +tokenizer = AutoTokenizer.from_pretrained( + "THUDM/chatglm-6b", trust_remote_code=True) +model = AutoModel.from_pretrained( + "THUDM/chatglm-6b", trust_remote_code=True).half().cuda() +model = model.eval() + +MAX_TURNS = 20 +MAX_BOXES = MAX_TURNS * 2 + +sessions = {} + + +def predict(prompt: str, uid: str, max_length: int = 2048, top_p: float = 0.7, temperature: float = 0.95) -> str: + history = get_history(uid) + print(history) + response, history = model.chat(tokenizer, prompt, history=history, max_length=max_length, top_p=top_p, + temperature=temperature) + sessions[uid].append((prompt, response)) + print(get_history(uid)) + return response + +# while 1: +# uid = input("uid:") +# prompt = input('msg:') +# msg = predict(prompt=prompt,uid = uid) +# print(msg) + + +app = FastAPI() + + +class Item_chat(BaseModel): + msg: str + uid: str +@app.post("/chat") +def chat(item:Item_chat): + msg = predict(prompt=item.msg, uid=item.uid) + print(msg) + return msg + + +class Item_claer(BaseModel): + uid: str +@app.post("/clear") +def clear_session(item:Item_claer): + return clear(item.uid) + + +uvicorn.run(app, host="0.0.0.0", port=10269) diff --git a/glm-bot/flask.py b/glm-bot/flask.py new file mode 100644 index 0000000..a5fc6a0 --- /dev/null +++ b/glm-bot/flask.py @@ -0,0 +1,48 @@ +import os +import platform +from transformers import AutoTokenizer, AutoModel + +from flask import Flask, request + +tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) +model = AutoModel.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True).half().cuda() +def prepare_model(): + global model + model = model.eval() + +prepare_model() +model = model.eval() +preset = [] +port = 7860 +os_name = platform.system() +app = Flask(__name__) +history = {} +@app.route('/chatglm', methods=["GET"]) +def delete_msg(): + global history + query = request.args.get('msg') + usrid = request.args.get('usrid') + source = request.args.get('source') + if query == None: + return '请提供内容' + if query == 'ping': + return 'pong!服务端运行正常!' 
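+    # 以下两个分支校验请求来源与用户 id,缺少任一参数都会直接返回提示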
+ if source == None: + return '无来源的请求,请更新插件' + if usrid == None: + return '请提供用户id' + if not usrid in history: + history[usrid] = preset + print(f"usrid:{usrid},content:{query}") + if query == "clear": + history[usrid] = preset + + print(f"usrid:{usrid},清空历史") + return '已重置当前对话' + response, history[usrid] = model.chat(tokenizer, query, history=history[usrid]) + print(f"ChatGLM-6B:{response}") + return response + +if __name__ == '__main__': + print(f"欢迎使用 ChatGLM-6B API,可通过发送GET请求到http://127.0.0.1:{port}/chatglm来调用。") + app.run(host='0.0.0.0', port=port) diff --git a/glm-bot/glm-bot/package.json b/glm-bot/glm-bot/package.json new file mode 100644 index 0000000..6f5f39f --- /dev/null +++ b/glm-bot/glm-bot/package.json @@ -0,0 +1,33 @@ +{ + "name": "koishi-plugin-glm-bot", + "description": "glm-testbot的稳定版,支持自建后端", + "version": "1.0.3", + "main": "lib/index.js", + "typings": "lib/index.d.ts", + "files": [ + "lib", + "dist" + ], + "license": "MIT", + "scripts": {}, + "keywords": [ + "chatbot", + "koishi", + "plugin" + ], + "peerDependencies": { + "koishi": "4.12.0" + }, + "koishi": { + "description": "glm插件", + "browser": true, + "service": { + "optional": [ + "puppeteer" + ], + "implements": [ + "glm" + ] + } + } +} diff --git a/glm-bot/glm-bot/readme.md b/glm-bot/glm-bot/readme.md new file mode 100644 index 0000000..82934e8 --- /dev/null +++ b/glm-bot/glm-bot/readme.md @@ -0,0 +1,5 @@ +# koishi-plugin-glm-bot + +[![npm](https://img.shields.io/npm/v/koishi-plugin-glm-bot?style=flat-square)](https://www.npmjs.com/package/koishi-plugin-glm-bot) + +glm-bot的稳定版 diff --git a/glm-bot/index.ts b/glm-bot/index.ts new file mode 100644 index 0000000..0f5bf63 --- /dev/null +++ b/glm-bot/index.ts @@ -0,0 +1,38 @@ +import { Context } from 'koishi' +import console from '@koishijs/plugin-console' +import * as sandbox from '@koishijs/plugin-sandbox' +import * as echo from '@koishijs/plugin-echo' + +import onebot from '@koishijs/plugin-adapter-onebot' + +import glm_bot from './glm-bot' + +// 创建一个 Koishi 应用 +const ctx = new Context({ + port: 5140, +}) +// 使用 OneBot 适配器的机器人 +ctx.plugin(onebot, { + protocol: 'ws', + selfId: '3111720341', + endpoint: 'ws://127.0.0.1:32333', + }) + +// 启用上述插件 +ctx.plugin(console) // 提供控制台 +ctx.plugin(sandbox) // 提供调试沙盒 +ctx.plugin(echo) // 提供回声指令 + +// 启用glm-bot +ctx.plugin(glm_bot,{ + type: 'fastapi', + myServerUrl: 'http://wx.blockelite.cn:10269/chatglm', + publicUrl: 'http://127.0.0.1:10269/chat', + send_glmmtg_response: true, + prefix: '', + defaultText: '', + output: 'quote' +}) + +// 启动应用 +ctx.start() \ No newline at end of file diff --git a/glm-bot/package-lock.json b/glm-bot/package-lock.json new file mode 100644 index 0000000..57d69f8 --- /dev/null +++ b/glm-bot/package-lock.json @@ -0,0 +1,1749 @@ +{ + "name": "glm-bot", + "version": "1.0.0", + "lockfileVersion": 3, + "requires": true, + "packages": { + "": { + "name": "glm-bot", + "version": "1.0.0", + "license": "MIT", + "dependencies": { + "@koishijs/plugin-adapter-onebot": "^5.6.6", + "@koishijs/plugin-console": "^5.6.1", + "@koishijs/plugin-echo": "^2.2.3", + "@koishijs/plugin-market": "^1.12.5", + "@koishijs/plugin-sandbox": "^3.0.0", + "koishi": "^4.12.4", + "koishi-plugin-glm-bot": "^1.1.2" + } + }, + "node_modules/@koa/router": { + "version": "10.1.1", + "resolved": "https://registry.npmjs.org/@koa/router/-/router-10.1.1.tgz", + "integrity": "sha512-ORNjq5z4EmQPriKbR0ER3k4Gh7YGNhWDL7JBW+8wXDrHLbWYKYSJaOJ9aN06npF5tbTxe2JBOsurpJDAvjiXKw==", + "dependencies": { + "debug": "^4.1.1", + "http-errors": "^1.7.3", + 
"koa-compose": "^4.1.0", + "methods": "^1.1.2", + "path-to-regexp": "^6.1.0" + }, + "engines": { + "node": ">= 8.0.0" + } + }, + "node_modules/@koishijs/core": { + "version": "4.12.4", + "resolved": "https://registry.npmjs.org/@koishijs/core/-/core-4.12.4.tgz", + "integrity": "sha512-+KTCvqr1M3cZnW8HOF359CUI4JVAEzW8YDexCRWflMlCDHtgzQSgBXIcujHhVbn1uVLrhowhspNvNI9kI+LIgg==", + "dependencies": { + "@koishijs/utils": "^7.0.1", + "@minatojs/core": "^2.3.1", + "@satorijs/core": "^2.3.1", + "cordis": "^2.7.4", + "cosmokit": "^1.4.1", + "fastest-levenshtein": "^1.0.16" + } + }, + "node_modules/@koishijs/loader": { + "version": "3.1.4", + "resolved": "https://registry.npmjs.org/@koishijs/loader/-/loader-3.1.4.tgz", + "integrity": "sha512-jhcMaKAHf/9gpsd9gZEBZIws888YOcGqyR6iMfL6r4+66pXbERFQx/WlZTVuA0x4UTtvJtfvQI6n1aviNUA7AA==", + "dependencies": { + "dotenv": "^16.0.3", + "js-yaml": "^4.1.0", + "ns-require": "^1.1.4" + }, + "peerDependencies": { + "@koishijs/core": "4.12.4" + } + }, + "node_modules/@koishijs/plugin-adapter-onebot": { + "version": "5.6.6", + "resolved": "https://registry.npmjs.org/@koishijs/plugin-adapter-onebot/-/plugin-adapter-onebot-5.6.6.tgz", + "integrity": "sha512-tbMvrEmL32k4j7AUcx97VRVcXypkUUo8zPJvdITnv3v49/bLT1qafKgrjw+E3rkb61MKFHzBJvNN8aPciG+sXg==", + "dependencies": { + "@satorijs/adapter-onebot": "^5.6.6" + }, + "peerDependencies": { + "koishi": "^4.12.4" + } + }, + "node_modules/@koishijs/plugin-console": { + "version": "5.6.1", + "resolved": "https://registry.npmjs.org/@koishijs/plugin-console/-/plugin-console-5.6.1.tgz", + "integrity": "sha512-Vk8JY9wgtLqjftrbeQXwPbbR5y9i8b9Ob38MO9yjSrliNMxUKZZxNjy2CaWUL6gMgqMtrR22C4TgpyswZwquNg==", + "dependencies": { + "open": "^8.4.2", + "uuid": "^8.3.2", + "ws": "^8.13.0" + }, + "peerDependencies": { + "koishi": "^4.12.3" + } + }, + "node_modules/@koishijs/plugin-echo": { + "version": "2.2.3", + "resolved": "https://registry.npmjs.org/@koishijs/plugin-echo/-/plugin-echo-2.2.3.tgz", + "integrity": "sha512-wt2Ax4pWjl9D21yW2V72YIxVQfOXsU1DizaQ8Qzop7ZxnD2AwQBgvD30vqA3bpJ0aUnsa+LsRwJYawsY+GCV7w==", + "peerDependencies": { + "koishi": "^4.12.3" + } + }, + "node_modules/@koishijs/plugin-market": { + "version": "1.12.5", + "resolved": "https://registry.npmjs.org/@koishijs/plugin-market/-/plugin-market-1.12.5.tgz", + "integrity": "sha512-sc9m7GeQ6eolbE+wF7eLoD3RkUXQRLpVoKS65hfNuKhmDGqVeSkTHB9gEwtyMqX4P8y9uhuYquVskwy/dipF5Q==", + "dependencies": { + "@koishijs/registry": "^4.6.0", + "execa": "^5.1.1", + "get-registry": "^1.1.0", + "ns-require": "^1.1.4", + "semver": "^7.3.8", + "throttle-debounce": "^3.0.1", + "which-pm-runs": "^1.1.0" + }, + "peerDependencies": { + "@koishijs/plugin-console": "^5.5.7", + "koishi": "^4.12.3" + } + }, + "node_modules/@koishijs/plugin-sandbox": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/@koishijs/plugin-sandbox/-/plugin-sandbox-3.0.0.tgz", + "integrity": "sha512-4ZcsIEuCTHkD7XigQaCAj2BX5ZiWqHyeIx8mFgXIh3oB+1CEaidmmukFbpqnbk04Tc9sOaqANGSEiaxUwHrfRQ==", + "peerDependencies": { + "@koishijs/plugin-console": "^5.6.1", + "koishi": "^4.12.3" + } + }, + "node_modules/@koishijs/registry": { + "version": "4.6.0", + "resolved": "https://registry.npmjs.org/@koishijs/registry/-/registry-4.6.0.tgz", + "integrity": "sha512-Sn2uTjT0qm18rxec1D66d0xU6YjpOAXTmmDytpz7aoguRS8WdZaAtK4jtSMKo2AXmowM3sQMM4LvYkZsqkhrTw==", + "dependencies": { + "cosmokit": "^1.4.0", + "p-map": "^4.0.0", + "semver": "^7.3.8" + } + }, + "node_modules/@koishijs/utils": { + "version": "7.0.1", + "resolved": 
"https://registry.npmjs.org/@koishijs/utils/-/utils-7.0.1.tgz", + "integrity": "sha512-in0vSfgmsjURf8aw+Wagve5thVXGpZRawYZiN9/w0AElPZ0Y/nQtGDvI0UMRcjubizb/mZgO7j0FcdYMm24dqg==", + "dependencies": { + "cosmokit": "^1.4.1", + "inaba": "^1.1.1" + } + }, + "node_modules/@minatojs/core": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/@minatojs/core/-/core-2.3.1.tgz", + "integrity": "sha512-3q37agKnVq9QHrJozw3ACk/Gt+i1xDY/o7363kFJmjQSxQfaQg5dHT0NqjwVxe4AyqYpTWh3487IVyEAAMe9kw==", + "dependencies": { + "cosmokit": "^1.4.1" + } + }, + "node_modules/@satorijs/adapter-onebot": { + "version": "5.6.6", + "resolved": "https://registry.npmjs.org/@satorijs/adapter-onebot/-/adapter-onebot-5.6.6.tgz", + "integrity": "sha512-Plb4fhXvWMOTCj4OVTX2pDF4MKszh3Mq5lGauGI+/eby2m/hW22TFLe6K4Mnpa8QupSoqpGEgB2cofk3bDUJxg==", + "dependencies": { + "qface": "^1.4.0" + }, + "peerDependencies": { + "@satorijs/satori": "^2.3.1" + } + }, + "node_modules/@satorijs/core": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/@satorijs/core/-/core-2.3.1.tgz", + "integrity": "sha512-IAEPN2GW5m6ZEQIRA1uZlQIiq5VHQFvY+rMbZGjdtbX3bioIhtIBtwUseilUW/BtSl3XrjDgOVRuziGxQeyXwg==", + "dependencies": { + "@satorijs/element": "^2.3.6", + "cordis": "^2.7.4", + "cordis-axios": "^3.1.1", + "cosmokit": "^1.4.1", + "reggol": "^1.3.5", + "schemastery": "^3.7.2", + "ws": "^8.13.0" + } + }, + "node_modules/@satorijs/element": { + "version": "2.3.6", + "resolved": "https://registry.npmjs.org/@satorijs/element/-/element-2.3.6.tgz", + "integrity": "sha512-xys5HsclvKvQ3ysO88c85/9F1xdDIpsVOQ6tlPCeL/HnTKtc4wWzAp8Tu4YlIlBujzsvMkRohVrr89u1Ck0+sA==", + "dependencies": { + "cosmokit": "^1.4.1" + } + }, + "node_modules/@satorijs/satori": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/@satorijs/satori/-/satori-2.3.1.tgz", + "integrity": "sha512-wsToUdPRzCWgqZcFHeeaA8uyvqPCVSSsVWfNPP+hY9pEJoB/Pe3vREYHBemzy0UJ3o29S08z05pt81AbeJxOTQ==", + "dependencies": { + "@koa/router": "^10.1.1", + "@satorijs/core": "2.3.1", + "@types/koa": "*", + "@types/koa__router": "*", + "@types/ws": "^8.5.4", + "file-type": "^16.5.4", + "http-proxy-agent": "^5.0.0", + "https-proxy-agent": "^5.0.1", + "koa": "^2.14.1", + "koa-bodyparser": "^4.4.0", + "parseurl": "^1.3.3", + "path-to-regexp": "^6.2.1", + "socks-proxy-agent": "^5.0.1", + "ws": "^8.13.0" + }, + "engines": { + "node": ">=12.0.0" + } + }, + "node_modules/@tokenizer/token": { + "version": "0.3.0", + "resolved": "https://registry.npmjs.org/@tokenizer/token/-/token-0.3.0.tgz", + "integrity": "sha512-OvjF+z51L3ov0OyAU0duzsYuvO01PH7x4t6DJx+guahgTnBHkhJdG7soQeTSFLWN3efnHyibZ4Z8l2EuWwJN3A==" + }, + "node_modules/@tootallnate/once": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/@tootallnate/once/-/once-2.0.0.tgz", + "integrity": "sha512-XCuKFP5PS55gnMVu3dty8KPatLqUoy/ZYzDzAGCQ8JNFCkLXzmI7vNHCR+XpbZaMWQK/vQubr7PkYq8g470J/A==", + "engines": { + "node": ">= 10" + } + }, + "node_modules/@types/accepts": { + "version": "1.3.5", + "resolved": "https://registry.npmjs.org/@types/accepts/-/accepts-1.3.5.tgz", + "integrity": "sha512-jOdnI/3qTpHABjM5cx1Hc0sKsPoYCp+DP/GJRGtDlPd7fiV9oXGGIcjW/ZOxLIvjGz8MA+uMZI9metHlgqbgwQ==", + "dependencies": { + "@types/node": "*" + } + }, + "node_modules/@types/body-parser": { + "version": "1.19.2", + "resolved": "https://registry.npmjs.org/@types/body-parser/-/body-parser-1.19.2.tgz", + "integrity": "sha512-ALYone6pm6QmwZoAgeyNksccT9Q4AWZQ6PvfwR37GT6r6FWUPguq6sUmNGSMV2Wr761oQoBxwGGa6DR5o1DC9g==", + "dependencies": { + "@types/connect": "*", 
+ "@types/node": "*" + } + }, + "node_modules/@types/connect": { + "version": "3.4.35", + "resolved": "https://registry.npmjs.org/@types/connect/-/connect-3.4.35.tgz", + "integrity": "sha512-cdeYyv4KWoEgpBISTxWvqYsVy444DOqehiF3fM3ne10AmJ62RSyNkUnxMJXHQWRQQX2eR94m5y1IZyDwBjV9FQ==", + "dependencies": { + "@types/node": "*" + } + }, + "node_modules/@types/content-disposition": { + "version": "0.5.5", + "resolved": "https://registry.npmjs.org/@types/content-disposition/-/content-disposition-0.5.5.tgz", + "integrity": "sha512-v6LCdKfK6BwcqMo+wYW05rLS12S0ZO0Fl4w1h4aaZMD7bqT3gVUns6FvLJKGZHQmYn3SX55JWGpziwJRwVgutA==" + }, + "node_modules/@types/cookies": { + "version": "0.7.7", + "resolved": "https://registry.npmjs.org/@types/cookies/-/cookies-0.7.7.tgz", + "integrity": "sha512-h7BcvPUogWbKCzBR2lY4oqaZbO3jXZksexYJVFvkrFeLgbZjQkU4x8pRq6eg2MHXQhY0McQdqmmsxRWlVAHooA==", + "dependencies": { + "@types/connect": "*", + "@types/express": "*", + "@types/keygrip": "*", + "@types/node": "*" + } + }, + "node_modules/@types/express": { + "version": "4.17.17", + "resolved": "https://registry.npmjs.org/@types/express/-/express-4.17.17.tgz", + "integrity": "sha512-Q4FmmuLGBG58btUnfS1c1r/NQdlp3DMfGDGig8WhfpA2YRUtEkxAjkZb0yvplJGYdF1fsQ81iMDcH24sSCNC/Q==", + "dependencies": { + "@types/body-parser": "*", + "@types/express-serve-static-core": "^4.17.33", + "@types/qs": "*", + "@types/serve-static": "*" + } + }, + "node_modules/@types/express-serve-static-core": { + "version": "4.17.33", + "resolved": "https://registry.npmjs.org/@types/express-serve-static-core/-/express-serve-static-core-4.17.33.tgz", + "integrity": "sha512-TPBqmR/HRYI3eC2E5hmiivIzv+bidAfXofM+sbonAGvyDhySGw9/PQZFt2BLOrjUUR++4eJVpx6KnLQK1Fk9tA==", + "dependencies": { + "@types/node": "*", + "@types/qs": "*", + "@types/range-parser": "*" + } + }, + "node_modules/@types/http-assert": { + "version": "1.5.3", + "resolved": "https://registry.npmjs.org/@types/http-assert/-/http-assert-1.5.3.tgz", + "integrity": "sha512-FyAOrDuQmBi8/or3ns4rwPno7/9tJTijVW6aQQjK02+kOQ8zmoNg2XJtAuQhvQcy1ASJq38wirX5//9J1EqoUA==" + }, + "node_modules/@types/http-errors": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/@types/http-errors/-/http-errors-2.0.1.tgz", + "integrity": "sha512-/K3ds8TRAfBvi5vfjuz8y6+GiAYBZ0x4tXv1Av6CWBWn0IlADc+ZX9pMq7oU0fNQPnBwIZl3rmeLp6SBApbxSQ==" + }, + "node_modules/@types/keygrip": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/@types/keygrip/-/keygrip-1.0.2.tgz", + "integrity": "sha512-GJhpTepz2udxGexqos8wgaBx4I/zWIDPh/KOGEwAqtuGDkOUJu5eFvwmdBX4AmB8Odsr+9pHCQqiAqDL/yKMKw==" + }, + "node_modules/@types/koa": { + "version": "2.13.6", + "resolved": "https://registry.npmjs.org/@types/koa/-/koa-2.13.6.tgz", + "integrity": "sha512-diYUfp/GqfWBAiwxHtYJ/FQYIXhlEhlyaU7lB/bWQrx4Il9lCET5UwpFy3StOAohfsxxvEQ11qIJgT1j2tfBvw==", + "dependencies": { + "@types/accepts": "*", + "@types/content-disposition": "*", + "@types/cookies": "*", + "@types/http-assert": "*", + "@types/http-errors": "*", + "@types/keygrip": "*", + "@types/koa-compose": "*", + "@types/node": "*" + } + }, + "node_modules/@types/koa__router": { + "version": "12.0.0", + "resolved": "https://registry.npmjs.org/@types/koa__router/-/koa__router-12.0.0.tgz", + "integrity": "sha512-S6eHyZyoWCZLNHyy8j0sMW85cPrpByCbGGU2/BO4IzGiI87aHJ92lZh4E9xfsM9DcbCT469/OIqyC0sSJXSIBQ==", + "dependencies": { + "@types/koa": "*" + } + }, + "node_modules/@types/koa-compose": { + "version": "3.2.5", + "resolved": "https://registry.npmjs.org/@types/koa-compose/-/koa-compose-3.2.5.tgz", + 
"integrity": "sha512-B8nG/OoE1ORZqCkBVsup/AKcvjdgoHnfi4pZMn5UwAPCbhk/96xyv284eBYW8JlQbQ7zDmnpFr68I/40mFoIBQ==", + "dependencies": { + "@types/koa": "*" + } + }, + "node_modules/@types/mime": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/@types/mime/-/mime-3.0.1.tgz", + "integrity": "sha512-Y4XFY5VJAuw0FgAqPNd6NNoV44jbq9Bz2L7Rh/J6jLTiHBSBJa9fxqQIvkIld4GsoDOcCbvzOUAbLPsSKKg+uA==" + }, + "node_modules/@types/node": { + "version": "18.15.11", + "resolved": "https://registry.npmjs.org/@types/node/-/node-18.15.11.tgz", + "integrity": "sha512-E5Kwq2n4SbMzQOn6wnmBjuK9ouqlURrcZDVfbo9ftDDTFt3nk7ZKK4GMOzoYgnpQJKcxwQw+lGaBvvlMo0qN/Q==" + }, + "node_modules/@types/qs": { + "version": "6.9.7", + "resolved": "https://registry.npmjs.org/@types/qs/-/qs-6.9.7.tgz", + "integrity": "sha512-FGa1F62FT09qcrueBA6qYTrJPVDzah9a+493+o2PCXsesWHIn27G98TsSMs3WPNbZIEj4+VJf6saSFpvD+3Zsw==" + }, + "node_modules/@types/range-parser": { + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/@types/range-parser/-/range-parser-1.2.4.tgz", + "integrity": "sha512-EEhsLsD6UsDM1yFhAvy0Cjr6VwmpMWqFBCb9w07wVugF7w9nfajxLuVmngTIpgS6svCnm6Vaw+MZhoDCKnOfsw==" + }, + "node_modules/@types/serve-static": { + "version": "1.15.1", + "resolved": "https://registry.npmjs.org/@types/serve-static/-/serve-static-1.15.1.tgz", + "integrity": "sha512-NUo5XNiAdULrJENtJXZZ3fHtfMolzZwczzBbnAeBbqBwG+LaG6YaJtuwzwGSQZ2wsCrxjEhNNjAkKigy3n8teQ==", + "dependencies": { + "@types/mime": "*", + "@types/node": "*" + } + }, + "node_modules/@types/ws": { + "version": "8.5.4", + "resolved": "https://registry.npmjs.org/@types/ws/-/ws-8.5.4.tgz", + "integrity": "sha512-zdQDHKUgcX/zBc4GrwsE/7dVdAD8JR4EuiAXiiUhhfyIJXXb2+PrGshFyeXWQPMmmZ2XxgaqclgpIC7eTXc1mg==", + "dependencies": { + "@types/node": "*" + } + }, + "node_modules/accepts": { + "version": "1.3.8", + "resolved": "https://registry.npmjs.org/accepts/-/accepts-1.3.8.tgz", + "integrity": "sha512-PYAthTa2m2VKxuvSD3DPC/Gy+U+sOA1LAuT8mkmRuvw+NACSaeXEQ+NHcVF7rONl6qcaxV3Uuemwawk+7+SJLw==", + "dependencies": { + "mime-types": "~2.1.34", + "negotiator": "0.6.3" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/agent-base": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/agent-base/-/agent-base-6.0.2.tgz", + "integrity": "sha512-RZNwNclF7+MS/8bDg70amg32dyeZGZxiDuQmZxKLAlQjr3jGyLx+4Kkk58UO7D2QdgFIQCovuSuZESne6RG6XQ==", + "dependencies": { + "debug": "4" + }, + "engines": { + "node": ">= 6.0.0" + } + }, + "node_modules/aggregate-error": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/aggregate-error/-/aggregate-error-3.1.0.tgz", + "integrity": "sha512-4I7Td01quW/RpocfNayFdFVk1qSuoh0E7JrbRJ16nH01HhKFQ88INq9Sd+nd72zqRySlr9BmDA8xlEJ6vJMrYA==", + "dependencies": { + "clean-stack": "^2.0.0", + "indent-string": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/argparse": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz", + "integrity": "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==" + }, + "node_modules/asynckit": { + "version": "0.4.0", + "resolved": "https://registry.npmjs.org/asynckit/-/asynckit-0.4.0.tgz", + "integrity": "sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q==" + }, + "node_modules/axios": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/axios/-/axios-1.1.3.tgz", + "integrity": 
"sha512-00tXVRwKx/FZr/IDVFt4C+f9FYairX517WoGCL6dpOntqLkZofjhu43F/Xl44UOpqa+9sLFDrG/XAnFsUYgkDA==", + "dependencies": { + "follow-redirects": "^1.15.0", + "form-data": "^4.0.0", + "proxy-from-env": "^1.1.0" + } + }, + "node_modules/bytes": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/bytes/-/bytes-3.1.2.tgz", + "integrity": "sha512-/Nf7TyzTx6S3yRJObOAV7956r8cr2+Oj8AC5dt8wSP3BQAoeX58NoHyCU8P8zGkNXStjTSi6fzO6F0pBdcYbEg==", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/cac": { + "version": "6.7.14", + "resolved": "https://registry.npmjs.org/cac/-/cac-6.7.14.tgz", + "integrity": "sha512-b6Ilus+c3RrdDk+JhLKUAQfzzgLEPy6wcXqS7f/xe1EETvsDP6GORG7SFuOs6cID5YkqchW/LXZbX5bc8j7ZcQ==", + "engines": { + "node": ">=8" + } + }, + "node_modules/cache-content-type": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/cache-content-type/-/cache-content-type-1.0.1.tgz", + "integrity": "sha512-IKufZ1o4Ut42YUrZSo8+qnMTrFuKkvyoLXUywKz9GJ5BrhOFGhLdkx9sG4KAnVvbY6kEcSFjLQul+DVmBm2bgA==", + "dependencies": { + "mime-types": "^2.1.18", + "ylru": "^1.2.0" + }, + "engines": { + "node": ">= 6.0.0" + } + }, + "node_modules/call-bind": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/call-bind/-/call-bind-1.0.2.tgz", + "integrity": "sha512-7O+FbCihrB5WGbFYesctwmTKae6rOiIzmz1icreWJ+0aA7LJfuqhEso2T9ncpcFtzMQtzXf2QGGueWJGTYsqrA==", + "dependencies": { + "function-bind": "^1.1.1", + "get-intrinsic": "^1.0.2" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/clean-stack": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/clean-stack/-/clean-stack-2.2.0.tgz", + "integrity": "sha512-4diC9HaTE+KRAMWhDhrGOECgWZxoevMc5TlkObMqNSsVU62PYzXZ/SMTjzyGAFF1YusgxGcSWTEXBhp0CPwQ1A==", + "engines": { + "node": ">=6" + } + }, + "node_modules/co": { + "version": "4.6.0", + "resolved": "https://registry.npmjs.org/co/-/co-4.6.0.tgz", + "integrity": "sha512-QVb0dM5HvG+uaxitm8wONl7jltx8dqhfU33DcqtOZcLSVIKSDDLDi7+0LbAKiyI8hD9u42m2YxXSkMGWThaecQ==", + "engines": { + "iojs": ">= 1.0.0", + "node": ">= 0.12.0" + } + }, + "node_modules/co-body": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/co-body/-/co-body-6.1.0.tgz", + "integrity": "sha512-m7pOT6CdLN7FuXUcpuz/8lfQ/L77x8SchHCF4G0RBTJO20Wzmhn5Sp4/5WsKy8OSpifBSUrmg83qEqaDHdyFuQ==", + "dependencies": { + "inflation": "^2.0.0", + "qs": "^6.5.2", + "raw-body": "^2.3.3", + "type-is": "^1.6.16" + } + }, + "node_modules/combined-stream": { + "version": "1.0.8", + "resolved": "https://registry.npmjs.org/combined-stream/-/combined-stream-1.0.8.tgz", + "integrity": "sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg==", + "dependencies": { + "delayed-stream": "~1.0.0" + }, + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/content-disposition": { + "version": "0.5.4", + "resolved": "https://registry.npmjs.org/content-disposition/-/content-disposition-0.5.4.tgz", + "integrity": "sha512-FveZTNuGw04cxlAiWbzi6zTAL/lhehaWbTtgluJh4/E95DqMwTmha3KZN1aAWA8cFIhHzMZUvLevkw5Rqk+tSQ==", + "dependencies": { + "safe-buffer": "5.2.1" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/content-type": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/content-type/-/content-type-1.0.5.tgz", + "integrity": "sha512-nTjqfcBFEipKdXCv4YDQWCfmcLZKm81ldF0pAopTvyrFGVbcR6P/VAAd5G7N+0tTr8QqiU0tFadD6FK4NtJwOA==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/cookies": { + "version": "0.8.0", + "resolved": 
"https://registry.npmjs.org/cookies/-/cookies-0.8.0.tgz", + "integrity": "sha512-8aPsApQfebXnuI+537McwYsDtjVxGm8gTIzQI3FDW6t5t/DAhERxtnbEPN/8RX+uZthoz4eCOgloXaE5cYyNow==", + "dependencies": { + "depd": "~2.0.0", + "keygrip": "~1.1.0" + }, + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/cookies/node_modules/depd": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/depd/-/depd-2.0.0.tgz", + "integrity": "sha512-g7nH6P6dyDioJogAAGprGpCtVImJhpPk/roCzdb3fIh61/s/nPsfR6onyMwkCAR/OlC3yBC0lESvUoQEAssIrw==", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/copy-to": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/copy-to/-/copy-to-2.0.1.tgz", + "integrity": "sha512-3DdaFaU/Zf1AnpLiFDeNCD4TOWe3Zl2RZaTzUvWiIk5ERzcCodOE20Vqq4fzCbNoHURFHT4/us/Lfq+S2zyY4w==" + }, + "node_modules/cordis": { + "version": "2.7.4", + "resolved": "https://registry.npmjs.org/cordis/-/cordis-2.7.4.tgz", + "integrity": "sha512-SNP9JAhSjVHtozfmvU8HvREjsVSWTsQLuyHBH8PVnLZDJExe7l3srqNWs7vkKCrKWCqw8JH1Zj7kDF1bCbpwzw==", + "dependencies": { + "cosmokit": "^1.4.1" + } + }, + "node_modules/cordis-axios": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/cordis-axios/-/cordis-axios-3.1.1.tgz", + "integrity": "sha512-zBdLVIfnp8jAerS24T4JY689tmYQl2WpOFMv0Y1g3j3BwwUO/ScveEhfR5Oix3J2JepxIUoA2q18nTBJj2RaFQ==", + "dependencies": { + "axios": "~1.1.3", + "cosmokit": "^1.4.1", + "mime-db": "^1.52.0" + }, + "engines": { + "node": ">=12.0.0" + }, + "peerDependencies": { + "cordis": "^2.7.4" + } + }, + "node_modules/cosmokit": { + "version": "1.4.1", + "resolved": "https://registry.npmjs.org/cosmokit/-/cosmokit-1.4.1.tgz", + "integrity": "sha512-d3ZRpKFahJRvLbo1T4y0ELCudjk9AeDUsfgKm+iAti6yPCeoPLGNUGT4expTWsNkrSA1uk7CKhtBPiizFYvDgA==" + }, + "node_modules/cross-spawn": { + "version": "7.0.3", + "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.3.tgz", + "integrity": "sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w==", + "dependencies": { + "path-key": "^3.1.0", + "shebang-command": "^2.0.0", + "which": "^2.0.1" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/debug": { + "version": "4.3.4", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.4.tgz", + "integrity": "sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==", + "dependencies": { + "ms": "2.1.2" + }, + "engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } + } + }, + "node_modules/deep-equal": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/deep-equal/-/deep-equal-1.0.1.tgz", + "integrity": "sha512-bHtC0iYvWhyaTzvV3CZgPeZQqCOBGyGsVV7v4eevpdkLHfiSrXUdBG+qAuSz4RI70sszvjQ1QSZ98An1yNwpSw==" + }, + "node_modules/define-lazy-prop": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/define-lazy-prop/-/define-lazy-prop-2.0.0.tgz", + "integrity": "sha512-Ds09qNh8yw3khSjiJjiUInaGX9xlqZDY7JVryGxdxV7NPeuqQfplOpQ66yJFZut3jLa5zOwkXw1g9EI2uKh4Og==", + "engines": { + "node": ">=8" + } + }, + "node_modules/delayed-stream": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/delayed-stream/-/delayed-stream-1.0.0.tgz", + "integrity": "sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ==", + "engines": { + "node": ">=0.4.0" + } + }, + "node_modules/delegates": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/delegates/-/delegates-1.0.0.tgz", + 
"integrity": "sha512-bd2L678uiWATM6m5Z1VzNCErI3jiGzt6HGY8OVICs40JQq/HALfbyNJmp0UDakEY4pMMaN0Ly5om/B1VI/+xfQ==" + }, + "node_modules/depd": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/depd/-/depd-1.1.2.tgz", + "integrity": "sha512-7emPTl6Dpo6JRXOXjLRxck+FlLRX5847cLKEn00PLAgc3g2hTZZgr+e4c2v6QpSmLeFP3n5yUo7ft6avBK/5jQ==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/destroy": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/destroy/-/destroy-1.2.0.tgz", + "integrity": "sha512-2sJGJTaXIIaR1w4iJSNoN0hnMY7Gpc/n8D4qSCJw8QqFWXf7cuAgnEHxBpweaVcPevC2l3KpjYCx3NypQQgaJg==", + "engines": { + "node": ">= 0.8", + "npm": "1.2.8000 || >= 1.4.16" + } + }, + "node_modules/dotenv": { + "version": "16.0.3", + "resolved": "https://registry.npmjs.org/dotenv/-/dotenv-16.0.3.tgz", + "integrity": "sha512-7GO6HghkA5fYG9TYnNxi14/7K9f5occMlp3zXAuSxn7CKCxt9xbNWG7yF8hTCSUchlfWSe3uLmlPfigevRItzQ==", + "engines": { + "node": ">=12" + } + }, + "node_modules/ee-first": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/ee-first/-/ee-first-1.1.1.tgz", + "integrity": "sha512-WMwm9LhRUo+WUaRN+vRuETqG89IgZphVSNkdFgeb6sS/E4OrDIN7t48CAewSHXc6C8lefD8KKfr5vY61brQlow==" + }, + "node_modules/encodeurl": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/encodeurl/-/encodeurl-1.0.2.tgz", + "integrity": "sha512-TPJXq8JqFaVYm2CWmPvnP2Iyo4ZSM7/QKcSmuMLDObfpH5fi7RUGmd/rTDf+rut/saiDiQEeVTNgAmJEdAOx0w==", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/escape-html": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/escape-html/-/escape-html-1.0.3.tgz", + "integrity": "sha512-NiSupZ4OeuGwr68lGIeym/ksIZMJodUGOSCZ/FSnTxcrekbvqrgdUxlJOMpijaKZVjAJrWrGs/6Jy8OMuyj9ow==" + }, + "node_modules/execa": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/execa/-/execa-5.1.1.tgz", + "integrity": "sha512-8uSpZZocAZRBAPIEINJj3Lo9HyGitllczc27Eh5YYojjMFMn8yHMDMaUHE2Jqfq05D/wucwI4JGURyXt1vchyg==", + "dependencies": { + "cross-spawn": "^7.0.3", + "get-stream": "^6.0.0", + "human-signals": "^2.1.0", + "is-stream": "^2.0.0", + "merge-stream": "^2.0.0", + "npm-run-path": "^4.0.1", + "onetime": "^5.1.2", + "signal-exit": "^3.0.3", + "strip-final-newline": "^2.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sindresorhus/execa?sponsor=1" + } + }, + "node_modules/fastest-levenshtein": { + "version": "1.0.16", + "resolved": "https://registry.npmjs.org/fastest-levenshtein/-/fastest-levenshtein-1.0.16.tgz", + "integrity": "sha512-eRnCtTTtGZFpQCwhJiUOuxPQWRXVKYDn0b2PeHfXL6/Zi53SLAzAHfVhVWK2AryC/WH05kGfxhFIPvTF0SXQzg==", + "engines": { + "node": ">= 4.9.1" + } + }, + "node_modules/file-type": { + "version": "16.5.4", + "resolved": "https://registry.npmjs.org/file-type/-/file-type-16.5.4.tgz", + "integrity": "sha512-/yFHK0aGjFEgDJjEKP0pWCplsPFPhwyfwevf/pVxiN0tmE4L9LmwWxWukdJSHdoCli4VgQLehjJtwQBnqmsKcw==", + "dependencies": { + "readable-web-to-node-stream": "^3.0.0", + "strtok3": "^6.2.4", + "token-types": "^4.1.1" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sindresorhus/file-type?sponsor=1" + } + }, + "node_modules/follow-redirects": { + "version": "1.15.2", + "resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.15.2.tgz", + "integrity": "sha512-VQLG33o04KaQ8uYi2tVNbdrWp1QWxNNea+nmIB4EVM28v0hmP17z7aG1+wAkNzVq4KeXTq3221ye5qTJP91JwA==", + "funding": [ + { + "type": "individual", + "url": "https://github.com/sponsors/RubenVerborgh" + } + ], + 
"engines": { + "node": ">=4.0" + }, + "peerDependenciesMeta": { + "debug": { + "optional": true + } + } + }, + "node_modules/form-data": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/form-data/-/form-data-4.0.0.tgz", + "integrity": "sha512-ETEklSGi5t0QMZuiXoA/Q6vcnxcLQP5vdugSpuAyi6SVGi2clPPp+xgEhuMaHC+zGgn31Kd235W35f7Hykkaww==", + "dependencies": { + "asynckit": "^0.4.0", + "combined-stream": "^1.0.8", + "mime-types": "^2.1.12" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/fresh": { + "version": "0.5.2", + "resolved": "https://registry.npmjs.org/fresh/-/fresh-0.5.2.tgz", + "integrity": "sha512-zJ2mQYM18rEFOudeV4GShTGIQ7RbzA7ozbU9I/XBpm7kqgMywgmylMwXHxZJmkVoYkna9d2pVXVXPdYTP9ej8Q==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/function-bind": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.1.tgz", + "integrity": "sha512-yIovAzMX49sF8Yl58fSCWJ5svSLuaibPxXQJFLmBObTuCr0Mf1KiPopGM9NiFjiYBCbfaa2Fh6breQ6ANVTI0A==" + }, + "node_modules/get-intrinsic": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.2.0.tgz", + "integrity": "sha512-L049y6nFOuom5wGyRc3/gdTLO94dySVKRACj1RmJZBQXlbTMhtNIgkWkUHq+jYmZvKf14EW1EoJnnjbmoHij0Q==", + "dependencies": { + "function-bind": "^1.1.1", + "has": "^1.0.3", + "has-symbols": "^1.0.3" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/get-registry": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/get-registry/-/get-registry-1.1.0.tgz", + "integrity": "sha512-D/0p/sxhBYQVVHJA7e8r47HSLeU7OTEfBxDR2v5AY9wU0l8esIsD6hbaAdVYRx8HVSQCgwgDHHxWT+rhoisnkQ==", + "dependencies": { + "which-pm-runs": "^1.1.0" + } + }, + "node_modules/get-stream": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-6.0.1.tgz", + "integrity": "sha512-ts6Wi+2j3jQjqi70w5AlN8DFnkSwC+MqmxEzdEALB2qXZYV3X/b1CTfgPLGJNMeAWxdPfU8FO1ms3NUfaHCPYg==", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/has": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/has/-/has-1.0.3.tgz", + "integrity": "sha512-f2dvO0VU6Oej7RkWJGrehjbzMAjFp5/VKPp5tTpWIV4JHHZK1/BxbFRtf/siA2SWTe09caDmVtYYzWEIbBS4zw==", + "dependencies": { + "function-bind": "^1.1.1" + }, + "engines": { + "node": ">= 0.4.0" + } + }, + "node_modules/has-flag": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", + "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "engines": { + "node": ">=8" + } + }, + "node_modules/has-symbols": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.0.3.tgz", + "integrity": "sha512-l3LCuF6MgDNwTDKkdYGEihYjt5pRPbEg46rtlmnSPlUbgmB8LOIrKJbYYFBSbnPaJexMKtiPO8hmeRjRz2Td+A==", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/has-tostringtag": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/has-tostringtag/-/has-tostringtag-1.0.0.tgz", + "integrity": "sha512-kFjcSNhnlGV1kyoGk7OXKSawH5JOb/LzUc5w9B02hOTO0dfFRjbHQKvg1d6cf3HbeUmtU9VbbV3qzZ2Teh97WQ==", + "dependencies": { + "has-symbols": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/http-assert": { + "version": "1.5.0", + 
"resolved": "https://registry.npmjs.org/http-assert/-/http-assert-1.5.0.tgz", + "integrity": "sha512-uPpH7OKX4H25hBmU6G1jWNaqJGpTXxey+YOUizJUAgu0AjLUeC8D73hTrhvDS5D+GJN1DN1+hhc/eF/wpxtp0w==", + "dependencies": { + "deep-equal": "~1.0.1", + "http-errors": "~1.8.0" + }, + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/http-errors": { + "version": "1.8.1", + "resolved": "https://registry.npmjs.org/http-errors/-/http-errors-1.8.1.tgz", + "integrity": "sha512-Kpk9Sm7NmI+RHhnj6OIWDI1d6fIoFAtFt9RLaTMRlg/8w49juAStsrBgp0Dp4OdxdVbRIeKhtCUvoi/RuAhO4g==", + "dependencies": { + "depd": "~1.1.2", + "inherits": "2.0.4", + "setprototypeof": "1.2.0", + "statuses": ">= 1.5.0 < 2", + "toidentifier": "1.0.1" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/http-proxy-agent": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/http-proxy-agent/-/http-proxy-agent-5.0.0.tgz", + "integrity": "sha512-n2hY8YdoRE1i7r6M0w9DIw5GgZN0G25P8zLCRQ8rjXtTU3vsNFBI/vWK/UIeE6g5MUUz6avwAPXmL6Fy9D/90w==", + "dependencies": { + "@tootallnate/once": "2", + "agent-base": "6", + "debug": "4" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/https-proxy-agent": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/https-proxy-agent/-/https-proxy-agent-5.0.1.tgz", + "integrity": "sha512-dFcAjpTQFgoLMzC2VwU+C/CbS7uRL0lWmxDITmqm7C+7F0Odmj6s9l6alZc6AELXhrnggM2CeWSXHGOdX2YtwA==", + "dependencies": { + "agent-base": "6", + "debug": "4" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/human-signals": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/human-signals/-/human-signals-2.1.0.tgz", + "integrity": "sha512-B4FFZ6q/T2jhhksgkbEW3HBvWIfDW85snkQgawt07S7J5QXTk6BkNV+0yAeZrM5QpMAdYlocGoljn0sJ/WQkFw==", + "engines": { + "node": ">=10.17.0" + } + }, + "node_modules/iconv-lite": { + "version": "0.4.24", + "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.4.24.tgz", + "integrity": "sha512-v3MXnZAcvnywkTUEZomIActle7RXXeedOR31wwl7VlyoXO4Qi9arvSenNQWne1TcRwhCL1HwLI21bEqdpj8/rA==", + "dependencies": { + "safer-buffer": ">= 2.1.2 < 3" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/ieee754": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/ieee754/-/ieee754-1.2.1.tgz", + "integrity": "sha512-dcyqhDvX1C46lXZcVqCpK+FtMRQVdIMN6/Df5js2zouUsqG7I6sFxitIC+7KYK29KdXOLHdu9zL4sFnoVQnqaA==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ] + }, + "node_modules/inaba": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/inaba/-/inaba-1.1.1.tgz", + "integrity": "sha512-VYgrcz9EwjHELNU74R/p81U/G00u8KuFzao43pyNp7UZix+NY78eUzBy1Ks0tSgxgia+luJMvTD67vV02pk9yg==" + }, + "node_modules/indent-string": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/indent-string/-/indent-string-4.0.0.tgz", + "integrity": "sha512-EdDDZu4A2OyIK7Lr/2zG+w5jmbuk1DVBnEwREQvBzspBJkCEbRa8GxU1lghYcaGJCnRWibjDXlq779X1/y5xwg==", + "engines": { + "node": ">=8" + } + }, + "node_modules/inflation": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/inflation/-/inflation-2.0.0.tgz", + "integrity": "sha512-m3xv4hJYR2oXw4o4Y5l6P5P16WYmazYof+el6Al3f+YlggGj6qT9kImBAnzDelRALnP5d3h4jGBPKzYCizjZZw==", + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/inherits": { + "version": "2.0.4", + "resolved": 
"https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz", + "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==" + }, + "node_modules/ip": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/ip/-/ip-2.0.0.tgz", + "integrity": "sha512-WKa+XuLG1A1R0UWhl2+1XQSi+fZWMsYKffMZTTYsiZaUD8k2yDAj5atimTUD2TZkyCkNEeYE5NhFZmupOGtjYQ==" + }, + "node_modules/is-docker": { + "version": "2.2.1", + "resolved": "https://registry.npmjs.org/is-docker/-/is-docker-2.2.1.tgz", + "integrity": "sha512-F+i2BKsFrH66iaUFc0woD8sLy8getkwTwtOBjvs56Cx4CgJDeKQeqfz8wAYiSb8JOprWhHH5p77PbmYCvvUuXQ==", + "bin": { + "is-docker": "cli.js" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/is-generator-function": { + "version": "1.0.10", + "resolved": "https://registry.npmjs.org/is-generator-function/-/is-generator-function-1.0.10.tgz", + "integrity": "sha512-jsEjy9l3yiXEQ+PsXdmBwEPcOxaXWLspKdplFUVI9vq1iZgIekeC0L167qeu86czQaxed3q/Uzuw0swL0irL8A==", + "dependencies": { + "has-tostringtag": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-stream": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-2.0.1.tgz", + "integrity": "sha512-hFoiJiTl63nn+kstHGBtewWSKnQLpyb155KHheA1l39uvtO9nWIop1p3udqPcUd/xbF1VLMO4n7OI6p7RbngDg==", + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/is-wsl": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/is-wsl/-/is-wsl-2.2.0.tgz", + "integrity": "sha512-fKzAra0rGJUUBwGBgNkHZuToZcn+TtXHpeCgmkMJMMYx1sQDYaCSyjJBSCa2nH1DGm7s3n1oBnohoVTBaN7Lww==", + "dependencies": { + "is-docker": "^2.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/isexe": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz", + "integrity": "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==" + }, + "node_modules/js-yaml": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.0.tgz", + "integrity": "sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==", + "dependencies": { + "argparse": "^2.0.1" + }, + "bin": { + "js-yaml": "bin/js-yaml.js" + } + }, + "node_modules/keygrip": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/keygrip/-/keygrip-1.1.0.tgz", + "integrity": "sha512-iYSchDJ+liQ8iwbSI2QqsQOvqv58eJCEanyJPJi+Khyu8smkcKSFUCbPwzFcL7YVtZ6eONjqRX/38caJ7QjRAQ==", + "dependencies": { + "tsscmp": "1.0.6" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/kleur": { + "version": "4.1.5", + "resolved": "https://registry.npmjs.org/kleur/-/kleur-4.1.5.tgz", + "integrity": "sha512-o+NO+8WrRiQEE4/7nwRJhN1HWpVmJm511pBHUxPLtp0BUISzlBplORYSmTclCnJvQq2tKu/sgl3xVpkc7ZWuQQ==", + "engines": { + "node": ">=6" + } + }, + "node_modules/koa": { + "version": "2.14.2", + "resolved": "https://registry.npmjs.org/koa/-/koa-2.14.2.tgz", + "integrity": "sha512-VFI2bpJaodz6P7x2uyLiX6RLYpZmOJqNmoCst/Yyd7hQlszyPwG/I9CQJ63nOtKSxpt5M7NH67V6nJL2BwCl7g==", + "dependencies": { + "accepts": "^1.3.5", + "cache-content-type": "^1.0.0", + "content-disposition": "~0.5.2", + "content-type": "^1.0.4", + "cookies": "~0.8.0", + "debug": "^4.3.2", + "delegates": "^1.0.0", + "depd": "^2.0.0", + "destroy": 
"^1.0.4", + "encodeurl": "^1.0.2", + "escape-html": "^1.0.3", + "fresh": "~0.5.2", + "http-assert": "^1.3.0", + "http-errors": "^1.6.3", + "is-generator-function": "^1.0.7", + "koa-compose": "^4.1.0", + "koa-convert": "^2.0.0", + "on-finished": "^2.3.0", + "only": "~0.0.2", + "parseurl": "^1.3.2", + "statuses": "^1.5.0", + "type-is": "^1.6.16", + "vary": "^1.1.2" + }, + "engines": { + "node": "^4.8.4 || ^6.10.1 || ^7.10.1 || >= 8.1.4" + } + }, + "node_modules/koa-bodyparser": { + "version": "4.4.0", + "resolved": "https://registry.npmjs.org/koa-bodyparser/-/koa-bodyparser-4.4.0.tgz", + "integrity": "sha512-AXPY7wwKZUmbgb8VkTEUFoRNOlx6aWRJwEnQD+zfNf33/7KSAkN4Oo9BqlIk80D+5TvuqlhpQT5dPVcyxl5Zsw==", + "dependencies": { + "co-body": "^6.0.0", + "copy-to": "^2.0.1" + }, + "engines": { + "node": ">=8.0.0" + } + }, + "node_modules/koa-compose": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/koa-compose/-/koa-compose-4.1.0.tgz", + "integrity": "sha512-8ODW8TrDuMYvXRwra/Kh7/rJo9BtOfPc6qO8eAfC80CnCvSjSl0bkRM24X6/XBBEyj0v1nRUQ1LyOy3dbqOWXw==" + }, + "node_modules/koa-convert": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/koa-convert/-/koa-convert-2.0.0.tgz", + "integrity": "sha512-asOvN6bFlSnxewce2e/DK3p4tltyfC4VM7ZwuTuepI7dEQVcvpyFuBcEARu1+Hxg8DIwytce2n7jrZtRlPrARA==", + "dependencies": { + "co": "^4.6.0", + "koa-compose": "^4.1.0" + }, + "engines": { + "node": ">= 10" + } + }, + "node_modules/koa/node_modules/depd": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/depd/-/depd-2.0.0.tgz", + "integrity": "sha512-g7nH6P6dyDioJogAAGprGpCtVImJhpPk/roCzdb3fIh61/s/nPsfR6onyMwkCAR/OlC3yBC0lESvUoQEAssIrw==", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/koishi": { + "version": "4.12.4", + "resolved": "https://registry.npmjs.org/koishi/-/koishi-4.12.4.tgz", + "integrity": "sha512-HKpUShf/n09nqawF2elEHMrB74FmBE9DpBq70Vi1K7f4UtaC4SbeltUgQh+R94HKeChOujzVFbgLkipjslYsMw==", + "dependencies": { + "@koishijs/core": "4.12.4", + "@koishijs/loader": "3.1.4", + "@koishijs/utils": "^7.0.1", + "@satorijs/satori": "^2.3.1", + "cac": "^6.7.14", + "kleur": "^4.1.5", + "ns-require": "^1.1.4" + }, + "bin": { + "koishi": "lib/cli/index.js" + }, + "engines": { + "node": ">=12.0.0" + } + }, + "node_modules/koishi-plugin-glm-bot": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/koishi-plugin-glm-bot/-/koishi-plugin-glm-bot-1.1.2.tgz", + "integrity": "sha512-H8pCz6m2U079LhEvHFBhb/SRKcWQTi9FWhDPsIuboXqdJiKhW7lP56avfvXWZWM5XVxhmCBcCN6+QSLk/jqHEw==", + "peerDependencies": { + "koishi": "^4.11.0" + } + }, + "node_modules/lru-cache": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-6.0.0.tgz", + "integrity": "sha512-Jo6dJ04CmSjuznwJSS3pUeWmd/H0ffTlkXXgwZi+eq1UCmqQwCh+eLsYOYCwY991i2Fah4h1BEMCx4qThGbsiA==", + "dependencies": { + "yallist": "^4.0.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/media-typer": { + "version": "0.3.0", + "resolved": "https://registry.npmjs.org/media-typer/-/media-typer-0.3.0.tgz", + "integrity": "sha512-dq+qelQ9akHpcOl/gUVRTxVIOkAJ1wR3QAvb4RsVjS8oVoFjDGTc679wJYmUmknUF5HwMLOgb5O+a3KxfWapPQ==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/merge-stream": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/merge-stream/-/merge-stream-2.0.0.tgz", + "integrity": "sha512-abv/qOcuPfk3URPfDzmZU1LKmuw8kT+0nIHvKrKgFrwifol/doWcdA4ZqsWQ8ENrFKkd67Mfpo/LovbIUsbt3w==" + }, + "node_modules/methods": { + "version": "1.1.2", + "resolved": 
"https://registry.npmjs.org/methods/-/methods-1.1.2.tgz", + "integrity": "sha512-iclAHeNqNm68zFtnZ0e+1L2yUIdvzNoauKU4WBA3VvH/vPFieF7qfRlwUZU+DA9P9bPXIS90ulxoUoCH23sV2w==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/mime-db": { + "version": "1.52.0", + "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz", + "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/mime-types": { + "version": "2.1.35", + "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz", + "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==", + "dependencies": { + "mime-db": "1.52.0" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/mimic-fn": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/mimic-fn/-/mimic-fn-2.1.0.tgz", + "integrity": "sha512-OqbOk5oEQeAZ8WXWydlu9HJjz9WVdEIvamMCcXmuqUYjTknH/sqsWvhQ3vgwKFRR1HpjvNBKQ37nbJgYzGqGcg==", + "engines": { + "node": ">=6" + } + }, + "node_modules/ms": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", + "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==" + }, + "node_modules/negotiator": { + "version": "0.6.3", + "resolved": "https://registry.npmjs.org/negotiator/-/negotiator-0.6.3.tgz", + "integrity": "sha512-+EUsqGPLsM+j/zdChZjsnX51g4XrHFOIXwfnCVPGlQk/k5giakcKsuxCObBRu6DSm9opw/O6slWbJdghQM4bBg==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/npm-run-path": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/npm-run-path/-/npm-run-path-4.0.1.tgz", + "integrity": "sha512-S48WzZW777zhNIrn7gxOlISNAqi9ZC/uQFnRdbeIHhZhCA6UqpkOT8T1G7BvfdgP4Er8gF4sUbaS0i7QvIfCWw==", + "dependencies": { + "path-key": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/ns-require": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/ns-require/-/ns-require-1.1.4.tgz", + "integrity": "sha512-Zk25pQj4u5i6DS0vaNO5aSSXewybVqqVVjz8AOxFy9DNPtmu3jlexMz6kUXLV2oB+X6iQeAnHXSzj5Qz/IeDaQ==" + }, + "node_modules/object-inspect": { + "version": "1.12.3", + "resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.12.3.tgz", + "integrity": "sha512-geUvdk7c+eizMNUDkRpW1wJwgfOiOeHbxBR/hLXK1aT6zmVSO0jsQcs7fj6MGw89jC/cjGfLcNOrtMYtGqm81g==", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/on-finished": { + "version": "2.4.1", + "resolved": "https://registry.npmjs.org/on-finished/-/on-finished-2.4.1.tgz", + "integrity": "sha512-oVlzkg3ENAhCk2zdv7IJwd/QUD4z2RxRwpkcGY8psCVcCYZNq4wYnVWALHM+brtuJjePWiYF/ClmuDr8Ch5+kg==", + "dependencies": { + "ee-first": "1.1.1" + }, + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/onetime": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/onetime/-/onetime-5.1.2.tgz", + "integrity": "sha512-kbpaSSGJTWdAY5KPVeMOKXSrPtr8C8C7wodJbcsd51jRnmD+GZu8Y0VoU6Dm5Z4vWr0Ig/1NKuWRKf7j5aaYSg==", + "dependencies": { + "mimic-fn": "^2.1.0" + }, + "engines": { + "node": ">=6" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/only": { + "version": "0.0.2", + "resolved": "https://registry.npmjs.org/only/-/only-0.0.2.tgz", + "integrity": "sha512-Fvw+Jemq5fjjyWz6CpKx6w9s7xxqo3+JCyM0WXWeCSOboZ8ABkyvP8ID4CZuChA/wxSx+XSJmdOm8rGVyJ1hdQ==" + }, + "node_modules/open": { + "version": "8.4.2", 
+ "resolved": "https://registry.npmjs.org/open/-/open-8.4.2.tgz", + "integrity": "sha512-7x81NCL719oNbsq/3mh+hVrAWmFuEYUqrq/Iw3kUzH8ReypT9QQ0BLoJS7/G9k6N81XjW4qHWtjWwe/9eLy1EQ==", + "dependencies": { + "define-lazy-prop": "^2.0.0", + "is-docker": "^2.1.1", + "is-wsl": "^2.2.0" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/p-map": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/p-map/-/p-map-4.0.0.tgz", + "integrity": "sha512-/bjOqmgETBYB5BoEeGVea8dmvHb2m9GLy1E9W43yeyfP6QQCZGFNa+XRceJEuDB6zqr+gKpIAmlLebMpykw/MQ==", + "dependencies": { + "aggregate-error": "^3.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/parseurl": { + "version": "1.3.3", + "resolved": "https://registry.npmjs.org/parseurl/-/parseurl-1.3.3.tgz", + "integrity": "sha512-CiyeOxFT/JZyN5m0z9PfXw4SCBJ6Sygz1Dpl0wqjlhDEGGBP1GnsUVEL0p63hoG1fcj3fHynXi9NYO4nWOL+qQ==", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/path-key": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz", + "integrity": "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==", + "engines": { + "node": ">=8" + } + }, + "node_modules/path-to-regexp": { + "version": "6.2.1", + "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-6.2.1.tgz", + "integrity": "sha512-JLyh7xT1kizaEvcaXOQwOc2/Yhw6KZOvPf1S8401UyLk86CU79LN3vl7ztXGm/pZ+YjoyAJ4rxmHwbkBXJX+yw==" + }, + "node_modules/peek-readable": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/peek-readable/-/peek-readable-4.1.0.tgz", + "integrity": "sha512-ZI3LnwUv5nOGbQzD9c2iDG6toheuXSZP5esSHBjopsXH4dg19soufvpUGA3uohi5anFtGb2lhAVdHzH6R/Evvg==", + "engines": { + "node": ">=8" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/Borewit" + } + }, + "node_modules/proxy-from-env": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/proxy-from-env/-/proxy-from-env-1.1.0.tgz", + "integrity": "sha512-D+zkORCbA9f1tdWRK0RaCR3GPv50cMxcrz4X8k5LTSUD1Dkw47mKJEZQNunItRTkWwgtaUSo1RVFRIG9ZXiFYg==" + }, + "node_modules/qface": { + "version": "1.4.1", + "resolved": "https://registry.npmjs.org/qface/-/qface-1.4.1.tgz", + "integrity": "sha512-52qX9qdiDFd53xnYAFitkXVldcSddd4ZQiFTV2IluM+2HdDiJph3CKtmPi7CTCA9QF7K2d2WUAH3E2Y4P6fEjQ==" + }, + "node_modules/qs": { + "version": "6.11.1", + "resolved": "https://registry.npmjs.org/qs/-/qs-6.11.1.tgz", + "integrity": "sha512-0wsrzgTz/kAVIeuxSjnpGC56rzYtr6JT/2BwEvMaPhFIoYa1aGO8LbzuU1R0uUYQkLpWBTOj0l/CLAJB64J6nQ==", + "dependencies": { + "side-channel": "^1.0.4" + }, + "engines": { + "node": ">=0.6" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/raw-body": { + "version": "2.5.2", + "resolved": "https://registry.npmjs.org/raw-body/-/raw-body-2.5.2.tgz", + "integrity": "sha512-8zGqypfENjCIqGhgXToC8aB2r7YrBX+AQAfIPs/Mlk+BtPTztOvTS01NRW/3Eh60J+a48lt8qsCzirQ6loCVfA==", + "dependencies": { + "bytes": "3.1.2", + "http-errors": "2.0.0", + "iconv-lite": "0.4.24", + "unpipe": "1.0.0" + }, + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/raw-body/node_modules/depd": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/depd/-/depd-2.0.0.tgz", + "integrity": "sha512-g7nH6P6dyDioJogAAGprGpCtVImJhpPk/roCzdb3fIh61/s/nPsfR6onyMwkCAR/OlC3yBC0lESvUoQEAssIrw==", + "engines": { + "node": ">= 
0.8" + } + }, + "node_modules/raw-body/node_modules/http-errors": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/http-errors/-/http-errors-2.0.0.tgz", + "integrity": "sha512-FtwrG/euBzaEjYeRqOgly7G0qviiXoJWnvEH2Z1plBdXgbyjv34pHTSb9zoeHMyDy33+DWy5Wt9Wo+TURtOYSQ==", + "dependencies": { + "depd": "2.0.0", + "inherits": "2.0.4", + "setprototypeof": "1.2.0", + "statuses": "2.0.1", + "toidentifier": "1.0.1" + }, + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/raw-body/node_modules/statuses": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/statuses/-/statuses-2.0.1.tgz", + "integrity": "sha512-RwNA9Z/7PrK06rYLIzFMlaF+l73iwpzsqRIFgbMLbTcLD6cOao82TaWefPXQvB2fOC4AjuYSEndS7N/mTCbkdQ==", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/readable-stream": { + "version": "3.6.2", + "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-3.6.2.tgz", + "integrity": "sha512-9u/sniCrY3D5WdsERHzHE4G2YCXqoG5FTHUiCC4SIbr6XcLZBY05ya9EKjYek9O5xOAwjGq+1JdGBAS7Q9ScoA==", + "dependencies": { + "inherits": "^2.0.3", + "string_decoder": "^1.1.1", + "util-deprecate": "^1.0.1" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/readable-web-to-node-stream": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/readable-web-to-node-stream/-/readable-web-to-node-stream-3.0.2.tgz", + "integrity": "sha512-ePeK6cc1EcKLEhJFt/AebMCLL+GgSKhuygrZ/GLaKZYEecIgIECf4UaUuaByiGtzckwR4ain9VzUh95T1exYGw==", + "dependencies": { + "readable-stream": "^3.6.0" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/Borewit" + } + }, + "node_modules/reggol": { + "version": "1.3.5", + "resolved": "https://registry.npmjs.org/reggol/-/reggol-1.3.5.tgz", + "integrity": "sha512-kzkzs4nhZeiphyh+amekq25/3PndZDq+5Yt8qCJqPSyMXPC1pkwhfYCQyJdXxoRz3/uqt0+VqHulagUCVY84vA==", + "dependencies": { + "cosmokit": "^1.4.0", + "object-inspect": "^1.12.2", + "supports-color": "^8.1.1" + } + }, + "node_modules/safe-buffer": { + "version": "5.2.1", + "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz", + "integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ] + }, + "node_modules/safer-buffer": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz", + "integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==" + }, + "node_modules/schemastery": { + "version": "3.7.2", + "resolved": "https://registry.npmjs.org/schemastery/-/schemastery-3.7.2.tgz", + "integrity": "sha512-CiDDwMQeNq7eusS4TSuo5bMfi/RORPXbBNMpd1s8a5I2ukD5NOpExNKfgHJSK6Sy4PcjY35luIw7bVo84dkPHw==", + "dependencies": { + "cosmokit": "^1.4.1" + } + }, + "node_modules/semver": { + "version": "7.4.0", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.4.0.tgz", + "integrity": "sha512-RgOxM8Mw+7Zus0+zcLEUn8+JfoLpj/huFTItQy2hsM4khuC1HYRDp0cU482Ewn/Fcy6bCjufD8vAj7voC66KQw==", + "dependencies": { + "lru-cache": "^6.0.0" + }, + "bin": { + "semver": "bin/semver.js" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/setprototypeof": { + "version": "1.2.0", + "resolved": 
"https://registry.npmjs.org/setprototypeof/-/setprototypeof-1.2.0.tgz", + "integrity": "sha512-E5LDX7Wrp85Kil5bhZv46j8jOeboKq5JMmYM3gVGdGH8xFpPWXUMsNrlODCrkoxMEeNi/XZIwuRvY4XNwYMJpw==" + }, + "node_modules/shebang-command": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz", + "integrity": "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==", + "dependencies": { + "shebang-regex": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/shebang-regex": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-3.0.0.tgz", + "integrity": "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==", + "engines": { + "node": ">=8" + } + }, + "node_modules/side-channel": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/side-channel/-/side-channel-1.0.4.tgz", + "integrity": "sha512-q5XPytqFEIKHkGdiMIrY10mvLRvnQh42/+GoBlFW3b2LXLE2xxJpZFdm94we0BaoV3RwJyGqg5wS7epxTv0Zvw==", + "dependencies": { + "call-bind": "^1.0.0", + "get-intrinsic": "^1.0.2", + "object-inspect": "^1.9.0" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/signal-exit": { + "version": "3.0.7", + "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-3.0.7.tgz", + "integrity": "sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ==" + }, + "node_modules/smart-buffer": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/smart-buffer/-/smart-buffer-4.2.0.tgz", + "integrity": "sha512-94hK0Hh8rPqQl2xXc3HsaBoOXKV20MToPkcXvwbISWLEs+64sBq5kFgn2kJDHb1Pry9yrP0dxrCI9RRci7RXKg==", + "engines": { + "node": ">= 6.0.0", + "npm": ">= 3.0.0" + } + }, + "node_modules/socks": { + "version": "2.7.1", + "resolved": "https://registry.npmjs.org/socks/-/socks-2.7.1.tgz", + "integrity": "sha512-7maUZy1N7uo6+WVEX6psASxtNlKaNVMlGQKkG/63nEDdLOWNbiUMoLK7X4uYoLhQstau72mLgfEWcXcwsaHbYQ==", + "dependencies": { + "ip": "^2.0.0", + "smart-buffer": "^4.2.0" + }, + "engines": { + "node": ">= 10.13.0", + "npm": ">= 3.0.0" + } + }, + "node_modules/socks-proxy-agent": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/socks-proxy-agent/-/socks-proxy-agent-5.0.1.tgz", + "integrity": "sha512-vZdmnjb9a2Tz6WEQVIurybSwElwPxMZaIc7PzqbJTrezcKNznv6giT7J7tZDZ1BojVaa1jvO/UiUdhDVB0ACoQ==", + "dependencies": { + "agent-base": "^6.0.2", + "debug": "4", + "socks": "^2.3.3" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/statuses": { + "version": "1.5.0", + "resolved": "https://registry.npmjs.org/statuses/-/statuses-1.5.0.tgz", + "integrity": "sha512-OpZ3zP+jT1PI7I8nemJX4AKmAX070ZkYPVWV/AaKTJl+tXCTGyVdC1a4SL8RUQYEwk/f34ZX8UTykN68FwrqAA==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/string_decoder": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.3.0.tgz", + "integrity": "sha512-hkRX8U1WjJFd8LsDJ2yQ/wWWxaopEsABU1XfkM8A+j0+85JAGppt16cr1Whg6KIbb4okU6Mql6BOj+uup/wKeA==", + "dependencies": { + "safe-buffer": "~5.2.0" + } + }, + "node_modules/strip-final-newline": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/strip-final-newline/-/strip-final-newline-2.0.0.tgz", + "integrity": "sha512-BrpvfNAE3dcvq7ll3xVumzjKjZQ5tI1sEUIKr3Uoks0XUl45St3FlatVqef9prk4jRDzhW6WZg+3bk93y6pLjA==", + "engines": { + "node": ">=6" + } + }, + "node_modules/strtok3": { + "version": "6.3.0", + 
"resolved": "https://registry.npmjs.org/strtok3/-/strtok3-6.3.0.tgz", + "integrity": "sha512-fZtbhtvI9I48xDSywd/somNqgUHl2L2cstmXCCif0itOf96jeW18MBSyrLuNicYQVkvpOxkZtkzujiTJ9LW5Jw==", + "dependencies": { + "@tokenizer/token": "^0.3.0", + "peek-readable": "^4.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/Borewit" + } + }, + "node_modules/supports-color": { + "version": "8.1.1", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-8.1.1.tgz", + "integrity": "sha512-MpUEN2OodtUzxvKQl72cUF7RQ5EiHsGvSsVG0ia9c5RbWGL2CI4C7EpPS8UTBIplnlzZiNuV56w+FuNxy3ty2Q==", + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/supports-color?sponsor=1" + } + }, + "node_modules/throttle-debounce": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/throttle-debounce/-/throttle-debounce-3.0.1.tgz", + "integrity": "sha512-dTEWWNu6JmeVXY0ZYoPuH5cRIwc0MeGbJwah9KUNYSJwommQpCzTySTpEe8Gs1J23aeWEuAobe4Ag7EHVt/LOg==", + "engines": { + "node": ">=10" + } + }, + "node_modules/toidentifier": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/toidentifier/-/toidentifier-1.0.1.tgz", + "integrity": "sha512-o5sSPKEkg/DIQNmH43V0/uerLrpzVedkUh8tGNvaeXpfpuwjKenlSox/2O/BTlZUtEe+JG7s5YhEz608PlAHRA==", + "engines": { + "node": ">=0.6" + } + }, + "node_modules/token-types": { + "version": "4.2.1", + "resolved": "https://registry.npmjs.org/token-types/-/token-types-4.2.1.tgz", + "integrity": "sha512-6udB24Q737UD/SDsKAHI9FCRP7Bqc9D/MQUV02ORQg5iskjtLJlZJNdN4kKtcdtwCeWIwIHDGaUsTsCCAa8sFQ==", + "dependencies": { + "@tokenizer/token": "^0.3.0", + "ieee754": "^1.2.1" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/Borewit" + } + }, + "node_modules/tsscmp": { + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/tsscmp/-/tsscmp-1.0.6.tgz", + "integrity": "sha512-LxhtAkPDTkVCMQjt2h6eBVY28KCjikZqZfMcC15YBeNjkgUpdCfBu5HoiOTDu86v6smE8yOjyEktJ8hlbANHQA==", + "engines": { + "node": ">=0.6.x" + } + }, + "node_modules/type-is": { + "version": "1.6.18", + "resolved": "https://registry.npmjs.org/type-is/-/type-is-1.6.18.tgz", + "integrity": "sha512-TkRKr9sUTxEH8MdfuCSP7VizJyzRNMjj2J2do2Jr3Kym598JVdEksuzPQCnlFPW4ky9Q+iA+ma9BGm06XQBy8g==", + "dependencies": { + "media-typer": "0.3.0", + "mime-types": "~2.1.24" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/unpipe": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/unpipe/-/unpipe-1.0.0.tgz", + "integrity": "sha512-pjy2bYhSsufwWlKwPc+l3cN7+wuJlK6uz0YdJEOlQDbl6jo/YlPi4mb8agUkVC8BF7V8NuzeyPNqRksA3hztKQ==", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/util-deprecate": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz", + "integrity": "sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==" + }, + "node_modules/uuid": { + "version": "8.3.2", + "resolved": "https://registry.npmjs.org/uuid/-/uuid-8.3.2.tgz", + "integrity": "sha512-+NYs2QeMWy+GWFOEm9xnn6HCDp0l7QBD7ml8zLUmJ+93Q5NF0NocErnwkTkXVFNiX3/fpC6afS8Dhb/gz7R7eg==", + "bin": { + "uuid": "dist/bin/uuid" + } + }, + "node_modules/vary": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/vary/-/vary-1.1.2.tgz", + "integrity": "sha512-BNGbWLfd0eUPabhkXUVm0j8uuvREyTh5ovRa/dyow/BqAbZJyC+5fU+IzQOzmAKzYqYRAISoRhdQr3eIZ/PXqg==", + 
"engines": { + "node": ">= 0.8" + } + }, + "node_modules/which": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz", + "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==", + "dependencies": { + "isexe": "^2.0.0" + }, + "bin": { + "node-which": "bin/node-which" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/which-pm-runs": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/which-pm-runs/-/which-pm-runs-1.1.0.tgz", + "integrity": "sha512-n1brCuqClxfFfq/Rb0ICg9giSZqCS+pLtccdag6C2HyufBrh3fBOiy9nb6ggRMvWOVH5GrdJskj5iGTZNxd7SA==", + "engines": { + "node": ">=4" + } + }, + "node_modules/ws": { + "version": "8.13.0", + "resolved": "https://registry.npmjs.org/ws/-/ws-8.13.0.tgz", + "integrity": "sha512-x9vcZYTrFPC7aSIbj7sRCYo7L/Xb8Iy+pW0ng0wt2vCJv7M9HOMy0UoN3rr+IFC7hb7vXoqS+P9ktyLLLhO+LA==", + "engines": { + "node": ">=10.0.0" + }, + "peerDependencies": { + "bufferutil": "^4.0.1", + "utf-8-validate": ">=5.0.2" + }, + "peerDependenciesMeta": { + "bufferutil": { + "optional": true + }, + "utf-8-validate": { + "optional": true + } + } + }, + "node_modules/yallist": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz", + "integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==" + }, + "node_modules/ylru": { + "version": "1.3.2", + "resolved": "https://registry.npmjs.org/ylru/-/ylru-1.3.2.tgz", + "integrity": "sha512-RXRJzMiK6U2ye0BlGGZnmpwJDPgakn6aNQ0A7gHRbD4I0uvK4TW6UqkK1V0pp9jskjJBAXd3dRrbzWkqJ+6cxA==", + "engines": { + "node": ">= 4.0.0" + } + } + } +} diff --git a/glm-bot/package.json b/glm-bot/package.json new file mode 100644 index 0000000..de6be80 --- /dev/null +++ b/glm-bot/package.json @@ -0,0 +1,20 @@ +{ + "name": "glm-bot", + "version": "1.0.0", + "description": "glm-koishi机器人", + "main": "index.js", + "scripts": { + "test": "echo \"Error: no test specified\" && exit 1" + }, + "author": "", + "license": "MIT", + "dependencies": { + "@koishijs/plugin-adapter-onebot": "^5.6.6", + "@koishijs/plugin-console": "^5.6.1", + "@koishijs/plugin-echo": "^2.2.3", + "@koishijs/plugin-market": "^1.12.5", + "@koishijs/plugin-sandbox": "^3.0.0", + "koishi": "^4.12.4", + "koishi-plugin-glm-bot": "^1.1.2" + } +} From 7607cfe5858292d06cb360f10f19f74eb7e8f73a Mon Sep 17 00:00:00 2001 From: duzx16 Date: Wed, 12 Apr 2023 23:41:37 +0800 Subject: [PATCH 096/110] Fix turn_idx in eval --- ptuning/main.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ptuning/main.py b/ptuning/main.py index b027e9e..6328eac 100644 --- a/ptuning/main.py +++ b/ptuning/main.py @@ -166,8 +166,8 @@ def main(): else: prompt = "" history = examples[history_column][i] - for i, (old_query, response) in enumerate(history): - prompt += "[Round {}]\n问:{}\n答:{}\n".format(i, old_query, response) + for turn_idx, (old_query, response) in enumerate(history): + prompt += "[Round {}]\n问:{}\n答:{}\n".format(turn_idx, old_query, response) prompt += "[Round {}]\n问:{}\n答:".format(len(history), query) inputs.append(prompt) targets.append(examples[response_column][i]) From 70e015654c56b20cdce2f6637dae75ccfea0a7b8 Mon Sep 17 00:00:00 2001 From: duzx16 Date: Wed, 12 Apr 2023 23:54:24 +0800 Subject: [PATCH 097/110] Remove qe model --- README.md | 5 ----- 1 file changed, 5 deletions(-) diff --git a/README.md b/README.md index 5c25575..baf7d97 100644 --- a/README.md +++ b/README.md @@ -150,11 +150,6 @@ model = 
AutoModel.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True).qu model = AutoModel.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True).half().cuda() ``` -我们进一步提供了对Embedding量化后的模型,模型参数仅占用4.3 GB显存: -```python -model = AutoModel.from_pretrained("THUDM/chatglm-6b-int4-qe", trust_remote_code=True).half().cuda() -``` - ### CPU 部署 如果你没有 GPU 硬件的话,也可以在 CPU 上进行推理,但是推理速度会更慢。使用方法如下(需要大概 32GB 内存) ```python From 7410cacd911dd73c94b8e68932ffc50ea4f6acc3 Mon Sep 17 00:00:00 2001 From: duzx16 Date: Wed, 12 Apr 2023 23:54:55 +0800 Subject: [PATCH 098/110] Remove qe model --- README_en.md | 5 ----- 1 file changed, 5 deletions(-) diff --git a/README_en.md b/README_en.md index 1a56c39..632a22a 100644 --- a/README_en.md +++ b/README_en.md @@ -140,11 +140,6 @@ Model quantization brings a certain performance decline. After testing, ChatGLM- model = AutoModel.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True).half().cuda() ``` -**[2023/03/24]** We further provide an embedding-quantized model whose model parameters only cost 4.3GB GPU memory -```python -model = AutoModel.from_pretrained("THUDM/chatglm-6b-int4-qe", trust_remote_code=True).half().cuda() -``` - ### CPU Deployment If your computer is not equipped with GPU, you can also conduct inference on CPU, but the inference speed is slow (and taking about 32GB of memory): From 1c2184b8e7f480333b8b0eeac29c0a3cc2143e53 Mon Sep 17 00:00:00 2001 From: initialencounter <2911583893@qq.com> Date: Thu, 13 Apr 2023 00:38:55 +0800 Subject: [PATCH 099/110] =?UTF-8?q?=E6=B7=BB=E5=8A=A0=E5=8F=8B=E6=83=85?= =?UTF-8?q?=E9=93=BE=E6=8E=A5?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- PROJECT.md | 1 + glm-bot/.gitignore | 14 - glm-bot/README.md | 76 -- glm-bot/fastapi.py | 82 -- glm-bot/flask.py | 48 - glm-bot/glm-bot/package.json | 33 - glm-bot/glm-bot/readme.md | 5 - glm-bot/index.ts | 38 - glm-bot/package-lock.json | 1749 ---------------------------------- glm-bot/package.json | 20 - 10 files changed, 1 insertion(+), 2065 deletions(-) delete mode 100644 glm-bot/.gitignore delete mode 100644 glm-bot/README.md delete mode 100644 glm-bot/fastapi.py delete mode 100644 glm-bot/flask.py delete mode 100644 glm-bot/glm-bot/package.json delete mode 100644 glm-bot/glm-bot/readme.md delete mode 100644 glm-bot/index.ts delete mode 100644 glm-bot/package-lock.json delete mode 100644 glm-bot/package.json diff --git a/PROJECT.md b/PROJECT.md index 59f34ff..ce9a91c 100644 --- a/PROJECT.md +++ b/PROJECT.md @@ -11,6 +11,7 @@ * [ChatGLM-Finetuning](https://github.com/liucongg/ChatGLM-Finetuning):基于ChatGLM-6B模型,进行下游具体任务微调,涉及Freeze、Lora、P-tuning等,并进行实验效果对比。 * [InstructGLM](https://github.com/yanqiangmiffy/InstructGLM):基于ChatGLM-6B进行指令学习,汇总开源中英文指令数据,基于Lora进行指令数据微调,开放了Alpaca、Belle微调后的Lora权重,修复web_demo重复问题 * [ChatGLM-web](https://github.com/NCZkevin/chatglm-web):基于FastAPI和Vue3搭建的ChatGLM演示网站(支持chatglm流式输出、前端调整模型参数、上下文选择、保存图片、知识库问答等功能) +* [glm-bot](https://github.com/initialencounter/glm-bot):将ChatGLM接入Koishi可在各大聊天平台上调用ChatGLM 以下是部分针对本项目的教程/文档: * [Windows部署文档](https://github.com/ZhangErling/ChatGLM-6B/blob/main/deployment_windows.md) diff --git a/glm-bot/.gitignore b/glm-bot/.gitignore deleted file mode 100644 index 72df230..0000000 --- a/glm-bot/.gitignore +++ /dev/null @@ -1,14 +0,0 @@ -node_modules -npm-debug.log -yarn-debug.log -yarn-error.log -tsconfig.tsbuildinfo - -.eslintcache -.DS_Store -.idea -.vscode -*.suo -*.ntvs* -*.njsproj -*.sln diff --git a/glm-bot/README.md b/glm-bot/README.md deleted file mode 100644 
index d9817f6..0000000 --- a/glm-bot/README.md +++ /dev/null @@ -1,76 +0,0 @@ -# glm-bot - -基于koishi框架的qq聊天机器人 - - -## 环境依赖 - -* nodejs14以上版本 -* gocqhttp - -## 使用方法 -* 1.启动接口 -``` -python fastapi.py -``` - -如果启动的是flask.py -则需要在index.ts文件中将 -``` -// 启用glm-bot -ctx.plugin(glm_bot,{ - type: 'fastapi', - myServerUrl: 'http://wx.blockelite.cn:10269/chatglm', - publicUrl: 'http://127.0.0.1:10269/chat', - send_glmmtg_response: true, - prefix: '', - defaultText: '', - output: 'quote' -}) -``` - -修改为 -``` -// 启用glm-bot -ctx.plugin(glm_bot,{ - type: 'flaskapi', - myServerUrl: 'http://wx.blockelite.cn:10269/chatglm', - publicUrl: 'http://127.0.0.1:10269/chat', - send_glmmtg_response: true, - prefix: '', - defaultText: '', - output: 'quote' -}) -``` - -* 2.启动[go-cqhttp](https://github.com/Mrs4s/go-cqhttp)并开启正向ws服务 - -* 2-1配置onebot -将index.ts中的 -``` -endpoint: 'ws://127.0.0.1:32333' -``` -修改为go-cqhttp的正向ws服务地址 - -* 3.安装[koishi](https://koishi.chat)依赖 - -``` -cd glm-bot && npm i -``` - - -* 4.启动机器人 -``` -node -r esbuild-register . -``` - -## 感谢 -* [koishi](https://koishi.chat) - - -* [go-cqhttp](https://github.com/Mrs4s/go-cqhttp) - - -* [glm-bot](https://github.com/wochenlong/glm-bot) - -* [t4wefan](https://github.com/t4wefan/ChatGLM-6B-with-flask-api) \ No newline at end of file diff --git a/glm-bot/fastapi.py b/glm-bot/fastapi.py deleted file mode 100644 index 54163fe..0000000 --- a/glm-bot/fastapi.py +++ /dev/null @@ -1,82 +0,0 @@ -from fastapi import FastAPI -from pydantic import BaseModel -import uvicorn -import json -from transformers import AutoModel, AutoTokenizer -from typing import List,Tuple - - -max_length = 4096 -# 根据id获取上下文信息 -def get_history(id: str) -> List[Tuple[str,str]] or None: - if id in sessions.keys(): - length = len(json.dumps(sessions[id],indent=2)) - if length>max_length: - sessions[id] = [] - return None - if sessions[id] == []: - return None - return sessions[id] - else: - sessions[id] = [] - return None - -# 根据id清空上下文 - - -def clear(id: str) -> str: - sessions[id] = [] - return '已重置' - - - - -tokenizer = AutoTokenizer.from_pretrained( - "THUDM/chatglm-6b", trust_remote_code=True) -model = AutoModel.from_pretrained( - "THUDM/chatglm-6b", trust_remote_code=True).half().cuda() -model = model.eval() - -MAX_TURNS = 20 -MAX_BOXES = MAX_TURNS * 2 - -sessions = {} - - -def predict(prompt: str, uid: str, max_length: int = 2048, top_p: float = 0.7, temperature: float = 0.95) -> str: - history = get_history(uid) - print(history) - response, history = model.chat(tokenizer, prompt, history=history, max_length=max_length, top_p=top_p, - temperature=temperature) - sessions[uid].append((prompt, response)) - print(get_history(uid)) - return response - -# while 1: -# uid = input("uid:") -# prompt = input('msg:') -# msg = predict(prompt=prompt,uid = uid) -# print(msg) - - -app = FastAPI() - - -class Item_chat(BaseModel): - msg: str - uid: str -@app.post("/chat") -def chat(item:Item_chat): - msg = predict(prompt=item.msg, uid=item.uid) - print(msg) - return msg - - -class Item_claer(BaseModel): - uid: str -@app.post("/clear") -def clear_session(item:Item_claer): - return clear(item.uid) - - -uvicorn.run(app, host="0.0.0.0", port=10269) diff --git a/glm-bot/flask.py b/glm-bot/flask.py deleted file mode 100644 index a5fc6a0..0000000 --- a/glm-bot/flask.py +++ /dev/null @@ -1,48 +0,0 @@ -import os -import platform -from transformers import AutoTokenizer, AutoModel - -from flask import Flask, request - -tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) -model = 
AutoModel.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True).half().cuda() -def prepare_model(): - global model - model = model.eval() - -prepare_model() -model = model.eval() -preset = [] -port = 7860 -os_name = platform.system() -app = Flask(__name__) -history = {} -@app.route('/chatglm', methods=["GET"]) -def delete_msg(): - global history - query = request.args.get('msg') - usrid = request.args.get('usrid') - source = request.args.get('source') - if query == None: - return '请提供内容' - if query == 'ping': - return 'pong!服务端运行正常!' - if source == None: - return '无来源的请求,请更新插件' - if usrid == None: - return '请提供用户id' - if not usrid in history: - history[usrid] = preset - print(f"usrid:{usrid},content:{query}") - if query == "clear": - history[usrid] = preset - - print(f"usrid:{usrid},清空历史") - return '已重置当前对话' - response, history[usrid] = model.chat(tokenizer, query, history=history[usrid]) - print(f"ChatGLM-6B:{response}") - return response - -if __name__ == '__main__': - print(f"欢迎使用 ChatGLM-6B API,可通过发送GET请求到http://127.0.0.1:{port}/chatglm来调用。") - app.run(host='0.0.0.0', port=port) diff --git a/glm-bot/glm-bot/package.json b/glm-bot/glm-bot/package.json deleted file mode 100644 index 6f5f39f..0000000 --- a/glm-bot/glm-bot/package.json +++ /dev/null @@ -1,33 +0,0 @@ -{ - "name": "koishi-plugin-glm-bot", - "description": "glm-testbot的稳定版,支持自建后端", - "version": "1.0.3", - "main": "lib/index.js", - "typings": "lib/index.d.ts", - "files": [ - "lib", - "dist" - ], - "license": "MIT", - "scripts": {}, - "keywords": [ - "chatbot", - "koishi", - "plugin" - ], - "peerDependencies": { - "koishi": "4.12.0" - }, - "koishi": { - "description": "glm插件", - "browser": true, - "service": { - "optional": [ - "puppeteer" - ], - "implements": [ - "glm" - ] - } - } -} diff --git a/glm-bot/glm-bot/readme.md b/glm-bot/glm-bot/readme.md deleted file mode 100644 index 82934e8..0000000 --- a/glm-bot/glm-bot/readme.md +++ /dev/null @@ -1,5 +0,0 @@ -# koishi-plugin-glm-bot - -[![npm](https://img.shields.io/npm/v/koishi-plugin-glm-bot?style=flat-square)](https://www.npmjs.com/package/koishi-plugin-glm-bot) - -glm-bot的稳定版 diff --git a/glm-bot/index.ts b/glm-bot/index.ts deleted file mode 100644 index 0f5bf63..0000000 --- a/glm-bot/index.ts +++ /dev/null @@ -1,38 +0,0 @@ -import { Context } from 'koishi' -import console from '@koishijs/plugin-console' -import * as sandbox from '@koishijs/plugin-sandbox' -import * as echo from '@koishijs/plugin-echo' - -import onebot from '@koishijs/plugin-adapter-onebot' - -import glm_bot from './glm-bot' - -// 创建一个 Koishi 应用 -const ctx = new Context({ - port: 5140, -}) -// 使用 OneBot 适配器的机器人 -ctx.plugin(onebot, { - protocol: 'ws', - selfId: '3111720341', - endpoint: 'ws://127.0.0.1:32333', - }) - -// 启用上述插件 -ctx.plugin(console) // 提供控制台 -ctx.plugin(sandbox) // 提供调试沙盒 -ctx.plugin(echo) // 提供回声指令 - -// 启用glm-bot -ctx.plugin(glm_bot,{ - type: 'fastapi', - myServerUrl: 'http://wx.blockelite.cn:10269/chatglm', - publicUrl: 'http://127.0.0.1:10269/chat', - send_glmmtg_response: true, - prefix: '', - defaultText: '', - output: 'quote' -}) - -// 启动应用 -ctx.start() \ No newline at end of file diff --git a/glm-bot/package-lock.json b/glm-bot/package-lock.json deleted file mode 100644 index 57d69f8..0000000 --- a/glm-bot/package-lock.json +++ /dev/null @@ -1,1749 +0,0 @@ -{ - "name": "glm-bot", - "version": "1.0.0", - "lockfileVersion": 3, - "requires": true, - "packages": { - "": { - "name": "glm-bot", - "version": "1.0.0", - "license": "MIT", - "dependencies": { - 
"@koishijs/plugin-adapter-onebot": "^5.6.6", - "@koishijs/plugin-console": "^5.6.1", - "@koishijs/plugin-echo": "^2.2.3", - "@koishijs/plugin-market": "^1.12.5", - "@koishijs/plugin-sandbox": "^3.0.0", - "koishi": "^4.12.4", - "koishi-plugin-glm-bot": "^1.1.2" - } - }, - "node_modules/@koa/router": { - "version": "10.1.1", - "resolved": "https://registry.npmjs.org/@koa/router/-/router-10.1.1.tgz", - "integrity": "sha512-ORNjq5z4EmQPriKbR0ER3k4Gh7YGNhWDL7JBW+8wXDrHLbWYKYSJaOJ9aN06npF5tbTxe2JBOsurpJDAvjiXKw==", - "dependencies": { - "debug": "^4.1.1", - "http-errors": "^1.7.3", - "koa-compose": "^4.1.0", - "methods": "^1.1.2", - "path-to-regexp": "^6.1.0" - }, - "engines": { - "node": ">= 8.0.0" - } - }, - "node_modules/@koishijs/core": { - "version": "4.12.4", - "resolved": "https://registry.npmjs.org/@koishijs/core/-/core-4.12.4.tgz", - "integrity": "sha512-+KTCvqr1M3cZnW8HOF359CUI4JVAEzW8YDexCRWflMlCDHtgzQSgBXIcujHhVbn1uVLrhowhspNvNI9kI+LIgg==", - "dependencies": { - "@koishijs/utils": "^7.0.1", - "@minatojs/core": "^2.3.1", - "@satorijs/core": "^2.3.1", - "cordis": "^2.7.4", - "cosmokit": "^1.4.1", - "fastest-levenshtein": "^1.0.16" - } - }, - "node_modules/@koishijs/loader": { - "version": "3.1.4", - "resolved": "https://registry.npmjs.org/@koishijs/loader/-/loader-3.1.4.tgz", - "integrity": "sha512-jhcMaKAHf/9gpsd9gZEBZIws888YOcGqyR6iMfL6r4+66pXbERFQx/WlZTVuA0x4UTtvJtfvQI6n1aviNUA7AA==", - "dependencies": { - "dotenv": "^16.0.3", - "js-yaml": "^4.1.0", - "ns-require": "^1.1.4" - }, - "peerDependencies": { - "@koishijs/core": "4.12.4" - } - }, - "node_modules/@koishijs/plugin-adapter-onebot": { - "version": "5.6.6", - "resolved": "https://registry.npmjs.org/@koishijs/plugin-adapter-onebot/-/plugin-adapter-onebot-5.6.6.tgz", - "integrity": "sha512-tbMvrEmL32k4j7AUcx97VRVcXypkUUo8zPJvdITnv3v49/bLT1qafKgrjw+E3rkb61MKFHzBJvNN8aPciG+sXg==", - "dependencies": { - "@satorijs/adapter-onebot": "^5.6.6" - }, - "peerDependencies": { - "koishi": "^4.12.4" - } - }, - "node_modules/@koishijs/plugin-console": { - "version": "5.6.1", - "resolved": "https://registry.npmjs.org/@koishijs/plugin-console/-/plugin-console-5.6.1.tgz", - "integrity": "sha512-Vk8JY9wgtLqjftrbeQXwPbbR5y9i8b9Ob38MO9yjSrliNMxUKZZxNjy2CaWUL6gMgqMtrR22C4TgpyswZwquNg==", - "dependencies": { - "open": "^8.4.2", - "uuid": "^8.3.2", - "ws": "^8.13.0" - }, - "peerDependencies": { - "koishi": "^4.12.3" - } - }, - "node_modules/@koishijs/plugin-echo": { - "version": "2.2.3", - "resolved": "https://registry.npmjs.org/@koishijs/plugin-echo/-/plugin-echo-2.2.3.tgz", - "integrity": "sha512-wt2Ax4pWjl9D21yW2V72YIxVQfOXsU1DizaQ8Qzop7ZxnD2AwQBgvD30vqA3bpJ0aUnsa+LsRwJYawsY+GCV7w==", - "peerDependencies": { - "koishi": "^4.12.3" - } - }, - "node_modules/@koishijs/plugin-market": { - "version": "1.12.5", - "resolved": "https://registry.npmjs.org/@koishijs/plugin-market/-/plugin-market-1.12.5.tgz", - "integrity": "sha512-sc9m7GeQ6eolbE+wF7eLoD3RkUXQRLpVoKS65hfNuKhmDGqVeSkTHB9gEwtyMqX4P8y9uhuYquVskwy/dipF5Q==", - "dependencies": { - "@koishijs/registry": "^4.6.0", - "execa": "^5.1.1", - "get-registry": "^1.1.0", - "ns-require": "^1.1.4", - "semver": "^7.3.8", - "throttle-debounce": "^3.0.1", - "which-pm-runs": "^1.1.0" - }, - "peerDependencies": { - "@koishijs/plugin-console": "^5.5.7", - "koishi": "^4.12.3" - } - }, - "node_modules/@koishijs/plugin-sandbox": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/@koishijs/plugin-sandbox/-/plugin-sandbox-3.0.0.tgz", - "integrity": 
"sha512-4ZcsIEuCTHkD7XigQaCAj2BX5ZiWqHyeIx8mFgXIh3oB+1CEaidmmukFbpqnbk04Tc9sOaqANGSEiaxUwHrfRQ==", - "peerDependencies": { - "@koishijs/plugin-console": "^5.6.1", - "koishi": "^4.12.3" - } - }, - "node_modules/@koishijs/registry": { - "version": "4.6.0", - "resolved": "https://registry.npmjs.org/@koishijs/registry/-/registry-4.6.0.tgz", - "integrity": "sha512-Sn2uTjT0qm18rxec1D66d0xU6YjpOAXTmmDytpz7aoguRS8WdZaAtK4jtSMKo2AXmowM3sQMM4LvYkZsqkhrTw==", - "dependencies": { - "cosmokit": "^1.4.0", - "p-map": "^4.0.0", - "semver": "^7.3.8" - } - }, - "node_modules/@koishijs/utils": { - "version": "7.0.1", - "resolved": "https://registry.npmjs.org/@koishijs/utils/-/utils-7.0.1.tgz", - "integrity": "sha512-in0vSfgmsjURf8aw+Wagve5thVXGpZRawYZiN9/w0AElPZ0Y/nQtGDvI0UMRcjubizb/mZgO7j0FcdYMm24dqg==", - "dependencies": { - "cosmokit": "^1.4.1", - "inaba": "^1.1.1" - } - }, - "node_modules/@minatojs/core": { - "version": "2.3.1", - "resolved": "https://registry.npmjs.org/@minatojs/core/-/core-2.3.1.tgz", - "integrity": "sha512-3q37agKnVq9QHrJozw3ACk/Gt+i1xDY/o7363kFJmjQSxQfaQg5dHT0NqjwVxe4AyqYpTWh3487IVyEAAMe9kw==", - "dependencies": { - "cosmokit": "^1.4.1" - } - }, - "node_modules/@satorijs/adapter-onebot": { - "version": "5.6.6", - "resolved": "https://registry.npmjs.org/@satorijs/adapter-onebot/-/adapter-onebot-5.6.6.tgz", - "integrity": "sha512-Plb4fhXvWMOTCj4OVTX2pDF4MKszh3Mq5lGauGI+/eby2m/hW22TFLe6K4Mnpa8QupSoqpGEgB2cofk3bDUJxg==", - "dependencies": { - "qface": "^1.4.0" - }, - "peerDependencies": { - "@satorijs/satori": "^2.3.1" - } - }, - "node_modules/@satorijs/core": { - "version": "2.3.1", - "resolved": "https://registry.npmjs.org/@satorijs/core/-/core-2.3.1.tgz", - "integrity": "sha512-IAEPN2GW5m6ZEQIRA1uZlQIiq5VHQFvY+rMbZGjdtbX3bioIhtIBtwUseilUW/BtSl3XrjDgOVRuziGxQeyXwg==", - "dependencies": { - "@satorijs/element": "^2.3.6", - "cordis": "^2.7.4", - "cordis-axios": "^3.1.1", - "cosmokit": "^1.4.1", - "reggol": "^1.3.5", - "schemastery": "^3.7.2", - "ws": "^8.13.0" - } - }, - "node_modules/@satorijs/element": { - "version": "2.3.6", - "resolved": "https://registry.npmjs.org/@satorijs/element/-/element-2.3.6.tgz", - "integrity": "sha512-xys5HsclvKvQ3ysO88c85/9F1xdDIpsVOQ6tlPCeL/HnTKtc4wWzAp8Tu4YlIlBujzsvMkRohVrr89u1Ck0+sA==", - "dependencies": { - "cosmokit": "^1.4.1" - } - }, - "node_modules/@satorijs/satori": { - "version": "2.3.1", - "resolved": "https://registry.npmjs.org/@satorijs/satori/-/satori-2.3.1.tgz", - "integrity": "sha512-wsToUdPRzCWgqZcFHeeaA8uyvqPCVSSsVWfNPP+hY9pEJoB/Pe3vREYHBemzy0UJ3o29S08z05pt81AbeJxOTQ==", - "dependencies": { - "@koa/router": "^10.1.1", - "@satorijs/core": "2.3.1", - "@types/koa": "*", - "@types/koa__router": "*", - "@types/ws": "^8.5.4", - "file-type": "^16.5.4", - "http-proxy-agent": "^5.0.0", - "https-proxy-agent": "^5.0.1", - "koa": "^2.14.1", - "koa-bodyparser": "^4.4.0", - "parseurl": "^1.3.3", - "path-to-regexp": "^6.2.1", - "socks-proxy-agent": "^5.0.1", - "ws": "^8.13.0" - }, - "engines": { - "node": ">=12.0.0" - } - }, - "node_modules/@tokenizer/token": { - "version": "0.3.0", - "resolved": "https://registry.npmjs.org/@tokenizer/token/-/token-0.3.0.tgz", - "integrity": "sha512-OvjF+z51L3ov0OyAU0duzsYuvO01PH7x4t6DJx+guahgTnBHkhJdG7soQeTSFLWN3efnHyibZ4Z8l2EuWwJN3A==" - }, - "node_modules/@tootallnate/once": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/@tootallnate/once/-/once-2.0.0.tgz", - "integrity": "sha512-XCuKFP5PS55gnMVu3dty8KPatLqUoy/ZYzDzAGCQ8JNFCkLXzmI7vNHCR+XpbZaMWQK/vQubr7PkYq8g470J/A==", - "engines": { - "node": ">= 
10" - } - }, - "node_modules/@types/accepts": { - "version": "1.3.5", - "resolved": "https://registry.npmjs.org/@types/accepts/-/accepts-1.3.5.tgz", - "integrity": "sha512-jOdnI/3qTpHABjM5cx1Hc0sKsPoYCp+DP/GJRGtDlPd7fiV9oXGGIcjW/ZOxLIvjGz8MA+uMZI9metHlgqbgwQ==", - "dependencies": { - "@types/node": "*" - } - }, - "node_modules/@types/body-parser": { - "version": "1.19.2", - "resolved": "https://registry.npmjs.org/@types/body-parser/-/body-parser-1.19.2.tgz", - "integrity": "sha512-ALYone6pm6QmwZoAgeyNksccT9Q4AWZQ6PvfwR37GT6r6FWUPguq6sUmNGSMV2Wr761oQoBxwGGa6DR5o1DC9g==", - "dependencies": { - "@types/connect": "*", - "@types/node": "*" - } - }, - "node_modules/@types/connect": { - "version": "3.4.35", - "resolved": "https://registry.npmjs.org/@types/connect/-/connect-3.4.35.tgz", - "integrity": "sha512-cdeYyv4KWoEgpBISTxWvqYsVy444DOqehiF3fM3ne10AmJ62RSyNkUnxMJXHQWRQQX2eR94m5y1IZyDwBjV9FQ==", - "dependencies": { - "@types/node": "*" - } - }, - "node_modules/@types/content-disposition": { - "version": "0.5.5", - "resolved": "https://registry.npmjs.org/@types/content-disposition/-/content-disposition-0.5.5.tgz", - "integrity": "sha512-v6LCdKfK6BwcqMo+wYW05rLS12S0ZO0Fl4w1h4aaZMD7bqT3gVUns6FvLJKGZHQmYn3SX55JWGpziwJRwVgutA==" - }, - "node_modules/@types/cookies": { - "version": "0.7.7", - "resolved": "https://registry.npmjs.org/@types/cookies/-/cookies-0.7.7.tgz", - "integrity": "sha512-h7BcvPUogWbKCzBR2lY4oqaZbO3jXZksexYJVFvkrFeLgbZjQkU4x8pRq6eg2MHXQhY0McQdqmmsxRWlVAHooA==", - "dependencies": { - "@types/connect": "*", - "@types/express": "*", - "@types/keygrip": "*", - "@types/node": "*" - } - }, - "node_modules/@types/express": { - "version": "4.17.17", - "resolved": "https://registry.npmjs.org/@types/express/-/express-4.17.17.tgz", - "integrity": "sha512-Q4FmmuLGBG58btUnfS1c1r/NQdlp3DMfGDGig8WhfpA2YRUtEkxAjkZb0yvplJGYdF1fsQ81iMDcH24sSCNC/Q==", - "dependencies": { - "@types/body-parser": "*", - "@types/express-serve-static-core": "^4.17.33", - "@types/qs": "*", - "@types/serve-static": "*" - } - }, - "node_modules/@types/express-serve-static-core": { - "version": "4.17.33", - "resolved": "https://registry.npmjs.org/@types/express-serve-static-core/-/express-serve-static-core-4.17.33.tgz", - "integrity": "sha512-TPBqmR/HRYI3eC2E5hmiivIzv+bidAfXofM+sbonAGvyDhySGw9/PQZFt2BLOrjUUR++4eJVpx6KnLQK1Fk9tA==", - "dependencies": { - "@types/node": "*", - "@types/qs": "*", - "@types/range-parser": "*" - } - }, - "node_modules/@types/http-assert": { - "version": "1.5.3", - "resolved": "https://registry.npmjs.org/@types/http-assert/-/http-assert-1.5.3.tgz", - "integrity": "sha512-FyAOrDuQmBi8/or3ns4rwPno7/9tJTijVW6aQQjK02+kOQ8zmoNg2XJtAuQhvQcy1ASJq38wirX5//9J1EqoUA==" - }, - "node_modules/@types/http-errors": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/@types/http-errors/-/http-errors-2.0.1.tgz", - "integrity": "sha512-/K3ds8TRAfBvi5vfjuz8y6+GiAYBZ0x4tXv1Av6CWBWn0IlADc+ZX9pMq7oU0fNQPnBwIZl3rmeLp6SBApbxSQ==" - }, - "node_modules/@types/keygrip": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/@types/keygrip/-/keygrip-1.0.2.tgz", - "integrity": "sha512-GJhpTepz2udxGexqos8wgaBx4I/zWIDPh/KOGEwAqtuGDkOUJu5eFvwmdBX4AmB8Odsr+9pHCQqiAqDL/yKMKw==" - }, - "node_modules/@types/koa": { - "version": "2.13.6", - "resolved": "https://registry.npmjs.org/@types/koa/-/koa-2.13.6.tgz", - "integrity": "sha512-diYUfp/GqfWBAiwxHtYJ/FQYIXhlEhlyaU7lB/bWQrx4Il9lCET5UwpFy3StOAohfsxxvEQ11qIJgT1j2tfBvw==", - "dependencies": { - "@types/accepts": "*", - "@types/content-disposition": "*", - 
"@types/cookies": "*", - "@types/http-assert": "*", - "@types/http-errors": "*", - "@types/keygrip": "*", - "@types/koa-compose": "*", - "@types/node": "*" - } - }, - "node_modules/@types/koa__router": { - "version": "12.0.0", - "resolved": "https://registry.npmjs.org/@types/koa__router/-/koa__router-12.0.0.tgz", - "integrity": "sha512-S6eHyZyoWCZLNHyy8j0sMW85cPrpByCbGGU2/BO4IzGiI87aHJ92lZh4E9xfsM9DcbCT469/OIqyC0sSJXSIBQ==", - "dependencies": { - "@types/koa": "*" - } - }, - "node_modules/@types/koa-compose": { - "version": "3.2.5", - "resolved": "https://registry.npmjs.org/@types/koa-compose/-/koa-compose-3.2.5.tgz", - "integrity": "sha512-B8nG/OoE1ORZqCkBVsup/AKcvjdgoHnfi4pZMn5UwAPCbhk/96xyv284eBYW8JlQbQ7zDmnpFr68I/40mFoIBQ==", - "dependencies": { - "@types/koa": "*" - } - }, - "node_modules/@types/mime": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/@types/mime/-/mime-3.0.1.tgz", - "integrity": "sha512-Y4XFY5VJAuw0FgAqPNd6NNoV44jbq9Bz2L7Rh/J6jLTiHBSBJa9fxqQIvkIld4GsoDOcCbvzOUAbLPsSKKg+uA==" - }, - "node_modules/@types/node": { - "version": "18.15.11", - "resolved": "https://registry.npmjs.org/@types/node/-/node-18.15.11.tgz", - "integrity": "sha512-E5Kwq2n4SbMzQOn6wnmBjuK9ouqlURrcZDVfbo9ftDDTFt3nk7ZKK4GMOzoYgnpQJKcxwQw+lGaBvvlMo0qN/Q==" - }, - "node_modules/@types/qs": { - "version": "6.9.7", - "resolved": "https://registry.npmjs.org/@types/qs/-/qs-6.9.7.tgz", - "integrity": "sha512-FGa1F62FT09qcrueBA6qYTrJPVDzah9a+493+o2PCXsesWHIn27G98TsSMs3WPNbZIEj4+VJf6saSFpvD+3Zsw==" - }, - "node_modules/@types/range-parser": { - "version": "1.2.4", - "resolved": "https://registry.npmjs.org/@types/range-parser/-/range-parser-1.2.4.tgz", - "integrity": "sha512-EEhsLsD6UsDM1yFhAvy0Cjr6VwmpMWqFBCb9w07wVugF7w9nfajxLuVmngTIpgS6svCnm6Vaw+MZhoDCKnOfsw==" - }, - "node_modules/@types/serve-static": { - "version": "1.15.1", - "resolved": "https://registry.npmjs.org/@types/serve-static/-/serve-static-1.15.1.tgz", - "integrity": "sha512-NUo5XNiAdULrJENtJXZZ3fHtfMolzZwczzBbnAeBbqBwG+LaG6YaJtuwzwGSQZ2wsCrxjEhNNjAkKigy3n8teQ==", - "dependencies": { - "@types/mime": "*", - "@types/node": "*" - } - }, - "node_modules/@types/ws": { - "version": "8.5.4", - "resolved": "https://registry.npmjs.org/@types/ws/-/ws-8.5.4.tgz", - "integrity": "sha512-zdQDHKUgcX/zBc4GrwsE/7dVdAD8JR4EuiAXiiUhhfyIJXXb2+PrGshFyeXWQPMmmZ2XxgaqclgpIC7eTXc1mg==", - "dependencies": { - "@types/node": "*" - } - }, - "node_modules/accepts": { - "version": "1.3.8", - "resolved": "https://registry.npmjs.org/accepts/-/accepts-1.3.8.tgz", - "integrity": "sha512-PYAthTa2m2VKxuvSD3DPC/Gy+U+sOA1LAuT8mkmRuvw+NACSaeXEQ+NHcVF7rONl6qcaxV3Uuemwawk+7+SJLw==", - "dependencies": { - "mime-types": "~2.1.34", - "negotiator": "0.6.3" - }, - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/agent-base": { - "version": "6.0.2", - "resolved": "https://registry.npmjs.org/agent-base/-/agent-base-6.0.2.tgz", - "integrity": "sha512-RZNwNclF7+MS/8bDg70amg32dyeZGZxiDuQmZxKLAlQjr3jGyLx+4Kkk58UO7D2QdgFIQCovuSuZESne6RG6XQ==", - "dependencies": { - "debug": "4" - }, - "engines": { - "node": ">= 6.0.0" - } - }, - "node_modules/aggregate-error": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/aggregate-error/-/aggregate-error-3.1.0.tgz", - "integrity": "sha512-4I7Td01quW/RpocfNayFdFVk1qSuoh0E7JrbRJ16nH01HhKFQ88INq9Sd+nd72zqRySlr9BmDA8xlEJ6vJMrYA==", - "dependencies": { - "clean-stack": "^2.0.0", - "indent-string": "^4.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/argparse": { - "version": "2.0.1", - "resolved": 
"https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz", - "integrity": "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==" - }, - "node_modules/asynckit": { - "version": "0.4.0", - "resolved": "https://registry.npmjs.org/asynckit/-/asynckit-0.4.0.tgz", - "integrity": "sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q==" - }, - "node_modules/axios": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/axios/-/axios-1.1.3.tgz", - "integrity": "sha512-00tXVRwKx/FZr/IDVFt4C+f9FYairX517WoGCL6dpOntqLkZofjhu43F/Xl44UOpqa+9sLFDrG/XAnFsUYgkDA==", - "dependencies": { - "follow-redirects": "^1.15.0", - "form-data": "^4.0.0", - "proxy-from-env": "^1.1.0" - } - }, - "node_modules/bytes": { - "version": "3.1.2", - "resolved": "https://registry.npmjs.org/bytes/-/bytes-3.1.2.tgz", - "integrity": "sha512-/Nf7TyzTx6S3yRJObOAV7956r8cr2+Oj8AC5dt8wSP3BQAoeX58NoHyCU8P8zGkNXStjTSi6fzO6F0pBdcYbEg==", - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/cac": { - "version": "6.7.14", - "resolved": "https://registry.npmjs.org/cac/-/cac-6.7.14.tgz", - "integrity": "sha512-b6Ilus+c3RrdDk+JhLKUAQfzzgLEPy6wcXqS7f/xe1EETvsDP6GORG7SFuOs6cID5YkqchW/LXZbX5bc8j7ZcQ==", - "engines": { - "node": ">=8" - } - }, - "node_modules/cache-content-type": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/cache-content-type/-/cache-content-type-1.0.1.tgz", - "integrity": "sha512-IKufZ1o4Ut42YUrZSo8+qnMTrFuKkvyoLXUywKz9GJ5BrhOFGhLdkx9sG4KAnVvbY6kEcSFjLQul+DVmBm2bgA==", - "dependencies": { - "mime-types": "^2.1.18", - "ylru": "^1.2.0" - }, - "engines": { - "node": ">= 6.0.0" - } - }, - "node_modules/call-bind": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/call-bind/-/call-bind-1.0.2.tgz", - "integrity": "sha512-7O+FbCihrB5WGbFYesctwmTKae6rOiIzmz1icreWJ+0aA7LJfuqhEso2T9ncpcFtzMQtzXf2QGGueWJGTYsqrA==", - "dependencies": { - "function-bind": "^1.1.1", - "get-intrinsic": "^1.0.2" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/clean-stack": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/clean-stack/-/clean-stack-2.2.0.tgz", - "integrity": "sha512-4diC9HaTE+KRAMWhDhrGOECgWZxoevMc5TlkObMqNSsVU62PYzXZ/SMTjzyGAFF1YusgxGcSWTEXBhp0CPwQ1A==", - "engines": { - "node": ">=6" - } - }, - "node_modules/co": { - "version": "4.6.0", - "resolved": "https://registry.npmjs.org/co/-/co-4.6.0.tgz", - "integrity": "sha512-QVb0dM5HvG+uaxitm8wONl7jltx8dqhfU33DcqtOZcLSVIKSDDLDi7+0LbAKiyI8hD9u42m2YxXSkMGWThaecQ==", - "engines": { - "iojs": ">= 1.0.0", - "node": ">= 0.12.0" - } - }, - "node_modules/co-body": { - "version": "6.1.0", - "resolved": "https://registry.npmjs.org/co-body/-/co-body-6.1.0.tgz", - "integrity": "sha512-m7pOT6CdLN7FuXUcpuz/8lfQ/L77x8SchHCF4G0RBTJO20Wzmhn5Sp4/5WsKy8OSpifBSUrmg83qEqaDHdyFuQ==", - "dependencies": { - "inflation": "^2.0.0", - "qs": "^6.5.2", - "raw-body": "^2.3.3", - "type-is": "^1.6.16" - } - }, - "node_modules/combined-stream": { - "version": "1.0.8", - "resolved": "https://registry.npmjs.org/combined-stream/-/combined-stream-1.0.8.tgz", - "integrity": "sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg==", - "dependencies": { - "delayed-stream": "~1.0.0" - }, - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/content-disposition": { - "version": "0.5.4", - "resolved": "https://registry.npmjs.org/content-disposition/-/content-disposition-0.5.4.tgz", - "integrity": 
"sha512-FveZTNuGw04cxlAiWbzi6zTAL/lhehaWbTtgluJh4/E95DqMwTmha3KZN1aAWA8cFIhHzMZUvLevkw5Rqk+tSQ==", - "dependencies": { - "safe-buffer": "5.2.1" - }, - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/content-type": { - "version": "1.0.5", - "resolved": "https://registry.npmjs.org/content-type/-/content-type-1.0.5.tgz", - "integrity": "sha512-nTjqfcBFEipKdXCv4YDQWCfmcLZKm81ldF0pAopTvyrFGVbcR6P/VAAd5G7N+0tTr8QqiU0tFadD6FK4NtJwOA==", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/cookies": { - "version": "0.8.0", - "resolved": "https://registry.npmjs.org/cookies/-/cookies-0.8.0.tgz", - "integrity": "sha512-8aPsApQfebXnuI+537McwYsDtjVxGm8gTIzQI3FDW6t5t/DAhERxtnbEPN/8RX+uZthoz4eCOgloXaE5cYyNow==", - "dependencies": { - "depd": "~2.0.0", - "keygrip": "~1.1.0" - }, - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/cookies/node_modules/depd": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/depd/-/depd-2.0.0.tgz", - "integrity": "sha512-g7nH6P6dyDioJogAAGprGpCtVImJhpPk/roCzdb3fIh61/s/nPsfR6onyMwkCAR/OlC3yBC0lESvUoQEAssIrw==", - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/copy-to": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/copy-to/-/copy-to-2.0.1.tgz", - "integrity": "sha512-3DdaFaU/Zf1AnpLiFDeNCD4TOWe3Zl2RZaTzUvWiIk5ERzcCodOE20Vqq4fzCbNoHURFHT4/us/Lfq+S2zyY4w==" - }, - "node_modules/cordis": { - "version": "2.7.4", - "resolved": "https://registry.npmjs.org/cordis/-/cordis-2.7.4.tgz", - "integrity": "sha512-SNP9JAhSjVHtozfmvU8HvREjsVSWTsQLuyHBH8PVnLZDJExe7l3srqNWs7vkKCrKWCqw8JH1Zj7kDF1bCbpwzw==", - "dependencies": { - "cosmokit": "^1.4.1" - } - }, - "node_modules/cordis-axios": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/cordis-axios/-/cordis-axios-3.1.1.tgz", - "integrity": "sha512-zBdLVIfnp8jAerS24T4JY689tmYQl2WpOFMv0Y1g3j3BwwUO/ScveEhfR5Oix3J2JepxIUoA2q18nTBJj2RaFQ==", - "dependencies": { - "axios": "~1.1.3", - "cosmokit": "^1.4.1", - "mime-db": "^1.52.0" - }, - "engines": { - "node": ">=12.0.0" - }, - "peerDependencies": { - "cordis": "^2.7.4" - } - }, - "node_modules/cosmokit": { - "version": "1.4.1", - "resolved": "https://registry.npmjs.org/cosmokit/-/cosmokit-1.4.1.tgz", - "integrity": "sha512-d3ZRpKFahJRvLbo1T4y0ELCudjk9AeDUsfgKm+iAti6yPCeoPLGNUGT4expTWsNkrSA1uk7CKhtBPiizFYvDgA==" - }, - "node_modules/cross-spawn": { - "version": "7.0.3", - "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.3.tgz", - "integrity": "sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w==", - "dependencies": { - "path-key": "^3.1.0", - "shebang-command": "^2.0.0", - "which": "^2.0.1" - }, - "engines": { - "node": ">= 8" - } - }, - "node_modules/debug": { - "version": "4.3.4", - "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.4.tgz", - "integrity": "sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==", - "dependencies": { - "ms": "2.1.2" - }, - "engines": { - "node": ">=6.0" - }, - "peerDependenciesMeta": { - "supports-color": { - "optional": true - } - } - }, - "node_modules/deep-equal": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/deep-equal/-/deep-equal-1.0.1.tgz", - "integrity": "sha512-bHtC0iYvWhyaTzvV3CZgPeZQqCOBGyGsVV7v4eevpdkLHfiSrXUdBG+qAuSz4RI70sszvjQ1QSZ98An1yNwpSw==" - }, - "node_modules/define-lazy-prop": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/define-lazy-prop/-/define-lazy-prop-2.0.0.tgz", - "integrity": 
"sha512-Ds09qNh8yw3khSjiJjiUInaGX9xlqZDY7JVryGxdxV7NPeuqQfplOpQ66yJFZut3jLa5zOwkXw1g9EI2uKh4Og==", - "engines": { - "node": ">=8" - } - }, - "node_modules/delayed-stream": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/delayed-stream/-/delayed-stream-1.0.0.tgz", - "integrity": "sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ==", - "engines": { - "node": ">=0.4.0" - } - }, - "node_modules/delegates": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/delegates/-/delegates-1.0.0.tgz", - "integrity": "sha512-bd2L678uiWATM6m5Z1VzNCErI3jiGzt6HGY8OVICs40JQq/HALfbyNJmp0UDakEY4pMMaN0Ly5om/B1VI/+xfQ==" - }, - "node_modules/depd": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/depd/-/depd-1.1.2.tgz", - "integrity": "sha512-7emPTl6Dpo6JRXOXjLRxck+FlLRX5847cLKEn00PLAgc3g2hTZZgr+e4c2v6QpSmLeFP3n5yUo7ft6avBK/5jQ==", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/destroy": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/destroy/-/destroy-1.2.0.tgz", - "integrity": "sha512-2sJGJTaXIIaR1w4iJSNoN0hnMY7Gpc/n8D4qSCJw8QqFWXf7cuAgnEHxBpweaVcPevC2l3KpjYCx3NypQQgaJg==", - "engines": { - "node": ">= 0.8", - "npm": "1.2.8000 || >= 1.4.16" - } - }, - "node_modules/dotenv": { - "version": "16.0.3", - "resolved": "https://registry.npmjs.org/dotenv/-/dotenv-16.0.3.tgz", - "integrity": "sha512-7GO6HghkA5fYG9TYnNxi14/7K9f5occMlp3zXAuSxn7CKCxt9xbNWG7yF8hTCSUchlfWSe3uLmlPfigevRItzQ==", - "engines": { - "node": ">=12" - } - }, - "node_modules/ee-first": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/ee-first/-/ee-first-1.1.1.tgz", - "integrity": "sha512-WMwm9LhRUo+WUaRN+vRuETqG89IgZphVSNkdFgeb6sS/E4OrDIN7t48CAewSHXc6C8lefD8KKfr5vY61brQlow==" - }, - "node_modules/encodeurl": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/encodeurl/-/encodeurl-1.0.2.tgz", - "integrity": "sha512-TPJXq8JqFaVYm2CWmPvnP2Iyo4ZSM7/QKcSmuMLDObfpH5fi7RUGmd/rTDf+rut/saiDiQEeVTNgAmJEdAOx0w==", - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/escape-html": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/escape-html/-/escape-html-1.0.3.tgz", - "integrity": "sha512-NiSupZ4OeuGwr68lGIeym/ksIZMJodUGOSCZ/FSnTxcrekbvqrgdUxlJOMpijaKZVjAJrWrGs/6Jy8OMuyj9ow==" - }, - "node_modules/execa": { - "version": "5.1.1", - "resolved": "https://registry.npmjs.org/execa/-/execa-5.1.1.tgz", - "integrity": "sha512-8uSpZZocAZRBAPIEINJj3Lo9HyGitllczc27Eh5YYojjMFMn8yHMDMaUHE2Jqfq05D/wucwI4JGURyXt1vchyg==", - "dependencies": { - "cross-spawn": "^7.0.3", - "get-stream": "^6.0.0", - "human-signals": "^2.1.0", - "is-stream": "^2.0.0", - "merge-stream": "^2.0.0", - "npm-run-path": "^4.0.1", - "onetime": "^5.1.2", - "signal-exit": "^3.0.3", - "strip-final-newline": "^2.0.0" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sindresorhus/execa?sponsor=1" - } - }, - "node_modules/fastest-levenshtein": { - "version": "1.0.16", - "resolved": "https://registry.npmjs.org/fastest-levenshtein/-/fastest-levenshtein-1.0.16.tgz", - "integrity": "sha512-eRnCtTTtGZFpQCwhJiUOuxPQWRXVKYDn0b2PeHfXL6/Zi53SLAzAHfVhVWK2AryC/WH05kGfxhFIPvTF0SXQzg==", - "engines": { - "node": ">= 4.9.1" - } - }, - "node_modules/file-type": { - "version": "16.5.4", - "resolved": "https://registry.npmjs.org/file-type/-/file-type-16.5.4.tgz", - "integrity": "sha512-/yFHK0aGjFEgDJjEKP0pWCplsPFPhwyfwevf/pVxiN0tmE4L9LmwWxWukdJSHdoCli4VgQLehjJtwQBnqmsKcw==", - "dependencies": { - 
"readable-web-to-node-stream": "^3.0.0", - "strtok3": "^6.2.4", - "token-types": "^4.1.1" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sindresorhus/file-type?sponsor=1" - } - }, - "node_modules/follow-redirects": { - "version": "1.15.2", - "resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.15.2.tgz", - "integrity": "sha512-VQLG33o04KaQ8uYi2tVNbdrWp1QWxNNea+nmIB4EVM28v0hmP17z7aG1+wAkNzVq4KeXTq3221ye5qTJP91JwA==", - "funding": [ - { - "type": "individual", - "url": "https://github.com/sponsors/RubenVerborgh" - } - ], - "engines": { - "node": ">=4.0" - }, - "peerDependenciesMeta": { - "debug": { - "optional": true - } - } - }, - "node_modules/form-data": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/form-data/-/form-data-4.0.0.tgz", - "integrity": "sha512-ETEklSGi5t0QMZuiXoA/Q6vcnxcLQP5vdugSpuAyi6SVGi2clPPp+xgEhuMaHC+zGgn31Kd235W35f7Hykkaww==", - "dependencies": { - "asynckit": "^0.4.0", - "combined-stream": "^1.0.8", - "mime-types": "^2.1.12" - }, - "engines": { - "node": ">= 6" - } - }, - "node_modules/fresh": { - "version": "0.5.2", - "resolved": "https://registry.npmjs.org/fresh/-/fresh-0.5.2.tgz", - "integrity": "sha512-zJ2mQYM18rEFOudeV4GShTGIQ7RbzA7ozbU9I/XBpm7kqgMywgmylMwXHxZJmkVoYkna9d2pVXVXPdYTP9ej8Q==", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/function-bind": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.1.tgz", - "integrity": "sha512-yIovAzMX49sF8Yl58fSCWJ5svSLuaibPxXQJFLmBObTuCr0Mf1KiPopGM9NiFjiYBCbfaa2Fh6breQ6ANVTI0A==" - }, - "node_modules/get-intrinsic": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.2.0.tgz", - "integrity": "sha512-L049y6nFOuom5wGyRc3/gdTLO94dySVKRACj1RmJZBQXlbTMhtNIgkWkUHq+jYmZvKf14EW1EoJnnjbmoHij0Q==", - "dependencies": { - "function-bind": "^1.1.1", - "has": "^1.0.3", - "has-symbols": "^1.0.3" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/get-registry": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/get-registry/-/get-registry-1.1.0.tgz", - "integrity": "sha512-D/0p/sxhBYQVVHJA7e8r47HSLeU7OTEfBxDR2v5AY9wU0l8esIsD6hbaAdVYRx8HVSQCgwgDHHxWT+rhoisnkQ==", - "dependencies": { - "which-pm-runs": "^1.1.0" - } - }, - "node_modules/get-stream": { - "version": "6.0.1", - "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-6.0.1.tgz", - "integrity": "sha512-ts6Wi+2j3jQjqi70w5AlN8DFnkSwC+MqmxEzdEALB2qXZYV3X/b1CTfgPLGJNMeAWxdPfU8FO1ms3NUfaHCPYg==", - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/has": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/has/-/has-1.0.3.tgz", - "integrity": "sha512-f2dvO0VU6Oej7RkWJGrehjbzMAjFp5/VKPp5tTpWIV4JHHZK1/BxbFRtf/siA2SWTe09caDmVtYYzWEIbBS4zw==", - "dependencies": { - "function-bind": "^1.1.1" - }, - "engines": { - "node": ">= 0.4.0" - } - }, - "node_modules/has-flag": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", - "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", - "engines": { - "node": ">=8" - } - }, - "node_modules/has-symbols": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.0.3.tgz", - "integrity": 
"sha512-l3LCuF6MgDNwTDKkdYGEihYjt5pRPbEg46rtlmnSPlUbgmB8LOIrKJbYYFBSbnPaJexMKtiPO8hmeRjRz2Td+A==", - "engines": { - "node": ">= 0.4" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/has-tostringtag": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/has-tostringtag/-/has-tostringtag-1.0.0.tgz", - "integrity": "sha512-kFjcSNhnlGV1kyoGk7OXKSawH5JOb/LzUc5w9B02hOTO0dfFRjbHQKvg1d6cf3HbeUmtU9VbbV3qzZ2Teh97WQ==", - "dependencies": { - "has-symbols": "^1.0.2" - }, - "engines": { - "node": ">= 0.4" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/http-assert": { - "version": "1.5.0", - "resolved": "https://registry.npmjs.org/http-assert/-/http-assert-1.5.0.tgz", - "integrity": "sha512-uPpH7OKX4H25hBmU6G1jWNaqJGpTXxey+YOUizJUAgu0AjLUeC8D73hTrhvDS5D+GJN1DN1+hhc/eF/wpxtp0w==", - "dependencies": { - "deep-equal": "~1.0.1", - "http-errors": "~1.8.0" - }, - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/http-errors": { - "version": "1.8.1", - "resolved": "https://registry.npmjs.org/http-errors/-/http-errors-1.8.1.tgz", - "integrity": "sha512-Kpk9Sm7NmI+RHhnj6OIWDI1d6fIoFAtFt9RLaTMRlg/8w49juAStsrBgp0Dp4OdxdVbRIeKhtCUvoi/RuAhO4g==", - "dependencies": { - "depd": "~1.1.2", - "inherits": "2.0.4", - "setprototypeof": "1.2.0", - "statuses": ">= 1.5.0 < 2", - "toidentifier": "1.0.1" - }, - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/http-proxy-agent": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/http-proxy-agent/-/http-proxy-agent-5.0.0.tgz", - "integrity": "sha512-n2hY8YdoRE1i7r6M0w9DIw5GgZN0G25P8zLCRQ8rjXtTU3vsNFBI/vWK/UIeE6g5MUUz6avwAPXmL6Fy9D/90w==", - "dependencies": { - "@tootallnate/once": "2", - "agent-base": "6", - "debug": "4" - }, - "engines": { - "node": ">= 6" - } - }, - "node_modules/https-proxy-agent": { - "version": "5.0.1", - "resolved": "https://registry.npmjs.org/https-proxy-agent/-/https-proxy-agent-5.0.1.tgz", - "integrity": "sha512-dFcAjpTQFgoLMzC2VwU+C/CbS7uRL0lWmxDITmqm7C+7F0Odmj6s9l6alZc6AELXhrnggM2CeWSXHGOdX2YtwA==", - "dependencies": { - "agent-base": "6", - "debug": "4" - }, - "engines": { - "node": ">= 6" - } - }, - "node_modules/human-signals": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/human-signals/-/human-signals-2.1.0.tgz", - "integrity": "sha512-B4FFZ6q/T2jhhksgkbEW3HBvWIfDW85snkQgawt07S7J5QXTk6BkNV+0yAeZrM5QpMAdYlocGoljn0sJ/WQkFw==", - "engines": { - "node": ">=10.17.0" - } - }, - "node_modules/iconv-lite": { - "version": "0.4.24", - "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.4.24.tgz", - "integrity": "sha512-v3MXnZAcvnywkTUEZomIActle7RXXeedOR31wwl7VlyoXO4Qi9arvSenNQWne1TcRwhCL1HwLI21bEqdpj8/rA==", - "dependencies": { - "safer-buffer": ">= 2.1.2 < 3" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/ieee754": { - "version": "1.2.1", - "resolved": "https://registry.npmjs.org/ieee754/-/ieee754-1.2.1.tgz", - "integrity": "sha512-dcyqhDvX1C46lXZcVqCpK+FtMRQVdIMN6/Df5js2zouUsqG7I6sFxitIC+7KYK29KdXOLHdu9zL4sFnoVQnqaA==", - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/feross" - }, - { - "type": "patreon", - "url": "https://www.patreon.com/feross" - }, - { - "type": "consulting", - "url": "https://feross.org/support" - } - ] - }, - "node_modules/inaba": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/inaba/-/inaba-1.1.1.tgz", - "integrity": 
"sha512-VYgrcz9EwjHELNU74R/p81U/G00u8KuFzao43pyNp7UZix+NY78eUzBy1Ks0tSgxgia+luJMvTD67vV02pk9yg==" - }, - "node_modules/indent-string": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/indent-string/-/indent-string-4.0.0.tgz", - "integrity": "sha512-EdDDZu4A2OyIK7Lr/2zG+w5jmbuk1DVBnEwREQvBzspBJkCEbRa8GxU1lghYcaGJCnRWibjDXlq779X1/y5xwg==", - "engines": { - "node": ">=8" - } - }, - "node_modules/inflation": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/inflation/-/inflation-2.0.0.tgz", - "integrity": "sha512-m3xv4hJYR2oXw4o4Y5l6P5P16WYmazYof+el6Al3f+YlggGj6qT9kImBAnzDelRALnP5d3h4jGBPKzYCizjZZw==", - "engines": { - "node": ">= 0.8.0" - } - }, - "node_modules/inherits": { - "version": "2.0.4", - "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz", - "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==" - }, - "node_modules/ip": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/ip/-/ip-2.0.0.tgz", - "integrity": "sha512-WKa+XuLG1A1R0UWhl2+1XQSi+fZWMsYKffMZTTYsiZaUD8k2yDAj5atimTUD2TZkyCkNEeYE5NhFZmupOGtjYQ==" - }, - "node_modules/is-docker": { - "version": "2.2.1", - "resolved": "https://registry.npmjs.org/is-docker/-/is-docker-2.2.1.tgz", - "integrity": "sha512-F+i2BKsFrH66iaUFc0woD8sLy8getkwTwtOBjvs56Cx4CgJDeKQeqfz8wAYiSb8JOprWhHH5p77PbmYCvvUuXQ==", - "bin": { - "is-docker": "cli.js" - }, - "engines": { - "node": ">=8" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/is-generator-function": { - "version": "1.0.10", - "resolved": "https://registry.npmjs.org/is-generator-function/-/is-generator-function-1.0.10.tgz", - "integrity": "sha512-jsEjy9l3yiXEQ+PsXdmBwEPcOxaXWLspKdplFUVI9vq1iZgIekeC0L167qeu86czQaxed3q/Uzuw0swL0irL8A==", - "dependencies": { - "has-tostringtag": "^1.0.0" - }, - "engines": { - "node": ">= 0.4" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/is-stream": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-2.0.1.tgz", - "integrity": "sha512-hFoiJiTl63nn+kstHGBtewWSKnQLpyb155KHheA1l39uvtO9nWIop1p3udqPcUd/xbF1VLMO4n7OI6p7RbngDg==", - "engines": { - "node": ">=8" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/is-wsl": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/is-wsl/-/is-wsl-2.2.0.tgz", - "integrity": "sha512-fKzAra0rGJUUBwGBgNkHZuToZcn+TtXHpeCgmkMJMMYx1sQDYaCSyjJBSCa2nH1DGm7s3n1oBnohoVTBaN7Lww==", - "dependencies": { - "is-docker": "^2.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/isexe": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz", - "integrity": "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==" - }, - "node_modules/js-yaml": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.0.tgz", - "integrity": "sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==", - "dependencies": { - "argparse": "^2.0.1" - }, - "bin": { - "js-yaml": "bin/js-yaml.js" - } - }, - "node_modules/keygrip": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/keygrip/-/keygrip-1.1.0.tgz", - "integrity": "sha512-iYSchDJ+liQ8iwbSI2QqsQOvqv58eJCEanyJPJi+Khyu8smkcKSFUCbPwzFcL7YVtZ6eONjqRX/38caJ7QjRAQ==", - "dependencies": { - "tsscmp": "1.0.6" - }, - "engines": { - "node": ">= 0.6" - } - 
}, - "node_modules/kleur": { - "version": "4.1.5", - "resolved": "https://registry.npmjs.org/kleur/-/kleur-4.1.5.tgz", - "integrity": "sha512-o+NO+8WrRiQEE4/7nwRJhN1HWpVmJm511pBHUxPLtp0BUISzlBplORYSmTclCnJvQq2tKu/sgl3xVpkc7ZWuQQ==", - "engines": { - "node": ">=6" - } - }, - "node_modules/koa": { - "version": "2.14.2", - "resolved": "https://registry.npmjs.org/koa/-/koa-2.14.2.tgz", - "integrity": "sha512-VFI2bpJaodz6P7x2uyLiX6RLYpZmOJqNmoCst/Yyd7hQlszyPwG/I9CQJ63nOtKSxpt5M7NH67V6nJL2BwCl7g==", - "dependencies": { - "accepts": "^1.3.5", - "cache-content-type": "^1.0.0", - "content-disposition": "~0.5.2", - "content-type": "^1.0.4", - "cookies": "~0.8.0", - "debug": "^4.3.2", - "delegates": "^1.0.0", - "depd": "^2.0.0", - "destroy": "^1.0.4", - "encodeurl": "^1.0.2", - "escape-html": "^1.0.3", - "fresh": "~0.5.2", - "http-assert": "^1.3.0", - "http-errors": "^1.6.3", - "is-generator-function": "^1.0.7", - "koa-compose": "^4.1.0", - "koa-convert": "^2.0.0", - "on-finished": "^2.3.0", - "only": "~0.0.2", - "parseurl": "^1.3.2", - "statuses": "^1.5.0", - "type-is": "^1.6.16", - "vary": "^1.1.2" - }, - "engines": { - "node": "^4.8.4 || ^6.10.1 || ^7.10.1 || >= 8.1.4" - } - }, - "node_modules/koa-bodyparser": { - "version": "4.4.0", - "resolved": "https://registry.npmjs.org/koa-bodyparser/-/koa-bodyparser-4.4.0.tgz", - "integrity": "sha512-AXPY7wwKZUmbgb8VkTEUFoRNOlx6aWRJwEnQD+zfNf33/7KSAkN4Oo9BqlIk80D+5TvuqlhpQT5dPVcyxl5Zsw==", - "dependencies": { - "co-body": "^6.0.0", - "copy-to": "^2.0.1" - }, - "engines": { - "node": ">=8.0.0" - } - }, - "node_modules/koa-compose": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/koa-compose/-/koa-compose-4.1.0.tgz", - "integrity": "sha512-8ODW8TrDuMYvXRwra/Kh7/rJo9BtOfPc6qO8eAfC80CnCvSjSl0bkRM24X6/XBBEyj0v1nRUQ1LyOy3dbqOWXw==" - }, - "node_modules/koa-convert": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/koa-convert/-/koa-convert-2.0.0.tgz", - "integrity": "sha512-asOvN6bFlSnxewce2e/DK3p4tltyfC4VM7ZwuTuepI7dEQVcvpyFuBcEARu1+Hxg8DIwytce2n7jrZtRlPrARA==", - "dependencies": { - "co": "^4.6.0", - "koa-compose": "^4.1.0" - }, - "engines": { - "node": ">= 10" - } - }, - "node_modules/koa/node_modules/depd": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/depd/-/depd-2.0.0.tgz", - "integrity": "sha512-g7nH6P6dyDioJogAAGprGpCtVImJhpPk/roCzdb3fIh61/s/nPsfR6onyMwkCAR/OlC3yBC0lESvUoQEAssIrw==", - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/koishi": { - "version": "4.12.4", - "resolved": "https://registry.npmjs.org/koishi/-/koishi-4.12.4.tgz", - "integrity": "sha512-HKpUShf/n09nqawF2elEHMrB74FmBE9DpBq70Vi1K7f4UtaC4SbeltUgQh+R94HKeChOujzVFbgLkipjslYsMw==", - "dependencies": { - "@koishijs/core": "4.12.4", - "@koishijs/loader": "3.1.4", - "@koishijs/utils": "^7.0.1", - "@satorijs/satori": "^2.3.1", - "cac": "^6.7.14", - "kleur": "^4.1.5", - "ns-require": "^1.1.4" - }, - "bin": { - "koishi": "lib/cli/index.js" - }, - "engines": { - "node": ">=12.0.0" - } - }, - "node_modules/koishi-plugin-glm-bot": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/koishi-plugin-glm-bot/-/koishi-plugin-glm-bot-1.1.2.tgz", - "integrity": "sha512-H8pCz6m2U079LhEvHFBhb/SRKcWQTi9FWhDPsIuboXqdJiKhW7lP56avfvXWZWM5XVxhmCBcCN6+QSLk/jqHEw==", - "peerDependencies": { - "koishi": "^4.11.0" - } - }, - "node_modules/lru-cache": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-6.0.0.tgz", - "integrity": 
"sha512-Jo6dJ04CmSjuznwJSS3pUeWmd/H0ffTlkXXgwZi+eq1UCmqQwCh+eLsYOYCwY991i2Fah4h1BEMCx4qThGbsiA==", - "dependencies": { - "yallist": "^4.0.0" - }, - "engines": { - "node": ">=10" - } - }, - "node_modules/media-typer": { - "version": "0.3.0", - "resolved": "https://registry.npmjs.org/media-typer/-/media-typer-0.3.0.tgz", - "integrity": "sha512-dq+qelQ9akHpcOl/gUVRTxVIOkAJ1wR3QAvb4RsVjS8oVoFjDGTc679wJYmUmknUF5HwMLOgb5O+a3KxfWapPQ==", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/merge-stream": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/merge-stream/-/merge-stream-2.0.0.tgz", - "integrity": "sha512-abv/qOcuPfk3URPfDzmZU1LKmuw8kT+0nIHvKrKgFrwifol/doWcdA4ZqsWQ8ENrFKkd67Mfpo/LovbIUsbt3w==" - }, - "node_modules/methods": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/methods/-/methods-1.1.2.tgz", - "integrity": "sha512-iclAHeNqNm68zFtnZ0e+1L2yUIdvzNoauKU4WBA3VvH/vPFieF7qfRlwUZU+DA9P9bPXIS90ulxoUoCH23sV2w==", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/mime-db": { - "version": "1.52.0", - "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz", - "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/mime-types": { - "version": "2.1.35", - "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz", - "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==", - "dependencies": { - "mime-db": "1.52.0" - }, - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/mimic-fn": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/mimic-fn/-/mimic-fn-2.1.0.tgz", - "integrity": "sha512-OqbOk5oEQeAZ8WXWydlu9HJjz9WVdEIvamMCcXmuqUYjTknH/sqsWvhQ3vgwKFRR1HpjvNBKQ37nbJgYzGqGcg==", - "engines": { - "node": ">=6" - } - }, - "node_modules/ms": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", - "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==" - }, - "node_modules/negotiator": { - "version": "0.6.3", - "resolved": "https://registry.npmjs.org/negotiator/-/negotiator-0.6.3.tgz", - "integrity": "sha512-+EUsqGPLsM+j/zdChZjsnX51g4XrHFOIXwfnCVPGlQk/k5giakcKsuxCObBRu6DSm9opw/O6slWbJdghQM4bBg==", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/npm-run-path": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/npm-run-path/-/npm-run-path-4.0.1.tgz", - "integrity": "sha512-S48WzZW777zhNIrn7gxOlISNAqi9ZC/uQFnRdbeIHhZhCA6UqpkOT8T1G7BvfdgP4Er8gF4sUbaS0i7QvIfCWw==", - "dependencies": { - "path-key": "^3.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/ns-require": { - "version": "1.1.4", - "resolved": "https://registry.npmjs.org/ns-require/-/ns-require-1.1.4.tgz", - "integrity": "sha512-Zk25pQj4u5i6DS0vaNO5aSSXewybVqqVVjz8AOxFy9DNPtmu3jlexMz6kUXLV2oB+X6iQeAnHXSzj5Qz/IeDaQ==" - }, - "node_modules/object-inspect": { - "version": "1.12.3", - "resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.12.3.tgz", - "integrity": "sha512-geUvdk7c+eizMNUDkRpW1wJwgfOiOeHbxBR/hLXK1aT6zmVSO0jsQcs7fj6MGw89jC/cjGfLcNOrtMYtGqm81g==", - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/on-finished": { - "version": "2.4.1", - "resolved": "https://registry.npmjs.org/on-finished/-/on-finished-2.4.1.tgz", - "integrity": 
"sha512-oVlzkg3ENAhCk2zdv7IJwd/QUD4z2RxRwpkcGY8psCVcCYZNq4wYnVWALHM+brtuJjePWiYF/ClmuDr8Ch5+kg==", - "dependencies": { - "ee-first": "1.1.1" - }, - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/onetime": { - "version": "5.1.2", - "resolved": "https://registry.npmjs.org/onetime/-/onetime-5.1.2.tgz", - "integrity": "sha512-kbpaSSGJTWdAY5KPVeMOKXSrPtr8C8C7wodJbcsd51jRnmD+GZu8Y0VoU6Dm5Z4vWr0Ig/1NKuWRKf7j5aaYSg==", - "dependencies": { - "mimic-fn": "^2.1.0" - }, - "engines": { - "node": ">=6" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/only": { - "version": "0.0.2", - "resolved": "https://registry.npmjs.org/only/-/only-0.0.2.tgz", - "integrity": "sha512-Fvw+Jemq5fjjyWz6CpKx6w9s7xxqo3+JCyM0WXWeCSOboZ8ABkyvP8ID4CZuChA/wxSx+XSJmdOm8rGVyJ1hdQ==" - }, - "node_modules/open": { - "version": "8.4.2", - "resolved": "https://registry.npmjs.org/open/-/open-8.4.2.tgz", - "integrity": "sha512-7x81NCL719oNbsq/3mh+hVrAWmFuEYUqrq/Iw3kUzH8ReypT9QQ0BLoJS7/G9k6N81XjW4qHWtjWwe/9eLy1EQ==", - "dependencies": { - "define-lazy-prop": "^2.0.0", - "is-docker": "^2.1.1", - "is-wsl": "^2.2.0" - }, - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/p-map": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/p-map/-/p-map-4.0.0.tgz", - "integrity": "sha512-/bjOqmgETBYB5BoEeGVea8dmvHb2m9GLy1E9W43yeyfP6QQCZGFNa+XRceJEuDB6zqr+gKpIAmlLebMpykw/MQ==", - "dependencies": { - "aggregate-error": "^3.0.0" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/parseurl": { - "version": "1.3.3", - "resolved": "https://registry.npmjs.org/parseurl/-/parseurl-1.3.3.tgz", - "integrity": "sha512-CiyeOxFT/JZyN5m0z9PfXw4SCBJ6Sygz1Dpl0wqjlhDEGGBP1GnsUVEL0p63hoG1fcj3fHynXi9NYO4nWOL+qQ==", - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/path-key": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz", - "integrity": "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==", - "engines": { - "node": ">=8" - } - }, - "node_modules/path-to-regexp": { - "version": "6.2.1", - "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-6.2.1.tgz", - "integrity": "sha512-JLyh7xT1kizaEvcaXOQwOc2/Yhw6KZOvPf1S8401UyLk86CU79LN3vl7ztXGm/pZ+YjoyAJ4rxmHwbkBXJX+yw==" - }, - "node_modules/peek-readable": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/peek-readable/-/peek-readable-4.1.0.tgz", - "integrity": "sha512-ZI3LnwUv5nOGbQzD9c2iDG6toheuXSZP5esSHBjopsXH4dg19soufvpUGA3uohi5anFtGb2lhAVdHzH6R/Evvg==", - "engines": { - "node": ">=8" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/Borewit" - } - }, - "node_modules/proxy-from-env": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/proxy-from-env/-/proxy-from-env-1.1.0.tgz", - "integrity": "sha512-D+zkORCbA9f1tdWRK0RaCR3GPv50cMxcrz4X8k5LTSUD1Dkw47mKJEZQNunItRTkWwgtaUSo1RVFRIG9ZXiFYg==" - }, - "node_modules/qface": { - "version": "1.4.1", - "resolved": "https://registry.npmjs.org/qface/-/qface-1.4.1.tgz", - "integrity": "sha512-52qX9qdiDFd53xnYAFitkXVldcSddd4ZQiFTV2IluM+2HdDiJph3CKtmPi7CTCA9QF7K2d2WUAH3E2Y4P6fEjQ==" - }, - "node_modules/qs": { - "version": "6.11.1", - "resolved": "https://registry.npmjs.org/qs/-/qs-6.11.1.tgz", - "integrity": 
"sha512-0wsrzgTz/kAVIeuxSjnpGC56rzYtr6JT/2BwEvMaPhFIoYa1aGO8LbzuU1R0uUYQkLpWBTOj0l/CLAJB64J6nQ==", - "dependencies": { - "side-channel": "^1.0.4" - }, - "engines": { - "node": ">=0.6" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/raw-body": { - "version": "2.5.2", - "resolved": "https://registry.npmjs.org/raw-body/-/raw-body-2.5.2.tgz", - "integrity": "sha512-8zGqypfENjCIqGhgXToC8aB2r7YrBX+AQAfIPs/Mlk+BtPTztOvTS01NRW/3Eh60J+a48lt8qsCzirQ6loCVfA==", - "dependencies": { - "bytes": "3.1.2", - "http-errors": "2.0.0", - "iconv-lite": "0.4.24", - "unpipe": "1.0.0" - }, - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/raw-body/node_modules/depd": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/depd/-/depd-2.0.0.tgz", - "integrity": "sha512-g7nH6P6dyDioJogAAGprGpCtVImJhpPk/roCzdb3fIh61/s/nPsfR6onyMwkCAR/OlC3yBC0lESvUoQEAssIrw==", - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/raw-body/node_modules/http-errors": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/http-errors/-/http-errors-2.0.0.tgz", - "integrity": "sha512-FtwrG/euBzaEjYeRqOgly7G0qviiXoJWnvEH2Z1plBdXgbyjv34pHTSb9zoeHMyDy33+DWy5Wt9Wo+TURtOYSQ==", - "dependencies": { - "depd": "2.0.0", - "inherits": "2.0.4", - "setprototypeof": "1.2.0", - "statuses": "2.0.1", - "toidentifier": "1.0.1" - }, - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/raw-body/node_modules/statuses": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/statuses/-/statuses-2.0.1.tgz", - "integrity": "sha512-RwNA9Z/7PrK06rYLIzFMlaF+l73iwpzsqRIFgbMLbTcLD6cOao82TaWefPXQvB2fOC4AjuYSEndS7N/mTCbkdQ==", - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/readable-stream": { - "version": "3.6.2", - "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-3.6.2.tgz", - "integrity": "sha512-9u/sniCrY3D5WdsERHzHE4G2YCXqoG5FTHUiCC4SIbr6XcLZBY05ya9EKjYek9O5xOAwjGq+1JdGBAS7Q9ScoA==", - "dependencies": { - "inherits": "^2.0.3", - "string_decoder": "^1.1.1", - "util-deprecate": "^1.0.1" - }, - "engines": { - "node": ">= 6" - } - }, - "node_modules/readable-web-to-node-stream": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/readable-web-to-node-stream/-/readable-web-to-node-stream-3.0.2.tgz", - "integrity": "sha512-ePeK6cc1EcKLEhJFt/AebMCLL+GgSKhuygrZ/GLaKZYEecIgIECf4UaUuaByiGtzckwR4ain9VzUh95T1exYGw==", - "dependencies": { - "readable-stream": "^3.6.0" - }, - "engines": { - "node": ">=8" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/Borewit" - } - }, - "node_modules/reggol": { - "version": "1.3.5", - "resolved": "https://registry.npmjs.org/reggol/-/reggol-1.3.5.tgz", - "integrity": "sha512-kzkzs4nhZeiphyh+amekq25/3PndZDq+5Yt8qCJqPSyMXPC1pkwhfYCQyJdXxoRz3/uqt0+VqHulagUCVY84vA==", - "dependencies": { - "cosmokit": "^1.4.0", - "object-inspect": "^1.12.2", - "supports-color": "^8.1.1" - } - }, - "node_modules/safe-buffer": { - "version": "5.2.1", - "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz", - "integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==", - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/feross" - }, - { - "type": "patreon", - "url": "https://www.patreon.com/feross" - }, - { - "type": "consulting", - "url": "https://feross.org/support" - } - ] - }, - "node_modules/safer-buffer": { - "version": "2.1.2", - "resolved": 
"https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz", - "integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==" - }, - "node_modules/schemastery": { - "version": "3.7.2", - "resolved": "https://registry.npmjs.org/schemastery/-/schemastery-3.7.2.tgz", - "integrity": "sha512-CiDDwMQeNq7eusS4TSuo5bMfi/RORPXbBNMpd1s8a5I2ukD5NOpExNKfgHJSK6Sy4PcjY35luIw7bVo84dkPHw==", - "dependencies": { - "cosmokit": "^1.4.1" - } - }, - "node_modules/semver": { - "version": "7.4.0", - "resolved": "https://registry.npmjs.org/semver/-/semver-7.4.0.tgz", - "integrity": "sha512-RgOxM8Mw+7Zus0+zcLEUn8+JfoLpj/huFTItQy2hsM4khuC1HYRDp0cU482Ewn/Fcy6bCjufD8vAj7voC66KQw==", - "dependencies": { - "lru-cache": "^6.0.0" - }, - "bin": { - "semver": "bin/semver.js" - }, - "engines": { - "node": ">=10" - } - }, - "node_modules/setprototypeof": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/setprototypeof/-/setprototypeof-1.2.0.tgz", - "integrity": "sha512-E5LDX7Wrp85Kil5bhZv46j8jOeboKq5JMmYM3gVGdGH8xFpPWXUMsNrlODCrkoxMEeNi/XZIwuRvY4XNwYMJpw==" - }, - "node_modules/shebang-command": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz", - "integrity": "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==", - "dependencies": { - "shebang-regex": "^3.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/shebang-regex": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-3.0.0.tgz", - "integrity": "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==", - "engines": { - "node": ">=8" - } - }, - "node_modules/side-channel": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/side-channel/-/side-channel-1.0.4.tgz", - "integrity": "sha512-q5XPytqFEIKHkGdiMIrY10mvLRvnQh42/+GoBlFW3b2LXLE2xxJpZFdm94we0BaoV3RwJyGqg5wS7epxTv0Zvw==", - "dependencies": { - "call-bind": "^1.0.0", - "get-intrinsic": "^1.0.2", - "object-inspect": "^1.9.0" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/signal-exit": { - "version": "3.0.7", - "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-3.0.7.tgz", - "integrity": "sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ==" - }, - "node_modules/smart-buffer": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/smart-buffer/-/smart-buffer-4.2.0.tgz", - "integrity": "sha512-94hK0Hh8rPqQl2xXc3HsaBoOXKV20MToPkcXvwbISWLEs+64sBq5kFgn2kJDHb1Pry9yrP0dxrCI9RRci7RXKg==", - "engines": { - "node": ">= 6.0.0", - "npm": ">= 3.0.0" - } - }, - "node_modules/socks": { - "version": "2.7.1", - "resolved": "https://registry.npmjs.org/socks/-/socks-2.7.1.tgz", - "integrity": "sha512-7maUZy1N7uo6+WVEX6psASxtNlKaNVMlGQKkG/63nEDdLOWNbiUMoLK7X4uYoLhQstau72mLgfEWcXcwsaHbYQ==", - "dependencies": { - "ip": "^2.0.0", - "smart-buffer": "^4.2.0" - }, - "engines": { - "node": ">= 10.13.0", - "npm": ">= 3.0.0" - } - }, - "node_modules/socks-proxy-agent": { - "version": "5.0.1", - "resolved": "https://registry.npmjs.org/socks-proxy-agent/-/socks-proxy-agent-5.0.1.tgz", - "integrity": "sha512-vZdmnjb9a2Tz6WEQVIurybSwElwPxMZaIc7PzqbJTrezcKNznv6giT7J7tZDZ1BojVaa1jvO/UiUdhDVB0ACoQ==", - "dependencies": { - "agent-base": "^6.0.2", - "debug": "4", - "socks": "^2.3.3" - }, - "engines": { - "node": ">= 6" - } - }, - "node_modules/statuses": { - 
"version": "1.5.0", - "resolved": "https://registry.npmjs.org/statuses/-/statuses-1.5.0.tgz", - "integrity": "sha512-OpZ3zP+jT1PI7I8nemJX4AKmAX070ZkYPVWV/AaKTJl+tXCTGyVdC1a4SL8RUQYEwk/f34ZX8UTykN68FwrqAA==", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/string_decoder": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.3.0.tgz", - "integrity": "sha512-hkRX8U1WjJFd8LsDJ2yQ/wWWxaopEsABU1XfkM8A+j0+85JAGppt16cr1Whg6KIbb4okU6Mql6BOj+uup/wKeA==", - "dependencies": { - "safe-buffer": "~5.2.0" - } - }, - "node_modules/strip-final-newline": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/strip-final-newline/-/strip-final-newline-2.0.0.tgz", - "integrity": "sha512-BrpvfNAE3dcvq7ll3xVumzjKjZQ5tI1sEUIKr3Uoks0XUl45St3FlatVqef9prk4jRDzhW6WZg+3bk93y6pLjA==", - "engines": { - "node": ">=6" - } - }, - "node_modules/strtok3": { - "version": "6.3.0", - "resolved": "https://registry.npmjs.org/strtok3/-/strtok3-6.3.0.tgz", - "integrity": "sha512-fZtbhtvI9I48xDSywd/somNqgUHl2L2cstmXCCif0itOf96jeW18MBSyrLuNicYQVkvpOxkZtkzujiTJ9LW5Jw==", - "dependencies": { - "@tokenizer/token": "^0.3.0", - "peek-readable": "^4.1.0" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/Borewit" - } - }, - "node_modules/supports-color": { - "version": "8.1.1", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-8.1.1.tgz", - "integrity": "sha512-MpUEN2OodtUzxvKQl72cUF7RQ5EiHsGvSsVG0ia9c5RbWGL2CI4C7EpPS8UTBIplnlzZiNuV56w+FuNxy3ty2Q==", - "dependencies": { - "has-flag": "^4.0.0" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/chalk/supports-color?sponsor=1" - } - }, - "node_modules/throttle-debounce": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/throttle-debounce/-/throttle-debounce-3.0.1.tgz", - "integrity": "sha512-dTEWWNu6JmeVXY0ZYoPuH5cRIwc0MeGbJwah9KUNYSJwommQpCzTySTpEe8Gs1J23aeWEuAobe4Ag7EHVt/LOg==", - "engines": { - "node": ">=10" - } - }, - "node_modules/toidentifier": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/toidentifier/-/toidentifier-1.0.1.tgz", - "integrity": "sha512-o5sSPKEkg/DIQNmH43V0/uerLrpzVedkUh8tGNvaeXpfpuwjKenlSox/2O/BTlZUtEe+JG7s5YhEz608PlAHRA==", - "engines": { - "node": ">=0.6" - } - }, - "node_modules/token-types": { - "version": "4.2.1", - "resolved": "https://registry.npmjs.org/token-types/-/token-types-4.2.1.tgz", - "integrity": "sha512-6udB24Q737UD/SDsKAHI9FCRP7Bqc9D/MQUV02ORQg5iskjtLJlZJNdN4kKtcdtwCeWIwIHDGaUsTsCCAa8sFQ==", - "dependencies": { - "@tokenizer/token": "^0.3.0", - "ieee754": "^1.2.1" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/Borewit" - } - }, - "node_modules/tsscmp": { - "version": "1.0.6", - "resolved": "https://registry.npmjs.org/tsscmp/-/tsscmp-1.0.6.tgz", - "integrity": "sha512-LxhtAkPDTkVCMQjt2h6eBVY28KCjikZqZfMcC15YBeNjkgUpdCfBu5HoiOTDu86v6smE8yOjyEktJ8hlbANHQA==", - "engines": { - "node": ">=0.6.x" - } - }, - "node_modules/type-is": { - "version": "1.6.18", - "resolved": "https://registry.npmjs.org/type-is/-/type-is-1.6.18.tgz", - "integrity": "sha512-TkRKr9sUTxEH8MdfuCSP7VizJyzRNMjj2J2do2Jr3Kym598JVdEksuzPQCnlFPW4ky9Q+iA+ma9BGm06XQBy8g==", - "dependencies": { - "media-typer": "0.3.0", - "mime-types": "~2.1.24" - }, - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/unpipe": { - "version": "1.0.0", - "resolved": 
"https://registry.npmjs.org/unpipe/-/unpipe-1.0.0.tgz", - "integrity": "sha512-pjy2bYhSsufwWlKwPc+l3cN7+wuJlK6uz0YdJEOlQDbl6jo/YlPi4mb8agUkVC8BF7V8NuzeyPNqRksA3hztKQ==", - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/util-deprecate": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz", - "integrity": "sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==" - }, - "node_modules/uuid": { - "version": "8.3.2", - "resolved": "https://registry.npmjs.org/uuid/-/uuid-8.3.2.tgz", - "integrity": "sha512-+NYs2QeMWy+GWFOEm9xnn6HCDp0l7QBD7ml8zLUmJ+93Q5NF0NocErnwkTkXVFNiX3/fpC6afS8Dhb/gz7R7eg==", - "bin": { - "uuid": "dist/bin/uuid" - } - }, - "node_modules/vary": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/vary/-/vary-1.1.2.tgz", - "integrity": "sha512-BNGbWLfd0eUPabhkXUVm0j8uuvREyTh5ovRa/dyow/BqAbZJyC+5fU+IzQOzmAKzYqYRAISoRhdQr3eIZ/PXqg==", - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/which": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz", - "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==", - "dependencies": { - "isexe": "^2.0.0" - }, - "bin": { - "node-which": "bin/node-which" - }, - "engines": { - "node": ">= 8" - } - }, - "node_modules/which-pm-runs": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/which-pm-runs/-/which-pm-runs-1.1.0.tgz", - "integrity": "sha512-n1brCuqClxfFfq/Rb0ICg9giSZqCS+pLtccdag6C2HyufBrh3fBOiy9nb6ggRMvWOVH5GrdJskj5iGTZNxd7SA==", - "engines": { - "node": ">=4" - } - }, - "node_modules/ws": { - "version": "8.13.0", - "resolved": "https://registry.npmjs.org/ws/-/ws-8.13.0.tgz", - "integrity": "sha512-x9vcZYTrFPC7aSIbj7sRCYo7L/Xb8Iy+pW0ng0wt2vCJv7M9HOMy0UoN3rr+IFC7hb7vXoqS+P9ktyLLLhO+LA==", - "engines": { - "node": ">=10.0.0" - }, - "peerDependencies": { - "bufferutil": "^4.0.1", - "utf-8-validate": ">=5.0.2" - }, - "peerDependenciesMeta": { - "bufferutil": { - "optional": true - }, - "utf-8-validate": { - "optional": true - } - } - }, - "node_modules/yallist": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz", - "integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==" - }, - "node_modules/ylru": { - "version": "1.3.2", - "resolved": "https://registry.npmjs.org/ylru/-/ylru-1.3.2.tgz", - "integrity": "sha512-RXRJzMiK6U2ye0BlGGZnmpwJDPgakn6aNQ0A7gHRbD4I0uvK4TW6UqkK1V0pp9jskjJBAXd3dRrbzWkqJ+6cxA==", - "engines": { - "node": ">= 4.0.0" - } - } - } -} diff --git a/glm-bot/package.json b/glm-bot/package.json deleted file mode 100644 index de6be80..0000000 --- a/glm-bot/package.json +++ /dev/null @@ -1,20 +0,0 @@ -{ - "name": "glm-bot", - "version": "1.0.0", - "description": "glm-koishi机器人", - "main": "index.js", - "scripts": { - "test": "echo \"Error: no test specified\" && exit 1" - }, - "author": "", - "license": "MIT", - "dependencies": { - "@koishijs/plugin-adapter-onebot": "^5.6.6", - "@koishijs/plugin-console": "^5.6.1", - "@koishijs/plugin-echo": "^2.2.3", - "@koishijs/plugin-market": "^1.12.5", - "@koishijs/plugin-sandbox": "^3.0.0", - "koishi": "^4.12.4", - "koishi-plugin-glm-bot": "^1.1.2" - } -} From 47ac7f9b1f1cc00dabc80dcccedf1b80b7b2ee4e Mon Sep 17 00:00:00 2001 From: Shilong Jiang Date: Thu, 13 Apr 2023 12:02:10 +0800 Subject: [PATCH 100/110] =?UTF-8?q?PROJECT.md=20=E4=B8=AD=E6=B7=BB?= 

From 47ac7f9b1f1cc00dabc80dcccedf1b80b7b2ee4e Mon Sep 17 00:00:00 2001
From: Shilong Jiang
Date: Thu, 13 Apr 2023 12:02:10 +0800
Subject: [PATCH 100/110] Add the ModelWhale online ChatGLM-6B deployment and
 fine-tuning tutorial to PROJECT.md
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Using the V100 machines provided by the ModelWhale community, I ran the full
ChatGLM-6B deployment and fine-tuning pipeline; the fine-tuning flow from the
official example produces quite good results.
---
 PROJECT.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/PROJECT.md b/PROJECT.md
index ce9a91c..4798259 100644
--- a/PROJECT.md
+++ b/PROJECT.md
@@ -15,3 +15,4 @@
 The following are some tutorials/documents written for this project:
 * [Windows deployment guide](https://github.com/ZhangErling/ChatGLM-6B/blob/main/deployment_windows.md)
+* [Deployment and fine-tuning tutorial for ChatGLM-6B @ModelWhale](https://www.heywhale.com/mw/project/6436d82948f7da1fee2be59e)

From 5fb705cd5b7a467fccb1e8e3c5eaf84ccc50bca5 Mon Sep 17 00:00:00 2001
From: rainatam
Date: Thu, 13 Apr 2023 13:52:35 +0800
Subject: [PATCH 101/110] Add option for saving checkpoint

---
 ptuning/main.py    |  1 +
 ptuning/trainer.py | 19 +++++++++++++------
 2 files changed, 14 insertions(+), 6 deletions(-)

diff --git a/ptuning/main.py b/ptuning/main.py
index ecce8c2..2aa5ac3 100644
--- a/ptuning/main.py
+++ b/ptuning/main.py
@@ -354,6 +354,7 @@ def main():
         tokenizer=tokenizer,
         data_collator=data_collator,
         compute_metrics=compute_metrics if training_args.predict_with_generate else None,
+        save_prefixencoder=model_args.pre_seq_len is not None
     )

     # Training
diff --git a/ptuning/trainer.py b/ptuning/trainer.py
index 5a9a27b..63101bc 100644
--- a/ptuning/trainer.py
+++ b/ptuning/trainer.py
@@ -317,7 +317,9 @@ class Trainer:
         callbacks: Optional[List[TrainerCallback]] = None,
         optimizers: Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR] = (None, None),
         preprocess_logits_for_metrics: Optional[Callable[[torch.Tensor, torch.Tensor], torch.Tensor]] = None,
+        save_prefixencoder: bool = False,
     ):
+        self.save_prefixencoder = save_prefixencoder
         if args is None:
             output_dir = "tmp_trainer"
             logger.info(f"No `TrainingArguments` passed, using `output_dir={output_dir}`.")
@@ -2825,12 +2827,17 @@ class Trainer:
             state_dict = self.model.state_dict()
             torch.save(state_dict, os.path.join(output_dir, WEIGHTS_NAME))
         else:
-            state_dict = self.model.state_dict()
-            filtered_state_dict = {}
-            for k, v in self.model.named_parameters():
-                if v.requires_grad:
-                    filtered_state_dict[k] = state_dict[k]
-            self.model.save_pretrained(output_dir, state_dict=filtered_state_dict)
+            if self.save_prefixencoder:
+                print("Saving PrefixEncoder")
+                state_dict = self.model.state_dict()
+                filtered_state_dict = {}
+                for k, v in self.model.named_parameters():
+                    if v.requires_grad:
+                        filtered_state_dict[k] = state_dict[k]
+                self.model.save_pretrained(output_dir, state_dict=filtered_state_dict)
+            else:
+                print("Saving the whole model")
+                self.model.save_pretrained(output_dir, state_dict=state_dict)

         if self.tokenizer is not None:
             self.tokenizer.save_pretrained(output_dir)
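The trainer change in PATCH 101 persists only the parameters that were actually trained when `save_prefixencoder` is set, so a P-tuning checkpoint stores just the PrefixEncoder rather than all of ChatGLM-6B's weights. A minimal standalone sketch of the same filtering idea, assuming a generic `torch.nn.Module`; the toy two-layer model below is illustrative only and not part of the patch:

```python
import torch


def trainable_state_dict(model: torch.nn.Module) -> dict:
    """Keep only the state_dict() entries whose parameters require grad,
    mirroring the filtered_state_dict loop in the trainer patch above."""
    state_dict = model.state_dict()
    return {
        name: state_dict[name]
        for name, param in model.named_parameters()
        if param.requires_grad
    }


# Toy example: freeze one layer and observe that only the other is kept.
model = torch.nn.Sequential(torch.nn.Linear(4, 4), torch.nn.Linear(4, 2))
for param in model[0].parameters():
    param.requires_grad = False
print(sorted(trainable_state_dict(model)))  # ['1.bias', '1.weight']
```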
From 60275ccec50a2cb33cd9e04a98761970afd0f960 Mon Sep 17 00:00:00 2001
From: duzx16
Date: Thu, 13 Apr 2023 15:07:58 +0800
Subject: [PATCH 102/110] Update README

---
 ptuning/README.md | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/ptuning/README.md b/ptuning/README.md
index ab91468..318a7fc 100644
--- a/ptuning/README.md
+++ b/ptuning/README.md
@@ -155,15 +155,15 @@ for k, v in prefix_state_dict.items():
     new_prefix_state_dict[k[len("transformer.prefix_encoder."):]] = v
 model.transformer.prefix_encoder.load_state_dict(new_prefix_state_dict)
 ```
-Note that you may need to change `pre_seq_len` to the actual value used in your training.
+Note that you may need to change `pre_seq_len` to the actual value used in your training. If you [load the model from a local path](https://github.com/THUDM/ChatGLM-6B#%E4%BB%8E%E6%9C%AC%E5%9C%B0%E5%8A%A0%E8%BD%BD%E6%A8%A1%E5%9E%8B), you need to change `THUDM/chatglm-6b` to the local model path (note: not the checkpoint path).

-(2) If you need to load an old checkpoint (containing both the ChatGLM-6B and the PrefixEncoder parameters), load the whole checkpoint directly:
+(2) If you need to load an old checkpoint (containing both the ChatGLM-6B and the PrefixEncoder parameters), or performed full-parameter fine-tuning, load the whole checkpoint directly:

 ```python
 model = AutoModel.from_pretrained(CHECKPOINT_PATH, config=config, trust_remote_code=True)
 ```

-Then quantize it and it is ready to use:
+Afterwards you can quantize it as needed, or use it directly:

 ```python
 print(f"Quantized to 4 bit")
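The README section edited in PATCH 102 describes restoring a P-tuning v2 checkpoint produced by the trainer patched in PATCH 101. A possible end-to-end loading sketch, assembled from the snippets visible in this diff; `MODEL_PATH`, `CHECKPOINT_PATH`, and `pre_seq_len=128` are assumptions to be replaced with your own training settings:

```python
import os

import torch
from transformers import AutoConfig, AutoModel, AutoTokenizer

MODEL_PATH = "THUDM/chatglm-6b"             # or a local model path
CHECKPOINT_PATH = "output/checkpoint-3000"  # assumed P-tuning output directory

# pre_seq_len must match the value used during training (128 is an assumption).
config = AutoConfig.from_pretrained(MODEL_PATH, trust_remote_code=True, pre_seq_len=128)
tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH, trust_remote_code=True)
model = AutoModel.from_pretrained(MODEL_PATH, config=config, trust_remote_code=True)

# Load only the PrefixEncoder weights saved by the patched trainer, stripping
# the "transformer.prefix_encoder." key prefix before loading them.
prefix_state_dict = torch.load(os.path.join(CHECKPOINT_PATH, "pytorch_model.bin"))
new_prefix_state_dict = {}
for k, v in prefix_state_dict.items():
    if k.startswith("transformer.prefix_encoder."):
        new_prefix_state_dict[k[len("transformer.prefix_encoder."):]] = v
model.transformer.prefix_encoder.load_state_dict(new_prefix_state_dict)

model = model.half().cuda()
model.eval()
response, history = model.chat(tokenizer, "你好", history=[])
```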
From 8123b592152bd3290cc01e084ea7519abcf1547e Mon Sep 17 00:00:00 2001
From: duzx16
Date: Thu, 13 Apr 2023 15:24:59 +0800
Subject: [PATCH 103/110] Add length instruction

---
 ptuning/README.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/ptuning/README.md b/ptuning/README.md
index 318a7fc..2cc20dc 100644
--- a/ptuning/README.md
+++ b/ptuning/README.md
@@ -176,7 +176,7 @@ response, history = model.chat(tokenizer, "你好", history=[])
 ```

 ## Using your own dataset
-Change `train_file`, `validation_file` and `test_file` in `train.sh` and `evaluate.sh` to the paths of your own JSON-format dataset, and change `prompt_column` and `response_column` to the keys that hold the input and output text in the JSON file.
+Change `train_file`, `validation_file` and `test_file` in `train.sh` and `evaluate.sh` to the paths of your own JSON-format dataset, and change `prompt_column` and `response_column` to the keys that hold the input and output text in the JSON file. You may also need to change `max_source_length` and `max_target_length` to match the maximum input and output lengths in your own dataset.

 ## Conversation dataset
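One way to choose the `max_source_length` and `max_target_length` values that PATCH 103 mentions is to tokenize the dataset once and take the maxima. A rough sketch, assuming a JSON-lines file named `train.json` with ADGEN-style `content`/`summary` keys; substitute whatever your `prompt_column` and `response_column` actually point to:

```python
import json

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True)

max_source = max_target = 0
with open("train.json", encoding="utf-8") as f:  # assumed JSON-lines dataset
    for line in f:
        example = json.loads(line)
        # "content"/"summary" mirror the ADGEN example; use your own keys.
        max_source = max(max_source, len(tokenizer.encode(example["content"])))
        max_target = max(max_target, len(tokenizer.encode(example["summary"])))

print(f"set max_source_length >= {max_source} and max_target_length >= {max_target}")
```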
From 2b5d00d6e2e4c04e9bbef2b8f2ab48156f451c89 Mon Sep 17 00:00:00 2001
From: duzx16
Date: Thu, 13 Apr 2023 15:34:12 +0800
Subject: [PATCH 104/110] Add slack and wechat

---
 README.md            | 3 +++
 resources/WECHAT.md  | 3 +++
 resources/wechat.jpg | Bin 0 -> 154573 bytes
 3 files changed, 6 insertions(+)
 create mode 100644 resources/WECHAT.md
 create mode 100644 resources/wechat.jpg

diff --git a/README.md b/README.md
index baf7d97..44a3442 100644
--- a/README.md
+++ b/README.md
@@ -3,6 +3,9 @@

    🌐 Blog • 🤗 HF Repo • 🐦 Twitter • 📃 [GLM@ACL 22] [GitHub] • 📃 [GLM-130B@ICLR 23] [GitHub]

+

+ 👋 Join our Slack and WeChat +

 ## Introduction

diff --git a/resources/WECHAT.md b/resources/WECHAT.md
new file mode 100644
index 0000000..ffe3ec5
--- /dev/null
+++ b/resources/WECHAT.md
@@ -0,0 +1,3 @@
+![wechat](wechat.jpg)
+
+Scan the QR code to follow the official account and get the QR code for joining the group
\ No newline at end of file
diff --git a/resources/wechat.jpg b/resources/wechat.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..8ea08f2423d294a9c50efad144fdc1058b079fde
GIT binary patch
literal 154573
[... base85-encoded binary data for resources/wechat.jpg omitted ...]
z)SkQR%D?Gaa}ZXInCh|J_uMlztl<_!=!Jt4!C$+VVLs-t0#LA|OPj_xftob{UVzIh zB^ks8dcEYKfw+r}h!{{`Pa-*wWBUdgQm!P8li3G07uRvyulg#jg8~ohj=eiOty4XF z2`}3jkl8>&2eBy_2wa_G z4jNTYbPM15{C?iaQen}9kZ{wkaqA-Q0NA2zodgUH8%a~|m0$2i@b|f?Clc--txwvr zN~{c72)NnHK|BZ5zpDQUNUdGdo+^W)C20q25p>`O_si9zAw5Txvh|=@(6!iy+(GL& zT!p+laW!a`BWTR!LlGI;n!%vSE_?Ql=oU@fm^~@yuB&rC(jN(KQ822?G@d@7|SsRlMyds+00mT;vpXQ4cXb z_7TyC5$z9PTSovpYQmRIO(9Nhzwf@49g(cdanMiTRcfMW2shHY=Y{v>!1Pwt^8;yL zWZo;E^TxdLbe%7pHu1GPG%NI=--cqRq-6E1rXeVcfLssn!W89QQ?Po%V6#J+;b7H% zCFR_jW*sJ{u7ZNAeV3waLy}UXNEIv6wNumtx%YI z8WD$U<*n_>->GhL#WGlcjdZ+gJg)NEZeHtWAp!$sw*0hWD7UH2$49&nQSUzLa^udl zrAg8I=W)#!Q)~?}ffgjPFJAJ|KudVW)!Z_6NPHo_AkMbimx^wU*0uvbnfI%fAM*wH%W4g^Wk$>V%Rnnewr$Cl`A!A z@0qohpq8WxFpowK+P_WJvXKb|Z+H^x<`;Q<*IV5WhiYprDiS8q+*LbgQpuG^IgSFXLH}7J2BxZZo_?6 zsYQydHy+G%PDg2D#x6G%5zj^^g}$|$!G;x}1~LX(&=s;Pl1D3OpT4B0sTA6mIWVd*w(`_ouPmZISC&c~)++B92T|@|xZs>=)Nul<UC96Sz*`QDWP@3cISdli~J<@iI3ceMcw~>rAzC_;BKxlLmKE z*|YlIth1yP+e$y?@O7oKcvWa?*0%FROp{Mf@$x$zgvr(`<%w)^K37_9BwAgyM8|w= zBz!Rt{=&dnxR72e#~ zPkmuN%Yd1Xb9{4Z+3Ra{PxQtvE%mdnx$+OM(6kRB=AS|`H$|&+7l)sCfAA)Uq|z3# z5B*}#q}4@Ry>I%CX0ZXF-%?QTD|K3{52V9eex4AWk*fQ^gmEg8YhqvVbVA~h@o+8* zFQ)BczTc}B^U9d9(T8_0Q)RMB7o7=rrP|`WtDzCZn4Va>24C?6oB+Jzj$5OC-@~4G zl9j@uC*dw9u$j)(6?$Z#Y8MZxSiglvmY`bYMTiE~Dis=;{z)|kS4g+(?R}$G@2Z&| zT+$j~ZJe2op}f|~Gx=Gy=iQd%+?wLU2OFxG3TD;r%8iuRvVaLJi5H_lgS`e>1NGA* zfi6&iN0w@UNtB~djU{cn-^#cGZXn384kcXi<+Z_MYi%~Ruy8zGN6$@63WG$vRROq}(K;fX2J?M&H3zT>E<<<9My5lNw?Te8p2kh1ZP%hnfE;$OVpl{_*Xg4ET#{57r4RFmn-C1CQ*#3tL$*;We%D zet&=>wOEHsfGq-xM^8QzoFX0u&F_z5=pSXNqhpDV}T& zX5A%&i>b1v#5lAs1w-l~DUQYwd%n@FUawJ)P+bku!|ayZV`zGOQdrXS zXh*8c(M`5{!__A{J_AyiJG}HdtRbnBs}DZDY);ZIRsw8)aJ-(_(e$u{ z)_vM3>rB){-9tH&H12EWG!h)|AI5bw22N&&K`3%@Abrq8?me84xw;di0UK*3l*7Dl2{41PnYp0oB&A}xN=CsxUh z$Sr}_wQMo#YMEofh%s?cv?8AZ=Kz6We;I8N%EXH88O!0{t=a$wDj zqBHy6Xk;~IHKlL=?5B$MzSPlpBwf)*yfgR+BAx>smZpHCGG@_iD4p~ELX{iZ{@*~- zfz3k4!@n77pDitU2IdB);z9TCe!~XU7eup1RT<0=#-vE4Gip4KQgi|2xL!34>dbTs z$MtO5jv?H_THeL%^?TrYUtq8*A9SBrF@S!L`XyAGed*8hdaOx$>|L0H>D??_95u_6 zrQdSfBPP6ae7H!2vhPD?02!=0Qgj}N;}W_^1GCR2D`|TCGb2T=f5M{icm#PzGgbDp zN)*g<7H48zJ6)+LVwnQ-;?}F4dJ0D0Ei^my8ek_yLK1qaKt>trTmca}bVcw}D}bf7 zm^c^8jPI-hK}4v_6rx|YE)F$%pH6ggQY?E}0CrOg09~A)0eX|FJ>VDSAIcd15NY5^YxmZVj5;pW@f$tJI!a`VV?ybDUU zq-?0zkn{x-o-xnx3mtFm^>`FuyXWz)vD3SDtYT)SrSLbj-^|4~XGlh1ZlNCgu{)SL zA1zMcG=IdJ--@K^uK+0^_i2T`?D2i&Mef4)n8(8PQ??|n?jJ@UHrL{SwDE+*+6#jc z<>Un0JYRKltr^oryxyES46D9@Rq0F*dc9E5Xd_oKU zq_UM${Fy8}0d|GehP5RrPDZij6ytD7pwgv7YVJPQPnCnvY{S#$nar-INAh=XS}vlu zqZmx_`$UHp&SzLRosW~95svRWQ5R``u0&}wD`3Jdld`Yw(;7?z5_|G0q~#+v0Rswm$hJ(w>Ou=N+4-NNn-fci^h7o)fvWin)}wDBtUOj-}FhXs$<{ zNBKjh!ck6+z@=@)j%qgcDPQLsy0Yy#m7S-|3NzX=hg-H2%KEXjL*x}D}5CEa%iY%Sxyo2 z`P4wdx=L)KrQ))-c=p-EOJSaB(f))y!?NwsmzgS!3Z?}FUR>gcck35ODe5h6knv2t z+A+}?$kH+WflK}sztxUer+d$5ZMW3k4h-Pls=+Q4uTR~t-|@WaVE(1|Pwprd&1%)% zloW}rxbnnL>HH1EEQkT$idQ%22Ryy{BO+|vuz!|(?F?u~du%e%aW}wheefJEgYMvH z2Yg4?zR}0hMtxtfj80Sd8ZILHRq9`GJ^dsCs83U>ba5>yKtGi23JXQm?k+X315~!u z0s{tbNWsW2gaqIEWu5Q59|mmHaZ>@+bAaf;n#>9{7myTamDt)!S_R`eC>|!A1-u8h zq43*OKrN8aY#K%!?qHUlC_>ERZ#@d^O4 z2sAkuqT=&m2mLl)G-rwI?6hD2*9iEvfBW}rd`&Gh=H_Qjs|AIsnWJTM3n)&g`4j;R ztDOgSIUF#or3o?8cz&`_yPmW4jlfPgc4Fr{6{s-Vhp>H&qO#*~lB^SA>O}|v zj1fRD>7Y~?(+usYpwbbl0yT3InIVuF17u$SjLNp}9*eeh;ieVCe7n`2f!?L|ZAN|f z3UU0DURqZl2+yeaRQTrI5(o@z+^s~SO96! 
z1WPRi_YF|W05A&ra>x9}RfmBXo9alvN<8x`rB(+Qyda zv0Q#})AhDq`Y(rAht(M6V7j!mt+Grumh^NcKy>}+ehh&8E`ztXsK7%tx8e690U9Ob z07^|!0xy7>5nf-QCa-z)qpO8HB8zU5iZ=7FYvUz`c6VH$hNUk=hc_+axj|hiXDy7)x^_e+{J<-N$rTy7%@!%A?iV71A4~ky<_DlzW3tA z0n>{rWbHZVon1Zen5pf$?0kkmQfu_fBcP`HO}z=Hk!mce_c2US%8>%+Ag$+ktBEe~F z)KN4$Nz1VF3Snp=@oPVhQSq}tPGf-W2t_v%V1(pGP|kpGc-4cDB?<E6JlnTKXS~%CSezT#{tGucI@=xL6KmsDu`5ZZexH9oe4Llh#^vrRo|r zA8}2Gaor;MxT*6Aw58_b1KO#SvNj}nDp)r`g6XsC{ld%5j<@ZmHNX@2OybR^i0>J} zkSwr-GhiexBx&sF@+WYkVW>D1ABumonLgtUkXh{NBTqn}NuTisHT8@uLAY_ykp`#<_w$6}=NF5(Fklb;n3^EEMTw0pPMeX@Wqc z^-x8IPxNR-ysjBEdK_!33ud~Uw0ZFe#`ypVG2zN6cd}i`*?YB#nyZ+Pq9z`?TjQ1+ zml9IBa$8|Gbg-Uikn~shFy=fp8GB{OdOeM}khZ7O%xup~oxE^T=|2>D?jNiZ0FVgG@t*!`j#p3gVKT+f0qO;W&xzldQkZutD|J3< zp=rxw=Rr+VCZR^l@S>@%o#zqhJQX~m-(hUx(Z-6~o8z~dsF9U{ql5{iwojZ&$QM8W zOk14mUXjK*rO=PBkE4JNaC{)#Ys~c4=}T7Ji2=8J7b%h{$BPlp>aP$gp9yZfQ&AEm zAG~lp+q~k**SBOZ#qb#{lVbRSs=G`w533ojT;Xyf0ySl|b* z!|s;b?Hj6b#RsmmcfCLjdnuOtk*quScCy`W%LeXh%k?9%Lb_F_s@}X$AV}P0lhp2Y z80Fxcw(FIDw0BT5O}BXc4)k_+l>cD@?O~6f?7CM+g)Hh_^y$wX1N?oDJ=4UBS*^n= zo3fMDWS=e9&8xp7EZ*@*{6mHl^{^WuDkJQ-Yk1I5pr7v?+k>D%-d2>LeO{hrv-nKL z9)WLZreyuuU2`<h!W-p)7CR5F-NmE;$j;5HU z33aB~?tODI*bTu%;;gIbsg*Ud)%6Oow_lD}pd90gTH3cCFQB%mdpSNrYxlfb#_H*- zvf+nLwIu{XM6zvF+G#nw4`ar0%%KmY=<#F1M!m^AD_uT8?}6b_s111%mlA$=o@U@V z!d6a&b*bg4k=cwOYNPr`uOJSE7zQS^Em(TliCY? z8;0Qs7gGhx!ec(#7R@uJzA0SwiwekR`-|Vl*-FBtF`su79Jm{cX*1dKT6Z7}eW`2q z*Z7zhsw}&5l^p3Zx^IzJ7cKW`&Mwig*C?=_`GqCU1jD^gPlWAFrR>VV#INMyg6HEg zRvlKS>9eXfw?TW62!2<9T#7yYY6D=n@xaSE0xe9~I1P+D9Qu3QVd$3Hzp>-6j4o67 zJ|&S2{Nm#`-RQ#b@vN<=YB1VIFoXCgw@8I~eo5v%i`T9d4Dv^vymZqXQ|Sj<_BBEk zUb4a3#+OtUK$8=p*?nEG%|sDViOf7k+fjpm4JYL;K<3{;s&sp~K)06+n=K*Kx$keH zBUu5swDsa9*U$HSx2a&Pl^%;?{Qizbd)IwP7kvRTiZf(PYqoQ0+B~|&-#SBW_7v>Y z8M#3M(+S6oIEn*HsJ3{XdoNGXn^}VDQuLSL)pU-9&>=VF=~mEQx2McMrLilG8XfS~ z6{z}H=tRY`#Q;AO$i^imSj9L;M=BUXYzQ_A7(fMhA3lKC4BAA4Sv>eLfU(`}fmH$J z&@FY6CCP4D@)u_d>>_yk9sm#e%lpOB{sZp^=+VPbH|fYNc94I!ZcNd8cR^eLu6m}5 z0=myEGb3YWn9sL?CO~gU!BR%B-xZResD(({wAe%Ne8U1y?1>4K&6>ZlFG`rS+voTk zLpnzaaO%1@fZ@IasP%G5Ei_POIEHLfngLwB|HWA;*rI^~0mRp!W%~0k2&~%x?1hi+ zPY_$&bl6K68o*va@N`}PTHU7y?^5_JjJ`s0ry2YcWm{|$dq`NO@E1O%2|nrG8DAU+ z$&l~%wm~JBh0R=4Cz^GjHo$bkR{h|4g{KN9J;6PtEFU&zK z(W5jm9T10&5RSt|L7|FY?Ve;63q2H*37c)83*6TT#f^jl?#i&BUSa?NLGt2?tHx0z z))Ao2A+!pQ#PTly$M#6*QWXO+=mRNq!`h=nq6h6`S8$|lBZqNKfctiro-iE;ky=M% z*BLdK9g7CCwbF>Om9*_wBuE#?pliH*-p6!- z=rgpH#GPw7_H-;w6NC+7J<#amDtT%~u>=*hStZzb02Vni)xwHRK%+EKJ zsgZ4|w}V+aPrz2P2W{}rcip*k0kA8ckKty?lD4qp14N#%>hw;4IFCRRY?n7RfLq5{ zQf0?w$N*{v@;FIQhg_ROWCa@Nv7JsixdN=v^!+N8etKofu4{x~+P10Xu>-1tRu5nv zyU#9@4o*{^mEJ*&AhvdaY?Gh9=8z8{f&;_kFo{?63wAvVRbV#>r|fQ{Ap)6b^UxxY z11jPuN;%dO2LAkrYb@%wNlT?O*-58boKA=``Odt+xHLi%U#6r!?4|mlC)UWm$h&BQ z;mD?jEM|*{Xr5nR3T8X1EbWrcEO9}pLCeG5UW#GcVvgF+GTf^MIs&TpI|Ayz3bC-i zMOah+L|EU&Fn-~sq10jMk{Q3U*Q9gx+|!*r#Rz07kJ(O;AKLi8w}+K6Gw}aLPE$Oz z1@wVkoQ>c7J(LQ&bxZv4Uo?ILF&8ob74ct3k9SF{pI5G|WnmHDOLXEOE{_TM#eH;` zcX@Y%nVIQ!hcMv#^W9AxJlecJ(z;h|_ME3z^%oo0cVTqO8;7MbzZZt4I28+pTh*cxw4c zDyKAe(@cxin%$PD6FtV#NJj5$<x-P;yVF?uaeYeuBmwXsRolY%yH@!Z{3`<%?sr z>0Hls@qB$!y2`fA@u`+FHrC&+5tthX7bl@Ib4Aul1Y0+;t>Kz_-Qjmv#ua5zed2oCa&Y{lNdBpj)42=# zR}E*+9AY)_QZ->Ps^*tK-g?aGm&iZd z2Fec8j=%mb>VW+RGDaZuea4r9hRg_TGF99(;59osa(6RVz@xnm$Ps zuQHo$OqK1F{fq$nZ(kKX!|_Igai^3Z{dc!-K(m>UicRyN%B(e*ug6W|s6HN3ZtyWx z7`;u$_sd_$4roFN)P&B0T1tC3bP~=0unVyy1g)k6h-*5pA~$z}^aNx*tNV~$EY;Aa zI10cq>BPjrS|5LUROml(s0>g&XjKC{_hk#MB6_ z(r^`XH*ZUnG(u*jsFJATE8_@0d(*?uty>1~Fhs4i0dir$-^T)0*2qyXhvR|)lzqRP zqVo;^PhS5Qe?uo4(u^F9q%tl+%NdRU0o})2jV{F21(}a$2g_%b5Rb>HI~YrU3=4(U 
zE%o3TMRsG76rJ06h`L=KyPRbB%h!7F4?c7bAf|T!S}?)@l>=(&3Pze(B@s&t-956= zjCqc|EK83I9^zh`5+SO_>NX~HAF0u^mqrHRuSGz6K<^Sr+e0lV+;mdMHtMPr9JuyS z7TWm@#6~;dTDF(nZ~@_s6lw=ps(Z<3@{HYtA!y4F7?Dm8N9s`m?FpG(axVsUm0Bo+ zuvPme7`go$Y0dQk8jZ((yJ?498;6p57l$B~k!@8ve(3c8;zd$q?K=t0Fx=#B&xAOr zu_^+X5KN#4xC}sy_RWP6&`DAxen0p75&a#TS~S)cvOXz$J({GS<4%)%au6FqsLdCs z9!Q3_+x0L9@6MZp{~(bX*gQ2lVSPEqls7Y|XFFZ)mA;w(DOXHkZ&Pf*P*np-9nZz% zCiI0GTj%8T@yde;0XcVBn&>gYFq zoLS;xk~eGtP1{$%==G zHFEBqMToeRL$A`GT#QBxyHVEk_iaA7&r*3h+Ad{q?9vU?%PNFmO__glxe2Z?vTd%; z7@k#9rPFa;Z$FIzy*}4Yyp_}q=F2zcSFVgS^oN&yH2V{_(UI3qIrgRE?e;}dk`sp+ z>#O3a_KiI}PMYZFNqz%HvBvHsP_eyU1{bFIb{5I{o<3-N+IdMW&;Svq<{HAGb#6=P zwR`^2@x>Nnaiy)4xtbf7Up{m8_UsvF9`6POz!+j8xv`Xs&r18&#JP;+)4F`5r~YGF zvUe9n@rjnrNc-{NYXkR0taiP}=cB?zp0b-HW?#M(SW!rddMNz9G@n#wLzH;EVRyA} zdg`^_of7uz9`-knt{mp$UFGlgGanny9+!RbZQpe>-OFnEC(VTAvwY5Z&tJ&8 zxUs>$lkLpc(F;|Ys~e;Oq$JBdVoQD;AN;H)^ZGyNa5;zwVZ@ zkWnZ`A=5mMM(g7ifDw0e%_#GfzvtUQ{izJo(+^+MPn^akPrQg{iT1`Y&2PLh&UE*R)_P%}p;YRKI>kU(1kn)65$d(2 zUCE&FBFhQDXv^rbo;{kRi}tEA;2cq$^UneX)H0A8ko(bnj~@I^x7||jFW$)Tm#@LA zMgL#D8lm~WAB9pq3)RS?*j!8Aovk~6^FYYXoD*CXnc|yvH8cg_sP0o7Ne<&d=C$_n zixmN8v07&aWTl&8{rDKjkp`$;P+f18ItZ?cj#KOc1`iR!VWU41E0-JWR z-L4zShZud1-K{H60t4`MLW>K#T$A%kY-}{dDJh$`f;qeC@rNCV1qh~GebzPbqI_JG zb;J5MGdY;zrnA835u(}#`v2W+EB;CWEz`u@F;G*l1N{j$YA$$Y|9EE^S{U$9K*L>f?yHR3UIM=?J7rFi;=(EH z3S=Nj3`-Cyayg$%@+<)lEX6omE=za-R*hYidi>^H2WaFVolO@c6k9v!2Y~E855Rrd zRRG@yRuANcK*7HH*ITSM3c(RBnBAz3s{JKBfexx(P7b)qeKF^*;X!MtB^SD51x7}^ zA%E0!Y!M$!_gw=6_$j|1!Keezu_DlwHj=bL!^l0jP@vLStJC_>>im3&$g5X34N!n0 z@7LPW|LxaEMZggag7;2sBy9$)IQuFz<}qUQ1@z5T3@lXdA8JX(^qDGDKDeu(gN<*) zVV4zXU6_w1RN!nw4*+Y0j)#VUfRY|OHzIsifA;9=-z+RVpsZo{Hw%ko7Nk8wu!!H( z#0mf}?J?)VI4$%y^T03vBiyi~vCy~cTUhEHb6WU;psfJAY9!$}c2hkFNvo9v)xE?j zst)KmNxQQAB7s2XYLs1Jr?H;}R5ZPz*mFhT``Q@eU`nfXEC|8laOWm$Um_bB6R->f6N0sU*(kxvgh=(yIS*`hL) ze;?SK^G-i$?<&AIcYN03Yx{70C#S*o(XNuvBje$F-6ovu#$_e#2#W=If^iNmgX%=( zsS+EbWy_*#AGFHy@isLp2C1FTU*?O~p!T9#w@_~qO+6mopI9`&paIpLj(f@ zjc`Z3kf?`D{u5ucVl{<}_Zx*V&LgdZ$K`B!^E<}JmCJ2*-=(wyBxs(iGGAI>VqtcP zpPA^5FI#rGl*1irC!buscuS|%(;mC9@*G*H)L$pKW!K-1w_VM79oTv`ymTpZ^zuoc zQHvU{8cfkL;&O}hy{{dZu`{vcgLT;be#F_RjeU&*hbw1Ab~U}KOd!rHGUjvVHTel* z-sK}MM<2PnveScPcj6`Lvyl87;l&J(+IM-4r(G002Xk&v9p(b{oyYQ8PsZ;*d~20| zz|VISx~tY`(PNea-L>oAQm>&hnlN6wb4Ikf;MIVB(Ho05f8+u^FKuUmQ~5roVpH(F zC&qPM340c5M5AAne%SD}3MQ<#nj74&q;C}v`xkxLXR+||Cl3&RROmx};#5@oM8;Yd(4SWxWZKUU9WVuL0P;vyp z@GEP;FqQ*0%dh07S@}?t*q<<3nK6BImF7&-0u}FzAltfgndU=PbVq(;0U-N=UC_)y z>=Q6T@h9xtX*v%7ydd-pcH_Y>2`=g-RKpA5S8(Vh3@{U4K2kD)nK%f{#B@Ajh_17i z_WV#)V0KT^E87OH z#{uMzfXXz`VHO6mnTHUd7sJcJ0(44!X-qgE=h6bzej-LvclKjw%*HfTsJ8X{QR(+1 z2C7=ac>eBjgE;vMwZ;NtmWS>qtqSIU=kU4`e-|ZFL0TDoEJ@_dR#nK6av7fe_@-t~ z1=*1p)Ufr>Q)2?K4ZmtJ^|oujcH zX7Ke5IH`SDv3-Cor6<*)e~vCvB65_twGLn?7B-)iox!aMQS7v)5UVDX<1? 
z199*)jffB#tR#K6EmYMxu%Z&c0)63!rGnSfi(!y1J;;$Mz(-Nd!HbISyM=7faRx(c z#c5NH{wp%hOWw_x7A7kkW4_zWu;iBtttk%mQy;oYsl;n4h9Bm`KpUop7!gr@(c#BK z%L22_IDHoQz~7JU6Q+J-cTzmbcn#^XiUSqOGr9-;DmjJg%X1KGo06c9vf2vB3?oAI z8%u$Fxg&%}wzVx%y*QQ^PvBXvO~(19T$hLqlb+b%$72caDx5S)Pi?@(XE%dEZI3nC zXSTCT3Wp@hpKZEcx$tSk%4J*O+3Xan%>7|8YpptX8RjLG&fpz;-R~Vep7k}A$ubf3 z@D+`#SAf}+*Rmg)?KCG8M2_C!HYa-2T7Jxv@W}Z?P~Z|z+Bh~dh<6No=W?f<1C1wN zIwV9H)r1}r>t>*ILbY%V)Xr-O=G%-Bpj&oqZFy^(8-m0tNwQn*0<9WcEqqmO7C~Z0 zj~P;A0XVGv9T*fC6@tJt7|`O41WM}Lfs*T_W#FZuu7Xu;AK5l;uE+g2tzGgG@qy%2QX&7wljcTE+gtIOt^o($ z9ea7k;j5n6$y*(FtWcqt(v!Ip-svo%Dp8{K?n#}G^UDGU)eHiw`v$rFc}l;fvswAQ zGYJ1^{QC6q`5N1DJ;Sk-V|e*rHtqtg9nDi=?MlQnD%Jo78mT0U8#&2l+v1Kes!eB( zdqmeUfCX?k54}Z(=+g3N+Ab=WFC}6XNW2e1c(dpRO@rg=T?nt|;+caNm4>B%ATnoqUEdJ!%X)vyx+@2Suj$?w2G>=$ zE?xU@U)Mee#=rRtY>%`~g<>QJN%72?!Ltt^Oz89K>TaZCx`bMKDC${$A>3{$J_YHy zAEO^XT&F4nW%)b0+`W4EKe|Cj+bIzW54s5MXn*ILyvq80Lg!gmyPmmAlAj(8eKGu0 zY0OW)gyzten2z5$dmspfgOjXZQ+U&7_cY7Lg=P1f=`Oq(*#BmWTZ%Dj_%v}K-xJRN z@I~iqBl7bxbDFkn=}EqUk;WOkblYE+8Q<@WNIBSbDbLXwy z$n(wKmaU~&T*Pbu(@ZG0VCImADaSo{VRhkQu=K4^DaI5l7ltyNiL{p?((TZFmxEG! z#kwt{e9K-(Eb>*badke55Xp7htFdUJbRx_|#$zymxg+X&yRP}s6~!`4j{h9 zcQM5#+e1lOu|a@O-QFZHeL`A;J`l-};^=v-eRrm*6rgqIV=&$=&u4j3-&8)zX*Jxx z6?Vw4RUhSb-t&9^|5QddF{O!UKH*Tb4PwR8v@SotYn$=gZWx+;sKNBui?B^*l`9{6W@bix_`Fu=~sH_s%X;c4KJA5x|Z2 z0-p)^OxgY^(Puphd5NIZjOl(ffPQ*RNIj69zEDj}`#);0Rr~D%adT zs*a|84NUw3rYe~SKyUCn>Ep#+JF>f#H8WvmK)9Y&*?;wQ%LtEaxi|UHn(PT%+QiB$ z^$Am##OfOwm_`MXtYLJ8eg>_5y~A~@axEPDHAHFPbJP3Ub>?8i1;ZFsUNCuB2||T! zcwi8bq?4-fEsog{(jGXecLlg$c9jNdg=ro%dWo*(EjJ>ppwAjWmnj_tnIVjGH_exV;T96S5L^# zl-<`5KsTJ_2yNhpp~p<|9w#2pA6r%-t5}zJwNRJeo$OfJ8tp;T9bXzW_KW93|N0wt z(5-zK@JK&z0PQk%>v~94EB8pSdjps^-H9InR1}*_J~2u zT={|}DAOlJemw#vDf`z@1cV;vL9?UX1Jw7b%YbjkKJg%(sZsn!; zKMXIr9gGcL`|B~+gQ@~3IPlMsn3`sO=| zpFhbr(I(y#%*oIy_vB*p(6Vu9vHFp87RAp}BF^!(DV&_Hz9Lh>d?I40U8xg0fq^o) zvXGjx=h0NK)=0={j~HDpkp4f8L4L+&<@4N{-F-wr?2kY&rVF4u3?x$Xn20y6gU-TQ z{9K)4a@CNk7!NL%kHMFaNdTi3926;vG)Q0|wqs4RpoJ%HV_EIUCU`>;q3LHKg%e0; z-aqQqyq4pkw|kE$_SQ(#m-~t`RnJxaGT#;~E0$yfE>e|~$|4wf1YFSmZA4%?SCRdy zNkm-ZMcQ*ZY$&FkakBZkgKy>S=>?e#AVtLFnXnFOr7#`$8LqEJ`1N!GNZGRKfW(=H zdtC8ZKmiE$@(3W8-!dy=5IQX0nUk41AzMctn; zh>2m&^tcRHDS&XbeL`#ytf*C8G}QS&Vc5`!`>OYHy)1&td;dBS|MOjzm*kD_3TCnJ zvkWXErl;U!k`y^8n?m{iiN{OZpEvrwkW}LuScJa6D6KmJ%-N40ZPq);n%iPS`{&Gw zb+YB3Foo|IW+=1mrh%lH8@#mt?+X$pJ3=`6rCD#oIdz{q1@GUDU0LCxQurX3zpvVw zKbp@5`uY1JisXO%U>Uh3<(r;*8aW*F!elbIOQY$?{`gIQ`dxYJ^Zr5<=9jx-xE_xA z>vv${O*4~DA07R2t6Tp568Vompek8ia>MRhdhd0wAj-V-ggVRhvL1(kyuY5ed7MMf zzSj7UbAv&yht$Xz9@B|_*~TZh&o9g68IPSk!1$4&TzqG3uaPxbrDKt1pF} zF9QRp@wuzJhq*K#uINx}u6i=AmQ z2q0|mSG0gN2dDOy-V59V!j*VhEAsKBbHB_m|Bph5cuR|~5H9vRR?P}ssmDp5^v*ep zI-gLswRNhyt0o3*?b6L|;i1hSMRFjLDdYe&R||* zCNVum$OL@rU!U;Tf9SG@jW={;G^H5&G-^~|j~rs*`>2u%yucx4S0#WJec@^R6Ei9{ zMBWWGw~)kXSe9>?nK^7K@@py03|RwTBOx9_hNu3mEb*^p3i`3N)^z2OPR_Dry&Y#v zQYiM+mmniTBL!)DDIP>KY^I;LqiZtY)jcpc0v2zT`}zYZL6%hT4LCjq5{Q>L8%Ud= zgqZ_g8vy1RbX}u~E3|O1*n>)eMM7eaWP&$35OC|E#hv>-C_2_RPYZ4Q7c>js`C z=0o73MP!1KGqCFXh7|4TQD6<8fQuMm2ZD-rqla8r(ux?UVu1ow3`&R+)O%eup4nZ} z5*wo3EZ)z&2Yob%x?HL}WwsRo$fbc&DaPBByISuo#* z{cXN&Ur*H#SRSQNb1Rk_5I>9L;VgpTn;5_4_3xZ_E;wzvuhzg*xNwyrMC}h^nEnMb z5P_4k2_rN%1A@99VRuNjliy>UkNag2tAd8j(}#ADzjT5w$7ec1w7nZz4*wH2o=4bM ze#7-iIu`&>_=9fOmQXyEfTh^@nJ>O0`%(6iipg4}_&O&MB16TOg6W5O)OND%&)=WIyP+Jm2_ zvSrf99<+m4l;`pP;!Q?i_7RZmtI&Ty8yPsP`rhRpCdp&qCPS>yXeuO9p+c0cOUc@Z z_`~*Aa$$?g(se#D^C8Tjyv&}`uEqVH5k?6Pb1>%DB2&p%gf+&4;lG4IKZ==a|1!4s zgSZ?JV0F?u(BB*bfsH;-cayCF>WhM)ZGvk=uk$2r*`>C_0(ybU6ZFknmkpHOet1Vv z4gFPVh050fwv1hJ;C1SY3U;zi|JOmkE^AtA6k(h<*jFsbXcc3L`J_OS_oYNTfln#O 
z2Jo(QF0fF?rjal*>F))43T&JjQxr%6DF+N`BqU7+3!uco2tUvb2z^9hi>)&l55?(p zK%`e&3S*KDfED`+AvXJNpqB4?r7m7eqFQiN3^$B@;&ZO@9r8VzU}kV1^)XE_@t{j2)m4## zF6V4JxnhW3QiAuq;~mp^68Ll;OD2ur&-QaE#?q^S&))zpV)Bm<0}4AeznwAs?cHj_ z9smCa{73hI>rT)4!u9z|>{|+dGcA@^2Pp?)+%L{(&6C! zrCno17CF&t)p-Jp2P-xj5O8gV(R&RgrQ^}EtnW&%e$sE7M55s(K(=C26~i;<#6<`% zAMUH=N`7sRQ$#M#?}cHJ%4xf-LCp; z+FPJ2#17RJR_Uar?S^j*Ktwh2!l1XPYfIBC!2hv;_Ur{pAV||rK_ejZsPFhUH-#BV zwTyz%oV0I`o<)w8Mu7mdV1Sx@>fN;@19Z7N8iS&KfF@?>+)hXY-ic9v``&Q0vFE$fe%a@?V?x&?rchqBTGQL}oDnOb|r&{}W^VA6fMO^EU}VHA+>wQ2Q$- z8YnNS<^ea{Kq3Lx!1uTB!Hfi6no7DgYM2Fs6i0UH3eRI%E6&~?!|?P+n>uyf%a|`s znq$c*IN93`Cs=~4N6>Yg=08ZsrzFq7@=^{bcwyXnQhZYUlkKeJEG6@JgJquGLa-nv zUj2jx2kVRv&{&@mrcN9Y2n)z~^r}oec@awjz3<JP;T6V@YJj50UkkH$`LmRGJeXr= z@gN;U;HD7YWH8iKQ0p=E!#2hTrzw*dFb=M|%3Tr9g0xW=3=?lw1cc1VZk>hioll|b zzlDCk=uc-g zS~e1xN}6Knf1I^(9}na%PL#)Xdl{-SOahW`hBg4Lz@y15>MQ(|FXfcQHO#E_dU_iL zJm5GlaIsUg3)mDKdu3G-Y^dm3R&7T}e=y=GG#blhj^nt^LUV*Ndl%YkG?tOE07m^R znpo=8tVOG9qkdZ>OJyBRb>@Rjo?Bea7JInU;ht6mi`(P>L>O0~O-G8M{u0Bi5$ACk zLl&2|p+(cp+bJ+t`@`Ld+pE9c)ap_!f<1K^I1!%7uE$yuTU2HbPJ|fixUV_WcBBV> zFc))1KM8;aG%48(LGEi8X*-?^UeorC3r_}c>~@y96N<>EqOPaXC@DLm6SMPKL%OnH zhZYE$?J6<6t+0y{?O*p=Y>Q*jPSNw9PC0s0x#(+?#z0#>%*d>t(bH6ma6|XO*Jz%V z8{G^C=hQ7*McTK8rP%AI*#GQq%2a`cDE0&Ina7W`0C}-+{MOWWJ(d%{>#+c)5NAp< z91JfVH=lL#(qou+0w#eKlzBS9@IV!qxdr+Ee;wgW0WtFrGy@Ds#H{JUo3o3k^H7aD zcETRic6~rg7ltyq0;WotEK^f^mw?L6g)ZNP|FiYRcJ(7PmGx3I`Flmzy8}H)lK&QMctdn#n`@oTC5Qo zvLs28l$N?jq*YOB+O(&cnx<>E>wb<2x8?Kse!lnf{XNg` z_xkeZD@p?qR%V|;q5Nv_7#1-4VxCCa>p_2p#St5_>`aD?&}%2PKH2xO9ONuQ!|35>$PTk zZmoyyifl@u_79PP;y+$0iD=xLPS8GKQ-Q9hImxbxqU{&LU=&zb>W4 zuf#Fm?KbzG#pVyLBPF^tcQ4>MzT7H{S>c$)}=&K&;a0oNJ_ zq@awpXv&h-4Jcf-RjsizDMd}@!r4(YOI(jVG&#woeTys*$8wc~hSguWk**vQO%pop z#wQ$L`>3eTi5+xhGa|?CbnqYFT}JRbM`^x;DiJyhM?Zm6-e)9m6t2b!+GOnB?5>@i z9jxqm`;wP|@i5OkA`|Mfrg&E}jA!@u$g4eJRsh2Nfjx4bL~+`W0m^IyJ0#L?ci!2x zcG!ee6(t~wE}N)OS@Bq)cmpA<^l2sW_^z*N8C;TJR%G8y@$M}xiS#w4V|>cx6rD)D zA%I)V2lbLg0cc~&qF}T!VUilP1fTJwa#*FR@6XG-*Y|udoSYOYyvPm_%A2>vTyMSU z@Q$qbe0PE8>?2Q)6C8+a+diexQ;@qjx7*g?>6mw}B0}9z9eeVDlK2_ajet5CACWX~ zh46f;a2qOcJU>v@_c`nMjA3DK1X*iEV&M7@C2B_`d#tg!&j&|or73=Vd+itrn_@d~b%_D>+P;c0w+}#G@5cV%ad%Sp*K935BH=y3 zQfyoaz^F03vW$SNB4-^b?_UE0uOZl0WvOKjFthfNSvAIb;=2o*F($uaj0+@GZYtqA zk>4{cEA#7;@OLvQEnZ%_P`ZAgL|j)9AHV)#7BT8GV^Cq0!6zbO$Jn>6es??$u+#iY zF-v7o_h$*?t$bA1&5n?nE)lxfAG!-Xk2>ok7H6h+T>J29p^5C2L!Jjhh34RnP?9e9 za;8QkN3O0bQ{dVm`LdUKfy}afY282b*zU93nZg)<+1I`RJJ2zC6cfkJ0UmZ zHQHTKHS#j}r^qFZce~Ob|L_p*QQ+OXR-?(xvv6e#TG28IsQ`*L6SbzVHd5lN=Yh>Z z3=-ou9eQj+m_0ShLN`8pb6(kuhAYPK zFKLl8Q!es4VG$K^xeg6tqnfw=3Az4FQIXYdFDSr&KZr{c=GFs+h>mct^#m}eWYIWV za?t&f=4LW0^XKMAzzcrj5c$3N@jHA!W}gG9LTP?X8QilB?%P)6N^R~*aUGfXrQIA7 zpN(HIA;+It_QESk?abYsv_8bM)_<0y;g;wW->1E0M)@w&#sj{pSiiigQ#BNXZFv1L>;8pp`#;!2MuB`7V1(XG zI)4f{s$5hspVBg1UoILCo64k$qG|gpO~|P`=&Wsi*4&V1ku5nTL9Nxd*fRwa8@1h) z4Lcc`9u(GRBNZH2dk%Nf!61LA>(uFkave9$ z{-FYC4z*jXqONxx2c_t9=5Rv^*Z|j5xjBrZp2;Pt zEwPw9c435*+q)wY7i#0ZZR97w08}Ap(Jv5>r5~(A`Z&~WTu1oB{LW5N@3y=dZg%!64u)pQZ8!~XO%+81+D%LwAJ?7am6jj3?oZzX z%hvHfYR*&o5~EVmn9jsbMudac`I9*USkwXPE$Y~-I{D~?w%nT}JV5(#0tojH!^Y?r5r&r78YF!0xx zk>*VA*O9#Mhb%1Ya@xsFAXPsWmwDA%QCdSoa8RzkG>#AVB=%6J7_4ZVPMBkc;+CW1 zvtOnJ0UaZCeSz-ny-<}_5ls>eLjY~tp$2fBrvNlDZ=D{g zKC<;xuHti6yZ0U^iRucl|w>`yL-E)jlC# zTj2cu=ISsZm+Ab9IMpTo3P}|ai2pJ!k1))DqBTgtb!98ST~vc_158X}(2IJF%CZ~a zDT-g-YYo77N+u7mLutTgE<3{*^cvXv8VQ~jyjL=D?>h+t^~@2+K6qql{ThlRBgoq^ zAYG`Vm?9J7cCzfvL5bde>bL;2MQvfH;SeE&kWQ@CrDLma83T4RKNz~qYwwp(SiuG5 z0d!wW8SFQD#_dsvvlE9Vvr{h|>i)?QmG78NqL>U@q-b7X8sku!q_#rfKAk-OqgU zzqn-$D-2Ls*iBpf+|yCPL-ln-mmZPiMB?PB_u$}SiK7Q+gET)>y}Jj=p9ZkoOIWh- 
zQz-yy+fNN(bHnNVnunAMuSZUPbLUN~9Jlz%_SU6dCDunXP?ni!Nhz^-s_3*>A4Y8Z zBB4m{Gqm%ws2$FA>r22CJcav*%;LL%N9TgljGCi4Y*&1|&K=+7m@@;rX=uB!FwW)K zwbix*g9hCL)cXmxk$Bw%VqbS#6qt4yVG#k*KXA8P{=&%9B!>+hL10-E4&Jz7+eH=1 zHPtlwZe`Cu`$CakQP&xNhEa)Pv)}?4O`i(-kiuCe!G9wP>%6yF6w3T`KR%SuL3y}a z5Y3|YJHZ)EL`B!}`JcSjk!8$MlQxG8T^mH~MAI1g*4X?J;Z0+(z82_#5=hZG&9 zbLQj$ERS%vk+20jQYjr91HY>1&`Fp`H9HcY@bRA4okLaA@)zgFomC7f@B?&jk+-1E z6JQXji0a%~vuU$qi*)Ut{CS&GtaNUv?+19?7@TClJ`M^&g*t#DMt53Eh%kx{DfJI4 zNg@KY8BPXUy0F{yITc|xXaq%l|@cErR=Z*9K7F$x*mxhEf7|xyLeG8on?Jp zAFJH|CwhKShP`6GsXvdGLmbh*NnzQAAEfi9&Lp#R@YlKE5CU$n1sYgV#51Z$x|bz? zBg%vfIZS@eAusg_pfh4I_}%$*z>^xnZq)WtICIu~-3{aycO@)%2h*LPPuuS_*s|5z zC?gP&A2R>dY%t!!klo>nP-zh0)%_RYiCGe&IAFMDZnzuBSMVd#%jB#zy*rSW*DLi! zvX2oaMHKGmCm07;ulwK%Mj6!nDT~pc|K8P05+6@Y!bru>jdODs$9Y!YSM)f~Xex1i zK~3b;nzSE0l`t+nadJcPwa?ph+O;|dWNQX>O>L}>NRX7<^oC0M83OA)_-j;lfzZLo!%u%yTw;aeU~IuOxxw4 z+nkDsEj+qJ%e2*>VpRgK?wo}DZO?toM09|b0&X?XLserEnv~+Ea;+bky_(?`A0o)) zZCarxa0hXrsfe!Mk3_P_9UO@cGV8P2G(2P%IzY$8Q`diScqth+#Eywbp7JTbIhHK; zjdv~~H1>Tc+B$Y1Misbq8@&iU!Mc2g*0Cf4+liQ z4+kWyqm86%6_ZqE-OO-YCIO*}CKn8Nz+NZ1IzInslkWARDIY9prp_MrN>_jb>)kKX zM7w!T;^Kig4+7lF3$C7W<*Py1m5T^e>sXOOxq|g9#syB!e!~OH(hr@8I;TDIfB+wxA^V5ioVE>JBWx>nL;V{lI2N3#_O#DsJm8RS(#G89sZncu1_cErN9VLwn0 ze$f$40vBJ(a>qB=22uLd#4+Hb`}%NKyuyHs$4atD%J$ITIbi9paT;K4BQL36vhbSk z1?nH6Kz+a;ZFCjgH6h11rfYj7867b?LQr$&tALRCfF-&I%{@>5BA78cuX!-K9P#l3 z;QP{=nZU8A=x-c(Cw*mHPM2g5#{P>i+qIa?m5t>A=_eG{J3wLmi&BW8RPk%ZhDhl& zDEJN(^XHcLQU{g6F5edu==y6xu1sRYRVX(C(|>yLZ0kX9(F;bQ^{-?cKOW@=$;LNa zYoZv}dJPTzz+8!Mj0~srX~4WvahYHurTr;ZjpvJNTjB^LJBS58z>!{rxMz@(?DYfa zssL*XKo^v?+!+<`UwdazYvRSf9 z-0o6I>E)9|%9$cP-^HNj?k#}Yn*K$&T|_vQlA5wb{Wjd#!Ey%NL`rXCX+0=^VqLRV zFBPS-wla9js0x0zV0^uW(jZ*o9LKjGNxqio9MWFr3kq(}Y=OnvOZ?{l{#HLfhF3TGje4J9& z%U1SaG|-gor)Ph*%@#GZ{Qdz&Gp0Q#ezpZ$Tpv-U1rz=Yufx9ePK`88-pJ z8P2yBN!1Ne*i3vQ8BU;2LuV*KcFIozK&k=K7FQw7Ye4iKPSAt@OiWC(mWc`3OhMXT zgpA(dlmE2|64R&$z?YZ0R#19N_;YCQpoL7R=!kEmorPVYRKn%$T`XocM~4+k*ohB{ z%n%Fo5rlx(N6x~4TJ&w*&~`}Br!u;2x#BaYvGhfOvAj6K@W1Oe`+HM=w|<+pRGLV8 zF)qLrGxA*SlDb8i;k)xs*Leq3yT%;uDbBeUN??(&{p0}XMz4qSo}|ID#?QU57~#*8 zEZ~qoWVH~3GcQ_hwPJ;*=M1OuE?eYm&v9;e`Lbx?Xzz;zTT`e&X&nj0Ny6EcnK~1W zK60|X(sb;MM$bCi$XaxPb$0~n`j8YS1Q(83(nR3D%byg2s{nlAp+h}LKi@L8J1EyMw zwTWA8zQET(;g(j+G?cSrn^=b`nDQf>)d_f}2-=Ofd|evF4~ ze9lsxC&@%xUgR!ED!M(XCU`y-rjt_6(=$?nml+{NN=p1g1#$~WmAQuiW5dSe#=}Ye zn-QrBolJS!7)+y0*G~Up#(IwfPIyWgu8y`kR~$(o5deje!!#k}+MB`?&imyzZDtp| zf$|_S4|?|cwFk4f$)ejp(>=A#9&Khw_EvYrPpeM)=yHhE7HW8|TgdvQeM%~B;DCGewKQu;5j9gA_}ic>OS6sW~UR^qWhgCYsj z)**!D$eE~5`yn8dmUFc6;)|FQNGCWy54?#gBcurC2sJ{A(21uwtlZp@S&A>R1Vu}; zX?M_-$lp5-!TL@eP-7||Czoln75$9_-;O=@6!Pt%s=j28g)j~LS-@`JU2OBBfq<3q zX{|Y1EO+o%wW+?*w0S{~tg!u9lI$xD5eVE=qC7!M`(_~=E$xBoDu0QpgumIF?v_{t z0uB}VPd$q^HJ`TNy`db#u6Lzg^{$@fWzV&u?!~>j{a>$iWB@xp5KiZ;+zX6NYuX^) zmAll`nRL;kH1kM*gu^ZbE+$d-XTDO5dD$8>OK}ypvVutyTbV3 z!28_olv-4|ccd2|4|+Q{1g6L$U0Nn;XGG_6$G72YUs!zK{Aee8p|{OS8=^l*!#Lqo zkcLH7k>AzX7OgZWz+m{r~4+Be{n?&Vyt zgMU0JN_tmf&sc3X7d?$Or!fbeRYOFcTJusRGH=YhGqUM86-aSXT9#d?$+T;hMfXu!k+^1TT7FooamGy}jcW z_0JYOnkfYp(1Q!T;Vc0j(^P481vrV?4)tI2X!%qr@TIf%CX-qD6xhG6jTumS8RWz2 z-e{sO@m)64tcNgLXQ({$U=j2BSNjZ%@0#qSuY12#eNmEaGBg2?HUdvU@@&!e?qL$Y znR1iXUkA)C3O=|M9pvP`B(H*4ybWCL1{b$E$%R`B8xCD4NR%y{9%$dWUvR?raP_f- zj4XC^%?XnLg9>EAS%2+(xIEH8$@?)7*)_>bKwa7ufqEPCAE>u&zfo^1t2nN;2j&nZ5o z5@AHFM~f0LL-O1JQJTh!3w)SB@`ES&^7Tm?{U`znY-qRWK0cr7wYxRF7fV}{m7yTs&k_Yj;(>Drv>4=XfxHA3R%#HS!3;)%R!V9DY0R(I za43a4h+S?af7EGhH=cs+PNf8Kh)<^KC8LcxL7l^T0&OZ#w}Hu6p-ttp#U+@9l<&sG zT&V9^T+UZp)cX>Qu5T>28+*UZdKy;Mml%-b9cZtXb~MYbBiSAC(Te-h^7hIr@5kzQ 
z?tK%2Z{fSzK>k2PydJi!&ZR}|sLSj|wtlp5FXg3e!Y?-Vt0e!}wo@dr=c0h;ssr^K zpKxlKglH-)-(J6#Hj7qfuUw}mC_!t!$p7I;OCN-hZxwn&Pzm6UTWaK!ztBa}&fWaK z)4SU33{1;W?C7*GLVZcFp31VawR?hw5_==Gd+q+Ejnq!;H1|R2W9^qf0i5DXfT_MJ z+gNBV{Y~BYVgre@j$al~g)3@=W^DUP-|c_e?cq)S^hxx2(}R*OZ8na=J+e8BIk;92 zNCt460i4A@4loK=-q;+Ev~Lt2AX;RiK8E>#o@chjJfMH+SfpHmC&!M03F*nK>ek2$ zhB_@w`8`hcb7N1}a%1j}959gp?;z;a{N#Fz(UHc5O^rIsE*G8K&6_~z{o5=2dXP^j zhpZdOt#x5EBW&aP_u0GLJyRO@O2sI4<)f#xc}1d&|AT=l^4reLd|n}Sb?f-~NyGPv zTe03|HR9&DavS#H>&~7k^{kDHR7kfHNT#>5+i+Fg7P?3_&Q^p&@6l$69#mN6c!;Go z{E7^xN>+B=Ct718tf3t_!SBk8>JJswlsz%Hvga|`w8*n@)Kp3iz4_OGr>?I~sOXBX zKMg@3l_^+i+cvY;WP;BY8=qfg^4||@j{tY*=M`efLi;zg>^0w!9hzS}cU9-L7TXSc zsC$*!Y!6EO>q_a= zj4%iAOoF$OLMNG3DaP}WBa;pG?%lylsadryvbxDhl{0 zM5pmP=qa@2cCIRjW|8yPkD_Vg_2h}dU2F|*2-_HK2_G}GyYhZ=qxA6Y;}ND&6&1u+ z9QF5Ifa<@a12IFF#d~C+90KH6T&22o7ZIPAV~YzRT<;n~e;LVJU{4aq@l@!#5Gmm% zT=H-}1#ZSenXHgzYGk$%Tfe|AKDotm-Mjo+?ux_lJ0CnuHH<2^5H(iOOt> zP1{1P+3Pp&+Vm+v9>6uymVa>EpChpaN^s9$h!9doG~();IjnI?dBPj>`AkW(^I#yZQr_w)8t=^z@j!$sh&L+j!J|#;Wb5MRzCRTCF})Bj z39n)S(`M4~90KhTlBWvyv&LaWFpe&oU?Jde80BPaF8MqM1(}idP$QRkKEQTwi{ZSE ziMx30^d8iJ(@yi?t`ObERi`6GPT!Bvpxfps6r+JehzWm(po!@SBiNsgr7vR$M1K)# zQ(Y;rBae2yyVzzq@396!s?o~$s)Ud$b{#(oW{Utb0LUc3eid84h#}t-XB*@(t$$_D z@aKRITuQM1e!R7sq(X zRQE!xVn(*I?j$-oPJUCqVCQyqjzr#Ot4q=b)7x^E|F}_!!0`t-Af>l3+a4x=f^GB4 zd4}=t1wmlVy0OY;WCHDty=wKa>(p+(N8iDLthh}}g=zKv#%)IP)U-h^#e%3v#Ha4n zR*&}t`MpZ3L?@Mkz5ZK(FPohy5iWBqpzzhc?Fo$5`dRO7h>7Y~WurTZFxu4#25vYd zlLaoYbxGt5+X6EOq(!glA$U!(Rt?pO;|I$F;`pJBXWO!nR)avT?-DQVBB-P}G#nA$ zx>3f8og!r_^~ZrC+Jfi)Cs$QSiV>%I_fzs=n&qyx>rRW7$&4M^9-y1q^iiMS?D-EQ zsGE(yL#C9x^;Noexw~DgCz55UY0q<`J#I709DO7k_O5L>7;GB6rt9dvvvGt&-oM3F z&aX^9edxAN6D;i65vNl!FxKAwT}p1zZoKQCitq`P2=iv7t<40Y_$0KF}5HyIInyzAl{ z>vN*tUV0leXWjg+#%&R|{b$~eCv^P5OeV!J*Fvq zgvbCspE5M3ClUWJdJ^yEG0aC-5ksJ`f_l&qhzc1N%*QE(PYk`Z|JD^(vr|nAFrQW( z$&8JyGU}b{&y3xCe5(_Z>^{4_LL3wRgIMr8W0g4n0##p;_HKf8NU01AJPk~OX%fO} zD&Qc%hItCuFlCGH09qIMCX(O7d|P7_PMG5d23-mSJ1Y1+U=IjKMZ0;vU<4o601>zh z?A`|hu*^$si2Zw)4a?t-T0zYG*J9RZ1~$5JVkkb~p6_gSNH}t2qE8N~Agew(_3^~c zhLUHd`D~b%aLP83{IR@sA7yaPw+#V1K`$)lPZ_2Pz)004kXGEzQTU7C=z3v&^7Xv9 zRGXz6LK1|%{@IV|*3V<=JLU#^T}vx&+{RZW1dqZs)fJ~E4ovBh+u0p+#lGp)i5T{z z$9~$G6xin+I2ABIB`IR_lR(nHoihUmKlAY^%l;mvKN9@R_1{ghDcYRpXQT}nBzFk{2Zl`!k6JG>J|n)$igG^?aYT}RgPXvSA6#4zR&~J5 zC?{1SQ=kv>0}tzot%jIn;oTth9h z?Kc(Ic0jf0xU4}1(frbpXRzUt>jyelud`ue38;6Hl*`R!;K{479xMzu>1uRdV0ZdAwK{ z#@S}$)^nt-rUEX_|9SnChDFd`NEOe93@5uhBM%Pgh`2{(jVx6XCeGrYZr-zzK2nC5 zLh%MR>pFKQ>&%teSFAN-Jfz}@-nh>*7!?$RHq$ACM`Y@!Ee$6mzNz)I)dh{ z$m8a`qHM~@%QT}$EAD8`|8$ZBBOC!LiK$q_@+nF(RogTSRQ z;mknskf@DXMK8Hh(mZexYZH8(^pcq+uouMp@+ce)lga<_nTd@MT&_Q(-pP%8w(HYd zx&viF$ce6|Ou>GxHkCJW5yuq7ZbAwH`*-De38hPuXGHr11oWg2=}4z|h!QosE7{gcl z@QUYFZGoa2pqlns@=J)nU`^Zg;X(cG^-G^rN_+{(CUZLxF{j+pK@#y_Y#&|XsDaZ?O8hmSpB znFXvbL(gUf84PSnK0JHh59WVK7|0&+eyRON)&5-vO}qXyNtM6kl`@@vElkox(E(UM zGxk_aYb;yBSd=Bl{o^F1L2t~ZY4?fgGYyAZ{WS%L%7wx=;(@2^#Qs>ykXVwC;)J%E z%96VY6SdBq+9~0_$XakXW_QCmP?j$PA$q&Khudm{XJqZEkSh0_otF9ea!KM6y@|Ir z`Ba5PURRON-(dLFA@=DD?<)sA#S>wT5>lra;wQy z(GPp_->9yhfDRISH+WU#M|bNpm4^{S;X8fx_&Wfn0bT+zsnl(d=3UhS@!e@YK|t=? 
zfesBQiCFV~Y{7MCMNg3RDc(_A>9y7FfUBZ$5oIlTs0`{avFnd*7v5uZ4GLzJEs`+Sg<3WrM}duie8`BDepf7 z<;DSbDVP+Q9W7{I;1aUi>oMNUcnM`X6yeR1pbOo>ow!uApzq`f z9~)_>^o0kCU2d=7m|80?J_ezRmpk5kH>tEscQ|>UTPT#tfMQkcN}_47cCB5jzWJ5S zSlTen;fZb$?Ix0U6#D9@()&2)A=G`(J8xz6^)~(wGE+m5ka1k^8jkd-LB`RXVIa;p z24i)2UKL_Nz@1zn;X2rcKhGM{BOchw{2Eb-`Nd_6{U`iuykz@w`^bP$4w`eP*}|>W z4er+m6G-*XgFZZb+3b1OY)Reh(+N^@OuB7Kfp|{gmbrW5Rn^B4OPx}ps^bXfckMoK z-3uK!yYX)`rQc%SVxscAEINy8G{QHe_RPj5MAg(X{gQo3ZAuqkDwFG%vWzoDI=z%Z zVs0D0a|2Z6$KgkeR{@atjRx(MM84SHa6*Jrzer!_rm#+%Xm_4e_Gq`|OkXggb2$qx zqGM1xG#!EJ7H`M&4N0r`p?Hw3k@*@Eg&kEjuWcI4Uf|C{WBYb6M@4G>p2?jNMo!Ei z!kX_ZmH(lMme}UMKSt+fqZ0NY?zUfPDSmFC)k#?s~u4J1`SR;8OR9Ra`&8K?hbXE7|+hng2UW?*D z0%4lp-hTM95Jr*9;#55Ul(rcIU=YbY#0q}?v4gFhT_0R35S|y zC*w9h8AOkoNfh27LaXyE@hHDo@zL}%AO0U0A3T1&fpYn>p2I6f62^u#cc3@Q8_Zzl zV@f#h+xTF;MHAvz+O~K0OM|s|h()tu0R8#w&3Ah2PVHD3o)c7H0j>#IF5Csu^#oMWPv#Elk! zstOmvH57<-&;)7$d9L^@NM1`{#x@}x;xa%cd~%_UG}N_~l(E*B5OL%=u0;*| zkXLReI)GKOt>L;CaI0$vE{VCy#~r_WX9KtNV2#)6?CFz&@a_S7uy3+%*dqhg+^dx4 zjR0Cz5MLfVVWn%yKtqIbXzK?lnvdAF%id7!a39bA{;!5j=T}e z$1oUR2lG4z`1uc`LE+pd2}ehm!)#6bobxcV)lsOB)#4atD0#o@$@3j|V)bhyrH;M+ zAO$Gu9NOxmThreUzkMw`JNky-___Y`D?#=CS=kCXl+M~k;TeQ$;CTgbLJ_%R%Jsm~ zD?%-b=6aC+6L7o-82&FR2v(NX`^!YRNGG`CmHxwbAtJ<}Vhk{%(csQbbc2VfHN&iR zbCGl9!=f|geE<>6UX@4Z%0jnkV#OHQ=w@=(2{P+V(=sEx4A&dzt;!~HzkqY!%Vt0# zD4c+-tbt6G{AT-Jfe!vEUQ2Y(HFG0%Qlnjiar~_${n?W^-7)GJ5-r>QnHEc=miC6a zt+HAfD(J$*@-l}&iCpb@1f5xy9(T$;*H^Vk^CM*8z zvgQ19se25uhRCWU8Di?&k@PFAmR5y;&9L`icTf)u#VG75b zTxXKd8AyeUV8IRJg=f#Eb?^L(a0bjq2M#U2S~N}H<*Xg)t$WvOt3LRs`j)Q6!$)5< zjB?#g$Wu>RSrYa?Hq|^rguLB0!p)%wEfZPmqmnKVhGx>x!93)ZvB%5u`n zl4!@mPp%s$T%SOisxfxdBr(~Co*(_MO_Hb{R0&|p(mx?RIQ$9XL+s+Oc8nHHD?}tb z(2sS^nT&J8yq2eE=$6mT8pkQRCS((_qFihntC1ha6nR1Lce@__1HHf;++w(q&L1}# z4dU1J1S)z)N}C^t`Hv`8t$o(FYd!fLO3pq6Mj{AHZzU-nSRp^9> zL>UM}=g{~Jw+h=1ojwovqG~LJMQVF;p-s(wb9HBRUtZIt%TEVW1SM6dHE~55L5+~I zb^Xf}8)AE`P%%TI)m8cceugZHLzifChDHgPHS~di`SK`%o$M$uO}S0~V2)dQLd97> zt}rmnJ0#FU@;2ksd}m_1EdvvJo*z$KIDBr^Y_GHR)>*Hds{w`{q>e|set0V)^F2s- z#%?mU8DFLhCJv_82VFhGwg_>5wq1i_GjO0tx?dH;de_j^E#2&Z@#Wx#?ky+5M5lxEE15y#O>gXcmG8=pFl!MhNH6vc;s`Ct9+%gy+6a-bJ-=r@o60sM02rdfr3{7x1H_RSxg>lZ@0_KXYZ=? zmh2hpv!mYaM7&Pl-~Fow6MUu3#Gvl4F+}8Vp@;-aK>_^MF98JQ*Ej?MqJQc*HrM{U zU_tD+L6M2!)F1KASpx5wGUH<;ra>V~`kF>UND4X@pL+m$E(R{TfbIAD8uXwhlx8~f zG4TTB5Hm#ePGIZC>(1BHuG3f8eF~F^F}g6EK)Sc`>^m(aG)krT2tJ=MTM*9C6TYX_ zkRcFV0j~HW0;9A4q`Tz?qj8M-K&b33p>X9~+kpi?6>=}!fe(*=;!YBm1Q(${voQa+ zC86SyQK@Bt_qE=upJ+V8Nsw0F8fizE!D+q9uNq_80aP_}{Zh*_!T5{u&bgxR-SIW0 zBMm)JUo1+s7IPhWfgymq!4hiOHo9N#9sen3^I&-*FlqtdjY?0{z3Lt1DOzm%tQA#( zk<^f*0~^O@;gQtG2M_zJWK0>aH)q+FQ()JOLmgMVLncU0@wSodN-ED(;Q|julyQw? zy}x4UCc$R!U}!|8c)aOEiUih)ix96u%T%-#CJwH-QJ;VN;!$@9740n9?v@itK8NZbpbK* z9Qmgs@T$a!V?@v)cn;#*AZY#}2fgyOHny+*d zU^lRqSJUx|`qgdpzLs*~*p(R3)gVF2=%^nBr}U$};?OE6Se9r!e88ILF%f)jHnn4( zB!8#q1^_@IUb-;ROU#YuJw*Lj{u#opf;xQ8=qtYMj_~w47A$hZ3oz=%C>V9UfzbB* zZh_rUw|e?B3jRuUI_F6@6a)=DHk0y%T@&|_1k3}&PzGJN?G&!vxAA_F!muVi2<+B? 
zX2W>D*RaE?AN#afFf8O0(zeKOixC9-tGz`d2O_jtn)rm1p0qmq`RZrE5}Y+6_}(sf zGb_%$_Qu$bJW9B-N)cgss8{~frc2Y^VYa#H<+AzV?Qw)s&e$*)gR%a`Ww3_Low#bG zwIuGPKg4MCCoSF7$Sj1Dkc5-Hx@Oo?n z+M9=7gIS2Zl1GfXW=FW0G<#Pu7%=A9Z12lB(KPt_#ZkbQYFn5<;+?{S(G}NRy1eVU z%7t?0kAc^BkR9My(a$E3K19D;p`5aH`vtWn8^zM7rQ2^Z%9SGV!q~xyFrz9eB1Jy% zbNPw{>FM!KFH9ZW%OuHUw{XtJF`O^+68n+#-(> zgs{f}b;zr-W(L0<;T{#A4T>$aE(hoC_*n#}4qUYIsl@c}0!p~LrL8HBRmbEN2wnPF&?)l1^(t@@7Poe|+90@zn1r{6zxkg}H^@ z5luBM_kI)k;r~>roGdy5ZYLh#FW&x9ubA+QSF9k4Lu|>cm|4*etmR+Nldw*n10`f8 zy%3(wbSRwezTsx0S5omh_rUJefpzIX=MK|12YAaE6?G%#RiSYHhkvxXn@R-qOP9 z5!T6s4gapv2fv!^@bJl4FGdH-jklpuu=Y-(!0f3|nsDaOH&fA%{)T%Li!PcA5268J z$798m_$&UIF*ce%895U;l5_OHCf653HWI<*b`EJFt0GLht@ekhzvpQRpaaUrc%D!P z%WI{`IK^-m?%#jn!1-5+r;Tu!(dC*qoJg!i<h1)JDL67sv5 zBd-7hIU+fn$W0*PDgy8*vaTK4ytJr+teU!dbpw#Wd%io^1LrcWNz8_w6OC3+l}eU( zbmH**(pp`AxBUK1xMWA-knBHx!tY4l zFr&~F^HnM)^M+;xqgdx{1rSR6t0EN?iOP}i#S>!vt=g!{P;re$7; zhBMVTq;b8McFn!%Te#jq(sMhxaU_BC-U1Bcv>9_wq>R0mG^0DJDgI;0o7!A8So1~N zhuo#2*!zZy;}t^dO;g7l7MKu~8><7M=ljc8B=+ov$%m*nzUX1m5Q-sKe&N~gPnbl- zv;nP@gTBS*r;$do(2_uFP%M3^tS96G0&PlZ@9}k@TzuD#FSLI#`i~*B{QfNbkQSuR zwArlT+aQ9Xq>Is|ENSG$GfDTELi6i#f@fr zpy#t~E|nmYj4jTO9s5b5i}GB%)Fe-y(5dzc?H6f+H56m*CFy_V))5yzw7i`bO}+YAZVMsM-=<* zt@n4gB0~9wR-_1(?6FsR-`}^@D%bMLwVLB4S#M0pWDECKf}EhGH<~3&fE)-q;(&lO zlrpI6Rfj(#iDJ;jy>KNQ2Vbh8ACB~10zU%!{|K~r+~;;5F5_h+rghsubl-RT)36qV zqsd@~xFkm8q_s1{sVpl>2wB~D1`~ThjUU$A`1Z_2xmDg#q81>vWUZcYE6dnZ%8`^49-h@#ih(Y#-Y;8tSNilW}zU1^S| z->oe*p_Uf2=SeX=$(I>-r{NPN*k`Sze;5xJ);~2K&*d`*b%aw@fCwCO1}VAAraQ5 z+_B{y+JnFXAi)mF{Qd-4hM?Ti@>nE{yic>j+*%D5v{f3K>U{VTH(BzWA$u?q+8}uv zo{a|bn!UZ$Cn&3n6bOnQ`Ax;e;g^Z88WA_nBn!F#px7pXMSZ?8#gYaZoTvOjr-(YG z*_~_M<=hPtFXfLJy5rH`4F_XBHqtCZ!N-T+uJ>ZJ1R515-a|ijD1Az`zu{<{A;Axj zD48qP`*pvoHJOF!gi(fie^~n)WV_AO^XRXyGpeZ{T-)xy5UdtX$z3 ztz`$S0_v<6*EhbIbTIYZdj}#*U_oVa?es)n3g5$yip3BBBJ?(Esn@t`y^NPhT-yB%Vs0{AL zjlTUZ(T%d~JTwb)CoS-j8%1&+JP9&n{BL0p?AD(EN2}G@}77%L91>JU)=Kg4!2>`u+d!ebxT2h1s_KS;7?~ z%_Bl6^Bn=fYfUAy){_EXPT%|ecS-h)l8iA{W~0&S2c*DYPmnTP@hD|LhgU%+LNceM zHZgKUY<;LkLU*kDKFG7a?-sfYu9{@4I{TsMZXD;($aMAQzX)Cx#NJ?1y0UR57XG`F z0TK8mqB0?8KChsB7y%6O&1kb;8S&>{z1;iHK%;Vm`<`dTC}vhac@;!@y;3gnOI;Cd zK3k#uqOW;sul|0sGEUjDv);#-Nkl_D{BsM1)T?d9resRG-nKyr`M@u#CX3er6F&7s8Nrz`i%4TjSJTH%X%5`LLkCg{Q1c1y8e>i1oihIia@i&i;|Fnjo;a?TXPLC zXE@JnpqfUDpF3X2cyX~Kx4M1ELAFF?gqPOm{f{ccv{GVHwj!ba^8U`eVo^pZK&CP5 zr~TZ5?}NDDb`%xMMn$yI*$F?%8L#X{DbS6mC#(=L<M9j>#u#L={=7I7>P!h9F53!`ki`$*A<9TpsU1MPy2-vb$4X`J5 zeE^s(NtK)>UI!uCaQgSB5LFVa&Q}7q`&(w37f=pOD;Mh?+;c<7Y@V)CGtGcw#p!7m=g579J{(zR@T(K#@46=ekX8SPuPL~yQ- zp8V}gC!NggOl$oqkdSE(-Gey;R91uZW%7N*d_-nCz(?c)b@1l+8){0%cVYSE|6A90 zjB?;Eq|E}~vCm7Clx{9Eh-{A*c>dnp)iPfUPX3#pNtGal$7dff{B~}?e1Z72EuEnj z)E0TdL(yj_S&60`YWa%*hyQhgJ;1^xBeB4ux=j>|wllM3pf^s;MPXzCp^x}J{*Qjd zMI9i&ofK|)72}X7+3;Amz`nN`6o5>`k%ccrt zO$WjNis)i0QXW(;;<>UC1xk>-qfev@2AM6?YNBvnWC+Tc=S0P1LEnSqSR;6l4BUn# ziU^1{bTn@u;1 zYp>MLNQ$)Ro+m-{CRx}41N;y^p)zIfC%jsgRbMt=bk(<1MI$TO*Yc;wV04!kk|$=* zX?-#9#95vEu;%!~$mOjw$|BPb|GE|als}-5YwsG|cM&r11_>hn@C-j+XHcV^8zMcei=hZl>g7)=%)`V)><_%1L^~3 z4Axt#5?450@E5%4r#?klw~gVFUvUe@x*4R^>*rI(dU@JJ2sW{r=m*Y_KgB7pek771 zc!*Q<3Ou-EOaJ-k>5Ci|`LfjG{kx6r6ifKfoV5K<(p0aB1-ZIQ)ZEQ8-cO?T;MK}5 zD3qM-i@2ETh#P)=a;-{_Nf3&`RBaFgMmO>wpMZV42NXSW+E+2Dfb7_yW*`*oo?IV3 z?fjV?%0`Mg=0AOaLS472Uc}k`YB4vR)ov#F&R%?9ffnGusM2i4fQjT{vjv6oBa-5! 
zx8WbFON1y4A^r@=_-Etgjb#T|QAM$egUfWyQ(nF5IkBxRmCRZ4fb&&H^j(!h{O+EsGJOKRF z!6I9j$^`_g0NMcG*tCs|kNNNSKMUR=IKew`X-Q9RGgru}1DV=3 zp5yM%S@4W+yJXZCP0fSG!MR;7cmD7@5cWVcpZb$1(X!!VHE>n0-2ev>hIi*Un?9=f zvO|~}(>mkr*lq)n_7``SM#~pUyY+voh*cN{sz)Bw>;^^DLVgiSm=p+^%tc_Ojg=@P zpV9otpB8~^BL6HziC+(8Dn~4r5<;-v4n5?d7I$c2*)7xpa)k%C&Mf#MN8EAbngrjL zlaG!uR)?N&(Wu6Ix%NM1`Q)u6_tmvcY#>c?s*QEq{dsQaWEE>8*hNXPk+kj363QJO zoM=4-t0*@-5jeTzT8sOLpYzb%Cqmt}a3qhR-Q}HG8&k#VsJDEP3vGoah>O!-;zDh( z@kp%j2wo4arW0?#foc^dq)`CzXSOPdWcUnR3Rqlygn&xjLP6MHisw-hxwYV!DO~0V z4=GMxW9T_fT}yGTk&3b$P?7v}=FTIl@bP(!qsDVY(T2LnJ^PKwnVG9nZe}gvZ3QNJ zirLI=lWvpgB}F8jrZHs7A3%vOA4U)EE~X4F??r|M6g=!G9!Hypl&%0j76_77A%iS! zS^_QaS>T)*fZA~m*o-y%*7YlH11CSAv)1hfK@2jUJN1**Y^l(1_*fG|5U(FQ8SY+ z>hvqL-B~AlNAv&&#*N7i_padwC%UKEZ}dE~>eHv!o`#R6KPGQEnnO_N1qwrbs8l(N z_KJ|pj2R57=rY>gW(Yow+AD;bpbe7A#AM0jg@hU1)SG11uCxtbaCz*Y>7*z*&sX6a zJ=LT6OLjKRVI2=?n|3O2cU6C;fz9VFpY;jAjq3YOCq^^QB<5mCE& zh3L~;x#!-hP|Wtj@+ydFhNNe*X_>4nZfH)W@0`1zynHsSU$gLLAkQr@{wdHhy8KT! z71>~~Q+sNt#pu|?UI)uC;h8so5eyre&^XeCB>h6l)XApA&2%Xw*V)QC0+~EF{IYQG zy+@1Zzjid3Q8(3aaL?Pp*Bu-7#2h}J6o1EGb)~AsyaZx_RIKcjdG+Q-3i44qke^*R zCssH$#H1SLT5rP+7i@bRioY{yxOLN0ix-ti>m;2eliYy2fHvyhf!s(U@hEwy?Wp#K#`J<_PuGb>?kc)-<9u(;KSblosxHchNRAwDWniK1AAjdMKfv@&t67heMeh`Ua>? zez88zygRvAKU?~Er(_X7b<8?~Ujn~|B7PjOPM5!19jLVY`5}@6Z8D$xJ@{0h>uo;c z0N-{<@0X_#(DCsz**ZnWI@s_%J7>G$FDb{KNT0Kv5@T~371JjaY_;BOm1JZ_fbbQ` z+lRKdrz5?E$ZST#bKzRJ+vXvqr4Sm|oyRwrkjIOytVf!267_lTZtdW$73Z%LIf?6S z_?cH8y#AwNvX3wyz0C-qN@WxMSW)?&O6u~}bK@+HmwO(OSm0IzndbDLB9;G_!oB}5 zLd^g0op65QK<)+l)$?wFLA*bQ#}E@b?Y~0Qn!oJ$%Cs=mLw++sj>cC&b)_s`)1>{d zdf}{xkHdps?caM3)jg#!}dv6{O)&Bp1A1M>Xv=G^*QX!QVBH`$ErR9oJDRoVyl7yrMF-L@w zQqd-iid3q(-AD^2+1e~2ifS;jgv>BFX3m`ZdmXy>(!HPM`}uypkKgZ)-=l|RrZeNr z`~7;q_Sf@y2Ii$V5qg7^prhzY9q$N6ZxE0d+v~LSFWuQT^|JSicLqxaX|liA>Y?wD z10KIlU58Xb-I0`y80xeB2kyau+xoqGFhjn5XZn`=xl?BzUY2av$}C3*dBX;s(23Am zr>F|IGI9SRkMTb}D4%qAR*}}P9>eSW66wJ=v7>8NTPa0#&;fMHpl93LHWysB_Q2tq z16xyOFIkaz$}S6A5NMEg4=?B6nOA18iK()C@20#bYmDMTS3`*oknRUN25*Kaq(+QX z<>ncRHM*I^NC0y0#X|v;fRzf(4{|`qsn_j3aq|@CO;5BVe!Z#NoqB+fkcDuc=3ckS zH_nRpvxQ5O54g{IM^}y>ciSQ#Tl_~#;x9qqUxUNxyJUXPLwr9H_|G2er+$jJbwcV` z{}Ahrdz~J|=(I84O2MoEq~LsU6%l3#VR$B6JT_5*0&P?N_QiVpH*@_#+}e{^rq!GC-3?~WYuIj_?@0kWx_ZgOd6a2D1NnimNO zW*DavP*SGCCkz4SM0xdcX#SzH8bfgGm`@IT2X(h!MtE+p+vW%?GFZdcOaqaZRNkPs zVL#`o7e3vPh_4D|o$T)eB~Ha4CM%qlCMVKgKpy1~H0LNlbB+xVbN!xcs-${&{^%1K zD9Jac3IN-mke=>CKPNWCaW zz#y0LLLhY3Qtjk4V%7kfnh!`$QIKPRbVC3Q?%4$L^#fc%nN(KpYP8Xx$s2Z_zFSI% z!wuQKZZL456kpT;O}!8n*Ek@TX@})3gXXICOeTqgti@(;O&s=w5yI|VXRZ$^CN)en zdGmqqNq5leOn|?91n`$hOfhn2Mv6xne%m=(@ z3|Ty^YiXtVI>o!!c1xy*-s@x+XEofM8t{(Ow5X>%Qny{7?-HN)i9*6p;|T;&N7$l; zi^UY7$e+Js78(}K@4teogoI=G(SXsgnlI_)6r<0eNA7*pHUIx*e=N`cdkdluaQwRl zb}NKXM7tI)?xPDio&xk#TGTO?R(jQbgXGMzyJ1`7{pWB#cW5l*G&_>=B8;zMu2D$b zBZC1@Rk!RkUnfr8s&8Vxv7G$WuZc=F^*zxC>giz#)5>pOoA>d<`{kOeqw2Srs^GR@ z4|7nK>EQ~Pro6WpzUqhaLN?`o{Gl@ zoTq_DWSEKlrc*I8eL0fR0ZU4`+d9G=v&Z7;{@H3(cRf$OKKN_*_E(x+;4c(spFsuo z*pZ|t{B|M{jstN6F~I`mPZFKAL*M82kI0PZqRDz7*biKR!0SjNsNT=C7Sc$UP*$zo zfBTmC4w-Fx|4m2p>y0d9tvMq61eY~pm1NWj8jf%fK zMUPe8n^e7UQPds9>2IeRP6ZbAiA%#ZvuG-Xuy@eP^QA{6iCM zvVyh(X|()@xeVBNk&C0KT)2c>UUeMiRZ$X3SLcOM#L34zDFzn(ntISY$2@9qUpfC?+S$>Sw4Qp_u>t!Y(iX{T1gd< zi<}ud-b>cBH0W9`2CUh;NBO#02xfr_I@TuFAnSe!yd3`xC4J+2kb7BW8S~edIWMvh8}X5w(r{B zCM}R;AE9Gd2QiuRbBlVCT#bDiHhY(yAR~DPXKK}5vhtKh&vD_Knvxo->ar|m`I&g2 zv>kiod}EejkEd`t8!X&Oa$KeH^85#EH{YqTE^i!Zv0a;MMTv3R7eTs4VYxY^+>yda zN^YUbd4(Z+!ke!)SH$2mOYA=BrkhRugcn2Ar}QG+W=Mb{r0?^0$edbt?n(2FaRAo89l}IC3r#albS$@~8 z0W31!C~8~lpUtLJD|RlwZ37YDTF)+yYdy>}JsoLIPQ75Ea3L!|NFJjUqIQw`^M 
z^<$893_rvzM8)G>rP#9=EXN{yAy#@Bg}s)J`kHulVIsYd{!XXI98ZN*Othf)nECR$ zoBO5HKQQJUELLrFXlZlrF^?`VTOA3Fx7!7~Wc;8V(bLKyBzDiy41}-MvJZiQ?>Jz1 z;k_>@ zg;xHd=j-p(S|PR_rR~m^k+KPcE3F3$O4g)@?D-{QopD5>r57%ufoaotjK(GgQ-DW11&X9$B65|caXgT+z zN(ck7ih@t&rH8kVEGs??v3LzgD~>myjkfGAw2rx)LVTwi^WNpwdvu)0p7q`&R5a?c zgNI7RNA;iP6q=F(qP1siOhQ;Tylrq{0PlYB>68ao7ebt4W2Mc5*V6Eb>^{vzDGG@A zzUb3N8fl=1P9}|s?zw6O`fn!f*tTkJ4V2oh7~;MrPv6?p`bD$qvmv2;zrHCndoJM} z;wZSA6q^wD;K=A#(l$fJ70wwG#rHLUEIMS zj~a-#;B*Sp8{)qnr4tmWH>5eAZ_6d06oQ#h08nqZ*gxY(1de)Ol%kJU+p{_YT#*9k>N<2-j#I$m)I^;7oz@8& zLtfQ!`c#cLViCN^N+o$?wmM{#e4wnvS;iH~IITsFZg*4_E~-xnw;a<<+bCR>_}KHJ zhF5}ge#_5ST9|Y!XVX-jo}CADblP3mFM}*>NNq)d(Or5Zwb>U)ZMtBg6(|}V0rlbi zS1LZOXU`a6z2$w$ZiEa)>*^S?%i#~c932RD@H3B4q4UwB*~S4I-gKiQzZ}1{af#dK z%$x6j$z+`q?wm7!Xvk|$WW}7&FI{nt)xY;n+^J!{sz$=7?-UkIjpwQWt?1YjTng) ztmsaG%E+f(jq%oqN$kRu)mDm)XEIzO0*$*)>B$P2t9-EL34UM08JL z<|9~?6TruzSoe1@-$L!i5A1m6(X>`peaLy(z@*`DKK?@WY1f5rQ7>ZL=?T)C`ynd6PV1}s`ioi%6WoCn^P{Sx?NHHEn8}g8 z`>8ik_S>?ba;vbH^-I!;iw!i+0@&}a1CmXRC?Kjh_rj#D->@^hWdGiFuMmkf-c;4z zURZ1J{Me)|5y-0L`T8%OqMbltkFHzRiw8KV$m6jM;+nqFZEOl zxREH=Wn=V3Pey+ibSk(Yeik=HAD=cT!7Rj~eR`8a=qv1f^7TYUNVqID%06M)SjnOm zM$cG=Sor`n;`KiVQ>0DH3sK=*cyhlCe%o)N@-Sp!YxQMbtaD0r5Msk$vG1CP?C{dH z=bt9Zy;-bg^uG@9Wx<=k^=4oNGkl->=StPN3FpmS%D7jUxSl>Lc=>4_jr7Boa++?B zF|i-E-Q%gGVi=N>G*PoW;`Cx2Wf^faNSR%qm-(TDFI>qg?)sp<`^*@w;K#3C=7*!c za>#Pd+cVE1!85=>FL$g0gI+}{*g&4bqy9Ifw~##gwo8%fX&RwJ^)v}F2_JmG-F;y} z3c#?3y;v(qksgIqT4|{F+jj&+C1-Ee*-1v`QpO1(F9jGw`Q%R~-c*xZX6I}-W_OD! z8Dy{`XiQ`T1#9+>EsTUNeQ8t+)2k{E6*$uS7MOqrzXzEa!TVEgdBF$(c_`|)TPZ9n z(BHc@%IPQ1+SS2+7B`SRrvly5YLgnehbmKmZ(-&zx^eUyWk^2ze>@*JjOrSp6lrST zP>ty*IsjN^?a8o4hI~v+pf7{WmqIIw61y%cxFpYx(acgGFTME1aTtA(?l1}KlKPSP5-pU*{0J~uc>9)bJ8$l=(p3$>Yx8b3_%XJShPu*@bwrWR zxQ;M@@)E`Oc@#?N?lAs=^(bSx(Vl4YVJ5XNzi{3@c0!12;D1j4fR*-ed%;QA3Sbip z_+)rGWo9J`(#=Q%|A!gtn8T;-D4g7!*b)CJJ2OJZ1Jd*7eBV4Pne}+i%mH% znfR=*^&L+yk*!!e^na&)JUwD^%L>PlbpIu1=Ua_5s}fP~+6yAx4}C$NV3y2j{j;@> zOSRS^feb3mIrz(#74N+&X@_EEkmXeAeQs^SIPxuf>@0IT#__OIlOBxmo92_C?E01c zwW@>%%W)xEC)e*}ymv{j zLAPs*{TGI?&YRuWPpUH8fmGnopB`n^at!CUa$|Fax#JpJJ>ceq@cX z;PTw)uQY}ren9*kn`v!Xg5_LS&QT}FU}uTM_VzwjnViX9TB3;Kv}IZS>44NCWa5YU zD@JJQ)kmM6rzbla6wF=$>wj*Q}%vtoM-qM5%k%ct0l6T)Re7AaNQw9Myq0pO zNTUNBukR|UE5EJrJb*W6!2$tVt)1SLGO&Y7zwKZxTAPM;t~Un(1vBW_q=m3xBZlk- zwA*j>>gCoA|6_C3S!TihEU?3;oAc6u>`T5rz3f6Bx8>^Vy8*X_QI7*H+_-`UNf^NW zcK552Y>D(t4tk$`oUhO^d7T~X4n>ApJlfZ8+&rQ^ODbg zTfH+vNd4KrT= zOB1CaxyqGCDXxS4;&AC+MsbkkDbRcGp=qS_7_S$CHdmXW3ri${=KBwi!YoLfJR8pY z8R#G4S!*0Jv=Uum(EE<`61VooX;vu@B9IZfM>pBJfA|eano#^<44N6}-f^JNd>eNd zT6?h;%XRj3)P}PKXcv-UelQYv&}Zechr?jFcW}>SBcQXT`Hs&qt2#DX+qe8qHD(~e z+kZCp%V?hX#>$2j$i2E2rRGE4;Sja$-hn7#W*f( zh?JJMFg2And~KvA>uV#4041Ru3T_OP%*T`$*6C;7`j`-}A}q^mvV|?9Z&TgX*@w*Khy!C$GMLYC!8dAC7zN z7U_+7pnVH5YRUR>V~yZF{iFKnjAe(=85;oP?F9&;YpLd~tt#YFZt@1QUOECXi_ll| zTw`W0t1;R;!CKD+cUY@dFgLn&WoEmC8;U4wh-N=y7g>)}nR>n$TX6vp7=S3;g_>oT zDbj@T^tl~YG$Wq@GzQ=fSEJw;9`+k@ zfZGEtFB=om)agzD&!aJrb4|Of%uA6kHqL1wT3>85>=-6l3UwRC#VH_>M=DS0f2})JJBPm8 zv#!NT9^fG%tqtx{!%pE{v@7=pp_lhWtfGL+sj%T{p^nz$p@C|o&HAYfH{>V~Y zwIrmsW%;D3TMl*DE_7As+YDa<}wz)-ha(bhZ7s`x^V<{uoWP{u+YwKgQ z_bt_D34lgc&do0gk-`G}buXZyA5IZu(av(}K)scuh*bJsx?eP6o&4ZC9$0kxpj4P>br>FEF&&L$S^^xxDjnww6qL@ucUVV6J@O zJL<`sYhM%`2|QLoQt)h=3%GID9g9KAh9o;Jpxe0FKZg`Z^8#HF&o zdJ`>j$_)u=$Chn&F_pUcJlt|uPp1C-CG{9JT_Y%a3oeifSyA}z0jfFXlXdt?xc#3^ zr8m2k(9c06l;|%gd#`-xzq|&ZX@GMm-zhxezSd#*tdS$+Rigq78~o6b{a_?9r21 z<+DQGczoW*5L)fQn^w;OaHa9ou721CNHu_n14x6Cdl8U5T;&O_5pHJ+d#EbCj&^q= zFKwqoZFr<4s*R$=!Hf?{th)=!e?w+z^i}*gIv|td-2P1MUk|=z zj8DtAqhIFJva(+rjDDdsw|CbzJKB}&W(*@!Pntkn5gJ8}7YJb|CxPHAy4MR9nZoCQ 
zdOqg{w>D{>9JIVUKdfG&gw@(IXP3(S9FFFp`06oo~N(=TOE%Xqj1N7{|StLQ`i6qerEs>e9Z#y zLMND`fV2s&pHYaa5u@>SNxKv16~&h@W8Q_SiQ0|^%{DhVuQA+@UU>B#fZRh?e>%)| zrh-t;+~XLL=AqBz4bqRo2;guNzx2*HsK(> zW^PhfG8RA6k#_m|lnu+gE;p{0cK8#`oD^>DWp?MmPh%ik0pa9IsL8FPnu4fUF9%AA z|B4Hhc8B_h$bLWMmQHjjrAwg~BgXEK-fyMqt?TN+VEmtB_GJoZzT zYJcYvA3B#tzK|Le?~=)#>@RfAl}z_FSdJeT@}G$(JlZo`^yFqwkvuus+0?&^OFqLo z_gg{{hI0l^Xpc6EPw#ko5K`B}K>*z_JQgDs`P3Rp73BNi?#46ipmaJ1%gwL^$D%Zm z9Sc+O#+@1MMaeZ}j+1kEpkOOnr~2sahvb?R%PkW{g^8Ss2;vzm1AzFXL)X<80P#Wa zSERO$u$2t!B`yOd!nGlfI2U&%(r=6>6|98~aSYXLJIO3+`P*o1xhueC*j= z^WJu*S96ZM99N!L4w9gTw{ESsT~lqSYL>59aqnuor}9EsJS(+#$v+rQBq82eYK#QZ z{}dC*by0T;(mPaeUISrXIZx1uM!_@;OsY{JBZD3UTVbv!2>QTX@JkD_{OOaS6yYAc`wwjoArpSMt zFI4q>K;fwd(nRnClyZCri`=X5cUC{md8FbLg|L+*GWx-RjYU#rnOdF_whcSVlPA`> z*Q29-y=NL9_Ud*B&fG^&6sdgu@=&w8gDP7D%Zskps+g*!7|%lb@$T*61agisWV%a6 z<<=JMNV{?O+VmX=QP?QbDa%$wp+p+Bke}Z%0H~|N1YDyWcVD65Ac)KVGaHb41K%O> z3D|;P;TMg6mM}#)-?=$9{blvXn$x_@$Zm#zHpTA@Zg9gRUqhfC1wa@?%6$NOz?+bN z6uglAOwZYRbfLiu9bMdwPs(!z8Z=TLvT4~DC<(m22h<*qe8lOzY3GFW9UWL!&9QX@ zLVLHVPPZ*uP*r=7J3GDhqx}A?r5lKpm@)e@g-^AwXC*`+yX@uk%*U>}o^UltYv??x zLC;t2ShV&RX6L^3-0phxtUc}7S}+Wj6P@JGt~K4iuPB9bKB=%+Sa;DS|0`Nc=^uNvHVM_va3ROj`n4

rL`zx!1EJJc%Xs2KNJR|EyWGker<&j{QuPyfTt@XP9Z$TxPb7z9@KE^dx0e534z7}!SzclDTp!;&Y{&im1Y3+#-EqW{uF@*lM& z?jObYZ>6P_?@LR+yP^S#L(&a^ME|ENN^#Sq4!EMwwf1jv(-<58OvW3@7B``hm}(~e z_VpLHjX9?Z1lhI(+BHUE*+4rDEbTKWr}}%i0>>|(zjy5t>96KjhQL2yjvh9|X@f+| zW-Rd^aN76Z%hgAZB8q9TeiQWUJzaf*u2ob`=c#2X-Cm2Qt{AR4#uEk`W$#^m$ip|Y zpp6rJ8I}bJqhZuF4d@`mgLI$*OveOk?;Ku*Ul1tN{3J+j)1Fi@ zqSnVF<>7g&d6~6=2A!%9%ALZR*S_TnRJhNAOurc1gL?ZyFm)=%Uc?9(=^lCk>Wooy z0iNPN=K>s`aRtlRio3JbaM@cmyQ>2W;`dg=v~J!GwUcE&AgQJnHYb?(o~;$P2>f*36J)W6Du0SY!|FmRq%~&* zR^(;Hvvj?>e`f`O-Bb3Ig`a3(ypH-*oIKN&zp(xk{@DIBp{>A?M%MJXO z7oh98wI_I;E6nYnaR#fXYHnwFU^&y%M|p+v6wQ~o)wh)AUU2x?^OV-|%=25LpUY); zfy5L77qYsDO(-yhiApac_}Ea%{12|k#Cje<8h1a?u>y*yN)`5u4?`M?{&wUDnCy)v z-(o5OjGL^IPsR_@g|jbvdrv#Q_SB-#nT0xuP~AV;op>OT`jX9JLBXV-Z|61+ESBgM zPVww+F^JG`Cgiv71FD9I3=2YCeoqb#`FYDJJAG^7MvKO4hZN7<3T#Q)dkY=Exr&&L zf@4$#gY|2mhZ8xdxa#g4!bO0>?Kx7nR|X{k{+|y^X^qfy{DfTU1)`(_qU6}9#R8{0 zXH#u-eabQOX@((hGM8&*ZeLNot1Fms`L3B`Qxu_Jq6u-y%~XsMVZ^(_5wyO7Pa!Ir zv73i0@SuP7`zzMP+t@xSGn|^9m2tMRaejEjy6RWmDo#NPBCq!)A=-H;9$E#Oo%$<|Q_!rFG|+4=v3*A~g*sGFBkCQbZe8x5~GP47J?^ z01_|-H&jn0Dw##JY1DR{8oQFA@Fh{^tZgb5y6@0}viHS;5)Er3S9hIA70a+7oqmOT z$1QeUW4fPKa%kFxLu>Zl`{Bu@?BK`PUvejL_tGhl!lD@n@jEhu}^GA=#Q=& z$~;AHbU#RbvNj2vzoBAa0_5k}=L99dqj%QXjnu9%oW=ZFr1)eW`-+OsP|f-;59Ysd zf#eV=&j71x#_s-t-3b5#J6K?t5U=FSHD7ouz(jf_dacklwBSfe;6+Y#`tZ3n3BE4? zi}Tj5FYZx8_a=#Slhhc@IC{?PIStZBQp6f>0t@Hi#d5Qju+6bh#+Ti3+m2z#!9Rw|! zVQ}sjJf%)?{ntgC&~a`VCgU`xuKu9E)AHHlTYuC6h8*=l^#SU6;F4aI1gPCkL(3)W zq*{($CnRpzi&r?00?2nWGgTz8ughnWU+VFF!kQD9(~AN3`BzcRTY^bRTVv% z3h>ccept>aAhuiVLM(11LK1992^Abv zc~FnK9(D9y7`8|s=w2F%$b^u=H{g@>7f@V(`?G+;y6gBEHn4<514ex79Jz1dw&~8~ z*2Pc=8Pp&0PoS3qjom8Vc35ee6guuA>AqQKBIcAQPmZZQWzff|3|O9eavQF=Mnt!? z>+{7r82)55`41-*gQh`>@B%o@iFZ24;VAwF=E7%lAeFs|M~uXTx4-Dgb{OJQPkscjS85YViz;YN^L&IHeYaT2Q3_&BX zo=4r6!P~Ec)O8BJ8Zy9@XP}EV6;jo9f;%(xhK*XO;Xd-^rcL>t;qmt-t?pAu00%Lt zb)_}p`l7CVhU0AUj?0f4?pqyYmqCu_yI0GakYn%YkVn=7wg$V-E_`wMtfX<)DNfdt znqLeHM#s#M4vd01e&v|%t|8j!q?oE)vpaHt@IOoI-3`r z{C=E&v?sgH$;e*Li_mehkq*1f6=!vz z+SFBfj9lX%pzf!&>zgNl4Rr)8yTyyXcLcUIf2Rp3ZP4dPD(vtvPH8(=e`(UWy3;DT z1^LVRm=8HQ^Xq?RqMd)-w6qyQvv};qRXp_zS3Ih!R=?FseM#P9u5^SKCYTIBz(acyE&pM9{-Ufp%hkmTrDOG^=dV{JG zih6?PV}myjAcF?&o56|*gxt^d^woJFO$l(-Tzz}>i3Am8#H>f9_FM-q!;6Slg|})V z+@J13zE5;?#SV2H0FJ>kmo)c)Ibukc&WMqIi5T?oG9HS|3K$DQ5uW|%o|L`+!h?tB zKNcs-?25Z7c_ViG)GeSB6IN2)rI6`820M8zkzU|6R1EZlvpFGjBH;(f&v4MN#uwhM$bgIQW8-cdwG`iPc7NJ3TU(-2`^Ilvcz3 z7lOW})l#EOE-GN@h3)h+y2O)|Ub}S67?N~jT>ypqv$evt)iEPh2QB2ZggZg43-X)N zy!BrW`ccvqnZWD4X*693v6TL5L+BPoCQrE%9 zk;}$#T`m#t8iKWEbVD8chU|PwksKm|2hQt-!D__`&4-%8IaJ^jIIqO+zQFLk$D{`z zirCZVT#}H%TmyG#S{nsNSDOXt20tj0v)0%T13j9!-w30=$w=VH4Cj}MYPf(cInOSo zx@fY)g|jN#SnIx82f}9^tkgJh{nZRNZK!5|W@t8G+Liuyb%{Ye51-L~F8jE|IIAFj z`1mgTnBrHJLv;54-&p&RwXo%(bBg=NdWgXC@h( z0a#B^xY<}?Ps=6?oX8Y!$#Q)9!UGzQ#(Wx`TAZhCF`xP7XUudcGW?A{k_-KQ=AdN5 z;K$%tIQ<8C5p%`A&5O{f65anIFYDZGdb~4{hdTV0B8thd*FXenTpugalZ$l3s3a38b2XEI(b%2D=gd zFhG@*c1R(v`oFn?^V*QYA~@j;1+=>mIqc*>d_4HCaw4GW&U|19PQP20M()8+`>x<&M_N8<$mpxIg2`c$PW1UQTSmjCuo&(FWCp` zz@Anka?Fnf^k&!EuTN4pw)AxP^_;l5Uad3J)8$Cbr|jqDh8I@fd+^w<+o#~yls7>- zeWfFA&9~h)(n#;QGE#5Vnm@~rTgW{5nD*3e#L9YX+$Fh78pFK|qGS8w(b_w|A#zC0 z;SUG9TV+};kXgH5J01if?!p?RQrvZ)I?0dRD%N5iS0SFe2PA4Mo< zFn<*~K1<|WZ&@u(z|sYW_VZJceVYg~lyB2JliR@|vycF5>p<9Ur6~N3^p&Bc70Q-2 z|MLqTqqS#1oEIez$nR}KEh@&doJm*+5|Zm@t@e;(r{wLp?Cz7%l}L|j)^aPB+n1$3 z2fCl+Bh{slUBqvIkQ`11ofv1sH{&<_S|Z^|{h9dw z3kAXZ*1RIcAVE+%=L)PX&5ik+g%VH%zgD%h2doC$SP#fA+5hR4Kw2oc9z_ETu)w~nV~wUjS{>ns8j}ezaXvjVxNF1J?cWHHyJnc z?z4#th9e+5xfF$d(Xx->gl`R8Z%nQ;5h{cYa-G1FLJkDTg6nh^l3spgK)Q;G^jbR# 
zikBLEh9MZNO<3<$ZBsWZJUny86qC}A$1fL|YU~3)d54hXK%1;v}j*S8LJ{um&2-RAd8K}U9*SDaRBPP+J)_N4*A62lguwmemZc$-dxA2J- zh{OS33sNhATSdA7ps{2I@gYd#f+C629A;A4$PYg%aEI7Y2@;CqpP%| z|45(G(`toE(0+D7&^!Ha7KW12$OiV=-vzLRNsRX)ZFQf?O2N;ih;5kh3^{BMI-HfC zEn7}1`wCN-xL>!nUCmAMw z<}kX(-;+T;Mu1~XPqv1;jR&o&!?CJb@>}X<0+o8vA-S|pPV<+cNs&L)$#=;TFQLOt zZoZWgr%AP^9FEsFv~1>^x8S}#xIn~lDPL0n0SzKlhc~gX=>d6-4uX91QO7=Y@|=t0 zgh&Ob+g?#5!he}&(t_C;S`+XzdXdWxf^}r_B=a2k1_wCr0e~9%o!WozcUrTk6gj-c ze%DYq-|Pffcux#4Y?e^g{mQHX3>y;^AZ#X6X0P;o5Sn<4`f${!QSf`fS?vgd0-p3H zGxc15!|eS(8)k#hnI%r^TbKEl`j;X@27tVz-sIm500F^*@p0LRh`EuceH^r7-a7kl z3sT?s%igpz^XI%8ZIZlfpZ~!mBo^7sUNJ9Fub+P?NO}CqWil|ja7lO7!<4c)MF?vK zmgAxo)lN_HbjI%Y^P`PE>Xo~f?j!-W$|-YUk`;Vcd)WQNLZ*!CXn9S0*`#$!KRj@( zyqq4mYn%LYP5(8(0jIiK{3bonZGJmN@^p}MN5iplfE@1o|Co&IUwF;3;%H;P1TfS? z;t~!kj2hz|u_Z6|huL<%M6&f!&B8FJ&^id~JSgCqrrE31yiZ zZ9i;R3e?wmu{ncQmhkG#DKc%wR=q6TfT9!64Of3S#9va^st6XXxb8g#m*st@xPCd+3M)tF^l~G z{PAi!A$VDIq{6BYQ$nwLl_D~4+e^7OY^S|H={l)RC{WU`@v$21b^5@Ilq=H~o2y$d ziqc6o10p~e)M|38omPU^c#N(fVw0LBp6bAP<{Q?I=0~0rLUnW~vTW^H`c>#QeBJA7>{|MvH)45uZXfXS8A72z`aOmu{|R+sHoyosBP=tXUvs##SJKB@4&#HL104T28F3-_K6 zvL_!ka>oJjK#FU-&Te`U8H}DSr9HdIbXvfiqZ+;agf{j$6BCc&v>d8k ztW(=`6di!P3SWu}P4Dr^O@$z03gm_C;b8g|d_dqR_yc>|q3D*SwrZAXTg0Wqcf^@0 z%uBhMvb`x;f4KT)?+E0CTJ)r@mNjGMp7ma)+SpQZN53`F+iFyhRbymB{pmQD2!+!= zv51?@<>-(VV-ii9j4Fr6DP^a%jhmZbjbJEKu)3$DeMf!m2Q5B)pxu2Zc8jaZx$x{L ziIZ=p_9|fs#qFsHlrlf3wnAHNDO$JN*MZ%4ESI`lzE3+m_pJZ9sBh1~46He15&p>x ztO7Lw3+I1&0c=1InjDx#`4Tys(S~~dij7T{=U0+iHIrt`HVIy}Zj8F5) znPK2?gHD}a#h>k)&w{>(9Ca2m*i6t$##agb5$R~!LX|PI%T6yHUzx-(%yt_VpVtKU zzT9e3SFu6guhJXq9QnPc4#x9@bN7%xaFbrJ*6wTtNSeFS6zEQU25o;O*;fZs+`EEB z2u5247yt=SW3eYNXk8c!HxbeYtqXsWAPjp%qbPl*^dVIRE2Sh0eK(D-aj<43 zQ1IuVvZ66qlSG`%n(z7S4D(H4`(laVE;QZn=$rXn$cpk^nH>k}d}>BI zkxYgI2_17X_zP=R5x7s6vcZ2Y#nymL>+k%14f8$C=uM0dt%V3{p5xGV42p_=@D+!6 zvM_d*Er+WLb$$|_I+x1Z{OMg>(!M6Q21W-&6z{1v*@2moK_>FsOo{8Dl1TqdiFO+G z4;j+C0pS!h+*WnH{;MgqV%WU-6Xp$B%`_oDGSgV;l6e8T;dx@MB$Gp4d#C7IF5nxq z0fco4$(S!RAcNGsK#k_X-2Z1|KnR$K)peocxXm)xNj7225cFZkZGS`NXqkv88oyx> zVg5K?@`Kw{SR_-dd2ZFyJv9P@_oy3qr!JeMH!@wl>TUhYMj1Su)H%U)ZdznN%UQw8 zc3BuTh*O`l?4yltvSsyL{$DElz)fWm>y`&kCv+*{C^E0$J1 zx?$su?Y{r`?p}FOU+PV0V~+|uTI(CK2JNjHkbt!Ra|r-{kW`SDjPqsoIFy;A2NHLy zI&y9-bX1+l=lm26Zs|CxMDpzmsuG5v?@!jTB_ly~UK@egS}MUT?n-M2ru9&f6A*9+ zxa10xFsx0d-`E@Bi%9EPU`u6t_C-+rTj8&X6>uQrE{prg4jWn1uu@rDX+M8q+*K}En(qXIeVi#m$}u~N z3UTMgpgo5;lcjf#Fz&NYWm!Jufr)tyvDkO;>rR=jka~I^SN01z zG3$*H?r}ivr+H>BlUylgxzPE@L^m!Dd8F}hs9?oc$}-;lasEnK@tviO>IvE4+`^QRpL}HID8zbU>7NKPCHyu zAtTCJ!M&dI}YgBFIMY(Wx? 
za~&gB{yY-AP7uLj^p}D4=frT`ysAw$Nv~%fU>xs$wQNtvI72-fxdr^Ftu0pC=KgOM zf7z8dD%Ow)n~Am#_%tE4C~<~MgY<@(;b-P+^29$zbh4-U{q7!q`zgCmCf{iG$HK}B znx$%Sz2Kjss9-gLQF`|>!eJ6$S#ea1Te+F9Se ztX9azh0iCxGc4cD0dXohLoL}lV& zRBq_ZDW?G1C}-;Cc_{c9y-S<5@alaR&q(jHW7-+H?QrIDhn(HZoMTw+9d75Is06k< zqb&P}fuoLg4V>M_K;Ohn^Gt`MI$bytU&nV}wm*LIg?7up^Cn-l9?F&jy4KvMB~#9= zy=U6W{I!rA`BqS|_keU63nSL)08TG!;&pkUT+)1Uz$l-by2sPKhe?hso-Z)T1DRt( zb?QqB?GjQzOV;Znno#mcA60zOs9;3xhaPAv zm~Ug`V>Qw-Qb~V~RsN#bg^7sw%H>7CPyObfey2dfCcNI+`xN_{K~`|yo?SMvAHCx+ zcHMWh?AuKP3|N<-0b`XxSiX{l+z7K?6Q)@fKeCY&k!q!Ue6*g5ijO9St zFiPZvh%E=rhS85=R5Q5818z5g#yZx|VtwpKgs<_yT5-&Xljnr!(d2-)Dlc3oUDqMl z7oY+({RO)ZD|xmT%nQgT4{SJ)x?tia$sTV>J_(2#$zJcP*hfAo6nan~QKyj6zWIKI zZOwB5OX?_A+W*`m9hjM`SOa5zNLv5*^uACK?z>=y-Ju<`0$R@Aiv5aMp!RayAvzmb z1wH*Yp>BOcwsQ@2?~$6W=`(^?jWBHpND}DD#5i0S@p!wvjk!H-r64!z*r$n9bjJ|S z0Yu4K(tO%gS2CdAcr#yfb0@QpWlsdpGi?j_NBP)KpT;yj^Vdyz2M@uzYfKgXkf49E zy!wocIarddRtya+$C2AmX-pS?g#dkA)z?Bo#Y?AWQ-z6Ul({$VShu|9ojT&7oawJ_ z3J^&#{iz!Lbt^UPhmEmT?p$8mleBre@Ql?+`B7DZ#Mnh|3a=z0+V3IeigmV7oWVB_ zfw;p%&9r3qL71*(%?EU()r*V$sB}~^G_kGF_Z1m=H}8_sOXinSuN{8%Levr)?XaM6 z5%79Ve{}>gt&frUMO+?T6Xm3K(_9Gv4cO$!)GlAwf{X2X{pEfkDE)0%jtVpnISKE6 zLjBF4VZvS1;&aLt2D4Y~Y79c-y$0JQW00W?S0bK%(ig~q*+PyCzZ9^Z!B(*c+o~$< zuq;N;ekPs`T+-yWxzwBkd48tS9uFaKQ-Fagym71AZ20t|+O3nMCklZUhcc&^nM*PC8R1`4MYgT}rEegw82cj;A|0FKEgd z^)fE96d6e~nmaj54`CT4%~yQiVl^7le*ZIUz@Y=FiAp{FYcSz2-^X9cr+qe8aE1la zNc&psCW?Dggv|1$T&;$4z8v6WgN*ePZTFLSAsb)kY(P~Ud185>l=sTxcFz66?d~-s z!`#i%GM7i;%hH|E&tI9+(Upg&?aKx?D0n#of03qzzOh5&18fSDsp`(~Xe-Pu?K!_K z9BlvBg_^E+CL`SwlO0P+9hQtK)P*5ltpd~f3$n?RC;3FoJxljT=0>-yFsnbkJ$hAR z(0EhRVW%h47N-1py5Nkh{L@eSKIk0$_3147<6;JCY<_0Bs?T`!HYDeos3<+~vk{ssH7P%)fAZ1;C0Ws9>mY zoBiHm9kmd2Amlj{-QMt~r41AyWnU^-#6%pTktN0OD@LOlv=kS^j8*s_@9psazxQ9( z#P*U^-4WD@0DmsHa_Nb{+9gk>rXGU2O!i1-b$I=ah0qw)v<~el#X^0Y7vS;rQO7Ko z_(pGjTp5Xe$u4`^Cid=5R9fpnk83>dI#2U7t?c5Q5eCv-GS~jI8B`rm`rwX5dw^@h zsUym`W8HgN`=Q{c*SJIKB)2b_HKp`-n5khLr~BLUIUX+BpBdO4f6Imf6H2IT(Vo-zrR0sdZmxWbRi7$VYBZNmdKR@fe-*Zy z5s?S|{$01g1I{4<+;c|1Af9|7t?R4mavZvN*GTn{`V1-Ej3Xr&bx@VNP6=Z32lgjM zJZ6jy6;`zFhNS^k`u#iiLnw#Zg=MM)(>v^-S2d0|^mUB^uEI_EyfO7*=s5RY|7Sn= zsDJon5i8D~rVswF?3tI_{mX-06DI&Wur5FWRzykQE6Iz~NSz8l>fW+^5h9`hG>rC(o=jd96Orr%F~uT!uoF z9=d@cT;j&;h5YkLKuWmi1uu>?>9zaJEK4EgK|T-oEz&rW9t<#DsmoE3&YxG$O5iJ> z>#ZVGxOGioq&8tIXwL{YI_xn;-s6z@!IxGpM@pRV2|)_}j}#m$FWE(b!Ent&3@@B1 zCD-C1OXb&1Wv^y(nZNe7wF_#TamCu2R_GaM$#C@9D0kLv%z2;a7mZo=tklIHSoOoXCD3zf6mA@gQxq=k(sD}Y5r8cgS zqbM!!bEyAx=iy}Ca+UtQHYD^R_o@LY10Ys+L+dVNtH`qP#iITX3h&?a(}TT!t@)FG z&g`^Im+H~qhu1l{l6pVuVKwtfV*yI2q|~W~uo%RAL3d$zfQ9R|l4+GyXFo2RInNX- z(=n$c#r|v7JdraWy~bWX1GqX6TSV{=OlA z?|R}VXt5MtoTS%fWp|XEBspH z#0t2{|3=kRksl97wJ+xXq_5L%VI{6QvX%S)w0G@MP25>Hh#1hWSP%nJL=6@}YD7T; zqJjDVwJl)Lv``q_Qc+pV!hw<_k01)10ut0()~p&ZIlu4%Rs@y{+%lCCuuOA~5#rtbyL~ z|3_tgk33Gv+8A8d*wm^lqV2WJ{96Sag4>SORzQ4P3+Te63=c<>lr+w@CXdS0R~y>0 zR)yuc5F_@Pb_?UTozIA$sAbUfwTvXuCb$`mSdBpI*PNtcKM8kWB_bv$~$U@K+;uqtOMcD?ehvs}7GA>vkf`cSEr!fz&g5E(V#x#T*eth%(k6FRZo-nQk_YMQ z3lMDn{^K<-=SDjX&noGOe4Cu*6Ex#Qo1}VCB=)oU|9V=+&_L{xaXlqc!GtzhUg)w5 z**^ZstoE-zs5pNk(u5dsC&ou*p2*(aMcSG!u2E^QFEaMrQ1wYNg6Al%VPxsmP*HB-I8Xd>$vlJ`aPiIE&?po zz&L9w_Azq`3=udaK%Dy&{Utz}>6p7}#}h_JVW8|76q9;I$fz8764M&W%~knB#2zmv zpj5(;r}U05rUYgkT@i0`~4UV_zE4Nib3s4C%`^c z6l;8BOPfR61M=csFht2wlXGh9(>+e8_dbI@^jurv>BXC(TuHil!uVAZsXO( z@&ylf$nTzBS(?H*+^{M??$mcmc3q?TuZ?*;r4s^p2i7GJYpI4X%1vDla1V&-dmwu> z+FLQ|4V1BG5W_Ka!}3pe_zD#Zs_LS%G2#(fwfGJWxq9-g`y%HCPT?To8y={r1pAsF z3i96B?EA_{)q1aT#~D)9jlQUJF?OuY4)5m7Goz6rT+lr2st)QucQXiP2sb{wq5sRd 
ze(`?nJ_eL9;&|fRjgW{o9j85=Z7G9)3+abv4}t;QLG@Y-&e+`tlz|@8kDh7D;GK0vGgHx3P5CIokt^q9f1jAVFZRaQPWVisjhwD=kFG&R ztq9O?0~90Zdv-%Gzd+Z-E?x>4MW9PJ?AV|V05CzuR_`Ns+NrHY=$f6Jm@sL~;uCx{ zq(>2PZtH$MBC$YOaz1??nNO1M>N=`*RbQ4r6sXb`2w!daeow_C1>Y<9IFbaUS$7FW z=l3^Szvx(EM5}|~V(eTb!gdlfhck@rGndWdkV2^8fqRXJ5 z6|0D@T4Y_wXV}!RbrbSU^vV%l4R3?-P1fdr2IUT%*0$_10ky!vj26pi4Var8i~{-g zv0x$d2rrs$A@Qy*Z9A|)9{Y)f_r>m8m!`b=Kg9!p08k@#;3PJoXKyb_2~v+xE>WWe zNuif>+GnnLop={V7xLZ%N~K>uDRiKWOo3#y{tZ|B$2j=P1#z@2Ns+umO&3?zvu7b& zC}zq)e8H+qwo$E0{1!}5(~{)WIdA3ol?&TFBa`P9E#w&h92jyh%yJ+iEIrLt6W&bG z5X926p~4j#=x+G*zjHxqs_cL1Q^YtKQ|AojOmK$g{WsZ1DY0&^T{%*}=5Xe?GklIT jnLwOOe;PeJBP7oBeVM*5(>5?|1JgF}>o#EWW81$0SlEJ@ literal 0 HcmV?d00001 From ca00327581e19b0fad5c58bd3ca29f75f23cfeca Mon Sep 17 00:00:00 2001 From: duzx16 Date: Thu, 13 Apr 2023 15:38:55 +0800 Subject: [PATCH 105/110] Add slack and wechat --- README.md | 2 +- README_en.md | 8 ++++++++ resources/WECHAT.md | 8 ++++++-- 3 files changed, 15 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index 44a3442..2648dde 100644 --- a/README.md +++ b/README.md @@ -4,7 +4,7 @@ 🌐 Blog • 🤗 HF Repo • 🐦 Twitter • 📃 [GLM@ACL 22] [GitHub] • 📃 [GLM-130B@ICLR 23] [GitHub]

- 👋 Join our Slack and WeChat + 👋 加入我们的 Slack 和 WeChat

## 介绍 diff --git a/README_en.md b/README_en.md index 632a22a..0d4f1ac 100644 --- a/README_en.md +++ b/README_en.md @@ -1,5 +1,13 @@ # ChatGLM-6B + +

+ 🌐 Blog • 🤗 HF Repo • 🐦 Twitter • 📃 [GLM@ACL 22] [GitHub] • 📃 [GLM-130B@ICLR 23] [GitHub]
+

+

+ 👋 Join our Slack and WeChat +

+ ## Introduction ChatGLM-6B is an open bilingual language model based on [General Language Model (GLM)](https://github.com/THUDM/GLM) framework, with 6.2 billion parameters. With the quantization technique, users can deploy locally on consumer-grade graphics cards (only 6GB of GPU memory is required at the INT4 quantization level). diff --git a/resources/WECHAT.md b/resources/WECHAT.md index ffe3ec5..c9ee867 100644 --- a/resources/WECHAT.md +++ b/resources/WECHAT.md @@ -1,3 +1,7 @@ -![wechat](wechat.jpg) +
+ + +

扫码关注公众号,加入「ChatGLM交流群」

+

Scan the QR code to follow the official account and join the "ChatGLM Discussion Group"

+
-扫码关注公众号,获得入群二维码 \ No newline at end of file From 63b1e4e8043aae846e1cda44480b4e7f35ae9ab7 Mon Sep 17 00:00:00 2001 From: duzx16 Date: Thu, 13 Apr 2023 15:47:56 +0800 Subject: [PATCH 106/110] Add line instruction --- ptuning/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ptuning/README.md b/ptuning/README.md index 2cc20dc..ffa45ba 100644 --- a/ptuning/README.md +++ b/ptuning/README.md @@ -176,7 +176,7 @@ response, history = model.chat(tokenizer, "你好", history=[]) ``` ## 使用自己的数据集 -修改 `train.sh` 和 `evaluate.sh` 中的 `train_file`、`validation_file`和`test_file`为你自己的 JSON 格式数据集路径,并将 `prompt_column` 和 `response_column` 改为 JSON 文件中输入文本和输出文本对应的 KEY。可能还需要更改 `max_source_length` 和 `max_target_length` 来匹配你自己的数据集中的最大输入输出长度。 +修改 `train.sh` 和 `evaluate.sh` 中的 `train_file`、`validation_file`和`test_file`为你自己的 JSON 格式数据集路径,并将 `prompt_column` 和 `response_column` 改为 JSON 文件中输入文本和输出文本对应的 KEY。可能还需要增大 `max_source_length` 和 `max_target_length` 来匹配你自己的数据集中的最大输入输出长度。 ## 对话数据集 From 9ac7187cc12f2e66f28e924a9868c524f1a172a6 Mon Sep 17 00:00:00 2001 From: rainatam Date: Thu, 13 Apr 2023 15:58:19 +0800 Subject: [PATCH 107/110] Add finetune evaluation script --- ptuning/evaluate_finetune.sh | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) create mode 100644 ptuning/evaluate_finetune.sh diff --git a/ptuning/evaluate_finetune.sh b/ptuning/evaluate_finetune.sh new file mode 100644 index 0000000..e275c3c --- /dev/null +++ b/ptuning/evaluate_finetune.sh @@ -0,0 +1,18 @@ +CHECKPOINT=adgen-chatglm-6b-ft-1e-4 +STEP=3000 + +CUDA_VISIBLE_DEVICES=0 python3 main.py \ + --do_predict \ + --validation_file AdvertiseGen/dev.json \ + --test_file AdvertiseGen/dev.json \ + --overwrite_cache \ + --prompt_column content \ + --response_column summary \ + --model_name_or_path ./output/$CHECKPOINT/checkpoint-$STEP \ + --output_dir ./output/$CHECKPOINT \ + --overwrite_output_dir \ + --max_source_length 256 \ + --max_target_length 256 \ + --per_device_eval_batch_size 1 \ + --predict_with_generate \ + --fp16_full_eval From 5ade1e405593d4acc857a704b3fdbb346e1506c0 Mon Sep 17 00:00:00 2001 From: duzx16 Date: Thu, 13 Apr 2023 16:10:10 +0800 Subject: [PATCH 108/110] Update loading instructions --- ptuning/README.md | 5 +++-- ptuning/main.py | 3 ++- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/ptuning/README.md b/ptuning/README.md index ffa45ba..acd8ca7 100644 --- a/ptuning/README.md +++ b/ptuning/README.md @@ -152,7 +152,8 @@ model = AutoModel.from_pretrained("THUDM/chatglm-6b", config=config, trust_remot prefix_state_dict = torch.load(os.path.join(CHECKPOINT_PATH, "pytorch_model.bin")) new_prefix_state_dict = {} for k, v in prefix_state_dict.items(): - new_prefix_state_dict[k[len("transformer.prefix_encoder."):]] = v + if k.startswith("transformer.prefix_encoder."): + new_prefix_state_dict[k[len("transformer.prefix_encoder."):]] = v model.transformer.prefix_encoder.load_state_dict(new_prefix_state_dict) ``` 注意你可能需要将 `pre_seq_len` 改成你训练时的实际值。如果你是[从本地加载模型的话](https://github.com/THUDM/ChatGLM-6B#%E4%BB%8E%E6%9C%AC%E5%9C%B0%E5%8A%A0%E8%BD%BD%E6%A8%A1%E5%9E%8B),需要将 `THUDM/chatglm-6b` 改成本地的模型路径(注意不是checkpoint路径)。 @@ -160,7 +161,7 @@ model.transformer.prefix_encoder.load_state_dict(new_prefix_state_dict) (2) 如果需要加载的是旧 Checkpoint(包含 ChatGLM-6B 以及 PrefixEncoder 参数),或者进行的全参数微调,则直接加载整个 Checkpoint: ```python -model = AutoModel.from_pretrained(CHECKPOINT_PATH, config=config, trust_remote_code=True) +model = AutoModel.from_pretrained(CHECKPOINT_PATH, trust_remote_code=True) ``` 之后根据需求可以进行量化,也可以直接使用: diff --git 
a/ptuning/main.py b/ptuning/main.py index 193a60d..43ecdf8 100644 --- a/ptuning/main.py +++ b/ptuning/main.py @@ -118,7 +118,8 @@ def main(): prefix_state_dict = torch.load(os.path.join(model_args.ptuning_checkpoint, "pytorch_model.bin")) new_prefix_state_dict = {} for k, v in prefix_state_dict.items(): - new_prefix_state_dict[k[len("transformer.prefix_encoder."):]] = v + if k.startswith("transformer.prefix_encoder."): + new_prefix_state_dict[k[len("transformer.prefix_encoder."):]] = v model.transformer.prefix_encoder.load_state_dict(new_prefix_state_dict) else: model = AutoModel.from_pretrained(model_args.model_name_or_path, config=config, trust_remote_code=True) From c34ad400929ffed96599054b11527343e634993d Mon Sep 17 00:00:00 2001 From: duzx16 Date: Sat, 15 Apr 2023 11:05:12 +0800 Subject: [PATCH 109/110] Add instructions --- ptuning/README.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/ptuning/README.md b/ptuning/README.md index acd8ca7..6e7d842 100644 --- a/ptuning/README.md +++ b/ptuning/README.md @@ -36,6 +36,8 @@ bash train.sh 在默认配置 `quantization_bit=4`、`per_device_train_batch_size=1`、`gradient_accumulation_steps=16` 下,INT4 的模型参数被冻结,一次训练迭代会以 1 的批处理大小进行 16 次累加的前后向传播,等效为 16 的总批处理大小,此时最低只需 6.7G 显存。若想在同等批处理大小下提升训练效率,可在二者乘积不变的情况下,加大 `per_device_train_batch_size` 的值,但也会带来更多的显存消耗,请根据实际情况酌情调整。 +如果你想要[从本地加载模型](https://github.com/THUDM/ChatGLM-6B#%E4%BB%8E%E6%9C%AC%E5%9C%B0%E5%8A%A0%E8%BD%BD%E6%A8%A1%E5%9E%8B),可以将 `train.sh` 中的 `THUDM/chatglm-6b` 改为你本地的模型路径。 + #### Finetune 如果需要进行全参数的 Finetune,需要安装 [Deepspeed](https://github.com/microsoft/DeepSpeed),然后运行以下指令: From e182e13cf51474b5f5a445a5f024a144a941d701 Mon Sep 17 00:00:00 2001 From: duzx16 Date: Sat, 15 Apr 2023 11:17:41 +0800 Subject: [PATCH 110/110] Update instructions --- ptuning/README.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/ptuning/README.md b/ptuning/README.md index 6e7d842..e3339ce 100644 --- a/ptuning/README.md +++ b/ptuning/README.md @@ -146,7 +146,7 @@ from transformers import AutoConfig, AutoModel, AutoTokenizer tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) ``` -(1) 如果需要加载的是新 Checkpoint(只包含 PrefixEncoder 参数): +1. 如果需要加载的是新 Checkpoint(只包含 PrefixEncoder 参数): ```python config = AutoConfig.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True, pre_seq_len=128) @@ -158,9 +158,9 @@ for k, v in prefix_state_dict.items(): new_prefix_state_dict[k[len("transformer.prefix_encoder."):]] = v model.transformer.prefix_encoder.load_state_dict(new_prefix_state_dict) ``` -注意你可能需要将 `pre_seq_len` 改成你训练时的实际值。如果你是[从本地加载模型的话](https://github.com/THUDM/ChatGLM-6B#%E4%BB%8E%E6%9C%AC%E5%9C%B0%E5%8A%A0%E8%BD%BD%E6%A8%A1%E5%9E%8B),需要将 `THUDM/chatglm-6b` 改成本地的模型路径(注意不是checkpoint路径)。 +注意你可能需要将 `pre_seq_len` 改成你训练时的实际值。如果你是[从本地加载模型](https://github.com/THUDM/ChatGLM-6B#%E4%BB%8E%E6%9C%AC%E5%9C%B0%E5%8A%A0%E8%BD%BD%E6%A8%A1%E5%9E%8B)的话,需要将 `THUDM/chatglm-6b` 改成本地的模型路径(注意不是checkpoint路径)。 -(2) 如果需要加载的是旧 Checkpoint(包含 ChatGLM-6B 以及 PrefixEncoder 参数),或者进行的全参数微调,则直接加载整个 Checkpoint: +2. 如果需要加载的是旧 Checkpoint(包含 ChatGLM-6B 以及 PrefixEncoder 参数),或者进行的是全参数微调,则直接加载整个 Checkpoint: ```python model = AutoModel.from_pretrained(CHECKPOINT_PATH, trust_remote_code=True) @@ -169,7 +169,7 @@ model = AutoModel.from_pretrained(CHECKPOINT_PATH, trust_remote_code=True) 之后根据需求可以进行量化,也可以直接使用: ```python -print(f"Quantized to 4 bit") +# Comment out the following line if you don't use quantization model = model.quantize(4) model = model.half().cuda() model.transformer.prefix_encoder.float()
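
Putting the pieces from PATCH 108 and PATCH 110 together, the full P-Tuning checkpoint loading flow looks roughly like the sketch below. This is a minimal sketch, not code from the patch series: the checkpoint path `./output/adgen-chatglm-6b-pt/checkpoint-3000` and `pre_seq_len=128` are assumed values that must be replaced with those from your own training run, and the quantization step is optional.

```python
import os

import torch
from transformers import AutoConfig, AutoModel, AutoTokenizer

# Assumed paths: substitute a local model directory and your own checkpoint.
MODEL_PATH = "THUDM/chatglm-6b"
CHECKPOINT_PATH = "./output/adgen-chatglm-6b-pt/checkpoint-3000"

tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH, trust_remote_code=True)

# pre_seq_len must match the value used at training time (128 is an assumption).
config = AutoConfig.from_pretrained(MODEL_PATH, trust_remote_code=True, pre_seq_len=128)
model = AutoModel.from_pretrained(MODEL_PATH, config=config, trust_remote_code=True)

# Load only the PrefixEncoder weights, skipping any other keys in the
# checkpoint, as the PATCH 108 change to main.py does.
prefix_state_dict = torch.load(os.path.join(CHECKPOINT_PATH, "pytorch_model.bin"))
new_prefix_state_dict = {}
for k, v in prefix_state_dict.items():
    if k.startswith("transformer.prefix_encoder."):
        new_prefix_state_dict[k[len("transformer.prefix_encoder."):]] = v
model.transformer.prefix_encoder.load_state_dict(new_prefix_state_dict)

# Optional INT4 quantization; comment out the next line to run unquantized.
model = model.quantize(4)
model = model.half().cuda()
model.transformer.prefix_encoder.float()
model = model.eval()

response, history = model.chat(tokenizer, "你好", history=[])
print(response)
```

For an old-style checkpoint that contains the full ChatGLM-6B weights, or for a full-parameter finetune, the prefix-loading block above is unnecessary: as PATCH 110 notes, `AutoModel.from_pretrained(CHECKPOINT_PATH, trust_remote_code=True)` replaces both `from_pretrained` calls.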
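
The custom-dataset instruction in PATCH 106 leaves the file layout implicit. Judging from the AdvertiseGen files referenced by `train.sh`, `evaluate.sh`, and the new `evaluate_finetune.sh`, the expected format appears to be one JSON object per line, with field names matching the `--prompt_column` and `--response_column` arguments. A sketch of writing such a file follows; the sample records are invented for illustration.

```python
import json
import os

# Invented example records; the key names ("content", "summary") must match
# the --prompt_column / --response_column values passed to main.py.
samples = [
    {"content": "类型#上衣*材质#牛仔布*颜色#白色*风格#简约", "summary": "简约百搭的白色牛仔上衣。"},
    {"content": "类型#裙*风格#复古*图案#格子", "summary": "复古格纹半身裙,版型修身。"},
]

os.makedirs("my_dataset", exist_ok=True)
with open("my_dataset/train.json", "w", encoding="utf-8") as f:
    for sample in samples:
        # ensure_ascii=False keeps the Chinese text readable in the file.
        f.write(json.dumps(sample, ensure_ascii=False) + "\n")
```

With `train_file` pointed at `my_dataset/train.json`, also raise `max_source_length` / `max_target_length` if your inputs or outputs exceed the defaults, as the PATCH 106 wording change ("更改" to "增大") emphasizes.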