add notify

pull/650/head
kkscilife 2024-01-23 10:05:44 +08:00
parent 267771bad2
commit c5d073934e
3 changed files with 64 additions and 23 deletions

@@ -5,23 +5,41 @@ on:
   schedule:
     - cron: '48 19 * * *'
 
 env:
+  WORKSPACE_PREFIX: $(echo $GITHUB_WORKSPACE |cut -d '/' -f 1-4)
   SLURM_PARTITION: llm_s
+  CONDA_BASE_ENV: python310-torch212-cu117
 
 jobs:
   HF_model:
     runs-on: [t_cluster]
     steps:
+      - name: mask env
+        run: |
+          echo "::add-mask::${{env.WORKSPACE_PREFIX}}"
       - uses: actions/checkout@v3
       - name: load_hf_model
         run: |
-          conda create -n internlm-model-latest --clone internlm-model-base
+          conda create -n internlm-model-latest --clone ${CONDA_BASE_ENV}
           source activate internlm-model-latest
           pip install transformers
           pip install sentencepiece
           srun -p ${SLURM_PARTITION} --kill-on-bad-exit=1 --job-name=${GITHUB_RUN_ID}-${GITHUB_JOB} --gpus-per-task=2 pytest -s -v --color=yes ./tests/test_hf_model.py
       - name: clear_env
         run: |
           conda deactivate
           conda env remove --name internlm-model-latest
+
+  notify_to_feishu:
+    if: ${{ always() && !cancelled() && contains(needs.*.result, 'failure') && (github.ref_name == 'develop' || github.ref_name == 'main') }}
+    needs: [HF_model]
+    runs-on: [test_machine]
+    steps:
+      - name: mask env
+        run: |
+          echo "::add-mask::${{env.WORKSPACE_PREFIX}}"
+      - name: notify
+        run: |
+          curl -X POST -H "Content-Type: application/json" -d '{"msg_type":"post","content":{"post":{"zh_cn":{"title":"Internlm GitHubAction Failed","content":[[{"tag":"text","text":""},{"tag":"a","text":"Please click here for details ","href":"https://github.com/pjlab-sys4nlp/train_internlm/actions/runs/'${GITHUB_RUN_ID}'"},{"tag":"at","user_id":"'${{ secrets.USER_ID }}'"}]]}}}}' ${{ secrets.WEBHOOK_URL }}
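
The notify step packs the whole Feishu card into one single-line curl payload. As a readability aid, here is a minimal sketch of the same "post" message sent from Python; the `requests` package is assumed, and WEBHOOK_URL, RUN_ID and USER_ID are placeholders standing in for secrets.WEBHOOK_URL, GITHUB_RUN_ID and secrets.USER_ID.

import requests

# Placeholders; in the workflow these come from repository secrets and the
# GITHUB_RUN_ID environment variable.
WEBHOOK_URL = "https://example.com/feishu-webhook"  # stands in for secrets.WEBHOOK_URL
RUN_ID = "0000000000"                               # stands in for GITHUB_RUN_ID
USER_ID = "user_id_placeholder"                     # stands in for secrets.USER_ID

# Same structure as the JSON passed to curl -d above, pretty-printed.
payload = {
    "msg_type": "post",
    "content": {
        "post": {
            "zh_cn": {
                "title": "Internlm GitHubAction Failed",
                "content": [[
                    {"tag": "text", "text": ""},
                    {
                        "tag": "a",
                        "text": "Please click here for details ",
                        "href": f"https://github.com/pjlab-sys4nlp/train_internlm/actions/runs/{RUN_ID}",
                    },
                    {"tag": "at", "user_id": USER_ID},
                ]],
            }
        }
    },
}

resp = requests.post(WEBHOOK_URL, json=payload, timeout=10)
resp.raise_for_status()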

tests/__init__.py (new file, empty)

tests/test_hf_model.py
@@ -1,12 +1,9 @@
 import pytest
 import torch
-from transformers import AutoTokenizer, AutoModelForCausalLM
+from transformers import AutoModelForCausalLM, AutoTokenizer
 
-prompts = [
-    "你好",
-    "what's your name"
-]
+prompts = ["你好", "what's your name"]
 
 
 def assert_model(response):
     assert len(response) != 0
@@ -16,14 +13,24 @@ def assert_model(response):
 
 class TestChat:
-    @pytest.mark.parametrize("model_name", [
-        "internlm/internlm2-chat-7b",
-        "internlm/internlm2-chat-7b-sft",
-    ])
+    """
+    Test cases for chat model.
+    """
+
+    @pytest.mark.parametrize(
+        "model_name",
+        [
+            "internlm/internlm2-chat-7b",
+            "internlm/internlm2-chat-7b-sft",
+        ],
+    )
     def test_demo_default(self, model_name):
         tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
-        # Set `torch_dtype=torch.float16` to load model in float16, otherwise it will be loaded as float32 and might cause OOM Error.
-        model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.float16, trust_remote_code=True).cuda()
+        # Set `torch_dtype=torch.float16` to load model in float16, otherwise
+        # it will be loaded as float32 and might cause OOM Error.
+        model = AutoModelForCausalLM.from_pretrained(
+            model_name, torch_dtype=torch.float16, trust_remote_code=True
+        ).cuda()
         model = model.eval()
         for prompt in prompts:
             response, history = model.chat(tokenizer, prompt, history=[])
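
The chat test above calls model.chat once per prompt with an empty history. As a hedged sketch (based only on the call signature used in the test and the usual InternLM README-style usage), the history returned by one call can be fed back into the next for multi-turn chat; the prompts are illustrative:

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "internlm/internlm2-chat-7b"
tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
# Load in float16 to avoid the OOM risk noted in the test comment.
model = AutoModelForCausalLM.from_pretrained(
    model_name, torch_dtype=torch.float16, trust_remote_code=True
).cuda()
model = model.eval()

# The first turn starts from an empty history; later turns reuse the returned one.
response, history = model.chat(tokenizer, "你好", history=[])
print(response)
response, history = model.chat(tokenizer, "what's your name", history=history)
print(response)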
@@ -39,19 +46,35 @@ class TestChat:
 
 class TestBase:
-    @pytest.mark.parametrize("model_name", [
-        "internlm/internlm2-7b",
-        "internlm/internlm2-base-7b",
-    ])
+    """
+    Test cases for base model.
+    """
+
+    @pytest.mark.parametrize(
+        "model_name",
+        [
+            "internlm/internlm2-7b",
+            "internlm/internlm2-base-7b",
+        ],
+    )
     def test_demo_default(self, model_name):
         tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
-        # Set `torch_dtype=torch.float16` to load model in float16, otherwise it will be loaded as float32 and might cause OOM Error.
-        model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.float16, trust_remote_code=True).cuda()
+        # Set `torch_dtype=torch.float16` to load model in float16, otherwise
+        # it will be loaded as float32 and might cause OOM Error.
+        model = AutoModelForCausalLM.from_pretrained(
+            model_name, torch_dtype=torch.float16, trust_remote_code=True
+        ).cuda()
         for prompt in prompts:
             inputs = tokenizer(prompt, return_tensors="pt")
-            for k,v in inputs.items():
+            for k, v in inputs.items():
                 inputs[k] = v.cuda()
-            gen_kwargs = {"max_length": 16280, "top_p": 10, "temperature": 1.0, "do_sample": True, "repetition_penalty": 1.0}
+            gen_kwargs = {
+                "max_length": 16280,
+                "top_p": 10,
+                "temperature": 1.0,
+                "do_sample": True,
+                "repetition_penalty": 1.0,
+            }
             output = model.generate(**inputs, **gen_kwargs)
             output = tokenizer.decode(output[0].tolist(), skip_special_tokens=True)
             print(output)
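
For local debugging outside the Slurm job, the same test module can be invoked through pytest's Python entry point. A minimal sketch, assuming the repository root as the working directory and a GPU environment comparable to the CI one, mirroring the flags from the srun command:

import sys

import pytest

if __name__ == "__main__":
    # Same flags as the CI invocation: no output capture, verbose, colored output.
    sys.exit(pytest.main(["-s", "-v", "--color=yes", "./tests/test_hf_model.py"]))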