diff --git a/.github/workflows/daily_tests.yaml b/.github/workflows/daily_tests.yaml
index 8a2d694..839d7b3 100644
--- a/.github/workflows/daily_tests.yaml
+++ b/.github/workflows/daily_tests.yaml
@@ -5,23 +5,41 @@ on:
   schedule:
     - cron: '48 19 * * *'
 
 env:
+  WORKSPACE_PREFIX: $(echo $GITHUB_WORKSPACE |cut -d '/' -f 1-4)
   SLURM_PARTITION: llm_s
+  CONDA_BASE_ENV: python310-torch212-cu117
 
 jobs:
   HF_model:
     runs-on: [t_cluster]
     steps:
+      - name: mask env
+        run: |
+          echo "::add-mask::${{env.WORKSPACE_PREFIX}}"
       - uses: actions/checkout@v3
       - name: load_hf_model
         run: |
-          conda create -n internlm-model-latest --clone internlm-model-base
+          conda create -n internlm-model-latest --clone ${CONDA_BASE_ENV}
           source activate internlm-model-latest
           pip install transformers
           pip install sentencepiece
           srun -p ${SLURM_PARTITION} --kill-on-bad-exit=1 --job-name=${GITHUB_RUN_ID}-${GITHUB_JOB} --gpus-per-task=2 pytest -s -v --color=yes ./tests/test_hf_model.py
-      - name: clear_env
+      - name: clear_env
+        run: |
+          conda deactivate
+          conda env remove --name internlm-model-latest
+
+  notify_to_feishu:
+    if: ${{ always() && !cancelled() && contains(needs.*.result, 'failure') && (github.ref_name == 'develop' || github.ref_name == 'main') }}
+    needs: [HF_model]
+    runs-on: [test_machine]
+    steps:
+      - name: mask env
         run: |
-          conda deactivate
-          conda env remove --name internlm-model-latest
+          echo "::add-mask::${{env.WORKSPACE_PREFIX}}"
+      - name: notify
+        run: |
+          curl -X POST -H "Content-Type: application/json" -d '{"msg_type":"post","content":{"post":{"zh_cn":{"title":"Internlm GitHubAction Failed","content":[[{"tag":"text","text":""},{"tag":"a","text":"Please click here for details ","href":"https://github.com/pjlab-sys4nlp/train_internlm/actions/runs/'${GITHUB_RUN_ID}'"},{"tag":"at","user_id":"'${{ secrets.USER_ID }}'"}]]}}}}' ${{ secrets.WEBHOOK_URL }}
diff --git a/tests/__init__.py b/tests/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/tests/test_hf_model.py b/tests/test_hf_model.py
index 114bf21..0e30463 100644
--- a/tests/test_hf_model.py
+++ b/tests/test_hf_model.py
@@ -1,12 +1,9 @@
 import pytest
 import torch
+from transformers import AutoModelForCausalLM, AutoTokenizer
 
-from transformers import AutoTokenizer, AutoModelForCausalLM
+prompts = ["你好", "what's your name"]
 
-prompts = [
-    "你好",
-    "what's your name"
-]
 
 def assert_model(response):
     assert len(response) != 0
@@ -16,14 +13,24 @@ def assert_model(response):
 
 
 class TestChat:
-    @pytest.mark.parametrize("model_name", [
-        "internlm/internlm2-chat-7b",
-        "internlm/internlm2-chat-7b-sft",
-    ])
+    """
+    Test cases for chat model.
+    """
+
+    @pytest.mark.parametrize(
+        "model_name",
+        [
+            "internlm/internlm2-chat-7b",
+            "internlm/internlm2-chat-7b-sft",
+        ],
+    )
     def test_demo_default(self, model_name):
         tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
-        # Set `torch_dtype=torch.float16` to load model in float16, otherwise it will be loaded as float32 and might cause OOM Error.
-        model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.float16, trust_remote_code=True).cuda()
+        # Set `torch_dtype=torch.float16` to load model in float16, otherwise
+        # it will be loaded as float32 and might cause OOM Error.
+        model = AutoModelForCausalLM.from_pretrained(
+            model_name, torch_dtype=torch.float16, trust_remote_code=True
+        ).cuda()
         model = model.eval()
         for prompt in prompts:
             response, history = model.chat(tokenizer, prompt, history=[])
@@ -39,19 +46,35 @@ class TestChat:
 
 
 class TestBase:
-    @pytest.mark.parametrize("model_name", [
-        "internlm/internlm2-7b",
-        "internlm/internlm2-base-7b",
-    ])
+    """
+    Test cases for base model.
+    """
+
+    @pytest.mark.parametrize(
+        "model_name",
+        [
+            "internlm/internlm2-7b",
+            "internlm/internlm2-base-7b",
+        ],
+    )
     def test_demo_default(self, model_name):
         tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
-        # Set `torch_dtype=torch.float16` to load model in float16, otherwise it will be loaded as float32 and might cause OOM Error.
-        model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.float16, trust_remote_code=True).cuda()
+        # Set `torch_dtype=torch.float16` to load model in float16, otherwise
+        # it will be loaded as float32 and might cause OOM Error.
+        model = AutoModelForCausalLM.from_pretrained(
+            model_name, torch_dtype=torch.float16, trust_remote_code=True
+        ).cuda()
         for prompt in prompts:
             inputs = tokenizer(prompt, return_tensors="pt")
-            for k,v in inputs.items():
+            for k, v in inputs.items():
                 inputs[k] = v.cuda()
-            gen_kwargs = {"max_length": 16280, "top_p": 10, "temperature": 1.0, "do_sample": True, "repetition_penalty": 1.0}
+            gen_kwargs = {
+                "max_length": 16280,
+                "top_p": 10,
+                "temperature": 1.0,
+                "do_sample": True,
+                "repetition_penalty": 1.0,
+            }
             output = model.generate(**inputs, **gen_kwargs)
             output = tokenizer.decode(output[0].tolist(), skip_special_tokens=True)
             print(output)