diff --git a/.github/workflows/daily_tests.yaml b/.github/workflows/daily_tests.yaml
new file mode 100644
index 0000000..2bc64dd
--- /dev/null
+++ b/.github/workflows/daily_tests.yaml
@@ -0,0 +1,61 @@
+name: basic-model-tests-daily
+on:
+  workflow_dispatch:
+  schedule:
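+    # run once a day at 19:48 UTC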
+    - cron:  '48 19 * * *'
+env:
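+  # first four path components of $GITHUB_WORKSPACE, masked in the job logs below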
+  WORKSPACE_PREFIX: $(echo $GITHUB_WORKSPACE |cut -d '/' -f 1-4)
+  SLURM_PARTITION: llm_s
+  CONDA_BASE_ENV: internlm-model-base
+
+jobs:
+  HF_model:
+    runs-on: [t_cluster]
+    steps:
+    - name: mask env
+      run: |
+        echo "::add-mask::${{env.WORKSPACE_PREFIX}}"
+        echo "::add-mask::$path_prefix"
+    - uses: actions/checkout@v3
+
+    - name: load_hf_model
+      run: |
+        conda create -y -n internlm-model-latest --clone ${CONDA_BASE_ENV}
+        source activate internlm-model-latest
+        # TODO: test other versions of transformers
+        pip install transformers
+        pip install sentencepiece
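+        # run the HuggingFace model tests on the Slurm partition, 2 GPUs per task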
+        srun -p ${SLURM_PARTITION} --kill-on-bad-exit=1 --job-name=${GITHUB_RUN_ID}-${GITHUB_JOB} --gpus-per-task=2 pytest -s -v --color=yes ./tests/test_hf_model.py
+        conda deactivate
+
+  clear_env:
+    if: ${{ !cancelled() }}
+    needs: [HF_model]
+    runs-on: [t_cluster]
+    timeout-minutes: 10
+    steps:
+    - name: mask env
+      run: |
+        echo "::add-mask::${{env.WORKSPACE_PREFIX}}"
+        echo "::add-mask::$path_prefix"
+
+    - name: remove_env
+      run: |
+        conda env remove --name internlm-model-latest
+
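+  # send a Feishu alert when an upstream job failed on the main or develop branch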
+  notify_to_feishu:
+    if: ${{ always() && !cancelled() && contains(needs.*.result, 'failure') && (github.ref_name == 'develop' || github.ref_name == 'main') }}
+    needs: [HF_model, clear_env]
+    runs-on: [t_cluster]
+    steps:
+    - name: mask env
+      run: |
+        echo "::add-mask::${{env.WORKSPACE_PREFIX}}"
+        echo "::add-mask::$path_prefix"
+    - name: notify
+      run: |
+        curl -X POST -H "Content-Type: application/json" -d '{"msg_type":"post","content":{"post":{"zh_cn":{"title":"InternLM GitHub Action Failed","content":[[{"tag":"text","text":""},{"tag":"a","text":"Please click here for details ","href":"https://github.com/'${{ github.repository }}'/actions/runs/'${GITHUB_RUN_ID}'"},{"tag":"at","user_id":"'${{ secrets.USER_ID }}'"}]]}}}}' ${{ secrets.WEBHOOK_URL }}
diff --git a/tests/test_hf_model.py b/tests/test_hf_model.py
new file mode 100644
index 0000000..897b205
--- /dev/null
+++ b/tests/test_hf_model.py
@@ -0,0 +1,84 @@
+import pytest
+import torch
+from transformers import AutoModelForCausalLM, AutoTokenizer
+
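+# one Chinese and one English prompt to exercise both input languages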
+prompts = ["你好", "what's your name"]
+
+
+def assert_model(response):
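+    # a valid reply is non-empty and does not leak reserved placeholder tokens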
+    assert len(response) != 0
+    assert "UNUSED_TOKEN" not in response
+
+
+class TestChat:
+    """
+    Test cases for chat model.
+    """
+
+    @pytest.mark.parametrize(
+        "model_name",
+        [
+            "internlm/internlm2-chat-7b",
+            "internlm/internlm2-chat-7b-sft",
+        ],
+    )
+    def test_demo_default(self, model_name):
+        tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
+        # Set `torch_dtype=torch.float16` to load the model in float16; otherwise
+        # it will be loaded as float32 and might cause an OOM error.
+        model = AutoModelForCausalLM.from_pretrained(
+            model_name, torch_dtype=torch.float16, trust_remote_code=True
+        ).cuda()
+        model = model.eval()
+        for prompt in prompts:
+            response, history = model.chat(tokenizer, prompt, history=[])
+            print(response)
+            assert_model(response)
+
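+        # exercise the streaming interface as well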
+        for prompt in prompts:
+            length = 0
+            for response, history in model.stream_chat(tokenizer, prompt, history=[]):
+                print(response[length:], flush=True, end="")
+                length = len(response)
+            assert_model(response)
+
+
+class TestBase:
+    """
+    Test cases for base model.
+    """
+
+    @pytest.mark.parametrize(
+        "model_name",
+        [
+            "internlm/internlm2-7b",
+            "internlm/internlm2-base-7b",
+        ],
+    )
+    def test_demo_default(self, model_name):
+        tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
+        # Set `torch_dtype=torch.float16` to load the model in float16; otherwise
+        # it will be loaded as float32 and might cause an OOM error.
+        model = AutoModelForCausalLM.from_pretrained(
+            model_name, torch_dtype=torch.float16, trust_remote_code=True
+        ).cuda()
+        model = model.eval()
+        for prompt in prompts:
+            inputs = tokenizer(prompt, return_tensors="pt")
+            for k, v in inputs.items():
+                inputs[k] = v.cuda()
+            gen_kwargs = {
+                "max_length": 128,
+                # nucleus sampling requires top_p in (0, 1]
+                "top_p": 0.8,
+                "temperature": 1.0,
+                "do_sample": True,
+                "repetition_penalty": 1.0,
+            }
+            output = model.generate(**inputs, **gen_kwargs)
+            output = tokenizer.decode(output[0].tolist(), skip_special_tokens=True)
+            print(output)
+            assert_model(output)