mirror of https://github.com/InternLM/InternLM
merge upstream/develop into feature_add_moe_data
commit 80d4744c42
@@ -3,7 +3,6 @@
# This file is distributed under the same license as the InternLM package.
# FIRST AUTHOR <EMAIL@ADDRESS>, 2023.
#
#, fuzzy
msgid ""
msgstr ""
"Project-Id-Version: InternLM \n"

@@ -3,7 +3,6 @@
# This file is distributed under the same license as the InternLM package.
# FIRST AUTHOR <EMAIL@ADDRESS>, 2023.
#
#, fuzzy
msgid ""
msgstr ""
"Project-Id-Version: InternLM \n"

@@ -3,7 +3,6 @@
# This file is distributed under the same license as the InternLM package.
# FIRST AUTHOR <EMAIL@ADDRESS>, 2023.
#
#, fuzzy
msgid ""
msgstr ""
"Project-Id-Version: InternLM \n"

@@ -3,7 +3,6 @@
# This file is distributed under the same license as the InternLM package.
# FIRST AUTHOR <EMAIL@ADDRESS>, 2023.
#
#, fuzzy
msgid ""
msgstr ""
"Project-Id-Version: InternLM \n"

@@ -3,7 +3,6 @@
# This file is distributed under the same license as the InternLM package.
# FIRST AUTHOR <EMAIL@ADDRESS>, 2023.
#
#, fuzzy
msgid ""
msgstr ""
"Project-Id-Version: InternLM \n"

@@ -3,7 +3,6 @@
# This file is distributed under the same license as the InternLM package.
# FIRST AUTHOR <EMAIL@ADDRESS>, 2023.
#
#, fuzzy
msgid ""
msgstr ""
"Project-Id-Version: InternLM \n"

@@ -3,12 +3,11 @@
# This file is distributed under the same license as the InternLM package.
# FIRST AUTHOR <EMAIL@ADDRESS>, 2023.
#
#, fuzzy
msgid ""
msgstr ""
"Project-Id-Version: InternLM \n"
"Report-Msgid-Bugs-To: \n"
-"POT-Creation-Date: 2023-09-26 17:04+0800\n"
+"POT-Creation-Date: 2023-09-27 10:59+0800\n"
"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
"Language: en\n"

@@ -83,3 +82,35 @@ msgstr ""
#: ../../source/mixed_precision.rst:16
msgid "例如:"
msgstr "For example:"

#: ../../source/mixed_precision.rst:40
msgid "TF32训练"
msgstr "TF32 Training"

#: ../../source/mixed_precision.rst:41
msgid "TensorFloat-32(TF32)是Nvidia在Ampere架构GPU上推出的专门运用于TensorCore的一种计算格式。其与其他常用数据格式的比较如下图:"
msgstr "TensorFloat-32 (TF32) is a computation format introduced by Nvidia on Ampere architecture GPUs specifically for TensorCore. A comparison with other common data formats is shown in the figure below:"

#: ../../source/mixed_precision.rst:47
msgid "使用TF32的前置条件:"
msgstr "Prerequisites for using TF32:"

#: ../../source/mixed_precision.rst:49
msgid "输入数据类型为FP32,且计算为矩阵乘法及卷积相关运算,才可以使用TF32作为TensorCore的中间计算类型。"
msgstr "TF32 can be used as the intermediate TensorCore computation type only when the input data type is FP32 and the computation is matrix multiplication, convolution, or a related operation."

#: ../../source/mixed_precision.rst:51
msgid "Ampere架构的GPU。"
msgstr "An Ampere architecture GPU."

#: ../../source/mixed_precision.rst:53
msgid "InternLM支持使用TF32训练模型,允许用户在config文件中将 ``dtype`` 设置为 ``torch.tf32``。"
msgstr "InternLM supports training models in TF32, allowing users to set ``dtype`` to ``torch.tf32`` in the config file."

#: ../../source/mixed_precision.rst:75
msgid ""
"值得注意的是,TF32仅仅是在使用TensorCore时的一种中间计算格式,并不是一个完全的数据类型。因此,在InternLM中,尽管用户将 "
"``dtype`` 设置成了 ``torch.tf32``,模型的数据类型依旧是 ``torch.float32``。InternLM会针对 "
"``dtype`` 为 ``torch.tf32`` 的情况,设置以下变量来开启TF32训练。"
msgstr "Note that TF32 is only an intermediate computation format used with TensorCore, not a full data type. Therefore, in InternLM, even if the user sets ``dtype`` to ``torch.tf32``, the model's data type remains ``torch.float32``. When ``dtype`` is ``torch.tf32``, InternLM sets the following variables to enable TF32 training."

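The two prerequisites above (FP32 inputs feeding matmul/convolution kernels, and an Ampere-class GPU) can be checked programmatically. The snippet below is an illustrative sketch added for this doc, not InternLM code; it assumes a CUDA build of PyTorch and the convention that Ampere and newer GPUs report compute capability 8.0 or higher.

.. code-block:: python

    import torch

    # Ampere (A100, RTX 30xx) and newer report compute capability major >= 8,
    # which is what the TF32 TensorCore path requires.
    if torch.cuda.is_available():
        major, minor = torch.cuda.get_device_capability()
        print(f"compute capability {major}.{minor}, TF32-capable: {major >= 8}")
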
@@ -3,7 +3,6 @@
# This file is distributed under the same license as the InternLM package.
# FIRST AUTHOR <EMAIL@ADDRESS>, 2023.
#
#, fuzzy
msgid ""
msgstr ""
"Project-Id-Version: InternLM \n"

@@ -272,7 +271,6 @@ msgid "A dictionary containing message data to be included in the heartbeat."
msgstr ""

#: internlm.monitor.alert.send_heartbeat:10 of
#, fuzzy
msgid ""
"Sending a heartbeat message for training metrics "
"``send_heartbeat(\"train_metrics\", {\"loss\": 0.1, \"accuracy\": "

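For context, the docstring fragment above shows ``send_heartbeat`` being called with a message type and a metrics dict. A hedged sketch of that calling pattern follows; the body is purely illustrative (InternLM's real implementation forwards the message to its monitoring backend), and the ``accuracy`` value completing the truncated example is a made-up placeholder.

.. code-block:: python

    import json
    import time

    def send_heartbeat(msg_type: str, msg: dict) -> None:
        """Illustrative stand-in: package a heartbeat and hand it to a transport."""
        payload = {"type": msg_type, "timestamp": time.time(), "data": msg}
        print(json.dumps(payload))  # a real sender would post this to the monitor

    # Calling pattern from the docstring fragment; 0.9 is a placeholder value.
    send_heartbeat("train_metrics", {"loss": 0.1, "accuracy": 0.9})
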
@@ -3,7 +3,6 @@
# This file is distributed under the same license as the InternLM package.
# FIRST AUTHOR <EMAIL@ADDRESS>, 2023.
#
#, fuzzy
msgid ""
msgstr ""
"Project-Id-Version: InternLM \n"

@@ -3,7 +3,6 @@
# This file is distributed under the same license as the InternLM package.
# FIRST AUTHOR <EMAIL@ADDRESS>, 2023.
#
#, fuzzy
msgid ""
msgstr ""
"Project-Id-Version: InternLM \n"

@@ -3,7 +3,6 @@
# This file is distributed under the same license as the InternLM package.
# FIRST AUTHOR <EMAIL@ADDRESS>, 2023.
#
#, fuzzy
msgid ""
msgstr ""
"Project-Id-Version: InternLM \n"

@@ -3,7 +3,6 @@
# This file is distributed under the same license as the InternLM package.
# FIRST AUTHOR <EMAIL@ADDRESS>, 2023.
#
#, fuzzy
msgid ""
msgstr ""
"Project-Id-Version: InternLM \n"

@@ -3,12 +3,11 @@
# This file is distributed under the same license as the InternLM package.
# FIRST AUTHOR <EMAIL@ADDRESS>, 2023.
#
#, fuzzy
msgid ""
msgstr ""
"Project-Id-Version: InternLM \n"
"Report-Msgid-Bugs-To: \n"
-"POT-Creation-Date: 2023-09-11 14:25+0800\n"
+"POT-Creation-Date: 2023-09-27 11:14+0800\n"
"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
"Language: en\n"

@@ -175,7 +174,6 @@ msgid "训练配置"
msgstr "Training Configuration"

#: ../../../usage.md:70
#, fuzzy
msgid "以 7B Demo 的配置文件`configs/7B_sft.py`为例:"
msgstr ""
"Taking the configuration file `configs/7B_sft.py` for the 7B demo as an "

@@ -360,6 +358,29 @@ msgstr ""
"Taking the configuration of the demo training on a single machine with 8 "
"GPUs on slurm as an example, the training result log is shown below:"

#: ../../../usage.md:373
msgid "长文本生成"
msgstr "Long Text Generation"

#: ../../../usage.md:375
msgid ""
"在推理阶段,您可以在模型配置中通过设置 `use_dynamic_ntk_rope=True` 开启 RoPE 的 Dynamic NTK "
"选项,从而使得模型适应长文本输入输出,达到 16K 的外推效果:"
msgstr ""
"During the inference phase, you can enable the Dynamic NTK option of RoPE "
"by setting `use_dynamic_ntk_rope=True` in the model configuration, so that "
"the model can adapt to long text input and output and achieve 16K "
"extrapolation:"

#: ../../../usage.md:401
msgid "关于 Dyanmic NTK 的原理,详细请参考"
msgstr "For the principle of Dynamic NTK, please refer to"

#: ../../../usage.md:403
msgid "https://www.reddit.com/r/LocalLLaMA/comments/14mrgpr/dynamically_scaled_rope_further_increases"
msgstr ""

#: ../../../usage.md:404
msgid "https://kexue.fm/archives/9675"
msgstr ""

#~ msgid "`load_model_only_folder`与`load_ckpt_folder`不能同时设置"
#~ msgstr ""
#~ "`load_model_only_folder` and `load_ckpt_folder` "

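To make the Dynamic NTK entry above concrete: the linked posts derive a rescaling of the RoPE base as the sequence grows past the training length. The sketch below is one common formulation of that idea, assuming a head dimension ``dim`` and training length ``max_train_len``; it is not necessarily the exact code behind `use_dynamic_ntk_rope=True` in InternLM.

.. code-block:: python

    import torch

    def dynamic_ntk_inv_freq(dim: int, seq_len: int, max_train_len: int = 2048,
                             base: float = 10000.0) -> torch.Tensor:
        """Rescale the RoPE base once the sequence exceeds the training length."""
        if seq_len > max_train_len:
            # NTK-aware scaling: raising the base by alpha**(dim/(dim-2))
            # stretches low-frequency components to cover the longer context.
            alpha = seq_len / max_train_len
            base = base * alpha ** (dim / (dim - 2))
        return 1.0 / (base ** (torch.arange(0, dim, 2).float() / dim))

    inv_freq = dynamic_ntk_inv_freq(dim=128, seq_len=16384)  # 16K extrapolation
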
@@ -34,3 +34,48 @@ InternLM默认将模型转换为16位浮点数类型进行训练(在配置文
        dtype=torch.bfloat16(),
        sync_buffer=False,
    )


TF32训练
-----------------

TensorFloat-32(TF32)是Nvidia在Ampere架构GPU上推出的专门运用于TensorCore的一种计算格式。其与其他常用数据格式的比较如下图:

.. figure:: ../../imgs/tf32.png
   :scale: 50%
   :class: with-border

使用TF32的前置条件:

1. 输入数据类型为FP32,且计算为矩阵乘法及卷积相关运算,才可以使用TF32作为TensorCore的中间计算类型。

2. Ampere架构的GPU。

InternLM支持使用TF32训练模型,允许用户在config文件中将 ``dtype`` 设置为 ``torch.tf32``。

.. code-block:: python

    model = dict(
        checkpoint=False,  # The proportion of layers for activation checkpointing; the optional values are True/False/[0-1]
        num_attention_heads=NUM_ATTENTION_HEAD,
        embed_split_hidden=True,
        vocab_size=VOCAB_SIZE,
        embed_grad_scale=1,
        parallel_output=True,
        hidden_size=HIDDEN_SIZE,
        num_layers=NUM_LAYER,
        mlp_ratio=MLP_RATIO,
        apply_post_layer_norm=False,
        dtype="torch.tf32",  # Support: "torch.float16", "torch.half", "torch.bfloat16", "torch.float32", "torch.tf32"
        norm_type="rmsnorm",
        layer_norm_epsilon=1e-5,
        use_flash_attn=True,
        num_chunks=1,  # if num_chunks > 1, interleaved pipeline scheduler is used.
    )

值得注意的是,TF32仅仅是在使用TensorCore时的一种中间计算格式,并不是一个完全的数据类型。因此,在InternLM中,尽管用户将 ``dtype`` 设置成了 ``torch.tf32``,模型的数据类型依旧是 ``torch.float32``。InternLM会针对 ``dtype`` 为 ``torch.tf32`` 的情况,设置以下变量来开启TF32训练。

.. code-block:: python

    torch.backends.cudnn.allow_tf32 = True
    torch.backends.cuda.matmul.allow_tf32 = True
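As a quick sanity check (an illustrative sketch, not part of the InternLM sources): once the two flags above are set, FP32 matmuls are computed with TF32 internally on Ampere or newer GPUs, while tensor dtypes remain ``torch.float32``.

.. code-block:: python

    import torch

    torch.backends.cudnn.allow_tf32 = True
    torch.backends.cuda.matmul.allow_tf32 = True

    if torch.cuda.is_available():
        a = torch.randn(1024, 1024, device="cuda")  # dtype stays torch.float32
        b = torch.randn(1024, 1024, device="cuda")
        c = a @ b   # dispatched to TF32 TensorCore kernels internally
        print(c.dtype)  # torch.float32 -- TF32 is a compute format, not a dtype
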
Binary file not shown. (Before: 208 KiB; After: 213 KiB)
Binary file not shown. (After: 47 KiB)