mirror of https://github.com/hpcaitech/ColossalAI
[pre-commit.ci] auto fixes from pre-commit.com hooks
for more information, see https://pre-commit.ci

branch: pre-commit-ci-update-config
parent ed0abc53d6
commit f360deb6d0
@@ -81,11 +81,11 @@ with gr.Blocks(css=CSS) as demo:
     )
     with gr.Row():
         btn = gr.UploadButton("📁", file_types=["file"], file_count="multiple", size="sm")
-        restart_btn = gr.Button(str("\u21BB"), elem_id="restart-btn", scale=1)
+        restart_btn = gr.Button(str("\u21bb"), elem_id="restart-btn", scale=1)
         txt = gr.Textbox(
             scale=8,
             show_label=False,
-            placeholder="Enter text and press enter, or use 📁 to upload files, click \u21BB to clear loaded files and restart chat",
+            placeholder="Enter text and press enter, or use 📁 to upload files, click \u21bb to clear loaded files and restart chat",
             container=True,
             autofocus=True,
         )
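Note: the \u21BB -> \u21bb rewrite above only changes the case of the escape, not the rendered label. A quick check, not part of the commit:

# Sketch only: Python resolves both escapes to the same code point at parse time,
# so the restart button label is unchanged.
assert "\u21BB" == "\u21bb" == "↻"  # U+21BB, CLOCKWISE OPEN CIRCLE ARROW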
@@ -1,6 +1,6 @@
 """This code is adapted from Alpa
-https://github.com/alpa-projects/alpa/
-with some changes. """
+https://github.com/alpa-projects/alpa/
+with some changes."""
 
 import multiprocessing
 import time
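Note: this and the following docstring hunks differ only by a space just inside the triple quotes. A small illustration, not taken from the repo:

# Sketch only: the hook strips the stray space before the closing quotes;
# apart from that whitespace the docstring text is unchanged.
old_doc = """This code is adapted from Alpa
https://github.com/alpa-projects/alpa/
with some changes. """
new_doc = """This code is adapted from Alpa
https://github.com/alpa-projects/alpa/
with some changes."""
assert old_doc.rstrip() == new_doc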
@@ -1,6 +1,6 @@
 """This code is adapted from Alpa
-https://github.com/alpa-projects/alpa/
-with some changes. """
+https://github.com/alpa-projects/alpa/
+with some changes."""
 
 import operator
 from dataclasses import dataclass
@@ -17,7 +17,7 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-""" PyTorch OpenMoE model."""
+"""PyTorch OpenMoE model."""
 import math
 from typing import List, Optional, Tuple, Union
 
@@ -1,6 +1,6 @@
 """This code is from NVIDIA apex:
-https://github.com/NVIDIA/apex
-with some changes. """
+https://github.com/NVIDIA/apex
+with some changes."""
 
 import numbers
 
@@ -1,4 +1,4 @@
-""" adapted from https://github.com/jiaweizzhao/GaLore/blob/master/galore_torch/adamw8bit.py"""
+"""adapted from https://github.com/jiaweizzhao/GaLore/blob/master/galore_torch/adamw8bit.py"""
 
 import warnings
 from collections import defaultdict
@@ -1,4 +1,4 @@
-""" adapted from https://github.com/jiaweizzhao/GaLore/blob/master/galore_torch/adamw8bit.py"""
+"""adapted from https://github.com/jiaweizzhao/GaLore/blob/master/galore_torch/adamw8bit.py"""
 
 import warnings
 from typing import List
@@ -1,4 +1,4 @@
-""" PyTorch ChatGLM model. """
+"""PyTorch ChatGLM model."""
 
 from typing import List, Optional, Tuple
 
@@ -34,8 +34,8 @@ class PreTrainingDataset:
         self.do_whole_word_mask = do_whole_word_mask
         self.max_predictions_per_seq = max_predictions_per_seq
         self.vocab_words = list(tokenizer.vocab.keys())
-        self.rec = re.compile("[\u4E00-\u9FA5]")
-        self.whole_rec = re.compile("##[\u4E00-\u9FA5]")
+        self.rec = re.compile("[\u4e00-\u9fa5]")
+        self.whole_rec = re.compile("##[\u4e00-\u9fa5]")
 
         self.mlm_p = 0.15
         self.mlm_mask_p = 0.8
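Note: both spellings of the range above compile to the same pattern over the CJK Unified Ideographs block (U+4E00-U+9FA5), which this dataset class appears to use to spot Chinese characters for whole-word masking. A quick check, not from the repo:

import re

# Sketch only: the escapes resolve to identical characters, so the old and new
# patterns are literally the same string.
rec_old = re.compile("[\u4E00-\u9FA5]")
rec_new = re.compile("[\u4e00-\u9fa5]")
assert rec_old.pattern == rec_new.pattern

# The range covers CJK Unified Ideographs, e.g.:
assert rec_new.match("中") is not None  # Chinese character -> match
assert rec_new.match("a") is None       # ASCII letter -> no match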
@@ -12,7 +12,7 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-""" PyTorch DeBERTa-v2 model."""
+"""PyTorch DeBERTa-v2 model."""
 
 import math
 from collections.abc import Sequence