mirror of https://github.com/hpcaitech/ColossalAI
[example] fix llama example's loss error when using gemini plugin (#5060)
fix llama example
pull/5069/head
parent 3c08f17348
commit bc09b95f50
@@ -58,6 +58,7 @@ def tokenize_batch_for_finetune(batch, tokenizer: Optional[LlamaTokenizer] = Non

 def all_reduce_mean(tensor: torch.Tensor) -> torch.Tensor:
     dist.all_reduce(tensor, op=dist.ReduceOp.SUM)
+    tensor = tensor.data
     tensor.div_(dist.get_world_size())
     return tensor

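For illustration, below is a minimal, self-contained sketch of the patched helper. The single-rank gloo process group, the environment defaults, and the example loss value are assumptions made for the demo only; in the actual llama example this helper runs under a multi-rank launch with the booster plugin.

import os

import torch
import torch.distributed as dist


def all_reduce_mean(tensor: torch.Tensor) -> torch.Tensor:
    # Sum the tensor across all ranks in place, then divide by the
    # world size to obtain the mean.
    dist.all_reduce(tensor, op=dist.ReduceOp.SUM)
    # Rebind to the raw data tensor before the in-place division.
    # This is the line the commit adds, presumably so that div_()
    # does not operate on the autograd-tracked / plugin-wrapped
    # tensor, which the commit message links to the wrong reported
    # loss under the gemini plugin.
    tensor = tensor.data
    tensor.div_(dist.get_world_size())
    return tensor


if __name__ == "__main__":
    # Single-process demo group (gloo backend); assumed setup,
    # not part of the commit.
    os.environ.setdefault("MASTER_ADDR", "127.0.0.1")
    os.environ.setdefault("MASTER_PORT", "29500")
    dist.init_process_group("gloo", rank=0, world_size=1)

    loss = torch.tensor(2.0)
    print(all_reduce_mean(loss))  # tensor(2.) since world_size == 1
    dist.destroy_process_group()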