mirror of https://github.com/hpcaitech/ColossalAI
[shardformer] fix emerged bugs after updating transformers (#4526)
parent c554b7f559 · commit 0387a47e63
@@ -123,6 +123,9 @@ def merge_batch(data: List[Any]) -> Any:
     merged_data = []
     for elem_batch in zip(*flattened_data):
         if isinstance(elem_batch[0], torch.Tensor):
-            merged_data.append(torch.cat(elem_batch, dim=0))
+            if len(elem_batch[0].shape) == 0:  # set loss to None in pipeline outputs
+                merged_data.append(None)
+            else:
+                merged_data.append(torch.cat(elem_batch, dim=0))
         else:
             merged_data.append(list(elem_batch))
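The hunk above guards `torch.cat` against 0-dim tensors: a per-microbatch loss is a scalar and cannot be concatenated, so it is replaced by None in the merged pipeline outputs. A minimal sketch of this merge behavior, assuming `flattened_data` is a list of per-microbatch output lists (the function name and test values here are illustrative, not the repository's own):

import torch
from typing import Any, List

def merge_batch_sketch(flattened_data: List[List[Any]]) -> List[Any]:
    merged_data = []
    for elem_batch in zip(*flattened_data):
        if isinstance(elem_batch[0], torch.Tensor):
            if len(elem_batch[0].shape) == 0:  # scalar tensor (e.g. a loss): cannot be cat'ed
                merged_data.append(None)
            else:
                merged_data.append(torch.cat(elem_batch, dim=0))
        else:
            merged_data.append(list(elem_batch))
    return merged_data

# Two microbatches, each producing (hidden_states, loss): the 3 x 4 tensors
# are concatenated along dim 0, while the 0-dim losses become None.
mb1 = [torch.randn(3, 4), torch.tensor(0.5)]
mb2 = [torch.randn(3, 4), torch.tensor(0.7)]
print(merge_batch_sketch([mb1, mb2]))  # [tensor of shape (6, 4), None]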
@@ -195,7 +195,11 @@ def check_output_hidden_state(org_output: Tensor,
         sharded_hidden_state = sharded_output.last_hidden_state

     if stage_manager and stage_manager.is_last_stage():
-        sharded_hidden_state = torch.cat([output.last_hidden_state for output in sharded_output['outputs']], dim=dim)
+        pipeline_output = sharded_output['outputs']
+        if isinstance(pipeline_output, List):
+            sharded_hidden_state = torch.cat([output.last_hidden_state for output in pipeline_output], dim=dim)
+        else:
+            sharded_hidden_state = pipeline_output.last_hidden_state

     assert torch.allclose(org_hidden_state.float(), sharded_hidden_state.float(), atol=atol, rtol=rtol), \
         f"shard model's output hidden state is not equal to origin model's last hidden state\n{org_hidden_state}\n{sharded_hidden_state}"
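This hunk stops assuming `sharded_output['outputs']` is always a list: after the transformers update it may instead be a single model-output object, so the test now branches on the type before gathering the last hidden state. A minimal sketch of that branching, with an illustrative stand-in dataclass for the transformers ModelOutput:

import torch
from dataclasses import dataclass
from typing import List, Union

@dataclass
class DummyModelOutput:  # stand-in for a transformers ModelOutput
    last_hidden_state: torch.Tensor

def gather_last_hidden_state(outputs: Union[List[DummyModelOutput], DummyModelOutput],
                             dim: int = 0) -> torch.Tensor:
    if isinstance(outputs, list):
        # One output per microbatch: concatenate along the batch dimension.
        return torch.cat([o.last_hidden_state for o in outputs], dim=dim)
    # Already a single merged output object.
    return outputs.last_hidden_state

per_microbatch = [DummyModelOutput(torch.randn(2, 8)), DummyModelOutput(torch.randn(2, 8))]
print(gather_last_hidden_state(per_microbatch).shape)     # torch.Size([4, 8])
print(gather_last_hidden_state(per_microbatch[0]).shape)  # torch.Size([2, 8])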