mirror of https://github.com/hpcaitech/ColossalAI
[git] fixed rebased files
parent 1513f20f4d
commit 1ded7e81ef
@@ -227,4 +227,4 @@ class RequestHandler:
         self.done_list.extend(finish_seqs)
 
-        return finish_seqs
+        return finish_seqs
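The hunk above only touches the tail of a RequestHandler method that records finished sequences. For orientation, here is a minimal sketch of that bookkeeping pattern; the Sequence class, the update() name and the running_list are assumptions not shown in the diff, only done_list and finish_seqs come from the source:

from dataclasses import dataclass, field
from typing import List


@dataclass
class Sequence:
    # Hypothetical stand-in for the engine's sequence object.
    request_id: int
    output_len: int = 0
    max_output_len: int = 16

    def check_finish(self) -> bool:
        # A sequence is done once its generation budget is used up.
        return self.output_len >= self.max_output_len


@dataclass
class RequestHandler:
    # Sketch of the state implied by the diff: sequences still running vs. done.
    running_list: List[Sequence] = field(default_factory=list)
    done_list: List[Sequence] = field(default_factory=list)

    def update(self) -> List[Sequence]:
        # Split off the sequences that finished this step, archive them in
        # done_list, and return them to the caller (mirrors the diff's tail).
        finish_seqs = [seq for seq in self.running_list if seq.check_finish()]
        self.running_list = [seq for seq in self.running_list if not seq.check_finish()]

        self.done_list.extend(finish_seqs)

        return finish_seqs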
@@ -58,9 +58,6 @@ def convert_kvcache(cache, lengths, block_tables, pad_id=0):
     seq_len = max(lengths)
     padded_cache = []
     for i in range(bsz):
-        cache1 = cache[block_tables[i][: needed_blocks[i] - 1]].permute((0, 3, 1, 2)).reshape(-1, num_heads, head_size)
-        cache2 = cache[block_tables[i][needed_blocks[i] - 1], :, :, : num_remaing_tokens[i]].permute(2, 0, 1)
-
         _cache = torch.cat(
             (
                 cache[block_tables[i][: needed_blocks[i] - 1]].permute((0, 3, 1, 2)).reshape(-1, num_heads, head_size),
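The convert_kvcache hunk drops the cache1/cache2 temporaries and appears to build the same tensors inline inside torch.cat. The indexing in the diff implies a paged cache of shape (num_blocks, num_heads, head_size, block_size) that is gathered per sequence through block_tables and right-padded to the longest sequence in the batch. The sketch below reconstructs that conversion under those assumed shapes; needed_blocks, the padding step and the final stacking are not visible in the hunk, so their exact form here is a guess:

import torch


def convert_kvcache(cache, lengths, block_tables, pad_id=0):
    # Assumed layouts (only partly confirmed by the diff):
    #   cache:        (num_blocks, num_heads, head_size, block_size)
    #   lengths:      (bsz,) integer tensor, cached tokens per sequence
    #   block_tables: (bsz, max_blocks_per_seq) physical block ids
    # Returns a dense (bsz, max_seq_len, num_heads, head_size) tensor.
    num_blocks, num_heads, head_size, block_size = cache.shape
    bsz = block_tables.size(0)

    needed_blocks = (lengths + block_size - 1) // block_size           # ceil(lengths / block_size)
    num_remaining_tokens = lengths - (needed_blocks - 1) * block_size  # tokens in the last block
    seq_len = int(max(lengths))

    padded_cache = []
    for i in range(bsz):
        # Full blocks: (n_full, heads, head_size, block) -> (n_full * block, heads, head_size).
        full_part = cache[block_tables[i][: needed_blocks[i] - 1]].permute((0, 3, 1, 2)).reshape(-1, num_heads, head_size)
        # Last block: keep only the slots that actually hold tokens.
        tail_part = cache[block_tables[i][needed_blocks[i] - 1], :, :, : num_remaining_tokens[i]].permute(2, 0, 1)

        _cache = torch.cat((full_part, tail_part), dim=0)
        # Right-pad every sequence to the longest length in the batch.
        padding = _cache.new_full((seq_len - _cache.size(0), num_heads, head_size), pad_id)
        padded_cache.append(torch.cat((_cache, padding), dim=0))

    return torch.stack(padded_cache, dim=0)

The source spells one helper num_remaing_tokens; the sketch uses num_remaining_tokens only for readability and otherwise keeps the gather expressions from the diff verbatim.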
@@ -317,4 +314,4 @@ class PagedAttention:
         ):
             return self.pad_decoding_forward(
                 q.unsqueeze(1), k.unsqueeze(1), v.unsqueeze(1), k_cache, v_cache, lengths, block_tables
-            )
+            )
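This call site is the single-token decode path: during generation q, k and v arrive as one token per sequence, so they are unsqueezed to a length-1 sequence dimension before being handed to pad_decoding_forward together with the caches, lengths and block_tables. The body of pad_decoding_forward is not part of the diff; the standalone padded_decode_attention below (hypothetical name, reduced signature) sketches one plausible implementation over already-dense caches such as those produced by convert_kvcache:

import torch


def padded_decode_attention(q, k_cache_padded, v_cache_padded, lengths):
    # Assumed shapes:
    #   q:                              (bsz, 1, num_heads, head_size)  one new token per sequence
    #   k_cache_padded, v_cache_padded: (bsz, max_seq_len, num_heads, head_size)
    #   lengths:                        (bsz,) valid tokens per sequence
    # Returns (bsz, 1, num_heads, head_size).
    bsz, max_seq_len, num_heads, head_size = k_cache_padded.shape

    # (bsz, heads, 1, head_size) @ (bsz, heads, head_size, max_seq_len) -> (bsz, heads, 1, max_seq_len)
    scores = torch.matmul(q.transpose(1, 2), k_cache_padded.permute(0, 2, 3, 1))
    scores = scores / (head_size ** 0.5)

    # Mask out the right-padding beyond each sequence's true length.
    pad_mask = torch.arange(max_seq_len, device=q.device)[None, :] >= lengths[:, None]
    scores = scores.masked_fill(pad_mask[:, None, None, :], float("-inf"))

    attn = torch.softmax(scores, dim=-1)
    out = torch.matmul(attn, v_cache_padded.transpose(1, 2))  # (bsz, heads, 1, head_size)
    return out.transpose(1, 2)                                # (bsz, 1, heads, head_size)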
@@ -81,4 +81,4 @@ def test_inference_engine():
 
 
 if __name__ == "__main__":
-    test_inference_engine()
+    test_inference_engine()