"""Parity check between two Stable Diffusion UNet implementations.

Builds (a) the latent-diffusion ``UNetModel`` from a ColossalAI training
YAML config and (b) the ``diffusers`` UNet from a local SD v1.4 checkpoint,
feeds the same random batch through both, and prints the two output shapes
so they can be compared by eye.
"""

import torch
import yaml

from diffusers import StableDiffusionPipeline

from ldm.modules.diffusionmodules.openaimodel import UNetModel

if __name__ == "__main__":
    # Inference-only shape comparison: no gradients needed.
    with torch.no_grad():
        yaml_path = "../../train_colossalai.yaml"
        with open(yaml_path, "r", encoding="utf-8") as f:
            config = f.read()
        # NOTE(review): FullLoader permits more tags than safe_load; fine for
        # a project-local config file, but do not point this at untrusted YAML.
        base_config = yaml.load(config, Loader=yaml.FullLoader)
        unet_config = base_config["model"]["params"]["unet_config"]

        # Model A: latent-diffusion UNet constructed from the YAML config.
        diffusion_model = UNetModel(**unet_config).to("cuda:0")

        # Model B: diffusers UNet taken from a locally stored SD v1.4 pipeline.
        pipe = StableDiffusionPipeline.from_pretrained("/data/scratch/diffuser/stable-diffusion-v1-4").to("cuda:0")
        dif_model_2 = pipe.unet

        # Identical inputs for both models: a (4, 4, 32, 32) latent batch,
        # 4 random timesteps in [0, 20), and a (4, 77, 768) context tensor
        # (presumably CLIP text embeddings -- shape matches, not verified here).
        random_input_ = torch.rand((4, 4, 32, 32)).to("cuda:0")
        random_input_2 = torch.clone(random_input_).to("cuda:0")

        time_stamp = torch.randint(20, (4,)).to("cuda:0")
        time_stamp2 = torch.clone(time_stamp).to("cuda:0")

        context_ = torch.rand((4, 77, 768)).to("cuda:0")
        context_2 = torch.clone(context_).to("cuda:0")

        out_1 = diffusion_model(random_input_, time_stamp, context_)
        out_2 = dif_model_2(random_input_2, time_stamp2, context_2)

        print(out_1.shape)
        # The diffusers UNet returns an output object; the tensor is under
        # the "sample" key.
        print(out_2["sample"].shape)