pixel = images.movedim(-1, 1)  # [B, H, W, C] => [B, C, H, W]

# Decoder requires dimensions to be 64-aligned.
B, C, H, W = pixel.shape
assert H % 64 == 0, f"Height({H}) is not multiple of 64."
assert W % 64 == 0, f"Width({W}) is not multiple of 64."

decoded = []
for start_idx in range(0, samples["samples"].shape[0], sub_batch_size):
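    # Likely continuation (sketch, not the verbatim source): decode each
    # sub-batch through the transparent VAE decoder and collect the results.
    # `vae_transparent_decoder` and `sd_version` are assumptions; they do not
    # appear in this excerpt. decode_pixel is the method defined further below.
    decoded.append(
        self.vae_transparent_decoder[sd_version].decode_pixel(
            pixel[start_idx : start_idx + sub_batch_size],
            samples["samples"][start_idx : start_idx + sub_batch_size],
        )
    )
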
    config: str,
    weight: float,
):
    # Pick the registered model entry whose config string matches the request.
    ld_model = [m for m in self.MODELS if m.config_string == config][0]
    assert get_model_sd_version(model) == ld_model.sd_version
    if ld_model.attn_sharing:
        return ld_model.apply_layered_diffusion_attn_sharing(model)
    else:
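        # Likely continuation (sketch, not the verbatim source): without
        # attention sharing, fall back to the plain weighted patch used by the
        # other variants in this file.
        return ld_model.apply_layered_diffusion(model, weight)
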
    bg_cond: Optional[List[List[torch.TensorType]]] = None,
    blended_cond: Optional[List[List[torch.TensorType]]] = None,
):
    ld_model = [m for m in self.MODELS if m.config_string == config][0]
    assert get_model_sd_version(model) == ld_model.sd_version
    assert ld_model.attn_sharing
    work_model = ld_model.apply_layered_diffusion_attn_sharing(model)[0]
    work_model.model_options.setdefault("transformer_options", {})
    work_model.model_options["transformer_options"]["cond_overwrite"] = [
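        # Likely continuation (sketch): one raw conditioning tensor per branch,
        # None where that branch has no prompt; c[0][0] is the tensor in
        # ComfyUI's conditioning format. `fg_cond` is hypothetical; only
        # bg_cond and blended_cond are visible in this excerpt.
        c[0][0] if c is not None else None
        for c in (fg_cond, bg_cond, blended_cond)
    ]
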
    config: str,
    weight: float,
):
    ld_model = [m for m in self.MODELS if m.config_string == config][0]
    assert get_model_sd_version(model) == ld_model.sd_version
    # Scale the latent into the model's input space before using it as c_concat.
    c_concat = model.model.latent_format.process_in(latent["samples"])
    return ld_model.apply_layered_diffusion(
        model, weight
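    # Likely continuation (sketch): close the call and attach c_concat to the
    # cond/uncond pair, e.g. via a helper such as ld_model.apply_c_concat(cond,
    # uncond, c_concat). That helper name is an assumption, not confirmed by
    # this excerpt.
    ) + ld_model.apply_c_concat(cond, uncond, c_concat)
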
    cond: Optional[List[List[torch.TensorType]]] = None,
    blended_cond: Optional[List[List[torch.TensorType]]] = None,
):
    ld_model = [m for m in self.MODELS if m.config_string == config][0]
    assert get_model_sd_version(model) == ld_model.sd_version
    assert ld_model.attn_sharing
    work_model = ld_model.apply_layered_diffusion_attn_sharing(
        model, control_img=image.movedim(-1, 1)
    )[0]

    config: str,
    weight: float,
):
    ld_model = [m for m in self.MODELS if m.config_string == config][0]
    assert get_model_sd_version(model) == ld_model.sd_version
    c_concat = model.model.latent_format.process_in(
        torch.cat([latent["samples"], blended_latent["samples"]], dim=1)
    )
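    # Note (assumption): stacking the two 4-channel latents on dim=1 yields an
    # 8-channel c_concat, which this variant's UNet is expected to accept as
    # extra input channels, mirroring the single-latent path above.
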
# Imports shown for completeness; `parse` comes from packaging.version.
import importlib.metadata

from packaging.version import parse

diffusers_version = importlib.metadata.version('diffusers')


def check_diffusers_version(min_version="0.25.0"):
    assert parse(diffusers_version) >= parse(
        min_version
    ), f"diffusers>={min_version} requirement not satisfied. Please install the correct diffusers version."


check_diffusers_version()

def decode_pixel(
    self, pixel: torch.TensorType, latent: torch.TensorType
) -> torch.TensorType:
    # pixel.shape = [B, C=3, H, W]
    assert pixel.shape[1] == 3
    pixel_device = pixel.device
    pixel_dtype = pixel.dtype

    # Move inputs onto the decoder's device/dtype for inference. (The pixel
    # line is reconstructed from context; the excerpt omits it.)
    pixel = pixel.to(device=self.load_device, dtype=self.dtype)
    latent = latent.to(device=self.load_device, dtype=self.dtype)
    # y.shape = [B, C=4, H, W]
    y = self.estimate_augmented(pixel, latent)
    y = y.clip(0, 1)
    assert y.shape[1] == 4
    # Restore the output to the input image's original device and dtype.
    return y.to(pixel_device, dtype=pixel_dtype)
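
# Usage sketch (assumption): `decoder` stands for an instance of the transparent
# VAE decoder class defining decode_pixel above; `images` is a [B, H, W, C]
# float tensor in [0, 1] and `latent` the matching latent batch.
pixel = images.movedim(-1, 1)               # [B, C=3, H, W]
rgba = decoder.decode_pixel(pixel, latent)  # [B, C=4, H, W], values in [0, 1]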