    def set_insertion_weights(self, insertion_weights: list[float] | None):
        # None selects the neutral default of 1.0 for each of the four slots
        if insertion_weights is None:
            insertion_weights = [1.0, 1.0, 1.0, 1.0]
        assert len(insertion_weights) == 4
        self.insertion_weights = insertion_weights

    def cleanup(self):
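# Hedged, self-contained sketch of the defaulting contract above; _Host is a
# hypothetical stand-in for the class that defines set_insertion_weights.
class _Host:
    def set_insertion_weights(self, insertion_weights):
        if insertion_weights is None:
            insertion_weights = [1.0, 1.0, 1.0, 1.0]
        assert len(insertion_weights) == 4
        self.insertion_weights = insertion_weights

h = _Host()
h.set_insertion_weights(None)
print(h.insertion_weights)  # [1.0, 1.0, 1.0, 1.0] -- neutral default, one weight per slot
h.set_insertion_weights([0.5, 1.0, 1.0, 0.0])  # custom weights must be exactly 4 values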
        if use_conv:
            self.operation = ops.conv_nd(dims, in_channels=self.channels, out_channels=self.out_channels,
                                         kernel_size=3, stride=stride, padding=padding)
        else:
            assert self.channels == self.out_channels
            self.operation = avg_pool_nd(dims, kernel_size=stride, stride=stride)  # both are stride value on purpose

    def forward(self, x: Tensor):
        assert x.shape[1] == self.channels

        kernel_size = (2, 2)
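# Hedged sketch of the conv-vs-pool dispatch above, specialized to 2D with
# plain torch.nn so it runs standalone; ops.conv_nd/avg_pool_nd are the
# source's dims-generic wrappers, approximated here by Conv2d/AvgPool2d, and
# the class name Downsample2D is made up for the demo.
import torch
import torch.nn as nn

class Downsample2D(nn.Module):
    def __init__(self, channels: int, out_channels: int, use_conv: bool, stride: int = 2):
        super().__init__()
        self.channels = channels
        self.out_channels = out_channels
        if use_conv:
            # learned downsample: a stride-2 conv can also change the channel count
            self.operation = nn.Conv2d(channels, out_channels, kernel_size=3, stride=stride, padding=1)
        else:
            # fixed downsample: avg-pooling cannot change the channel count
            assert channels == out_channels
            self.operation = nn.AvgPool2d(kernel_size=stride, stride=stride)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        assert x.shape[1] == self.channels
        return self.operation(x)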
        if use_conv:
            self.operation = ops.conv_nd(dims, in_channels=self.channels, out_channels=self.out_channels,
                                         kernel_size=3, stride=stride, padding=padding)
        else:
            assert self.channels == self.out_channels
            self.operation = avg_pool_nd(dims, kernel_size=stride, stride=stride)  # both are stride value on purpose

    def forward(self, x: Tensor):
        assert x.shape[1] == self.channels
        return self.operation(x)
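# Quick check of the stride-2 halving behavior, reusing the hypothetical
# Downsample2D sketch above; shapes are illustrative.
x = torch.randn(1, 64, 32, 32)
print(Downsample2D(64, 128, use_conv=True)(x).shape)   # torch.Size([1, 128, 16, 16])
print(Downsample2D(64, 64, use_conv=False)(x).shape)   # torch.Size([1, 64, 16, 16])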
        *args,
        **kwargs,
    ):
        super().__init__(operations=ops, *args, **kwargs)
        assert attention_mode == "Temporal"

        self.attention_mode = attention_mode
        # .get avoids a KeyError when context_dim is not provided
        self.is_cross_attention = kwargs.get("context_dim") is not None
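# Hedged sketch of the cross-attention detection pattern above: a block is
# cross-attention iff a context_dim was supplied. The class and argument
# names here are hypothetical stand-ins for the source's attention class.
class TemporalAttention:
    def __init__(self, attention_mode: str = "Temporal", **kwargs):
        assert attention_mode == "Temporal"
        self.attention_mode = attention_mode
        # context_dim present -> attends over external context (cross-attention);
        # absent or None -> attends over its own sequence (self-attention)
        self.is_cross_attention = kwargs.get("context_dim") is not None

print(TemporalAttention(context_dim=768).is_cross_attention)   # True
print(TemporalAttention(context_dim=None).is_cross_attention)  # False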
        # now that images are the expected size, VAEEncode them
        try:  # account for old ComfyUI versions (TODO: remove this when other changes require ComfyUI update)
            if not hasattr(vae, "vae_encode_crop_pixels"):
                # old ComfyUI: the vae object cannot crop for itself, so use the node's static helper
                image = VAEEncode.vae_encode_crop_pixels(image)
        except Exception:
            pass
        # [:, :, :, :3] keeps only RGB, dropping any alpha channel before encoding
        return ({"samples": vae.encode(image[:, :, :, :3])},)
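# The [:, :, :, :3] slice above drops a possible alpha channel, since the VAE
# expects 3-channel RGB. A hedged, torch-only illustration with made-up sizes:
import torch
rgba = torch.rand(1, 64, 64, 4)   # ComfyUI images are BHWC floats in 0..1
rgb = rgba[:, :, :, :3]
print(rgb.shape)  # torch.Size([1, 64, 64, 3])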
class CameraMotion:
    def __init__(self, rotate: tuple[float, float, float], translate: tuple[float, float, float]):
        # both motions are 3-vectors: one component per axis
        assert len(rotate) == 3
        assert len(translate) == 3
        self.rotate = np.array(rotate)
        self.translate = np.array(translate)
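# Hedged usage sketch of the CameraMotion class above: the motion values are
# illustrative, not presets from the source, and the per-axis interpretation
# is an assumption based only on the 3-element asserts.
pan_left = CameraMotion(rotate=(0.0, 0.0, 0.0), translate=(-1.0, 0.0, 0.0))
tilt = CameraMotion(rotate=(1.0, 0.0, 0.0), translate=(0.0, 0.0, 0.0))
print(pan_left.translate * 0.5)  # numpy arrays make per-axis scaling trivial: [-0.5  0.  0.]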
import json
import os
import shutil
import subprocess
from typing import Dict, List

import numpy as np
import torch
    env = os.environ.copy()
    if "environment" in video_format:
        env.update(video_format["environment"])
    with subprocess.Popen(args, stdin=subprocess.PIPE, env=env) as proc:
        for frame in frames:
            proc.stdin.write(frame.tobytes())
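# Hedged end-to-end sketch of the same pattern: stream raw RGB frames into
# ffmpeg over stdin. The flags are standard rawvideo options; the 16x16 test
# frames and the out.mp4 name are made up for the demo.
import os
import subprocess
import numpy as np

frames = [np.full((16, 16, 3), i * 16, dtype=np.uint8) for i in range(16)]
args = ["ffmpeg", "-y",
        "-f", "rawvideo", "-pix_fmt", "rgb24", "-s", "16x16", "-r", "8",
        "-i", "-",  # read frames from stdin
        "out.mp4"]
with subprocess.Popen(args, stdin=subprocess.PIPE, env=os.environ.copy()) as proc:
    for frame in frames:
        proc.stdin.write(frame.tobytes())
    # closing stdin signals EOF so ffmpeg can finalize the file; the context
    # manager then waits for the process to exit
    proc.stdin.close()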
    # if on an mps device (Apple Silicon), disable batched conds to avoid black images with the groupnorm hack
    try:
        if model.load_device.type == "mps":
            model.model.memory_required = unlimited_memory_required
    except Exception:
        pass
    # if img_encoder or camera_encoder is present, inject apply_model to handle them correctly
    for motion_model in model.motion_models:
        if (motion_model.model.img_encoder is not None) or (motion_model.model.camera_encoder is not None):
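# Hedged sketch of what unlimited_memory_required plausibly does: ComfyUI only
# batches cond/uncond together when memory_required(...) fits in free memory,
# so reporting an effectively infinite requirement forces unbatched conds.
# The exact source definition may differ; this is an assumption.
def unlimited_memory_required(*args, **kwargs) -> float:
    return float("inf")  # "never enough memory" -> sampler runs one cond at a time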
    mask_strength = 1.0
    if "mask_strength" in conds:
        mask_strength = conds["mask_strength"]
    mask = conds["mask"]
    assert mask.shape[1] == x_in.shape[2]
    assert mask.shape[2] == x_in.shape[3]
    # make sure mask is capped at input_x's batch length to prevent 0 as a dimension
    mask = mask[:input_x.shape[0], area[2]:area[0] + area[2], area[3]:area[1] + area[3]] * mask_strength
    mask = mask.unsqueeze(1).repeat(input_x.shape[0] // mask.shape[0], input_x.shape[1], 1, 1)
def try_mkdir(full_path: str):
    # best-effort mkdir: an already-existing directory (or any other failure) is ignored
    try:
        Path(full_path).mkdir()
    except Exception:
        pass


# register motion models folder(s)
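# Hedged usage sketch of the registration that follows: create the models
# subfolder if missing, then register it with ComfyUI. add_model_folder_path
# is ComfyUI's registration hook; the "animatediff_models" name and path
# below are illustrative assumptions, not the source's actual values.
import os
import folder_paths

motion_dir = os.path.join(folder_paths.models_dir, "animatediff_models")
try_mkdir(motion_dir)
folder_paths.add_model_folder_path("animatediff_models", motion_dir)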