num_patches = self.patch_embed.num_patches

# Learnable class token and positional embeddings: one slot per patch plus
# self.num_tokens extra slots for the prepended tokens.
self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + self.num_tokens, embed_dim))
assert num_register_tokens >= 0
# Optional DINOv2-style register tokens; left as None when num_register_tokens == 0.
self.register_tokens = (
    nn.Parameter(torch.zeros(1, num_register_tokens, embed_dim)) if num_register_tokens else None
)
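A minimal sketch of the token bookkeeping above, assuming a 518x518 input, a 14-pixel patch size, and self.num_tokens == 1 (the class token); the concrete numbers are illustrative, not read from any particular checkpoint:

img_size, patch_size, num_tokens, num_register_tokens = 518, 14, 1, 4
num_patches = (img_size // patch_size) ** 2   # 37 * 37 = 1369 patch tokens
pos_embed_len = num_patches + num_tokens      # 1370 positional slots
# Register tokens live in their own parameter and are appended separately,
# so they do not enlarge pos_embed.
print(num_patches, pos_embed_len)             # 1369 1370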
    mode="bicubic",
    antialias=self.interpolate_antialias
)

assert int(w0) == patch_pos_embed.shape[-2]
assert int(h0) == patch_pos_embed.shape[-1]
patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim)
return torch.cat((class_pos_embed.unsqueeze(0), patch_pos_embed), dim=1).to(previous_dtype)
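The snippet above is the tail of the positional-embedding interpolation: the learned patch positions are resampled bicubically to the new patch grid, flattened back to (1, N, dim), and concatenated with the class position. A standalone sketch of that resampling, with illustrative shapes (a 37x37 learned grid, 384-dim embeddings, resampled to 74x74):

import torch
import torch.nn.functional as F

dim, old_grid, new_grid = 384, 37, (74, 74)
patch_pos_embed = torch.randn(1, old_grid * old_grid, dim)

# (1, N, dim) -> (1, dim, grid, grid) so F.interpolate can resample spatially.
patch_pos_embed = patch_pos_embed.reshape(1, old_grid, old_grid, dim).permute(0, 3, 1, 2)
patch_pos_embed = F.interpolate(patch_pos_embed, size=new_grid, mode="bicubic", antialias=True)
patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim)
print(patch_pos_embed.shape)  # torch.Size([1, 5476, 384])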
    for i, blk in enumerate(self.blocks):
        x = blk(x)
        if i in blocks_to_take:
            output.append(x)
    assert len(output) == len(blocks_to_take), f"only {len(output)} / {len(blocks_to_take)} blocks found"
    return output

def _get_intermediate_layers_chunked(self, x, n=1):
            x = blk(x)
            if i in blocks_to_take:
                output.append(x)
            i += 1
    assert len(output) == len(blocks_to_take), f"only {len(output)} / {len(blocks_to_take)} blocks found"
    return output

def get_intermediate_layers(
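Both helpers collect the outputs of the requested blocks (the last n when n is an int, or an explicit list of block indices) and assert that every requested block was actually seen. A usage sketch, assuming the public wrapper keeps DINOv2's get_intermediate_layers(x, n, reshape, return_class_token) signature and that model is an already constructed ViT instance:

import torch

x = torch.randn(1, 3, 518, 518)                              # sized to a multiple of the patch size
feats = model.get_intermediate_layers(x, n=4, reshape=True)  # features from the last 4 blocks
for f in feats:
    print(f.shape)                                           # e.g. (1, embed_dim, 37, 37) each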
class MemEffAttention(Attention):
    def forward(self, x: Tensor, attn_bias=None) -> Tensor:
        if not XFORMERS_AVAILABLE:
            assert attn_bias is None, "xFormers is required for nested tensors usage"
            return super().forward(x)

        B, N, C = x.shape
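When xFormers is available, the remainder of this forward computes qkv and dispatches to xformers.ops.memory_efficient_attention rather than materializing the full attention matrix. A hedged sketch of that call with illustrative shapes (batch 2, 1370 tokens, 6 heads of 64 dims), not the node's exact qkv plumbing:

import torch
from xformers.ops import memory_efficient_attention

q = k = v = torch.randn(2, 1370, 6, 64)            # (B, N, heads, head_dim)
out = memory_efficient_attention(q, k, v, attn_bias=None)
out = out.reshape(2, 1370, 6 * 64)                 # back to (B, N, C)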
def forward_nested(self, x_list: List[Tensor]) -> List[Tensor]:
    """
    x_list contains a list of tensors to nest together and run
    """
    assert isinstance(self.attn, MemEffAttention)

    if self.training and self.sample_drop_ratio > 0.0:
def forward(self, x_or_x_list):
    if isinstance(x_or_x_list, Tensor):
        return super().forward(x_or_x_list)
    elif isinstance(x_or_x_list, list):
        assert XFORMERS_AVAILABLE, "Please install xFormers for nested tensors usage"
        return self.forward_nested(x_or_x_list)
    else:
        raise AssertionError
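The dispatch above means a single Tensor always takes the standard block path, while a Python list of token batches (possibly with different sequence lengths) is only accepted when xFormers is installed. A hedged sketch of the calling convention, with block standing in for an instantiated NestedTensorBlock and the shapes purely illustrative:

import torch

single = torch.randn(2, 1370, 384)
out = block(single)                  # plain Tensor -> super().forward()

batch_a = torch.randn(1, 1370, 384)
batch_b = torch.randn(1, 2617, 384)
outs = block([batch_a, batch_b])     # list -> forward_nested(); asserts XFORMERS_AVAILABLE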
ops = comfy.ops.manual_cast


def make_2tuple(x):
    if isinstance(x, tuple):
        assert len(x) == 2
        return x

    assert isinstance(x, int)
    return (x, x)
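make_2tuple normalizes patch-size arguments so both an int and a 2-tuple are accepted; anything else trips the assertions:

make_2tuple(14)        # -> (14, 14)
make_2tuple((14, 16))  # -> (14, 16)
make_2tuple((14,))     # AssertionError: the tuple must have exactly two entries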
def forward(self, x: Tensor) -> Tensor:
    _, _, H, W = x.shape
    patch_H, patch_W = self.patch_size

    assert H % patch_H == 0, f"Input image height {H} is not a multiple of patch height {patch_H}"
    assert W % patch_W == 0, f"Input image width {W} is not a multiple of patch width {patch_W}"

    x = self.proj(x)  # B C H W
    H, W = x.size(2), x.size(3)
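The divisibility asserts mean callers must resize or pad images to a multiple of the patch size before the patch embedding runs. A small sketch of the resulting token grid, assuming 14-pixel patches (illustrative values, not read from a checkpoint):

H, W, patch = 518, 686, 14
assert H % patch == 0 and W % patch == 0
grid_h, grid_w = H // patch, W // patch   # 37 x 49 patch positions
num_patches = grid_h * grid_w             # 1813 patch tokens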
# Default to False so the flag is defined even when accelerate is not installed.
is_accelerate_available = False
try:
    from accelerate import init_empty_weights
    from accelerate.utils import set_module_tensor_to_device
    is_accelerate_available = True
except ImportError:
    pass


class DownloadAndLoadDepthAnythingV2Model:
    @classmethod
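A sketch of how the availability flag is typically consumed downstream, assuming the loader only takes accelerate's meta-device path when the import succeeded; build_model, config, and state_dict are hypothetical stand-ins, not the node's actual loading code:

if is_accelerate_available:
    with init_empty_weights():           # allocate the module on the meta device, no real weights yet
        model = build_model(config)      # hypothetical constructor
    for name, tensor in state_dict.items():
        set_module_tensor_to_device(model, name, device="cpu", value=tensor)
else:
    model = build_model(config)
    model.load_state_dict(state_dict)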