@PromptServer.instance.routes.get("/easyuse/reboot")
def reboot(request):
    try:
        sys.stdout.close_log()
    except Exception:
        pass

    return os.execv(sys.executable, [sys.executable] + sys.argv)


# parse csv
@PromptServer.instance.routes.post("/easyuse/upload/csv")

        Args:
            tokens (Union[str, List[str]]): The tokens to be added.
        """
        num_added_tokens = self.wrapped.add_tokens(tokens, *args, **kwargs)
        assert num_added_tokens != 0, (
            f"The tokenizer already contains the token {tokens}. Please pass "
            "a different `placeholder_token` that is not already in the "
            "tokenizer."
        )

    def get_token_info(self, token: str) -> dict:
        """Get the information of a token, including its start and end index in

        Args:
            embeddings (List[dict]): A list of embeddings to be checked.
        """
        names = [emb["name"] for emb in embeddings]
        assert len(names) == len(set(names)), (
            "Found duplicated names in 'external_embeddings'. Name list: "
            f"'{names}'"
        )

    def check_ids_overlap(self, embeddings):
        """Check whether overlaps exist in the token ids of 'external_embeddings'.

        ids_range.sort()  # sort by 'start'
        # check that consecutive ranges do not overlap
        for idx in range(len(ids_range) - 1):
            name1, name2 = ids_range[idx][-1], ids_range[idx + 1][-1]
            assert ids_range[idx][1] <= ids_range[idx + 1][0], (
                f"Found ids overlapping between embeddings '{name1}' "
                f"and '{name2}'."
            )

    def add_embeddings(self, embeddings: Optional[Union[dict, List[dict]]]):
        """Add external embeddings to this layer.

            new_embedding.append(embedding[s_idx:e_idx])

            # check that the next embedding to be replaced is valid
            actually_ids_to_replace = [int(i) for i in input_ids[e_idx:e_idx + end - start]]
            assert actually_ids_to_replace == target_ids_to_replace, (
                f"Invalid 'input_ids' in position: {s_idx} to {e_idx}. "
                f"Expect '{target_ids_to_replace}' for embedding "
                f"'{name}' but found '{actually_ids_to_replace}'."
            )

            new_embedding.append(ext_emb)

                will be used. Defaults to None.

            input_ids: shape like [bz, LENGTH] or [LENGTH].
        """
        assert input_ids.ndim in [1, 2]
        if input_ids.ndim == 1:
            input_ids = input_ids.unsqueeze(0)

    # TODO: support adding tokens as a dict, so that pretrained tokens can be loaded.
    """
    if initialize_tokens is not None:
        assert len(initialize_tokens) == len(placeholder_tokens), \
            "placeholder_tokens must have the same length as initialize_tokens"
    for ii in range(len(placeholder_tokens)):
        tokenizer.add_placeholder_token(placeholder_tokens[ii], num_vec_per_token=num_vectors_per_token)

    embedding_layer = text_encoder.text_model.embeddings.token_embedding
    text_encoder.text_model.embeddings.token_embedding = EmbeddingLayerWithFixes(embedding_layer)
    embedding_layer = text_encoder.text_model.embeddings.token_embedding

    assert embedding_layer is not None, (
        "Getting the embedding layer is not supported for the current text encoder. "
        "Please check your configuration."
    )
    initialize_embedding = []
    if initialize_tokens is not None:
        for ii in range(len(placeholder_tokens)):

        AssertionError: If the frequency tensor doesn't match the expected shape.
        AssertionError: If the target tensor 'x' doesn't have the expected number of dimensions.
    """
    ndim = x.ndim
    assert 0 <= 1 < ndim

    if isinstance(freqs_cis, tuple):
        # freqs_cis: (cos, sin) in real space
        if head_first:
            assert freqs_cis[0].shape == (x.shape[-2], x.shape[-1]), \
                f'freqs_cis shape {freqs_cis[0].shape} does not match x shape {x.shape}'
            shape = [d if i == ndim - 2 or i == ndim - 1 else 1 for i, d in enumerate(x.shape)]
        else:
            assert freqs_cis[0].shape == (x.shape[1], x.shape[-1]), \
                f'freqs_cis shape {freqs_cis[0].shape} does not match x shape {x.shape}'
            shape = [d if i == 1 or i == ndim - 1 else 1 for i, d in enumerate(x.shape)]
        return freqs_cis[0].view(*shape), freqs_cis[1].view(*shape)
    else:
        # freqs_cis: values in complex space
        if head_first:
            assert freqs_cis.shape == (x.shape[-2], x.shape[-1]), \
                f'freqs_cis shape {freqs_cis.shape} does not match x shape {x.shape}'
            shape = [d if i == ndim - 2 or i == ndim - 1 else 1 for i, d in enumerate(x.shape)]
        else:
            assert freqs_cis.shape == (x.shape[1], x.shape[-1]), \
                f'freqs_cis shape {freqs_cis.shape} does not match x shape {x.shape}'
            shape = [d if i == 1 or i == ndim - 1 else 1 for i, d in enumerate(x.shape)]
        return freqs_cis.view(*shape)

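# Illustrative sketch (not from this file): how broadcast-shaped (cos, sin)
# tensors such as the ones returned above are commonly used to rotate a
# query/key tensor. The names and the half-rotation layout are assumptions
# for illustration, not this repository's apply_rotary_emb.
import torch

def rotate_half(x: torch.Tensor) -> torch.Tensor:
    # split the last dimension into two halves and swap them with a sign flip
    x1, x2 = x.chunk(2, dim=-1)
    return torch.cat((-x2, x1), dim=-1)

def apply_rope_real(x: torch.Tensor, cos: torch.Tensor, sin: torch.Tensor) -> torch.Tensor:
    # cos/sin are already reshaped for broadcasting against x (see reshape above)
    return x * cos + rotate_half(x) * sin
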
        factory_kwargs = {'device': device, 'dtype': dtype}
        super().__init__()
        self.dim = dim
        self.num_heads = num_heads
        assert self.dim % num_heads == 0, "dim must be divisible by num_heads"
        self.head_dim = self.dim // num_heads
        assert self.head_dim % 8 == 0 and self.head_dim <= 128, "Only support head_dim <= 128 and divisible by 8"

        self.Wqkv = nn.Linear(dim, 3 * dim, bias=qkv_bias, **factory_kwargs)
        # TODO: eps should be 1 / 65530 if using fp16

        # Apply RoPE if needed
        if freqs_cis_img is not None:
            qq, kk = apply_rotary_emb(q, k, freqs_cis_img)
            assert qq.shape == q.shape and kk.shape == k.shape, \
                f'qq: {qq.shape}, q: {q.shape}, kk: {kk.shape}, k: {k.shape}'
            q, k = qq, kk

        qkv = torch.stack([q, k, v], dim=2)  # [b, s, 3, h, d]

        super().__init__()
        self.qdim = qdim
        self.kdim = kdim
        self.num_heads = num_heads
        assert self.qdim % num_heads == 0, "self.qdim must be divisible by num_heads"
        self.head_dim = self.qdim // num_heads
        assert self.head_dim % 8 == 0 and self.head_dim <= 128, "Only support head_dim <= 128 and divisible by 8"

        self.scale = self.head_dim ** -0.5

        # Apply RoPE if needed
        if freqs_cis_img is not None:
            qq, _ = apply_rotary_emb(q, None, freqs_cis_img)
            assert qq.shape == q.shape, f'qq: {qq.shape}, q: {q.shape}'
            q = qq  # [b, s1, h, d]
        kv = torch.stack([k, v], dim=2)  # [b, s1, 2, h, d]
        context = self.inner_attn(q, kv)  # [b, s1, h, d]

        super().__init__()
        self.qdim = qdim
        self.kdim = kdim
        self.num_heads = num_heads
        assert self.qdim % num_heads == 0, "self.qdim must be divisible by num_heads"
        self.head_dim = self.qdim // num_heads
        assert self.head_dim % 8 == 0 and self.head_dim <= 128, "Only support head_dim <= 128 and divisible by 8"
        self.scale = self.head_dim ** -0.5

        self.q_proj = nn.Linear(qdim, qdim, bias=qkv_bias, **factory_kwargs)

        # Apply RoPE if needed
        if freqs_cis_img is not None:
            qq, _ = apply_rotary_emb(q, None, freqs_cis_img)
            assert qq.shape == q.shape, f'qq: {qq.shape}, q: {q.shape}'
            q = qq

        q = q * self.scale

    ):
        super().__init__()
        self.dim = dim
        self.num_heads = num_heads
        assert self.dim % num_heads == 0, 'dim should be divisible by num_heads'
        self.head_dim = self.dim // num_heads
        # This assertion is aligned with flash attention
        assert self.head_dim % 8 == 0 and self.head_dim <= 128, "Only support head_dim <= 128 and divisible by 8"
        self.scale = self.head_dim ** -0.5

        # qkv --> Wqkv

        # Apply RoPE if needed
        if freqs_cis_img is not None:
            qq, kk = apply_rotary_emb(q, k, freqs_cis_img, head_first=True)
            assert qq.shape == q.shape and kk.shape == k.shape, \
                f'qq: {qq.shape}, q: {q.shape}, kk: {kk.shape}, k: {k.shape}'
            q, k = qq, kk

        q = q * self.scale

        """
        c = self.unpatchify_channels
        p = self.x_embedder.patch_size[0]
        # h = w = int(x.shape[1] ** 0.5)
        assert h * w == x.shape[1]

        x = x.reshape(shape=(x.shape[0], h, w, p, p, c))
        x = torch.einsum('nhwpqc->nchpwq', x)

    return pos_embed


def get_2d_sincos_pos_embed_from_grid(embed_dim, grid):
    assert embed_dim % 2 == 0

    # use half of dimensions to encode grid_h
    emb_h = get_1d_sincos_pos_embed_from_grid(embed_dim // 2, grid[0])  # (H*W, D/2)

    embed_dim: output dimension for each position
    pos: a list of positions to be encoded: size (W,H)
    out: (M, D)
    """
    assert embed_dim % 2 == 0
    omega = np.arange(embed_dim // 2, dtype=np.float64)
    omega /= embed_dim / 2.
    omega = 1. / 10000 ** omega  # (D/2,)

    return pos_embed


def get_2d_rotary_pos_embed_from_grid(embed_dim, grid, use_real=False):
    assert embed_dim % 4 == 0

    # use half of dimensions to encode grid_h
    emb_h = get_1d_rotary_pos_embed(embed_dim // 2, grid[0].reshape(-1), use_real=use_real)  # (H*W, D/4)

        """
        c = self.out_channels
        p = self.x_embedder.patch_size[0]
        h = w = int(x.shape[1] ** 0.5)
        assert h * w == x.shape[1]

        x = x.reshape(shape=(x.shape[0], h, w, p, p, c))
        x = torch.einsum('nhwpqc->nchpwq', x)

    return pos_embed.astype(np.float32)


def get_2d_sincos_pos_embed_from_grid(embed_dim, grid):
    assert embed_dim % 2 == 0

    # use half of dimensions to encode grid_h
    emb_h = get_1d_sincos_pos_embed_from_grid(embed_dim // 2, grid[0])  # (H*W, D/2)

    embed_dim: output dimension for each position
    pos: a list of positions to be encoded: size (M,)
    out: (M, D)
    """
    assert embed_dim % 2 == 0
    omega = np.arange(embed_dim // 2, dtype=np.float64)
    omega /= embed_dim / 2.
    omega = 1. / 10000 ** omega  # (D/2,)

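# Illustrative sketch (assumption): the standard MAE/ViT continuation of this
# 1-D sin/cos helper, shown self-contained; it may not match this file
# byte-for-byte.
import numpy as np

def sincos_1d_pos_embed(embed_dim, pos):
    assert embed_dim % 2 == 0
    omega = np.arange(embed_dim // 2, dtype=np.float64)
    omega /= embed_dim / 2.
    omega = 1. / 10000 ** omega                            # (D/2,)

    pos = np.asarray(pos, dtype=np.float64).reshape(-1)    # (M,)
    out = np.einsum('m,d->md', pos, omega)                 # (M, D/2), outer product
    emb_sin = np.sin(out)                                  # (M, D/2)
    emb_cos = np.cos(out)                                  # (M, D/2)
    return np.concatenate([emb_sin, emb_cos], axis=1)      # (M, D)
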
        imgs: (N, H, W, C)
        """
        c = self.out_channels
        p = self.x_embedder.patch_size[0]
        assert self.h * self.w == x.shape[1]

        x = x.reshape(shape=(x.shape[0], self.h, self.w, p, p, c))
        x = torch.einsum('nhwpqc->nchpwq', x)

class MultiHeadCrossAttention(nn.Module):
    def __init__(self, d_model, num_heads, attn_drop=0., proj_drop=0., **block_kwargs):
        super(MultiHeadCrossAttention, self).__init__()
        assert d_model % num_heads == 0, "d_model must be divisible by num_heads"

        self.d_model = d_model
        self.num_heads = num_heads

    def forward(self, s, bs):
        if s.ndim == 1:
            s = s[:, None]
        assert s.ndim == 2
        if s.shape[0] != bs:
            s = s.repeat(bs // s.shape[0], 1)
            assert s.shape[0] == bs
        b, dims = s.shape[0], s.shape[1]
        s = rearrange(s, "b d -> (b d)")
        s_freq = self.timestep_embedding(s, self.frequency_embedding_size)

        return caption

    def forward(self, caption, train, force_drop_ids=None):
        if train:
            assert caption.shape[2:] == self.y_embedding.shape
        use_dropout = self.uncond_prob > 0
        if (train and use_dropout) or (force_drop_ids is not None):
            caption = self.token_drop(caption, force_drop_ids)

            caption = torch.where(drop_ids[:, None, None, None], self.y_embedding, caption)
        return global_caption, caption

    def forward(self, caption, train, force_drop_ids=None):
        assert caption.shape[2:] == self.y_embedding.shape
        global_caption = caption.mean(dim=2).squeeze()
        use_dropout = self.uncond_prob > 0
        if (train and use_dropout) or (force_drop_ids is not None):

        imgs: (N, H, W, C)
        """
        c = self.out_channels
        p = self.x_embedder.patch_size[0]
        assert self.h * self.w == x.shape[1]

        x = x.reshape(shape=(x.shape[0], self.h, self.w, p, p, c))
        x = torch.einsum('nhwpqc->nchpwq', x)

to_1tuple = _ntuple(1)
to_2tuple = _ntuple(2)


def set_grad_checkpoint(model, use_fp32_attention=False, gc_step=1):
    assert isinstance(model, nn.Module)

    def set_attr(module):
        module.grad_checkpointing = True

            return torch.float8_e4m3fn
        else:
            raise NotImplementedError(f"Unknown 8bit dtype '{s}'")
    elif "bnb" in s:
        assert s in ["bnb8bit", "bnb4bit"], f"Unknown bnb mode '{s}'"
        return s
    elif s is None:
        return None

    for obj in (mean1, logvar1, mean2, logvar2):
        if isinstance(obj, torch.Tensor):
            tensor = obj
            break
    assert tensor is not None, "at least one argument must be a Tensor"

    # Force variances to be Tensors. Broadcasting helps convert scalars to
    # Tensors, but it does not work for torch.exp().

                    sname = self.m_name2s_name[key]
                    shadow_params[sname] = shadow_params[sname].type_as(m_param[key])
                    shadow_params[sname].sub_(one_minus_decay * (shadow_params[sname] - m_param[key]))
                else:
                    assert key not in self.m_name2s_name

    def copy_to(self, model):
        m_param = dict(model.named_parameters())

        for key in m_param:
            if m_param[key].requires_grad:
                m_param[key].data.copy_(shadow_params[self.m_name2s_name[key]].data)
            else:
                assert key not in self.m_name2s_name

    def store(self, parameters):
        """

        self.image_key = image_key
        self.encoder = Encoder(**ddconfig)
        self.decoder = Decoder(**ddconfig)
        self.loss = instantiate_from_config(lossconfig)
        assert ddconfig["double_z"]
        self.quant_conv = torch.nn.Conv2d(2 * ddconfig["z_channels"], 2 * embed_dim, 1)
        self.post_quant_conv = torch.nn.Conv2d(embed_dim, ddconfig["z_channels"], 1)
        self.embed_dim = embed_dim

        self.test = test
        self.test_args = test_args
        self.logdir = logdir
        if colorize_nlabels is not None:
            assert type(colorize_nlabels) == int
            self.register_buffer("colorize", torch.randn(3, colorize_nlabels, 1, 1))
        if monitor is not None:
            self.monitor = monitor

        if self.test_args.save_reconstruction:
            os.makedirs(self.root_dec, exist_ok=True)
        if self.test_args.save_input:
            os.makedirs(self.root_inputs, exist_ok=True)
        assert self.test_args is not None
        self.test_maximum = getattr(self.test_args, 'test_maximum', None)
        self.count = 0
        self.eval_metrics = {}

        if not only_inputs:
            xrec, posterior = self(x)
            if x.shape[1] > 3:
                # colorize with random projection
                assert xrec.shape[1] > 3
                x = self.to_rgb(x)
                xrec = self.to_rgb(xrec)
            log["samples"] = self.decode(torch.randn_like(posterior.sample()))

        log["inputs"] = x
        return log

    def to_rgb(self, x):
        assert self.image_key == "segmentation"
        if not hasattr(self, "colorize"):
            self.register_buffer("colorize", torch.randn(3, x.shape[1], 1, 1).to(x))
        x = F.conv2d(x, weight=self.colorize)

                 logvar_init=0.,
                 rescale_betas_zero_snr=False,
                 ):
        super().__init__()
        assert parameterization in ["eps", "x0", "v"], 'currently only supporting "eps", "x0" and "v"'
        self.parameterization = parameterization
        mainlogger.info(f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode")
        self.cond_stage_model = None

        timesteps, = betas.shape
        self.num_timesteps = int(timesteps)
        self.linear_start = linear_start
        self.linear_end = linear_end
        assert alphas_cumprod.shape[0] == self.num_timesteps, 'alphas have to be defined for each timestep'

        to_torch = partial(torch.tensor, dtype=torch.float32)

            raise NotImplementedError("mu not supported")
        # TODO how to choose this term
        lvlb_weights[0] = lvlb_weights[1]
        self.register_buffer('lvlb_weights', lvlb_weights, persistent=False)
        assert not torch.isnan(self.lvlb_weights).all()

    @contextmanager
    def ema_scope(self, context=None):

                 perframe_ae=False,
                 *args, **kwargs):
        self.num_timesteps_cond = default(num_timesteps_cond, 1)
        self.scale_by_std = scale_by_std
        assert self.num_timesteps_cond <= kwargs['timesteps']
        # for backwards compatibility after implementation of DiffusionWrapper
        ckpt_path = kwargs.pop("ckpt_path", None)
        ignore_keys = kwargs.pop("ignore_keys", [])

        self.clip_denoised = False

        self.cond_stage_forward = cond_stage_forward
        self.encoder_type = encoder_type
        assert encoder_type in ["2d", "3d"]
        self.uncond_prob = uncond_prob
        self.classifier_free_guidance = True if uncond_prob > 0 else False
        assert uncond_type in ["zero_embed", "empty_seq"]
        self.uncond_type = uncond_type

        self.restarted_from_ckpt = False

                c = c.mode()
            else:
                c = self.cond_stage_model(c)
        else:
            assert hasattr(self.cond_stage_model, self.cond_stage_forward)
            c = getattr(self.cond_stage_model, self.cond_stage_forward)(c)
        return c

        t_in = t
        model_out = self.apply_model(x, t_in, c, **kwargs)

        if score_corrector is not None:
            assert self.parameterization == "eps"
            model_out = score_corrector.modify_score(self, model_out, x, t, c, **corrector_kwargs)

        if self.parameterization == "eps":

        iterator = tqdm(reversed(range(0, timesteps)), desc='Sampling t', total=timesteps) if verbose else reversed(range(0, timesteps))

        if mask is not None:
            assert x0 is not None
            assert x0.shape[2:3] == mask.shape[2:3]  # spatial size has to match

        for i in iterator:
            ts = torch.full((b,), i, device=device, dtype=torch.long)
            if self.shorten_cond_schedule:
                assert self.model.conditioning_key != 'hybrid'
                tc = self.cond_ids[ts].to(cond.device)
                cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond))

        elif self.conditioning_key == 'adm':
            cc = c_crossattn[0]
            out = self.diffusion_model(x, t, y=cc)
        elif self.conditioning_key == 'hybrid-adm':
            assert c_adm is not None
            xc = torch.cat([x] + c_concat, dim=1)
            cc = torch.cat(c_crossattn, 1)
            out = self.diffusion_model(xc, t, context=cc, y=c_adm, **kwargs)
        elif self.conditioning_key == 'hybrid-time':
            assert s is not None
            xc = torch.cat([x] + c_concat, dim=1)
            cc = torch.cat(c_crossattn, 1)
            out = self.diffusion_model(xc, t, context=cc, s=s)

            xc = x
            out = self.diffusion_model(xc, t, context=cc, y=s, mask=mask)
        elif self.conditioning_key == 'hybrid-time-adm':  # adm means y, e.g., class index
            # assert s is not None
            assert c_adm is not None
            xc = torch.cat([x] + c_concat, dim=1)
            cc = torch.cat(c_crossattn, 1)
            out = self.diffusion_model(xc, t, context=cc, s=s, y=c_adm)
        elif self.conditioning_key == 'crossattn-adm':
            assert c_adm is not None
            cc = torch.cat(c_crossattn, 1)
            out = self.diffusion_model(x, t, context=cc, y=c_adm)
        else:

    def make_schedule(self, ddim_num_steps, ddim_discretize="uniform", ddim_eta=0., verbose=True):
        self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps,
                                                  num_ddpm_timesteps=self.ddpm_num_timesteps, verbose=verbose)
        alphas_cumprod = self.model.alphas_cumprod
        assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep'
        to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device)

        if self.model.use_dynamic_rescale:

            ts = torch.full((b,), step, device=device, dtype=torch.long)

            ## use mask to blend noised original latent (img_orig) & new sampled latent (img)
            if mask is not None:
                assert x0 is not None
                if clean_cond:
                    img_orig = x0
                else:

        else:
            e_t = model_output

        if score_corrector is not None:
            assert self.model.parameterization == "eps", 'not implemented'
            e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs)

        alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas

    def make_schedule(self, ddim_num_steps, ddim_discretize="uniform", ddim_eta=0., verbose=True):
        self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps,
                                                  num_ddpm_timesteps=self.ddpm_num_timesteps, verbose=verbose)
        alphas_cumprod = self.model.alphas_cumprod
        assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep'
        to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device)

        if self.model.use_dynamic_rescale:

            ts = torch.full((b,), step, device=device, dtype=torch.long)

            ## use mask to blend noised original latent (img_orig) & new sampled latent (img)
            if mask is not None:
                assert x0 is not None
                if clean_cond:
                    img_orig = x0
                else:

        else:
            e_t = model_output

        if score_corrector is not None:
            assert self.model.parameterization == "eps", 'not implemented'
            e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs)

        alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas

        if schedule == 'discrete':
            if betas is not None:
                log_alphas = 0.5 * torch.log(1 - betas).cumsum(dim=0)
            else:
                assert alphas_cumprod is not None
                log_alphas = 0.5 * torch.log(alphas_cumprod)
            self.total_N = len(log_alphas)
            self.T = 1.

        t_continuous = t_continuous.expand((x.shape[0]))
        if guidance_type == "uncond":
            return noise_pred_fn(x, t_continuous)
        elif guidance_type == "classifier":
            assert classifier_fn is not None
            t_input = get_model_input_time(t_continuous)
            cond_grad = cond_grad_fn(x, t_input)
            sigma_t = noise_schedule.marginal_std(t_continuous)

            c_in = torch.cat([unconditional_condition, condition])
            noise_uncond, noise = noise_pred_fn(x_in, t_in, cond=c_in).chunk(2)
            return noise_uncond + guidance_scale * (noise - noise_uncond)

    assert model_type in ["noise", "x_start", "v"]
    assert guidance_type in ["uncond", "classifier", "classifier-free"]
    return model_fn

        t = t.view(-1)
        if 'bh' in self.variant:
            return self.multistep_uni_pc_bh_update(x, model_prev_list, t_prev_list, t, order, **kwargs)
        else:
            assert self.variant == 'vary_coeff'
            return self.multistep_uni_pc_vary_update(x, model_prev_list, t_prev_list, t, order, **kwargs)

    def multistep_uni_pc_vary_update(self, x, model_prev_list, t_prev_list, t, order, use_corrector=True):

    def multistep_uni_pc_vary_update(self, x, model_prev_list, t_prev_list, t, order, use_corrector=True):
        print(f'using unified predictor-corrector with order {order} (solver type: vary coeff)')
        ns = self.noise_schedule
        assert order <= len(model_prev_list)

        # first compute rks
        t_prev_0 = t_prev_list[-1]

    def multistep_uni_pc_bh_update(self, x, model_prev_list, t_prev_list, t, order, x_t=None, use_corrector=True):
        print(f'using unified predictor-corrector with order {order} (solver type: B(h))')
        ns = self.noise_schedule
        assert order <= len(model_prev_list)
        dims = x.dim()

        # first compute rks

        t_0 = 1. / self.noise_schedule.total_N if t_end is None else t_end
        t_T = self.noise_schedule.T if t_start is None else t_start
        device = x.device
        if method == 'multistep':
            assert steps >= order
            timesteps = self.get_time_steps(skip_type=skip_type, t_T=t_T, t_0=t_0, N=steps, device=device)
            assert timesteps.shape[0] - 1 == steps
            with torch.no_grad():
                vec_t = timesteps[0].expand((x.shape[0]))
                model_prev_list = [self.model_fn(x, vec_t)]

        )

        self.relative_position = relative_position
        if self.relative_position:
            assert temporal_length is not None
            self.relative_position_k = RelativePosition(num_units=dim_head, max_relative_position=temporal_length)
            self.relative_position_v = RelativePosition(num_units=dim_head, max_relative_position=temporal_length)
        else:

        else:
            self.proj_in = operations.Linear(in_channels, inner_dim, device=device, dtype=dtype)

        if relative_position:
            assert temporal_length is not None
            attention_cls = partial(CrossAttention, relative_position=True, temporal_length=temporal_length, device=device, dtype=dtype)
        else:
            attention_cls = partial(CrossAttention, temporal_length=temporal_length, device=device, dtype=dtype)
        if self.causal_attention:
            assert temporal_length is not None
            self.mask = torch.tril(torch.ones([1, temporal_length, temporal_length]))

        if self.only_self_att:

    def __init__(self, version="openai/clip-vit-large-patch14", device="cuda", max_length=77,
                 freeze=True, layer="last", layer_idx=None):  # clip-vit-base-patch32
        super().__init__()
        assert layer in self.LAYERS
        self.tokenizer = CLIPTokenizer.from_pretrained(version)
        self.transformer = CLIPTextModel.from_pretrained(version)
        self.device = device

            self.freeze()
        self.layer = layer
        self.layer_idx = layer_idx
        if layer == "hidden":
            assert layer_idx is not None
            assert 0 <= abs(layer_idx) <= 12

    def freeze(self):
        self.transformer = self.transformer.eval()

    def __init__(self, arch="ViT-H-14", version="laion2b_s32b_b79k", device="cuda", max_length=77,
                 freeze=True, layer="last"):
        super().__init__()
        assert layer in self.LAYERS
        model, _, _ = open_clip.create_model_and_transforms(arch, device=torch.device('cpu'), pretrained=version)
        del model.visual
        self.model = model

        return x + h_


def make_attn(in_channels, attn_type="vanilla", device=None, dtype=None):
    assert attn_type in ["vanilla", "linear", "none"], f'attn_type {attn_type} unknown'
    # print(f"making attention of type '{attn_type}' with {in_channels} in_channels")
    if attn_type == "vanilla":
        return AttnBlock(in_channels, device=device, dtype=dtype)

    Build sinusoidal embeddings.
    This matches the implementation in tensor2tensor, but differs slightly
    from the description in Section 3.5 of "Attention Is All You Need".
    """
    assert len(timesteps.shape) == 1

    half_dim = embedding_dim // 2
    emb = math.log(10000) / (half_dim - 1)

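# Illustrative sketch (assumption): how this sinusoidal timestep embedding
# typically continues, following the widely used DDPM/taming implementation;
# shown self-contained rather than as the exact code of this file.
import math
import torch

def sinusoidal_timestep_embedding(timesteps: torch.Tensor, embedding_dim: int) -> torch.Tensor:
    assert len(timesteps.shape) == 1                        # one scalar timestep per batch element
    half_dim = embedding_dim // 2
    emb = math.log(10000) / (half_dim - 1)
    emb = torch.exp(torch.arange(half_dim, dtype=torch.float32, device=timesteps.device) * -emb)
    emb = timesteps.float()[:, None] * emb[None, :]         # (N, half_dim)
    emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1)
    if embedding_dim % 2 == 1:                              # zero-pad odd dimensions
        emb = torch.nn.functional.pad(emb, (0, 1, 0, 0))
    return emb
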
            # assume aligned context, cat along channel axis
            x = torch.cat((x, context), dim=1)
        if self.use_timestep:
            # timestep embedding
            assert t is not None
            temb = get_timestep_embedding(t, self.ch)
            temb = self.temb.dense[0](temb)
            temb = nonlinearity(temb)

class Upsampler(nn.Module):
    def __init__(self, in_size, out_size, in_channels, out_channels, ch_mult=2, device=None, dtype=None, operations=ops):
        super().__init__()
        assert out_size >= in_size
        num_blocks = int(np.log2(out_size // in_size)) + 1
        factor_up = 1. + (out_size % in_size)
        print(f"Building {self.__class__.__name__} with in_size: {in_size} --> out_size {out_size} and factor {factor_up}")

        self.mode = mode
        if self.with_conv:
            print(f"Note: {self.__class__.__name__} uses learned downsampling and will ignore the fixed {mode} mode")
            raise NotImplementedError()
        assert in_channels is not None
        # no asymmetric padding in torch conv, must do it ourselves
        self.conv = operations.Conv2d(in_channels,
                                      in_channels,

                 dtype=None,
                 operations=ops):
        super().__init__()
        if pretrained_config is None:
            assert pretrained_model is not None, 'Either "pretrained_model" or "pretrained_config" must not be None'
            self.pretrained_model = pretrained_model
        else:
            assert pretrained_config is not None, 'Either "pretrained_model" or "pretrained_config" must not be None'
            self.instantiate_pretrained(pretrained_config)

        self.do_reshape = reshape

            self.op = operations.conv_nd(
                dims, self.channels, self.out_channels, 3, stride=stride, padding=padding
            )
        else:
            assert self.channels == self.out_channels
            self.op = avg_pool_nd(dims, kernel_size=stride, stride=stride)

    def forward(self, x):
        assert x.shape[1] == self.channels
        return self.op(x)


class Upsample(nn.Module):

        if use_conv:
            self.conv = operations.conv_nd(dims, self.channels, self.out_channels, 3, padding=padding, dtype=dtype, device=device)

    def forward(self, x):
        assert x.shape[1] == self.channels
        if self.dims == 3:
            x = F.interpolate(x, (x.shape[2], x.shape[3] * 2, x.shape[4] * 2), mode='nearest')
        else:

                 operations=ops
                 ):
        super(UNetModel, self).__init__()
        if num_heads == -1:
            assert num_head_channels != -1, 'Either num_heads or num_head_channels has to be set'
        if num_head_channels == -1:
            assert num_heads != -1, 'Either num_heads or num_head_channels has to be set'

        self.in_channels = in_channels
        self.model_channels = model_channels

                patch = transformer_patches["input_block_patch_after_skip"]
                for p in patch:
                    h = p(h, transformer_options)

        if features_adapter is not None:
            assert len(features_adapter) == adapter_idx, 'Wrong features_adapter'

        transformer_options["block"] = ("middle", 0)
        h = forward_timestep_embed(
            self.middle_block,

        self.has_pos_emb = position_infused_attn
        self.pia_pos_emb = FixedPositionalEmbedding(dim) if position_infused_attn else None
        self.rotary_pos_emb = always(None)

        assert rel_pos_num_buckets <= rel_pos_max_distance, 'number of relative position buckets must be less than the relative position max distance'
        self.rel_pos = None

        self.pre_norm = pre_norm

        if exists(custom_layers):
            layer_types = custom_layers
        elif exists(par_ratio):
            par_depth = depth * len(default_block)
            assert 1 < par_ratio <= par_depth, 'par ratio out of range'
            default_block = tuple(filter(not_equals('f'), default_block))
            par_attn = par_depth // par_ratio
            depth_cut = par_depth * 2 // 3  # 2 / 3 attention layer cutoff suggested by PAR paper
            par_width = (depth_cut + depth_cut // par_attn) // par_attn
            assert len(default_block) <= par_width, 'default block is too large for par_ratio'
            par_block = default_block + ('f',) * (par_width - len(default_block))
            par_head = par_block * par_attn
            layer_types = par_head + ('f',) * (par_depth - len(par_head))
        elif exists(sandwich_coef):
            assert sandwich_coef > 0 and sandwich_coef <= depth, 'sandwich coefficient should be less than or equal to the depth'
            layer_types = ('a',) * sandwich_coef + default_block * (depth - sandwich_coef) + ('f',) * sandwich_coef
        else:
            layer_types = default_block * depth

class Encoder(AttentionLayers):
    def __init__(self, **kwargs):
        assert 'causal' not in kwargs, 'cannot set causality on encoder'
        super().__init__(causal=False, **kwargs)

                 tie_embedding=False,
                 use_pos_emb=True
                 ):
        super().__init__()
        assert isinstance(attn_layers, AttentionLayers), 'attention layers must be one of Encoder or Decoder'

        dim = attn_layers.dim
        emb_dim = default(emb_dim, dim)

import numpy as np
import comfy.utils, comfy.sample, comfy.samplers, comfy.controlnet, comfy.model_base, comfy.model_management
try:
    import comfy.sampler_helpers, comfy.supported_models
except ImportError:
    pass
from comfy.sd import CLIP, VAE
from comfy.model_patcher import ModelPatcher
from comfy_extras.chainner_models import model_loading

    def INPUT_TYPES(s):
        max_float_value = 1.95
        prompt_path = os.path.join(RESOURCES_DIR, 'portrait_prompt.json')
        if not os.path.exists(prompt_path):
            response = urlopen('https://raw.githubusercontent.com/yolain/ComfyUI-Easy-Use/main/resources/portrait_prompt.json')
            temp_prompt = json.loads(response.read())
            prompt_serialized = json.dumps(temp_prompt, indent=4)
            with open(prompt_path, "w") as f:

    def hydit_pipeloader(self, ckpt_name, model_name, vae_name, clip_name, mt5_name, device, dtype, resolution, empty_latent_width, empty_latent_height, positive, negative, batch_size, prompt=None, my_unique_id=None):
        dtype = string_to_dtype(dtype, "text_encoder")
        if device == "cpu":
            assert dtype in [None, torch.float32, torch.bfloat16], \
                f"Can't use dtype '{dtype}' with CPU! Set dtype to 'default' or 'bf16'."

        # Clean models from loaded_objects
        easyCache.update_loaded_objects(prompt)

def flip_back(output_flipped, matched_parts):
    '''
    output_flipped: numpy.ndarray(batch_size, num_joints, height, width)
    '''
    assert output_flipped.ndim == 4, \
        'output_flipped should be [batch_size, num_joints, height, width]'

    output_flipped = output_flipped[:, :, :, ::-1]

class VAEEncodeArgMax(VAEEncode):
    def encode(self, vae, pixels):
        assert isinstance(
            vae.first_stage_model, AutoencoderKL
        ), "ArgMax only supported for AutoencoderKL"
        original_sample_mode = vae.first_stage_model.regularization.sample
        vae.first_stage_model.regularization.sample = False
        ret = super().encode(vae, pixels)

    def apply(self, ic_model_path, model: ModelPatcher, c_concat: dict, ic_model=None) -> Tuple[ModelPatcher]:
        try:
            ModelPatcher.calculate_weight = calculate_weight_adjust_channel(ModelPatcher.calculate_weight)
        except Exception:
            pass

        device = comfy.model_management.get_torch_device()
        dtype = comfy.model_management.unet_dtype()

        if history[0]["role"] == "system" and "tools" in history[0]:
            content = "\n".join(content.split("\n")[1:-1])

            def tool_call(**kwargs):
                return kwargs

            parameters = eval(content)
            content = {"name": metadata.strip(), "parameters": parameters}
        else:
            content = {"name": metadata.strip(), "content": content}

def compress_int4_weight(weight: torch.Tensor):  # (n, m)
    with torch.cuda.device(weight.device):
        n, m = weight.size(0), weight.size(1)
        assert m % 2 == 0
        m = m // 2
        out = torch.empty(n, m, dtype=torch.int8, device="cuda")
        stream = torch.cuda.current_stream()

    return out


def extract_weight_to_half(weight: torch.Tensor, scale_list: torch.Tensor, source_bit_width: int):
    assert scale_list.dtype in [torch.half, torch.bfloat16]
    assert weight.dtype in [torch.int8]
    if source_bit_width == 8:
        return weight.to(scale_list.dtype) * scale_list[:, None]
    elif source_bit_width == 4:

        func = (
            kernels.int4WeightExtractionHalf if scale_list.dtype == torch.half else kernels.int4WeightExtractionBFloat16
        )
    else:
        assert False, "Unsupported bit-width"

    with torch.cuda.device(weight.device):
        n, m = weight.size(0), weight.size(1)

class SPTokenizer:
    def __init__(self, model_path: str):
        # reload tokenizer
        assert os.path.isfile(model_path), model_path
        self.sp_model = SentencePieceProcessor(model_file=model_path)

        # BOS / EOS token IDs
        self.n_words: int = self.sp_model.vocab_size()
        self.bos_id: int = self.sp_model.bos_id()
        self.eos_id: int = self.sp_model.eos_id()
        self.pad_id: int = self.sp_model.unk_id()
        assert self.sp_model.vocab_size() == self.sp_model.get_piece_size()

        role_special_tokens = ["<|system|>", "<|user|>", "<|assistant|>", "<|observation|>"]
        special_tokens = ["[MASK]", "[gMASK]", "[sMASK]", "sop", "eop"] + role_special_tokens

        else:
            return self.sp_model.EncodeAsPieces(s)

    def encode(self, s: str, bos: bool = False, eos: bool = False) -> List[int]:
        assert type(s) is str
        t = self.sp_model.encode(s)
        if bos:
            t = [self.bos_id] + t

    def get_command(self, token):
        if token in self.special_tokens:
            return self.special_tokens[token]
        assert token in self.tokenizer.special_tokens, f"{token} is not a special token for {self.name}"
        return self.tokenizer.special_tokens[token]

    @property

        prefix_tokens = [self.get_command("[gMASK]"), self.get_command("sop")]
        return prefix_tokens

    def build_single_message(self, role, metadata, message):
        assert role in ["system", "user", "assistant", "observation"], role
        role_tokens = [self.get_command(f"<|{role}|>")] + self.tokenizer.encode(f"{metadata}\n")
        message_tokens = self.tokenizer.encode(message)
        tokens = role_tokens + message_tokens

            return_attention_mask:
                (optional) Set to False to avoid returning attention mask (default: set to model specifics)
        """
        # Load from model defaults
        assert self.padding_side == "left"

        required_input = encoded_inputs[self.model_input_names[0]]
        seq_length = len(required_input)

    try:
        from accelerate import init_empty_weights
        from accelerate.utils import set_module_tensor_to_device
        is_accelerate_available = True
    except ImportError:
        pass

    from contextlib import nullcontext
    with (init_empty_weights() if is_accelerate_available else nullcontext()):

    # Function to randomly select an option from the brackets
    def choose_random_option(match):
        options = match.group(1).split('|')
        return random.choice(options)

    prompt = re.sub(r'\{([^{}]*)\}', choose_random_option, prompt)

            image = image.movedim(-1, 1)

        try:
            ModelPatcher.calculate_weight = calculate_weight_adjust_channel(ModelPatcher.calculate_weight)
        except Exception:
            pass

        if method in [LayerMethod.FG_ONLY_CONV, LayerMethod.FG_ONLY_ATTN] and sd_version == 'sd1':
            self.frames = 1

    def decode_pixel(
        self, pixel: torch.TensorType, latent: torch.TensorType
    ) -> torch.TensorType:
        # pixel.shape = [B, C=3, H, W]
        assert pixel.shape[1] == 3
        pixel_device = pixel.device
        pixel_dtype = pixel.dtype

        latent = latent.to(device=self.load_device, dtype=self.dtype)
        # y.shape = [B, C=4, H, W]
        y = self.estimate_augmented(pixel, latent)
        y = y.clip(0, 1)
        assert y.shape[1] == 4
        # Restore image to original device of input image.
        return y.to(pixel_device, dtype=pixel_dtype)

    show_index: bool = False,
    follow_symlinks: bool = False,
    append_version: bool = False,
) -> web.AbstractResource:
    assert prefix.startswith("/")
    if prefix.endswith("/"):
        prefix = prefix[:-1]
    resource = LimitResource(

        eps (float): A small value added to the variance to avoid
            divide-by-zero. Default: 1e-5.
    """
    size = feat.size()
    assert len(size) == 4, 'The input feature should be 4D tensor.'
    b, c = size[:2]
    feat_var = feat.view(b, c, -1).var(dim=2) + eps
    feat_std = feat_var.sqrt().view(b, c, 1, 1)

        mimic_scale = self.interpret_scale(self.mimic_scale, self.mimic_mode, self.mimic_scale_min)
        cfg_scale = self.interpret_scale(cfg_scale, self.cfg_mode, self.cfg_scale_min)
        # uncond shape is (batch, 4, height, width)
        conds_per_batch = cond.shape[0] / uncond.shape[0]
        assert conds_per_batch == int(conds_per_batch), "Expected # of conds per batch to be constant across batches"
        cond_stacked = cond.reshape((-1, int(conds_per_batch)) + uncond.shape[1:])

        ### Normal first part of the CFG Scale logic, basically

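# Shape walk-through (illustrative values only, not from the source): with two
# conds stacked per image and a batch of two images, the reshape groups the
# conds back per batch element.
import torch

cond = torch.randn(4, 4, 64, 64)     # 2 conds per batch item, stacked along dim 0
uncond = torch.randn(2, 4, 64, 64)
conds_per_batch = cond.shape[0] / uncond.shape[0]                      # 2.0
assert conds_per_batch == int(conds_per_batch)
cond_stacked = cond.reshape((-1, int(conds_per_batch)) + uncond.shape[1:])
print(cond_stacked.shape)            # torch.Size([2, 2, 4, 64, 64])
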
        elif self == ResizeMode.INNER_FIT:
            return 1
        elif self == ResizeMode.OUTER_FIT:
            return 2
        assert False, "NOTREACHED"

    from comfy.sd3_clip import SD3Tokenizer, SD3ClipModel
    import copy

    clip = sd3_clip.clone()
    assert clip.cond_stage_model.t5xxl is not None, "CLIP must have T5 loaded!"

    # remove transformer
    transformer = clip.cond_stage_model.t5xxl.transformer

    def getAPIKeys(self):
        if os.path.isfile(config_path):
            with open(config_path, 'r') as f:
                data = yaml.load(f, Loader=yaml.FullLoader)
            if not data:
                data = {'STABILITY_API_KEY': default_key, 'STABILITY_API_DEFAULT': 0}
                with open(config_path, 'w') as f:

        if len(api_keys) > 0:
            self.api_keys = api_keys
            # load and save the yaml file
            with open(config_path, 'r') as f:
                data = yaml.load(f, Loader=yaml.FullLoader)
            data['STABILITY_API_KEY'] = api_keys
            with open(config_path, 'w') as f:
                yaml.dump(data, f)

        if current is not None:
            self.api_current = current
            # load and save the yaml file
            with open(config_path, 'r') as f:
                data = yaml.load(f, Loader=yaml.FullLoader)
            data['STABILITY_API_DEFAULT'] = current
            with open(config_path, 'w') as f:
                yaml.dump(data, f)

            image_byte = pil2byte(pil_image)
            files = {"image": ("output.png", image_byte, 'image/png')}
            data['strength'] = strength

        response = requests.post(
            url,
            headers={"authorization": f"{api_key}", "accept": "application/json"},
            files=files,
            data=data,
        )
        if response.status_code == 200:
            PromptServer.instance.send_sync('stable-diffusion-api-generate-succeed', {"model": model})
            json_data = response.json()

        name = self.api_keys[self.api_current]['name']
        if cache and name in self.user_info:
            return self.user_info[name]
        else:
            response = requests.get(url, headers={"Authorization": f"Bearer {api_key}"})
            if response.status_code == 200:
                user_info = response.json()
                self.user_info[name] = user_info

    # get user balance
    async def getUserBalance(self):
        url = f"{self.api_url}/v1/user/balance"
        api_key = self.api_keys[self.api_current]['key']
        response = requests.get(url, headers={
            "Authorization": f"Bearer {api_key}"
        })
        if response.status_code == 200:
            return response.json()
        else:

        except:
            run_install = False

        if run_install:
            import subprocess
            package_command = (package + '==' + v) if v is not None else package
            PromptServer.instance.send_sync("easyuse-toast", {'content': f"Installing {package_command}...", 'duration': 5000})
            result = subprocess.run([sys.executable, '-s', '-m', 'pip', 'install', package_command], capture_output=True, text=True)
            if result.returncode == 0:
                PromptServer.instance.send_sync("easyuse-toast", {'content': f"{package} installed successfully", 'type': 'success', 'duration': 5000})
                print(f"Package {package} installed successfully")

                    easy_wildcard_dict[key] = lines
            elif file.endswith('.yaml'):
                file_path = os.path.join(root, file)
                with open(file_path, 'r') as f:
                    yaml_data = yaml.load(f, Loader=yaml.FullLoader)

                for k, v in yaml_data.items():
                    read_wildcard(k, v)

    if select_range is None:
        select_count = 1
    else:
        select_count = random.randint(select_range[0], select_range[1])

    if select_count > len(options):
        selected_items = options
    else:
        selected_items = random.choices(options, weights=normalized_probabilities, k=select_count)
        selected_items = set(selected_items)

        try_count = 0
        while len(selected_items) < select_count and try_count < 10:
            remaining_count = select_count - len(selected_items)
            additional_items = random.choices(options, weights=normalized_probabilities, k=remaining_count)
            selected_items |= set(additional_items)
            try_count += 1

    for match in matches:
        keyword = match.lower()
        keyword = wildcard_normalize(keyword)
        if keyword in easy_wildcard_dict:
            replacement = random.choice(easy_wildcard_dict[keyword])
            replacements_found = True
            string = string.replace(f"__{match}__", replacement, 1)
        elif '*' in keyword:

                    total_patterns += v
                    found = True

            if found:
                replacement = random.choice(total_patterns)
                replacements_found = True
                string = string.replace(f"__{match}__", replacement, 1)
        elif '/' not in keyword:

    return True, None, None


def error_if_mismatched_list_args(args: Dict[str, List[Any]]) -> None:
    is_valid, failed_key1, failed_key2 = validate_list_args(args)
    if not is_valid:
        assert failed_key1 is not None
        assert failed_key2 is not None
        raise ValueError(
            f"Mismatched list inputs received. {failed_key1}({len(args[failed_key1])}) !== {failed_key2}({len(args[failed_key2])})"
        )

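# Usage sketch for the helper above (hypothetical argument names; behaviour
# inferred from the function name and error message): same-length lists pass,
# mismatched lengths raise ValueError.
error_if_mismatched_list_args({"images": [1, 2, 3], "masks": [4, 5, 6]})      # no error
try:
    error_if_mismatched_list_args({"images": [1, 2, 3], "masks": [4, 5]})     # lengths differ
except ValueError as err:
    print(err)  # e.g. "Mismatched list inputs received. images(3) !== masks(2)"
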
            self.base_value -= 1
            if self.base_value < 0:
                self.base_value = 1125899906842624
        elif self.action == SGmode.RAND:
            self.base_value = random.randint(0, 1125899906842624)

        return seed

            value = value - 1
            if value < 0:
                value = 1125899906842624
        elif action == 'randomize' or action == 'randomize for each node':
            value = random.randint(0, 1125899906842624)

        if seed_is_global:
            v['inputs']['value'] = value
