def _random_vector(self, dimension):
    if self.is_training:
        lower_limit = math.sqrt(self.min_ratio)
        upper_limit = math.sqrt(self.max_ratio)
        mask_side = round((random.random() * (upper_limit - lower_limit) + lower_limit) * dimension)
        u = random.randint(0, dimension - mask_side - 1)
        v = u + mask_side
    else:
        margin = (math.sqrt(self.default_ratio) / 2) * dimension
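# Usage sketch of the sampling rule above (standalone; names are hypothetical):
# the side is drawn uniformly in [sqrt(min_ratio), sqrt(max_ratio)] as a
# fraction of `dimension`, so the masked *area* ratio of the resulting square
# stays within [min_ratio, max_ratio].
import math
import random

def sample_span(dimension, min_ratio=0.1, max_ratio=0.5):
    lower, upper = math.sqrt(min_ratio), math.sqrt(max_ratio)
    side = round((random.random() * (upper - lower) + lower) * dimension)
    u = random.randint(0, dimension - side - 1)
    return u, u + side

u, v = sample_span(256)
assert 0 <= u < v < 256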
self.max_padding_percent = max_padding_percent
self.probs = [left_padding_prob, top_padding_prob, right_padding_prob, bottom_padding_prob]
self.is_fixed_randomness = is_fixed_randomness

assert self.min_padding_percent <= self.max_padding_percent
assert self.max_padding_percent > 0
assert all(0 <= x <= 1 for x in (self.min_padding_percent, self.max_padding_percent)), \
    "Padding percentage should be in [0, 1]"
assert sum(self.probs) > 0, f"At least one of the padding probs should be greater than 0 - {self.probs}"
assert all(0 <= x <= 1 for x in self.probs), f"At least one of the padding probs is not in [0, 1] - {self.probs}"
if len([x for x in self.probs if x > 0]) == 1:
    LOGGER.warning(f"Only one padding prob is greater than zero - {self.probs}, "
                   f"so the outpainting masks will always be on the same side")
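# Sketch of one way a side could be drawn from self.probs above (hypothetical;
# the class's own drawing code is not shown). Index order follows the list:
# left, top, right, bottom.
import numpy as np

probs = [0.5, 0.0, 0.5, 0.0]   # pad only left or right
assert sum(probs) > 0 and all(0 <= p <= 1 for p in probs)
norm = [p / sum(probs) for p in probs]
side = np.random.choice(4, p=norm)   # 0=left, 1=top, 2=right, 3=bottom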
@staticmethod
def _img2rs(img):
    # derive a 32-bit seed from the image contents; int(hexdigest, 16) is
    # stable across runs, unlike the built-in hash(), which PYTHONHASHSEED
    # randomises per process
    arr = np.ascontiguousarray(img.astype(np.uint8))
    str_hash = hashlib.sha1(arr).hexdigest()
    res = int(str_hash, 16) % (2 ** 32)
    return res
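# Quick determinism check for the seed derivation above: the SHA-1 digest of
# the raw bytes is stable across interpreter runs, so equal images always map
# to equal RandomState seeds.
import hashlib
import numpy as np

img = np.zeros((4, 4), np.uint8)
seed_a = int(hashlib.sha1(np.ascontiguousarray(img)).hexdigest(), 16) % (2 ** 32)
seed_b = int(hashlib.sha1(img.copy()).hexdigest(), 16) % (2 ** 32)
assert seed_a == seed_b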
mask = np.zeros((self.img_h, self.img_w), np.float32)
at_least_one_mask_applied = False

if self.is_fixed_randomness:
    assert raw_image is not None, "Can't calculate hash on raw_image=None"
    rs = self._img2rs(raw_image)
    self.rnd = np.random.RandomState(rs)
else:
def __call__(self, img, iter_i=None, raw_image=None):
    kind = np.random.choice(len(self.probas), p=self.probas)
    gen = self.gens[kind]
    result = gen(img, iter_i=iter_i, raw_image=raw_image)
    if self.invert_proba > 0 and random.random() < self.invert_proba:
        result = 1 - result
    return result
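# Minimal sketch of the mixing policy above with stand-in generators
# (the real self.gens entries are the mask generators defined in this module):
import random
import numpy as np

gens = [lambda img, **kw: np.zeros((4, 4), np.float32),
        lambda img, **kw: np.ones((4, 4), np.float32)]
probas = [0.7, 0.3]
invert_proba = 0.5

kind = np.random.choice(len(probas), p=probas)
result = gens[kind](None)
if invert_proba > 0 and random.random() < invert_proba:
    result = 1 - result   # swap masked and unmasked regions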
58 """ 59 raise NotImplemented() 60 61 def interpolate_mask(self, mask, shape): 62 assert mask is not None 63 assert self.allow_scale_mask or shape == mask.shape[-2:] 64 if shape != mask.shape[-2:] and self.allow_scale_mask: 65 if self.mask_scale_mode == 'maxpool':
59 raise NotImplemented() 60 61 def interpolate_mask(self, mask, shape): 62 assert mask is not None 63 assert self.allow_scale_mask or shape == mask.shape[-2:] 64 if shape != mask.shape[-2:] and self.allow_scale_mask: 65 if self.mask_scale_mode == 'maxpool': 66 mask = F.adaptive_max_pool2d(mask, shape)
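# Why 'maxpool' for mask rescaling (sketch): adaptive max pooling keeps a
# pixel masked if any covered input pixel was masked, so thin masked regions
# survive downscaling instead of fading to fractional values as they would
# under bilinear interpolation.
import torch
import torch.nn.functional as F

mask = torch.zeros(1, 1, 8, 8)
mask[:, :, 3, :] = 1.0                        # one-pixel-wide masked row
pooled = F.adaptive_max_pool2d(mask, (4, 4))
assert pooled.max().item() == 1.0             # the thin row is preserved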
self.gp_coef = gp_coef
self.weight = weight
# use_unmasked_for_discr => use_unmasked_for_gen;
# otherwise we would teach only the discriminator to pay attention to very small differences
assert use_unmasked_for_gen or (not use_unmasked_for_discr)
# mask_as_fake_target => use_unmasked_for_discr:
# if we don't care about unmasked regions at all,
# then it doesn't matter whether mask_as_fake_target is true or false
assert use_unmasked_for_discr or (not mask_as_fake_target)
self.use_unmasked_for_gen = use_unmasked_for_gen
self.use_unmasked_for_discr = use_unmasked_for_discr
self.mask_as_fake_target = mask_as_fake_target
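# The two asserts above encode implications, A => B written as (not A) or B;
# the same check as a plain-boolean sketch:
def implies(a, b):
    return (not a) or b

use_unmasked_for_gen, use_unmasked_for_discr, mask_as_fake_target = True, True, False
assert implies(use_unmasked_for_discr, use_unmasked_for_gen)
assert implies(mask_as_fake_target, use_unmasked_for_discr)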
predict: (n, c, h, w)
target: (n, 1, h, w)
"""
target = target.long()
assert not target.requires_grad
assert predict.dim() == 4, f"{predict.size()}"
assert target.dim() == 4, f"{target.size()}"
assert predict.size(0) == target.size(0), f"{predict.size(0)} vs {target.size(0)}"
assert target.size(1) == 1, f"{target.size(1)}"
assert predict.size(2) == target.size(2), f"{predict.size(2)} vs {target.size(2)}"
assert predict.size(3) == target.size(3), f"{predict.size(3)} vs {target.size(3)}"
target = target.squeeze(1)
n, c, h, w = predict.size()
target_mask = (target >= 0) & (target != self.ignore_label)
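# Hedged usage sketch of the shape contract above: predict holds (n, c, h, w)
# logits, target holds (n, 1, h, w) integer labels, and pixels equal to
# ignore_label (value assumed here) are excluded via target_mask.
import torch

n, c, h, w = 2, 5, 8, 8
predict = torch.randn(n, c, h, w)
target = torch.randint(0, c, (n, 1, h, w))
ignore_label = 255                             # assumed value
t = target.squeeze(1)
target_mask = (t >= 0) & (t != ignore_label)
assert target_mask.shape == (n, h, w)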
             dilation=1, groups=1, bias=False, enable_lfu=True,
             padding_type='reflect', gated=False, **spectral_kwargs):
    super(FFC, self).__init__()

    assert stride in (1, 2), "Stride should be 1 or 2."
    self.stride = stride

    in_cg = int(in_channels * ratio_gin)
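# Sketch of the ratio_gin channel split started above: input channels are
# divided between the global (spectral) and local branches; the local count is
# assumed to be the complement, as is usual in FFC-style code.
in_channels, ratio_gin = 64, 0.75
in_cg = int(in_channels * ratio_gin)   # global-branch channels: 48
in_cl = in_channels - in_cg            # local-branch channels: 16
assert in_cg + in_cl == in_channels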
class ConcatTupleLayer(nn.Module):
    def forward(self, x):
        assert isinstance(x, tuple)
        x_l, x_g = x
        assert torch.is_tensor(x_l) or torch.is_tensor(x_g)
        if not torch.is_tensor(x_g):
            return x_l
        return torch.cat(x, dim=1)
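# Usage sketch for ConcatTupleLayer (the class defined just above): FFC blocks
# pass (local, global) feature tuples; the layer concatenates them on channels,
# or passes the local part through when the global branch is absent (a
# non-tensor placeholder such as 0).
import torch

layer = ConcatTupleLayer()
x_l, x_g = torch.randn(1, 16, 8, 8), torch.randn(1, 48, 8, 8)
assert layer((x_l, x_g)).shape == (1, 64, 8, 8)
assert layer((x_l, 0)).shape == (1, 16, 8, 8)   # global branch disabled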
             up_norm_layer=nn.BatchNorm2d, up_activation=nn.ReLU(True),
             init_conv_kwargs={}, downsample_conv_kwargs={}, resnet_conv_kwargs={},
             spatial_transform_layers=None, spatial_transform_kwargs={},
             add_out_act=True, max_features=1024, out_ffc=False, out_ffc_kwargs={}):
    assert n_blocks >= 0
    super().__init__()

    model = [nn.ReflectionPad2d(3),
             shared_weights=False, padding=1, min_dilation=1, shuffle_in_channels=False,
             use_depthwise=False, **kwargs):
    super().__init__()
    convs = []
    self.equal_dim = equal_dim
    assert comb_mode in ('cat_out', 'sum', 'cat_in', 'cat_both'), comb_mode
    if comb_mode in ('cat_out', 'cat_both'):
        self.cat_out = True
        if equal_dim:
            assert out_dim % dilation_num == 0
            out_dims = [out_dim // dilation_num] * dilation_num
            self.index = sum([[i + j * out_dims[0] for j in range(dilation_num)]
                              for i in range(out_dims[0])], [])
        else:
            # ... (excerpt gap: unequal-split setup of out_dims, index, starts and lengths) ...
            for j in range(dilation_num):
                index += list(range(starts[j], starts[j] + lengths[j]))
                starts[j] += lengths[j]
            self.index = index
            assert len(index) == out_dim
        self.out_dims = out_dims
    else:
        self.cat_out = False
        self.out_dims = [out_dim] * dilation_num

    if comb_mode in ('cat_in', 'cat_both'):
        if equal_dim:
            assert in_dim % dilation_num == 0
            in_dims = [in_dim // dilation_num] * dilation_num
        else:
            in_dims = [in_dim // 2 ** (i + 1) for i in range(dilation_num - 1)]
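# Sketch of the equal_dim bookkeeping above: with out_dim=8 and dilation_num=2,
# each dilation branch owns 4 output channels and self.index interleaves them
# so channels alternate between branches after concatenation.
out_dim, dilation_num = 8, 2
out_dims = [out_dim // dilation_num] * dilation_num
index = sum([[i + j * out_dims[0] for j in range(dilation_num)]
             for i in range(out_dims[0])], [])
assert index == [0, 4, 1, 5, 2, 6, 3, 7]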
class ResNetHead(nn.Module):
    def __init__(self, input_nc, ngf=64, n_downsampling=3, n_blocks=9, norm_layer=nn.BatchNorm2d,
                 padding_type='reflect', conv_kind='default', activation=nn.ReLU(True)):
        assert n_blocks >= 0
        super(ResNetHead, self).__init__()

        conv_layer = get_conv_block_ctor(conv_kind)
def __init__(self, output_nc, ngf=64, n_downsampling=3, n_blocks=9, norm_layer=nn.BatchNorm2d,
             padding_type='reflect', conv_kind='default', activation=nn.ReLU(True),
             up_norm_layer=nn.BatchNorm2d, up_activation=nn.ReLU(True), add_out_act=False,
             out_extra_layers_n=0, add_in_proj=None):
    assert n_blocks >= 0
    super(ResNetTail, self).__init__()

    mult = 2 ** n_downsampling
True: Only the most HR output
False: List of outputs of different resolutions from HR to LR
"""
if smallest_scales_num is None:
    assert len(self.heads) == len(ms_inputs), (len(self.heads), len(ms_inputs), smallest_scales_num)
    smallest_scales_num = len(self.heads)
else:
    assert smallest_scales_num == len(ms_inputs) <= len(self.heads), \
        (len(self.heads), len(ms_inputs), smallest_scales_num)

cur_heads = self.heads[-smallest_scales_num:]
ms_features = [cur_head(cur_inp) for cur_head, cur_inp in zip(cur_heads, ms_inputs)]
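# Sketch of the scale-selection rule above: heads are ordered HR -> LR, so when
# fewer inputs than heads arrive, the last len(ms_inputs) heads (the smallest
# scales) are the ones used.
heads = ['head_s0', 'head_s1', 'head_s2']   # HR -> LR
ms_inputs = ['x_s1', 'x_s2']                # only the two smallest scales
cur_heads = heads[-len(ms_inputs):]
assert cur_heads == ['head_s1', 'head_s2']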
:param smallest_scales_num: int or None, number of smallest scales to take at input
:return: List of pairs (prediction, features) for different resolutions from HR to LR
"""
if smallest_scales_num is None:
    assert len(self.ms_impl) == len(ms_inputs), (len(self.ms_impl), len(ms_inputs), smallest_scales_num)
    smallest_scales_num = len(self.ms_impl)
else:
    assert smallest_scales_num == len(ms_inputs) <= len(self.ms_impl), \
        (len(self.ms_impl), len(ms_inputs), smallest_scales_num)

return [cur_discr(cur_input) for cur_discr, cur_input in zip(self.ms_impl[-smallest_scales_num:], ms_inputs)]
             deconv_kind='convtranspose', activation=nn.ReLU(True),
             up_norm_layer=nn.BatchNorm2d, affine=None, up_activation=nn.ReLU(True),
             add_out_act=True, max_features=1024, multidilation_kwargs={},
             ffc_positions=None, ffc_kwargs={}):
    assert n_blocks >= 0
    super().__init__()

    conv_layer = get_conv_block_ctor(conv_kind)
             resnet_block_kind='multidilatedresnetblock',
             resnet_conv_kind='multidilated',
             resnet_dilation=1,
             multidilation_kwargs={}):
    assert n_blocks >= 0
    super().__init__()

    conv_layer = get_conv_block_ctor(conv_kind)
             add_out_act=True,
             max_features=1024, is_resblock_depthwise=False,
             ffc_positions=None, ffc_kwargs={}, dilation=1, second_dilation=None,
             dilation_block_kind='simple', multidilation_kwargs={}):
    assert n_blocks >= 0
    super().__init__()

    conv_layer = get_conv_block_ctor(conv_kind)
if __name__ == '__main__':
    layer = LearnableSpatialTransformWrapper(nn.Identity())
    x = torch.arange(2 * 3 * 15 * 15).view(2, 3, 15, 15).float()
    y = layer(x)
    assert x.shape == y.shape
    assert torch.allclose(x[:, :, 1:, 1:][:, :, :-1, :-1], y[:, :, 1:, 1:][:, :, :-1, :-1])
    print('all ok')
class LadderRamp:
    def __init__(self, start_iters, values):
        self.start_iters = start_iters
        self.values = values
        assert len(values) == len(start_iters) + 1, (len(values), len(start_iters))

    def __call__(self, i):
        segment_i = bisect.bisect_right(self.start_iters, i)
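# Usage sketch for the piecewise-constant schedule above, assuming __call__
# finishes with `return self.values[segment_i]`: values[k] is active on
# iterations in [start_iters[k-1], start_iters[k]), with values[0] before the
# first boundary.
import bisect

start_iters = [1000, 3000]
values = [0.0, 0.5, 1.0]
for i, expected in [(0, 0.0), (999, 0.0), (1000, 0.5), (2999, 0.5), (3000, 1.0)]:
    assert values[bisect.bisect_right(start_iters, i)] == expected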
# Trainer sets MASTER_PORT, NODE_RANK, LOCAL_RANK, WORLD_SIZE
parent_cwd = os.environ.get('TRAINING_PARENT_WORK_DIR', None)
has_parent = parent_cwd is not None
has_rank = get_has_ddp_rank()
assert has_parent == has_rank, f'Inconsistent state: has_parent={has_parent}, has_rank={has_rank}'

if has_parent:
    # we are in the worker
def handle_ddp_parent_process():
    parent_cwd = os.environ.get('TRAINING_PARENT_WORK_DIR', None)
    has_parent = parent_cwd is not None
    has_rank = get_has_ddp_rank()
    assert has_parent == has_rank, f'Inconsistent state: has_parent={has_parent}, has_rank={has_rank}'

    if parent_cwd is None:
        os.environ['TRAINING_PARENT_WORK_DIR'] = os.getcwd()
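# Hedged sketch of what get_has_ddp_rank() might check (its body is not shown
# in this excerpt; the comment above says the trainer exports these variables
# to worker processes):
import os

def has_ddp_rank_sketch():
    return all(v in os.environ for v in ('MASTER_PORT', 'NODE_RANK', 'LOCAL_RANK', 'WORLD_SIZE'))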
self.controlnet_info = {}

def post(self, uri, data):
    url = f'http://{self.ipaddr}:{self.port}/{uri}'
    # json= encodes the body and sets the Content-Type: application/json header
    return requests.post(url, json=data)

def get(self, uri):
    url = f'http://{self.ipaddr}:{self.port}/{uri}'
    resp = requests.get(url)
    resp.raise_for_status()
    return resp.json()
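# Usage sketch for the two helpers above (the endpoint path is hypothetical;
# substitute a route actually exposed by the target server):
# client.post('some/endpoint', {'key': 'value'})   # POST with a JSON body
# info = client.get('some/endpoint')               # GET; raises on HTTP errors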
def get_all_info(self):
    try:
        self.update_all_info()
    except Exception:
        # best-effort refresh; the API server may be unreachable
        pass

def update_all_info(self):
    resources = [
filename = os.path.basename(f)
split_name = filename[len(prefix) + 1:].replace('.', '_').split('_')
try:
    file_sequence = int(split_name[0])
except ValueError:
    # skip files whose name doesn't carry a leading sequence number
    continue
if file_sequence > sequence:
    sequence = file_sequence
return sequence + 1
            'image': ('IMAGE',),
            'lora': ('BMAB lora',),
        }
    }
except Exception:
    pass

return {
    'required': {
    pred = model(image, conf=confidence, device='')
    boxes = pred[0].boxes.xyxy.cpu().numpy().tolist()
    confs = pred[0].boxes.conf.tolist()
except Exception:
    # boxes/confs are assumed to be initialised to empty lists before this try block
    pass
utils.torch_gc()
return boxes, confs
    return self.last_prompt

def prompt(self, prompt: str, text: str, api_key, random_seed=None, **kwargs):
    # only draw a fresh seed when the caller did not supply one
    if random_seed is None:
        random_seed = random.randint(0, 65535)
    if prompt.find('__prompt__') >= 0:
        if self.last_text != text:
            random_seed = random.randint(0, 65535)
            self.last_text = text
            self.get_prompt(text, api_key)
        if self.last_prompt is None:
    pred = model(image, conf=confidence, device=utils.get_device())
    boxes = pred[0].boxes.xyxy.cpu().numpy().tolist()
    confs = pred[0].boxes.conf.tolist()
except Exception:
    # boxes/confs are assumed to be initialised to empty lists before this try block
    pass
del model
utils.torch_gc()
return boxes, confs
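# Usage sketch, assuming the block above sits inside a detector helper (call it
# `predict(image, confidence)`, a hypothetical name) that initialises
# boxes/confs to empty lists before the try:
# boxes, confs = predict(pil_image, confidence=0.35)
# for (x1, y1, x2, y2), score in zip(boxes, confs):
#     ...  # crop or draw using the box coordinates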