import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)

import subprocess
import os, sys
try:
    from pkg_resources import get_distribution as distributions
except:
models_dir_path = os.path.join(models_dir, "insightface")
model_path = os.path.join(models_dir_path, model_name)

def run_pip(*args):
    subprocess.run([sys.executable, "-m", "pip", "install", "--no-warn-script-location", *args])

def is_installed(
        package: str, version: str = None, strict: bool = True
        print(f"Status: {e}")
        return False

def download(url, path, name):
    request = urllib.request.urlopen(url)
    total = int(request.headers.get('Content-Length', 0))
    with tqdm(total=total, desc=f'[ReActor] Downloading {name} to {path}', unit='B', unit_scale=True, unit_divisor=1024) as progress:
        urllib.request.urlretrieve(url, path, reporthook=lambda count, block_size, total_size: progress.update(block_size))

if not os.path.exists(models_dir_path):
    os.makedirs(models_dir_path)
    mask = torch.nn.functional.interpolate(mask.unsqueeze(1), size=(H, W), mode='nearest')[:,0,:,:]
    MB, _, _ = mask.shape

    if MB < B:
        assert(B % MB == 0)
        mask = mask.repeat(B // MB, 1, 1)

    # masks_to_boxes errors if the tensor is all zeros, so we'll add a single pixel and zero it out at the end
    MB = mask.shape[0]
    PB = image_to_paste.shape[0]

    if B < PB:
        assert(PB % B == 0)
        image_base = image_base.repeat(PB // B, 1, 1, 1)
    B, H, W, C = image_base.shape
    if MB < B:
        assert(B % MB == 0)
        mask = mask.repeat(B // MB, 1, 1)
    elif B < MB:
        assert(MB % B == 0)
        image_base = image_base.repeat(MB // B, 1, 1, 1)
    if PB < B:
        assert(B % PB == 0)
        image_to_paste = image_to_paste.repeat(B // PB, 1, 1, 1)

    mask = torch.nn.functional.interpolate(mask.unsqueeze(1), size=(H, W), mode='nearest')[:,0,:,:]
    Returns:
        Tensor: Warped image or feature map.
    """
    assert x.size()[-2:] == flow.size()[1:3]
    _, _, h, w = x.size()
    # create mesh grid
    grid_y, grid_x = torch.meshgrid(torch.arange(0, h).type_as(x), torch.arange(0, w).type_as(x))
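For context, a minimal sketch of how a mesh grid plus flow is usually fed to grid_sample for warping; this is not the project's exact implementation, and the function name and [-1, 1] normalization are assumptions.

import torch
import torch.nn.functional as F

def simple_flow_warp(x, flow):
    # x: (n, c, h, w), flow: (n, h, w, 2) in pixel units -- a sketch of the usual pattern
    n, _, h, w = x.size()
    grid_y, grid_x = torch.meshgrid(torch.arange(h), torch.arange(w), indexing='ij')
    grid = torch.stack((grid_x, grid_y), dim=2).float().to(x.device)  # (h, w, 2), (x, y) order
    vgrid = grid.unsqueeze(0) + flow
    # scale grid coordinates to [-1, 1] as required by grid_sample
    vgrid_x = 2.0 * vgrid[..., 0] / max(w - 1, 1) - 1.0
    vgrid_y = 2.0 * vgrid[..., 1] / max(h - 1, 1) - 1.0
    vgrid_scaled = torch.stack((vgrid_x, vgrid_y), dim=3)
    return F.grid_sample(x, vgrid_scaled, mode='bilinear', padding_mode='zeros', align_corners=True)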
        Tensor: the pixel unshuffled feature.
    """
    b, c, hh, hw = x.size()
    out_channel = c * (scale**2)
    assert hh % scale == 0 and hw % scale == 0
    h = hh // scale
    w = hw // scale
    x_view = x.view(b, c, h, scale, w, scale)
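As a quick shape illustration (a hedged sketch assuming the usual permute/reshape finish of a pixel-unshuffle): spatial dimensions shrink by the scale while channels grow by scale squared.

import torch

x = torch.randn(1, 3, 8, 8)
scale = 2
b, c, hh, hw = x.size()
# (1, 3, 8, 8) -> (1, 12, 4, 4)
x_view = x.view(b, c, hh // scale, scale, hw // scale, scale)
out = x_view.permute(0, 1, 3, 5, 2, 4).reshape(b, c * scale**2, hh // scale, hw // scale)
print(out.shape)  # torch.Size([1, 12, 4, 4])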
        feats_ = feats_.view(n, t, -1, h, w)
        feats['spatial'] = [feats_[:, i, :, :, :] for i in range(0, t)]

        # compute optical flow using the low-res inputs
        assert lqs_downsample.size(3) >= 64 and lqs_downsample.size(4) >= 64, (
            'The height and width of low-res inputs must be at least 64, '
            f'but got {h} and {w}.')
        flows_forward, flows_backward = self.compute_flow(lqs_downsample)

        # feature propagation
        eps (float): A small value added to the variance to avoid
            divide-by-zero. Default: 1e-5.
    """
    size = feat.size()
    assert len(size) == 4, 'The input feature should be 4D tensor.'
    n, c = size[:2]
    feat_var = feat.view(n, c, -1).var(dim=2) + eps
    feat_std = feat_var.sqrt().view(n, c, 1, 1)
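These per-channel statistics are typically consumed by adaptive instance normalization; a minimal sketch of that use (the function name below is illustrative, not the project's API):

def adaptive_instance_normalization(content_feat, style_feat, eps=1e-5):
    # Normalize content features with their own mean/std, then re-scale and
    # shift with the style statistics (the standard AdaIN formula).
    n, c = content_feat.size()[:2]
    c_mean = content_feat.view(n, c, -1).mean(dim=2).view(n, c, 1, 1)
    c_std = (content_feat.view(n, c, -1).var(dim=2) + eps).sqrt().view(n, c, 1, 1)
    s_mean = style_feat.view(n, c, -1).mean(dim=2).view(n, c, 1, 1)
    s_std = (style_feat.view(n, c, -1).var(dim=2) + eps).sqrt().view(n, c, 1, 1)
    return (content_feat - c_mean) / c_std * s_std + s_mean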
    def __init__(self, num_in_ch, num_feat, input_size=128):
        super(VGGStyleDiscriminator, self).__init__()
        self.input_size = input_size
        assert self.input_size == 128 or self.input_size == 256, (
            f'input size must be 128 or 256, but received {input_size}')

        self.conv0_0 = nn.Conv2d(num_in_ch, num_feat, 3, 1, 1, bias=True)
        self.conv0_1 = nn.Conv2d(num_feat, num_feat, 4, 2, 1, bias=False)
        # activation function
        self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)

    def forward(self, x):
        assert x.size(2) == self.input_size, (f'Input size must be identical to input_size, but received {x.size()}.')

        feat = self.lrelu(self.conv0_0(x))
        feat = self.lrelu(self.bn0_1(self.conv0_1(feat)))  # output spatial size: /2
    def forward(self, x):
        b, t, c, h, w = x.size()
        if self.hr_in:
            assert h % 16 == 0 and w % 16 == 0, ('The height and width must be multiple of 16.')
        else:
            assert h % 4 == 0 and w % 4 == 0, ('The height and width must be multiple of 4.')

        x_center = x[:, self.center_frame_idx, :, :, :].contiguous()
    def __init__(self, config_text, norm_nc, label_nc):
        super().__init__()

        assert config_text.startswith('spade')
        parsed = re.search('spade(\\D+)(\\d)x\\d', config_text)
        param_free_norm_type = str(parsed.group(1))
        ks = int(parsed.group(2))
        self.normalize_input = normalize_input
        self.output_blocks = sorted(output_blocks)
        self.last_needed_block = max(output_blocks)

        assert self.last_needed_block <= 3, ('Last possible output block index is 3')

        self.blocks = nn.ModuleList()
        return flow

    def forward(self, ref, supp):
        assert ref.size() == supp.size()

        h, w = ref.size(2), ref.size(3)
        w_floor = math.floor(math.ceil(w / 32.0) * 32.0)
            else:  # used for encoder with different latent code for each layer
                latent = styles[0]
        elif len(styles) == 2:  # mixing noises
            if inject_index is None:
                inject_index = random.randint(1, self.num_latent - 1)
            latent1 = styles[0].unsqueeze(1).repeat(1, inject_index, 1)
            latent2 = styles[1].unsqueeze(1).repeat(1, self.num_latent - inject_index, 1)
            latent = torch.cat([latent1, latent2], 1)
        if min(self.input_resolution) <= self.window_size:
            # if window size is larger than input resolution, we don't partition windows
            self.shift_size = 0
            self.window_size = min(self.input_resolution)
        assert 0 <= self.shift_size < self.window_size, 'shift_size must in 0-window_size'

        self.norm1 = norm_layer(dim)
        self.attn = WindowAttention(
            x: b, h*w, c
        """
        h, w = self.input_resolution
        b, seq_len, c = x.shape
        assert seq_len == h * w, 'input feature has wrong size'
        assert h % 2 == 0 and w % 2 == 0, f'x size ({h}*{w}) are not even.'

        x = x.view(b, h, w, c)
            self.upsample = UpsampleOneStep(upscale, embed_dim, num_out_ch,
                                            (patches_resolution[0], patches_resolution[1]))
        elif self.upsampler == 'nearest+conv':
            # for real-world SR (less artifacts)
            assert self.upscale == 4, 'only support x4 now.'
            self.conv_before_upsample = nn.Sequential(
                nn.Conv2d(embed_dim, num_feat, 3, 1, 1), nn.LeakyReLU(inplace=True))
            self.conv_up1 = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
        indices = [v % dataset_size for v in indices]

        # subsample
        indices = indices[self.rank:self.total_size:self.num_replicas]
        assert len(indices) == self.num_samples

        return iter(indices)
    Returns:
        list[int]: A list of indices.
    """
    assert num_frames % 2 == 1, 'num_frames should be an odd number.'
    assert padding in ('replicate', 'reflection', 'reflection_circle', 'circle'), f'Wrong padding mode: {padding}.'

    max_frame_num = max_frame_num - 1  # start from 0
    num_pad = num_frames // 2
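For intuition, the padding mode decides how neighbor indices that fall outside the clip are folded back. A hedged, self-contained sketch of the usual logic (assuming BasicSR-style semantics; not necessarily the project's exact helper):

def neighbor_indices(crt_idx, max_frame_num, num_frames=5, padding='reflection'):
    # Fold out-of-range neighbor indices back into [0, max_frame_num - 1]
    # according to the padding mode (illustrative sketch).
    max_idx = max_frame_num - 1
    num_pad = num_frames // 2
    indices = []
    for i in range(crt_idx - num_pad, crt_idx + num_pad + 1):
        if i < 0:
            if padding == 'replicate':
                i = 0
            elif padding == 'reflection':
                i = -i
            elif padding == 'reflection_circle':
                i = crt_idx + num_pad - i
            else:  # 'circle'
                i = num_frames + i
        elif i > max_idx:
            if padding == 'replicate':
                i = max_idx
            elif padding == 'reflection':
                i = max_idx * 2 - i
            elif padding == 'reflection_circle':
                i = (crt_idx - num_pad) - (i - max_idx)
            else:  # 'circle'
                i = i - num_frames
        indices.append(i)
    return indices

print(neighbor_indices(0, 100))  # [2, 1, 0, 1, 2]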
    Returns:
        list[str]: Returned path list.
    """
    assert len(folders) == 2, ('The len of folders should be 2 with [input_folder, gt_folder]. '
                               f'But got {len(folders)}')
    assert len(keys) == 2, f'The len of keys should be 2 with [input_key, gt_key]. But got {len(keys)}'
    input_folder, gt_folder = folders
    input_key, gt_key = keys
    Returns:
        list[str]: Returned path list.
    """
    assert len(folders) == 2, ('The len of folders should be 2 with [input_folder, gt_folder]. '
                               f'But got {len(folders)}')
    assert len(keys) == 2, f'The len of keys should be 2 with [input_key, gt_key]. But got {len(keys)}'
    input_folder, gt_folder = folders
    input_key, gt_key = keys
    Returns:
        list[str]: Returned path list.
    """
    assert len(folders) == 2, ('The len of folders should be 2 with [input_folder, gt_folder]. '
                               f'But got {len(folders)}')
    assert len(keys) == 2, f'The len of keys should be 2 with [input_key, gt_key]. But got {len(keys)}'
    input_folder, gt_folder = folders
    input_key, gt_key = keys

    input_paths = list(scandir(input_folder))
    gt_paths = list(scandir(gt_folder))
    assert len(input_paths) == len(gt_paths), (f'{input_key} and {gt_key} datasets have different number of images: '
                                               f'{len(input_paths)}, {len(gt_paths)}.')
    paths = []
    for gt_path in gt_paths:
        basename, ext = osp.splitext(osp.basename(gt_path))
        input_name = f'{filename_tmpl.format(basename)}{ext}'
        input_path = osp.join(input_folder, input_name)
        assert input_name in input_paths, f'{input_name} is not in {input_key}_paths.'
        gt_path = osp.join(gt_folder, gt_path)
        paths.append(dict([(f'{input_key}_path', input_path), (f'{gt_key}_path', gt_path)]))
    return paths
    Returns:
        Tensor: DUF downsampled frames.
    """
    assert scale in (2, 3, 4), f'Only support scale (2, 3, 4), but got {scale}.'

    squeeze_flag = False
    if x.ndim == 4:
    Returns:
        kernel (ndarray):
    """
    assert kernel_size % 2 == 1, 'Kernel size must be an odd number.'
    assert sigma_x_range[0] < sigma_x_range[1], 'Wrong sigma_x_range.'
    sigma_x = np.random.uniform(sigma_x_range[0], sigma_x_range[1])
    if isotropic is False:
        assert sigma_y_range[0] < sigma_y_range[1], 'Wrong sigma_y_range.'
        assert rotation_range[0] < rotation_range[1], 'Wrong rotation_range.'
        sigma_y = np.random.uniform(sigma_y_range[0], sigma_y_range[1])
        rotation = np.random.uniform(rotation_range[0], rotation_range[1])
    else:
    kernel = bivariate_Gaussian(kernel_size, sigma_x, sigma_y, rotation, isotropic=isotropic)

    # add multiplicative noise
    if noise_range is not None:
        assert noise_range[0] < noise_range[1], 'Wrong noise range.'
        noise = np.random.uniform(noise_range[0], noise_range[1], size=kernel.shape)
        kernel = kernel * noise
    kernel = kernel / np.sum(kernel)
    Returns:
        kernel (ndarray):
    """
    assert kernel_size % 2 == 1, 'Kernel size must be an odd number.'
    assert sigma_x_range[0] < sigma_x_range[1], 'Wrong sigma_x_range.'
    sigma_x = np.random.uniform(sigma_x_range[0], sigma_x_range[1])
    if isotropic is False:
        assert sigma_y_range[0] < sigma_y_range[1], 'Wrong sigma_y_range.'
        assert rotation_range[0] < rotation_range[1], 'Wrong rotation_range.'
        sigma_y = np.random.uniform(sigma_y_range[0], sigma_y_range[1])
        rotation = np.random.uniform(rotation_range[0], rotation_range[1])
    else:
    kernel = bivariate_generalized_Gaussian(kernel_size, sigma_x, sigma_y, rotation, beta, isotropic=isotropic)

    # add multiplicative noise
    if noise_range is not None:
        assert noise_range[0] < noise_range[1], 'Wrong noise range.'
        noise = np.random.uniform(noise_range[0], noise_range[1], size=kernel.shape)
        kernel = kernel * noise
    kernel = kernel / np.sum(kernel)
    Returns:
        kernel (ndarray):
    """
    assert kernel_size % 2 == 1, 'Kernel size must be an odd number.'
    assert sigma_x_range[0] < sigma_x_range[1], 'Wrong sigma_x_range.'
    sigma_x = np.random.uniform(sigma_x_range[0], sigma_x_range[1])
    if isotropic is False:
        assert sigma_y_range[0] < sigma_y_range[1], 'Wrong sigma_y_range.'
        assert rotation_range[0] < rotation_range[1], 'Wrong rotation_range.'
        sigma_y = np.random.uniform(sigma_y_range[0], sigma_y_range[1])
        rotation = np.random.uniform(rotation_range[0], rotation_range[1])
    else:
    kernel = bivariate_plateau(kernel_size, sigma_x, sigma_y, rotation, beta, isotropic=isotropic)
    # add multiplicative noise
    if noise_range is not None:
        assert noise_range[0] < noise_range[1], 'Wrong noise range.'
        noise = np.random.uniform(noise_range[0], noise_range[1], size=kernel.shape)
        kernel = kernel * noise
    kernel = kernel / np.sum(kernel)
    Returns:
        kernel (ndarray):
    """
    kernel_type = random.choices(kernel_list, kernel_prob)[0]
    if kernel_type == 'iso':
        kernel = random_bivariate_Gaussian(
            kernel_size, sigma_x_range, sigma_y_range, rotation_range, noise_range=noise_range, isotropic=True)
        cutoff (float): cutoff frequency in radians (pi is max)
        kernel_size (int): horizontal and vertical size, must be odd.
        pad_to (int): pad kernel size to desired size, must be odd or zero.
    """
    assert kernel_size % 2 == 1, 'Kernel size must be an odd number.'
    kernel = np.fromfunction(
        lambda x, y: cutoff * special.j1(cutoff * np.sqrt(
            (x - (kernel_size - 1) / 2)**2 + (y - (kernel_size - 1) / 2)**2)) / (2 * np.pi * np.sqrt(
            except Exception as e:
                logger = get_root_logger()
                logger.warning(f'File client error: {e}, remaining retry times: {retry - 1}')
                # change another file to read (randint is inclusive, so cap at len - 1)
                index = random.randint(0, self.__len__() - 1)
                gt_path = self.paths[index]
                time.sleep(1)  # sleep 1s for occasional server congestion
            else:
            except (IOError, OSError) as e:
                logger = get_root_logger()
                logger.warning(f'File client error: {e}, remaining retry times: {retry - 1}')
                # change another file to read (randint is inclusive, so cap at len - 1)
                index = random.randint(0, self.__len__() - 1)
                gt_path = self.paths[index]
                time.sleep(1)  # sleep 1s for occasional server congestion
            else:
        # crop
        if img_gt.shape[0] > crop_pad_size or img_gt.shape[1] > crop_pad_size:
            h, w = img_gt.shape[0:2]
            # randomly choose top and left coordinates
            top = random.randint(0, h - crop_pad_size)
            left = random.randint(0, w - crop_pad_size)
            img_gt = img_gt[top:top + crop_pad_size, left:left + crop_pad_size, ...]

        # ------------------------ Generate kernels (used in the first degradation) ------------------------ #
        kernel_size = random.choice(self.kernel_range)
        if np.random.uniform() < self.opt['sinc_prob']:
            # this sinc filter setting is for kernels ranging from [7, 21]
            if kernel_size < 13:
        pad_size = (21 - kernel_size) // 2
        kernel = np.pad(kernel, ((pad_size, pad_size), (pad_size, pad_size)))

        # ------------------------ Generate kernels (used in the second degradation) ------------------------ #
        kernel_size = random.choice(self.kernel_range)
        if np.random.uniform() < self.opt['sinc_prob2']:
            if kernel_size < 13:
                omega_c = np.random.uniform(np.pi / 3, np.pi)
        kernel2 = np.pad(kernel2, ((pad_size, pad_size), (pad_size, pad_size)))

        # ------------------------------------- the final sinc kernel ------------------------------------- #
        if np.random.uniform() < self.opt['final_sinc_prob']:
            kernel_size = random.choice(self.kernel_range)
            omega_c = np.random.uniform(np.pi / 3, np.pi)
            sinc_kernel = circular_lowpass_kernel(omega_c, kernel_size, pad_to=21)
            sinc_kernel = torch.FloatTensor(sinc_kernel)
        super(REDSDataset, self).__init__()
        self.opt = opt
        self.gt_root, self.lq_root = Path(opt['dataroot_gt']), Path(opt['dataroot_lq'])
        self.flow_root = Path(opt['dataroot_flow']) if opt['dataroot_flow'] is not None else None
        assert opt['num_frame'] % 2 == 1, (f'num_frame should be odd number, but got {opt["num_frame"]}')
        self.num_frame = opt['num_frame']
        self.num_half_frames = opt['num_frame'] // 2
        clip_name, frame_name = key.split('/')  # key example: 000/00000000
        center_frame_idx = int(frame_name)

        # determine the neighboring frames
        interval = random.choice(self.interval_list)

        # ensure not exceeding the borders
        start_frame_idx = center_frame_idx - self.num_half_frames * interval
        end_frame_idx = center_frame_idx + self.num_half_frames * interval
        # each clip has 100 frames starting from 0 to 99
        while (start_frame_idx < 0) or (end_frame_idx > 99):
            center_frame_idx = random.randint(0, 99)
            start_frame_idx = (center_frame_idx - self.num_half_frames * interval)
            end_frame_idx = center_frame_idx + self.num_half_frames * interval
        frame_name = f'{center_frame_idx:08d}'
        neighbor_list = list(range(start_frame_idx, end_frame_idx + 1, interval))
        # random reverse
        if self.random_reverse and random.random() < 0.5:
            neighbor_list.reverse()

        assert len(neighbor_list) == self.num_frame, (f'Wrong length of neighbor list: {len(neighbor_list)}')

        # get the GT frame (as the center frame)
        if self.is_lmdb:
        key = self.keys[index]
        clip_name, frame_name = key.split('/')  # key example: 000/00000000

        # determine the neighboring frames
        interval = random.choice(self.interval_list)

        # ensure not exceeding the borders
        start_frame_idx = int(frame_name)
        if start_frame_idx > 100 - self.num_frame * interval:
            start_frame_idx = random.randint(0, 100 - self.num_frame * interval)
        end_frame_idx = start_frame_idx + self.num_frame * interval

        neighbor_list = list(range(start_frame_idx, end_frame_idx, interval))

        # random reverse
        if self.random_reverse and random.random() < 0.5:
            neighbor_list.reverse()

        # get the neighboring LQ and GT frames
                         f'({lq_patch_size}, {lq_patch_size}). '
                         f'Please remove {gt_path}.')

    # randomly choose top and left coordinates for lq patch
    top = random.randint(0, h_lq - lq_patch_size)
    left = random.randint(0, w_lq - lq_patch_size)

    # crop lq patch
    if input_type == 'Tensor':
        list[ndarray] | ndarray: Augmented images and flows. If returned
            results only have one element, just return ndarray.

    """
    hflip = hflip and random.random() < 0.5
    vflip = rotation and random.random() < 0.5
    rot90 = rotation and random.random() < 0.5

    def _augment(img):
        if hflip:  # horizontal
        self.data_info = {'lq_path': [], 'gt_path': [], 'folder': [], 'idx': [], 'border': []}
        # file client (io backend)
        self.file_client = None
        self.io_backend_opt = opt['io_backend']
        assert self.io_backend_opt['type'] != 'lmdb', 'No need to use lmdb during validation/test.'

        logger = get_root_logger()
        logger.info(f'Generate data info for VideoTestDataset - {opt["name"]}')
                img_paths_lq = sorted(list(scandir(subfolder_lq, full_path=True)))
                img_paths_gt = sorted(list(scandir(subfolder_gt, full_path=True)))

                max_idx = len(img_paths_lq)
                assert max_idx == len(img_paths_gt), (f'Different number of images in lq ({max_idx})'
                                                      f' and gt folders ({len(img_paths_gt)})')

                self.data_info['lq_path'].extend(img_paths_lq)
                self.data_info['gt_path'].extend(img_paths_gt)
        # file client (io backend)
        self.file_client = None
        self.io_backend_opt = opt['io_backend']
        assert self.io_backend_opt['type'] != 'lmdb', 'No need to use lmdb during validation/test.'

        logger = get_root_logger()
        logger.info(f'Generate data info for VideoTestDataset - {opt["name"]}')
        if self.file_client is None:
            self.file_client = FileClient(self.io_backend_opt.pop('type'), **self.io_backend_opt)

        # random reverse
        if self.random_reverse and random.random() < 0.5:
            self.neighbor_list.reverse()

        scale = self.opt['scale']
        if self.file_client is None:
            self.file_client = FileClient(self.io_backend_opt.pop('type'), **self.io_backend_opt)

        # random reverse
        if self.random_reverse and random.random() < 0.5:
            self.neighbor_list.reverse()

        scale = self.opt['scale']
        Tensor: Loss values.
    """
    # if weight is specified, apply element-wise weight
    if weight is not None:
        assert weight.dim() == loss.dim()
        assert weight.size(1) == 1 or weight.size(1) == loss.size(1)
        loss = loss * weight

    # if weight is not specified or reduction is sum, just reduce the loss
    Returns:
        float: The Frechet Distance.
    """
    assert mu1.shape == mu2.shape, 'Two mean vectors have different lengths'
    assert sigma1.shape == sigma2.shape, ('Two covariances have different dimensions')

    cov_sqrt, _ = linalg.sqrtm(sigma1 @ sigma2, disp=False)
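The rest of the computation follows the standard closed-form Fréchet distance between two Gaussians. A hedged sketch of that formula (variable names here are illustrative; the epsilon fallback is a common numerical-stability trick, not necessarily the project's exact code):

import numpy as np
from scipy import linalg

def frechet_distance(mu1, sigma1, mu2, sigma2, eps=1e-6):
    # d^2 = ||mu1 - mu2||^2 + Tr(sigma1 + sigma2 - 2 * (sigma1 @ sigma2)^(1/2))
    diff = mu1 - mu2
    cov_sqrt, _ = linalg.sqrtm(sigma1 @ sigma2, disp=False)
    if not np.isfinite(cov_sqrt).all():
        # add a small offset to the diagonals for numerical stability
        offset = np.eye(sigma1.shape[0]) * eps
        cov_sqrt = linalg.sqrtm((sigma1 + offset) @ (sigma2 + offset))
    if np.iscomplexobj(cov_sqrt):
        cov_sqrt = cov_sqrt.real
    return diff @ diff + np.trace(sigma1) + np.trace(sigma2) - 2 * np.trace(cov_sqrt)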
            Default: 96 (the official recommended value).
        block_size_w (int): Width of the blocks into which the image is divided.
            Default: 96 (the official recommended value).
    """
    assert img.ndim == 2, ('Input image must be a gray or Y (of YCbCr) image with shape (h, w).')
    # crop image
    h, w = img.shape
    num_block_h = math.floor(h / block_size_h)
    Returns:
        float: PSNR result.
    """

    assert img.shape == img2.shape, (f'Image shapes are different: {img.shape}, {img2.shape}.')
    if input_order not in ['HWC', 'CHW']:
        raise ValueError(f'Wrong input_order {input_order}. Supported input_orders are "HWC" and "CHW"')
    img = reorder_image(img, input_order=input_order)
    Returns:
        float: PSNR result.
    """

    assert img.shape == img2.shape, (f'Image shapes are different: {img.shape}, {img2.shape}.')

    if crop_border != 0:
        img = img[:, :, crop_border:-crop_border, crop_border:-crop_border]
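Both PSNR variants reduce to the standard definition, PSNR = 10 * log10(MAX^2 / MSE). A hedged numpy sketch, assuming 8-bit images in [0, 255]:

import numpy as np

def psnr(img, img2, max_value=255.0):
    # mean squared error over all pixels; identical images give infinite PSNR
    mse = np.mean((img.astype(np.float64) - img2.astype(np.float64)) ** 2)
    if mse == 0:
        return float('inf')
    return 10.0 * np.log10(max_value ** 2 / mse)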
    Returns:
        float: SSIM result.
    """

    assert img.shape == img2.shape, (f'Image shapes are different: {img.shape}, {img2.shape}.')
    if input_order not in ['HWC', 'CHW']:
        raise ValueError(f'Wrong input_order {input_order}. Supported input_orders are "HWC" and "CHW"')
    img = reorder_image(img, input_order=input_order)
    Returns:
        float: SSIM result.
    """

    assert img.shape == img2.shape, (f'Image shapes are different: {img.shape}, {img2.shape}.')

    if crop_border != 0:
        img = img[:, :, crop_border:-crop_border, crop_border:-crop_border]
        save_path = os.path.join(self.opt['path']['models'], save_filename)

        net = net if isinstance(net, list) else [net]
        param_key = param_key if isinstance(param_key, list) else [param_key]
        assert len(net) == len(param_key), 'The lengths of net and param_key should be the same.'

        save_dict = {}
        for net_, param_key_ in zip(net, param_key):
            resume_state (dict): Resume state.
        """
        resume_optimizers = resume_state['optimizers']
        resume_schedulers = resume_state['schedulers']
        assert len(resume_optimizers) == len(self.optimizers), 'Wrong lengths of optimizers'
        assert len(resume_schedulers) == len(self.schedulers), 'Wrong lengths of schedulers'
        for i, o in enumerate(resume_optimizers):
            self.optimizers[i].load_state_dict(o)
        for i, s in enumerate(resume_schedulers):
        self.milestones = Counter(milestones)
        self.gamma = gamma
        self.restarts = restarts
        self.restart_weights = restart_weights
        assert len(self.restarts) == len(self.restart_weights), 'restarts and their weights do not match.'
        super(MultiStepRestartLR, self).__init__(optimizer, last_epoch)

    def get_lr(self):
    def __init__(self, optimizer, periods, restart_weights=(1, ), eta_min=0, last_epoch=-1):
        self.periods = periods
        self.restart_weights = restart_weights
        self.eta_min = eta_min
        assert (len(self.periods) == len(
            self.restart_weights)), 'periods and restart_weights should have the same length.'
        self.cumulative_period = [sum(self.periods[0:i + 1]) for i in range(0, len(self.periods))]
        super(CosineAnnealingRestartLR, self).__init__(optimizer, last_epoch)
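The cumulative periods are what let the scheduler map a step to its active restart cycle. A hedged, standalone sketch of the usual cosine-annealing-with-restarts formula (function and variable names here are illustrative, not the scheduler's exact code):

import math

def cosine_restart_lr(step, base_lr, periods, restart_weights, eta_min=0):
    cumulative = [sum(periods[:i + 1]) for i in range(len(periods))]
    # find the first period whose cumulative end covers the current step
    idx = next(i for i, c in enumerate(cumulative) if step < c)
    weight = restart_weights[idx]
    start = 0 if idx == 0 else cumulative[idx - 1]
    period = periods[idx]
    return eta_min + weight * 0.5 * (base_lr - eta_min) * (
        1 + math.cos(math.pi * (step - start) / period))

# e.g. periods=[10, 10], restart_weights=[1, 0.5]: the LR decays over the first
# 10 steps, then restarts at half the base LR and decays again.
print(cosine_restart_lr(0, 2e-4, [10, 10], [1, 0.5]))  # 0.0002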
        """
        # initialize
        b, c, h, w = self.lq.size()
        if not hasattr(self, 'queue_lr'):
            assert self.queue_size % b == 0, f'queue size {self.queue_size} should be divisible by batch size {b}'
            self.queue_lr = torch.zeros(self.queue_size, c, h, w).cuda()
            _, c, h, w = self.gt.size()
            self.queue_gt = torch.zeros(self.queue_size, c, h, w).cuda()
            # ----------------------- The first degradation process ----------------------- #
            # blur
            out = filter2D(self.gt_usm, self.kernel1)
            # random resize
            updown_type = random.choices(['up', 'down', 'keep'], self.opt['resize_prob'])[0]
            if updown_type == 'up':
                scale = np.random.uniform(1, self.opt['resize_range'][1])
            elif updown_type == 'down':
                scale = np.random.uniform(self.opt['resize_range'][0], 1)
            else:
                scale = 1
            mode = random.choice(['area', 'bilinear', 'bicubic'])
            out = F.interpolate(out, scale_factor=scale, mode=mode)
            # add noise
            gray_noise_prob = self.opt['gray_noise_prob']
            # blur
            if np.random.uniform() < self.opt['second_blur_prob']:
                out = filter2D(out, self.kernel2)
            # random resize
            updown_type = random.choices(['up', 'down', 'keep'], self.opt['resize_prob2'])[0]
            if updown_type == 'up':
                scale = np.random.uniform(1, self.opt['resize_range2'][1])
            elif updown_type == 'down':
                scale = np.random.uniform(self.opt['resize_range2'][0], 1)
            else:
                scale = 1
            mode = random.choice(['area', 'bilinear', 'bicubic'])
            out = F.interpolate(
                out, size=(int(ori_h / self.opt['scale'] * scale), int(ori_w / self.opt['scale'] * scale)), mode=mode)
            # add noise
            # 2. JPEG compression + [resize back + sinc filter]
            # Empirically, we find other combinations (sinc + JPEG + Resize) will introduce twisted lines.
            if np.random.uniform() < 0.5:
                # resize back + the final sinc filter
                mode = random.choice(['area', 'bilinear', 'bicubic'])
                out = F.interpolate(out, size=(ori_h // self.opt['scale'], ori_w // self.opt['scale']), mode=mode)
                out = filter2D(out, self.sinc_kernel)
                # JPEG compression
                jpeg_p = out.new_zeros(out.size(0)).uniform_(*self.opt['jpeg_range2'])
                out = torch.clamp(out, 0, 1)
                out = self.jpeger(out, quality=jpeg_p)
                # resize back + the final sinc filter
                mode = random.choice(['area', 'bilinear', 'bicubic'])
                out = F.interpolate(out, size=(ori_h // self.opt['scale'], ori_w // self.opt['scale']), mode=mode)
                out = filter2D(out, self.sinc_kernel)
        """
        # initialize
        b, c, h, w = self.lq.size()
        if not hasattr(self, 'queue_lr'):
            assert self.queue_size % b == 0, f'queue size {self.queue_size} should be divisible by batch size {b}'
            self.queue_lr = torch.zeros(self.queue_size, c, h, w).cuda()
            _, c, h, w = self.gt.size()
            self.queue_gt = torch.zeros(self.queue_size, c, h, w).cuda()
            # ----------------------- The first degradation process ----------------------- #
            # blur
            out = filter2D(self.gt, self.kernel1)
            # random resize
            updown_type = random.choices(['up', 'down', 'keep'], self.opt['resize_prob'])[0]
            if updown_type == 'up':
                scale = np.random.uniform(1, self.opt['resize_range'][1])
            elif updown_type == 'down':
                scale = np.random.uniform(self.opt['resize_range'][0], 1)
            else:
                scale = 1
            mode = random.choice(['area', 'bilinear', 'bicubic'])
            out = F.interpolate(out, scale_factor=scale, mode=mode)
            # add noise
            gray_noise_prob = self.opt['gray_noise_prob']
            # blur
            if np.random.uniform() < self.opt['second_blur_prob']:
                out = filter2D(out, self.kernel2)
            # random resize
            updown_type = random.choices(['up', 'down', 'keep'], self.opt['resize_prob2'])[0]
            if updown_type == 'up':
                scale = np.random.uniform(1, self.opt['resize_range2'][1])
            elif updown_type == 'down':
                scale = np.random.uniform(self.opt['resize_range2'][0], 1)
            else:
                scale = 1
            mode = random.choice(['area', 'bilinear', 'bicubic'])
            out = F.interpolate(
                out, size=(int(ori_h / self.opt['scale'] * scale), int(ori_w / self.opt['scale'] * scale)), mode=mode)
            # add noise
            # 2. JPEG compression + [resize back + sinc filter]
            # Empirically, we find other combinations (sinc + JPEG + Resize) will introduce twisted lines.
            if np.random.uniform() < 0.5:
                # resize back + the final sinc filter
                mode = random.choice(['area', 'bilinear', 'bicubic'])
                out = F.interpolate(out, size=(ori_h // self.opt['scale'], ori_w // self.opt['scale']), mode=mode)
                out = filter2D(out, self.sinc_kernel)
                # JPEG compression
                jpeg_p = out.new_zeros(out.size(0)).uniform_(*self.opt['jpeg_range2'])
                out = torch.clamp(out, 0, 1)
                out = self.jpeger(out, quality=jpeg_p)
                # resize back + the final sinc filter
                mode = random.choice(['area', 'bilinear', 'bicubic'])
                out = F.interpolate(out, size=(ori_h // self.opt['scale'], ori_w // self.opt['scale']), mode=mode)
                out = filter2D(out, self.sinc_kernel)
            noises = torch.randn(num_noise, batch, self.num_style_feat, device=self.device).unbind(0)
        return noises

    def mixing_noise(self, batch, prob):
        if random.random() < prob:
            return self.make_noise(batch, 2)
        else:
            return [self.make_noise(batch, 1)]
        if self.opt['rank'] == 0:
            self.nondist_validation(dataloader, current_iter, tb_logger, save_img)

    def nondist_validation(self, dataloader, current_iter, tb_logger, save_img):
        assert dataloader is None, 'Validation dataloader should be None.'
        self.test()
        result = tensor2img(self.output, min_max=(-1, 1))
        if self.opt['is_train']:
        if not input.is_cuda:
            raise NotImplementedError
        else:
            cur_im2col_step = min(ctx.im2col_step, input.shape[0])
            assert (input.shape[0] % cur_im2col_step) == 0, 'im2col step must divide batchsize'
            deform_conv_ext.deform_conv_forward(input, weight,
                                                offset, output, ctx.bufs_[0], ctx.bufs_[1], weight.size(3),
                                                weight.size(2), ctx.stride[1], ctx.stride[0], ctx.padding[1],
        if not grad_output.is_cuda:
            raise NotImplementedError
        else:
            cur_im2col_step = min(ctx.im2col_step, input.shape[0])
            assert (input.shape[0] % cur_im2col_step) == 0, 'im2col step must divide batchsize'

            if ctx.needs_input_grad[0] or ctx.needs_input_grad[1]:
                grad_input = torch.zeros_like(input)
                 deformable_groups=1,
                 bias=False):
        super(DeformConv, self).__init__()

        assert not bias
        assert in_channels % groups == 0, f'in_channels {in_channels} is not divisible by groups {groups}'
        assert out_channels % groups == 0, f'out_channels {out_channels} is not divisible by groups {groups}'

        self.in_channels = in_channels
        self.out_channels = out_channels
def init_tb_loggers(opt):
    # initialize wandb logger before tensorboard logger to allow proper sync
    if (opt['logger'].get('wandb') is not None) and (opt['logger']['wandb'].get('project')
                                                     is not None) and ('debug' not in opt['name']):
        assert opt['logger'].get('use_tb_logger') is True, ('should turn on tensorboard when using wandb')
        init_wandb_logger(opt)
    tb_logger = None
    if opt['logger'].get('use_tb_logger') and 'debug' not in opt['name']:
# Modified from https://github.com/open-mmlab/mmcv/blob/master/mmcv/runner/dist_utils.py  # noqa: E501
import functools
import os
import subprocess
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
    ntasks = int(os.environ['SLURM_NTASKS'])
    node_list = os.environ['SLURM_NODELIST']
    num_gpus = torch.cuda.device_count()
    torch.cuda.set_device(proc_id % num_gpus)
    addr = subprocess.getoutput(f'scontrol show hostname {node_list} | head -n1')
    # specify master port
    if port is not None:
        os.environ['MASTER_PORT'] = str(port)
        if isinstance(db_paths, list):
            self.db_paths = [str(v) for v in db_paths]
        elif isinstance(db_paths, str):
            self.db_paths = [str(db_paths)]
        assert len(client_keys) == len(self.db_paths), ('client_keys and db_paths should have the same length, '
                                                        f'but received {len(client_keys)} and {len(self.db_paths)}.')

        self._client = {}
        for client, path in zip(client_keys, self.db_paths):
            filepath (str | obj:`Path`): Here, filepath is the lmdb key.
            client_key (str): Used for distinguishing different lmdb envs.
        """
        filepath = str(filepath)
        assert client_key in self._client, (f'client_key {client_key} is not in lmdb clients.')
        client = self._client[client_key]
        with client.begin(write=False) as txn:
            value_buf = txn.get(filepath.encode('ascii'))
    Returns:
        ndarray: Optical flow represented as a (h, w, 2) numpy array
    """
    if quantize:
        assert concat_axis in [0, 1]
        cat_flow = cv2.imread(flow_path, cv2.IMREAD_UNCHANGED)
        if cat_flow.ndim != 2:
            raise IOError(f'{flow_path} is not a valid quantized flow file, its dimension is {cat_flow.ndim}.')
        assert cat_flow.shape[concat_axis] % 2 == 0
        dx, dy = np.split(cat_flow, 2, axis=concat_axis)
        flow = dequantize_flow(dx, dy, *args, **kwargs)
    else:
            flow = flow.astype(np.float32)
            flow.tofile(f)
            f.flush()
    else:
        assert concat_axis in [0, 1]
        dx, dy = quantize_flow(flow, *args, **kwargs)
        dxdy = np.concatenate((dx, dy), axis=concat_axis)
        os.makedirs(os.path.dirname(filename), exist_ok=True)
    Returns:
        ndarray: Dequantized flow.
    """
    assert dx.shape == dy.shape
    assert dx.ndim == 2 or (dx.ndim == 3 and dx.shape[-1] == 1)

    dx, dy = [dequantize(d, -max_val, max_val, 255) for d in [dx, dy]]
        map_size (int | None): Map size for lmdb env. If None, use the
            estimated size from images. Default: None
    """

    assert len(img_path_list) == len(keys), ('img_path_list and keys should have the same length, '
                                             f'but got {len(img_path_list)} and {len(keys)}')
    print(f'Create lmdb for {data_path}, save to {lmdb_path}...')
    print(f'Total images: {len(img_path_list)}')
    if not lmdb_path.endswith('.lmdb'):
    elif value.replace('.', '', 1).isdigit() and value.count('.') < 2:
        return float(value)
    # list
    if value.startswith('['):
        return eval(value)
    # str
    return value
    args = parser.parse_args()

    # parse yml to dict
    with open(args.opt, mode='r') as f:
        opt = yaml.load(f, Loader=ordered_yaml()[0])

    # distributed settings
    if args.launcher == 'none':
    # random seed
    seed = opt.get('manual_seed')
    if seed is None:
        seed = random.randint(1, 10000)
        opt['manual_seed'] = seed
    set_random_seed(seed + opt['rank'])
            for key in keys.split(':'):
                eval_str += f'["{key}"]'
            eval_str += '=value'
            # using exec function
            exec(eval_str)

    opt['auto_resume'] = args.auto_resume
    opt['is_train'] = is_train
    def _do_register(self, name, obj, suffix=None):
        if isinstance(suffix, str):
            name = name + '_' + suffix

        assert (name not in self._obj_map), (f"An object named '{name}' was already registered "
                                             f"in '{self._name}' registry!")
        self._obj_map[name] = obj

    def register(self, obj=None, suffix=None):
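A hedged sketch of the decorator-based usage this kind of registry enables; the registry name and classes below are illustrative, not the project's actual registries.

class Registry:
    def __init__(self, name):
        self._name = name
        self._obj_map = {}

    def register(self, obj=None):
        if obj is None:  # used as a decorator: @MODELS.register()
            def deco(cls):
                self._obj_map[cls.__name__] = cls
                return cls
            return deco
        self._obj_map[obj.__name__] = obj
        return obj

    def get(self, name):
        return self._obj_map[name]

MODELS = Registry('models')

@MODELS.register()
class TinyModel:
    pass

print(MODELS.get('TinyModel'))  # the class can later be looked up by name from a config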
class SSH(nn.Module):

    def __init__(self, in_channel, out_channel):
        super(SSH, self).__init__()
        assert out_channel % 4 == 0
        leaky = 0
        if (out_channel <= 64):
            leaky = 0.1
    no = na * (nc + 5)  # number of outputs = anchors * (classes + 5)

    layers, save, c2 = [], [], ch[-1]  # layers, savelist, ch out
    for i, (f, n, m, args) in enumerate(d["backbone"] + d["head"]):  # from, number, module, args
        m = eval(m) if isinstance(m, str) else m  # eval strings
        for j, a in enumerate(args):
            try:
                args[j] = eval(a) if isinstance(a, str) else a  # eval strings
            except:
                pass

        n = max(round(n * gd), 1) if n > 1 else n  # depth gain
        if m in [
            self.norm = nn.LayerNorm(normalize_shape)
        elif norm_type == 'none':
            self.norm = lambda x: x * 1.0
        else:
            assert 1 == 0, f'Norm type {norm_type} not support.'

    def forward(self, x, ref=None):
        if self.norm_type == 'spade':
            self.func = nn.SELU(True)
        elif relu_type == 'none':
            self.func = lambda x: x * 1.0
        else:
            assert 1 == 0, f'Relu type {relu_type} not support.'

    def forward(self, x):
        return self.func(x)
        self.template_3points = template_3points  # improve robustness
        self.upscale_factor = upscale_factor
        # the cropped face ratio based on the square face
        self.crop_ratio = crop_ratio  # (h, w)
        assert (self.crop_ratio[0] >= 1 and self.crop_ratio[1] >= 1), 'crop ratio only supports >=1'
        self.face_size = (int(face_size * self.crop_ratio[1]), int(face_size * self.crop_ratio[0]))

        if self.template_3points:
    def align_warp_face(self, save_cropped_path=None, border_mode='constant'):
        """Align and warp faces with face template.
        """
        if self.pad_blur:
            assert len(self.pad_input_imgs) == len(
                self.all_landmarks_5), f'Mismatched samples: {len(self.pad_input_imgs)} and {len(self.all_landmarks_5)}'
        for idx, landmark in enumerate(self.all_landmarks_5):
            # use 5 landmarks to get affine matrix
            # use cv2.LMEDS method for the equivalence to skimage transform
            upsample_img = cv2.resize(self.input_img, (w_up, h_up), interpolation=cv2.INTER_LINEAR)
        else:
            upsample_img = cv2.resize(upsample_img, (w_up, h_up), interpolation=cv2.INTER_LANCZOS4)

        assert len(self.restored_faces) == len(
            self.inverse_affine_matrices), ('length of restored_faces and affine_matrices are different.')

        inv_mask_borders = []
        for restored_face, inverse_affine in zip(self.restored_faces, self.inverse_affine_matrices):
            self.models[model.taskname] = model
        else:
            print('duplicated model task type, ignore:', onnx_file, model.taskname)
            del model
    assert 'detection' in self.models
    self.det_model = self.models['detection']


def patched_faceanalysis_prepare(self, ctx_id, det_thresh=0.5, det_size=(640, 640)):
    self.det_thresh = det_thresh
    assert det_size is not None
    self.det_size = det_size
    for taskname, model in self.models.items():
        if taskname == 'detection':
    output_names = []
    for out in outputs:
        output_names.append(out.name)
    self.output_names = output_names
    assert len(self.output_names) == 1
    input_cfg = inputs[0]
    input_shape = input_cfg.shape
    self.input_shape = input_shape
    return torch.stack([r, g, b], dim=3)


def download(url, path, name):
    request = urllib.request.urlopen(url)
    total = int(request.headers.get('Content-Length', 0))
    with tqdm(total=total, desc=f'[ReActor] Downloading {name} to {path}', unit='B', unit_scale=True, unit_divisor=1024) as progress:
        urllib.request.urlretrieve(url, path, reporthook=lambda count, block_size, total_size: progress.update(block_size))


def move_path(old_path, new_path):
    setattr(logging, methodName, logToRoot)


def get_image_md5hash(image: Image.Image):
    md5hash = hashlib.md5(image.tobytes())
    return md5hash.hexdigest()
        eps (float): A small value added to the variance to avoid
            divide-by-zero. Default: 1e-5.
    """
    size = feat.size()
    assert len(size) == 4, 'The input feature should be 4D tensor.'
    b, c = size[:2]
    feat_var = feat.view(b, c, -1).var(dim=2) + eps
    feat_std = feat_var.sqrt().view(b, c, 1, 1)
        '__builtins__': {},
        '__name__': f'namedtuple_{typename}',
    }
    code = f'lambda _cls, {arg_list}: _tuple_new(_cls, ({arg_list}))'
    __new__ = eval(code, namespace)
    __new__.__name__ = '__new__'
    __new__.__doc__ = f'Create new instance of {typename}({arg_list})'
    if defaults is not None: