# push (backup): cog push r8.im/xinntao/gfpgan

import os

os.system('python setup.py develop')
os.system('pip install realesrgan')

import cv2
import shutil
    def setup(self):
        os.makedirs('output', exist_ok=True)
        # download weights
        if not os.path.exists('gfpgan/weights/realesr-general-x4v3.pth'):
            os.system(
                'wget https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-general-x4v3.pth -P ./gfpgan/weights'
            )
        if not os.path.exists('gfpgan/weights/GFPGANv1.2.pth'):
            os.system(
                'wget https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.2.pth -P ./gfpgan/weights')
        if not os.path.exists('gfpgan/weights/GFPGANv1.3.pth'):
            os.system(
                'wget https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.3.pth -P ./gfpgan/weights')
        if not os.path.exists('gfpgan/weights/GFPGANv1.4.pth'):
            os.system(
                'wget https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.4.pth -P ./gfpgan/weights')
        if not os.path.exists('gfpgan/weights/RestoreFormer.pth'):
            os.system(
                'wget https://github.com/TencentARC/GFPGAN/releases/download/v1.3.4/RestoreFormer.pth -P ./gfpgan/weights'
            )

        # background enhancer with RealESRGAN
        model = SRVGGNetCompact(num_in_ch=3, num_out_ch=3, num_feat=64, num_conv=32, upscale=4, act_type='prelu')
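# --- Editor's sketch (not part of the original cog_predict.py) ---
# The wget calls above assume wget is available in the container. A pure-Python
# alternative with the same URLs and target directory could use torch.hub;
# `fetch_weight` below is a hypothetical helper, not a function from the repo.
import torch
from pathlib import Path


def fetch_weight(url, weight_dir='gfpgan/weights'):
    """Download a weight file into weight_dir if it is not already present."""
    Path(weight_dir).mkdir(parents=True, exist_ok=True)
    dst = Path(weight_dir) / url.split('/')[-1]
    if not dst.exists():
        torch.hub.download_url_to_file(url, str(dst), progress=True)
    return str(dst)


# Example:
# fetch_weight('https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.4.pth')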
            else:  # used for encoder with different latent code for each layer
                latent = styles[0]
        elif len(styles) == 2:  # mixing noises
            if inject_index is None:
                inject_index = random.randint(1, self.num_latent - 1)
            latent1 = styles[0].unsqueeze(1).repeat(1, inject_index, 1)
            latent2 = styles[1].unsqueeze(1).repeat(1, self.num_latent - inject_index, 1)
            latent = torch.cat([latent1, latent2], 1)
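# --- Editor's sketch: shape walk-through of the style-mixing branch above. The
# concrete numbers (num_latent, inject_index) are illustrative, not from the source. ---
import torch

b, num_style_feat, num_latent, inject_index = 2, 512, 10, 4
style_a = torch.randn(b, num_style_feat)
style_b = torch.randn(b, num_style_feat)
latent1 = style_a.unsqueeze(1).repeat(1, inject_index, 1)               # (2, 4, 512)
latent2 = style_b.unsqueeze(1).repeat(1, num_latent - inject_index, 1)  # (2, 6, 512)
latent = torch.cat([latent1, latent2], 1)                               # (2, 10, 512)
# the first inject_index layers use the first style, the remaining layers the second
assert latent.shape == (b, num_latent, num_style_feat)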
        super().__init__()
        self.in_channels = in_channels
        self.head_size = head_size
        self.att_size = in_channels // head_size
        assert in_channels % head_size == 0, 'in_channels must be divisible by head_size.'

        self.norm1 = Normalize(in_channels)
        self.norm2 = Normalize(in_channels)
from setuptools import find_packages, setup

import os
import subprocess
import time

version_file = 'gfpgan/version.py'


        # LANGUAGE is used on win32
        env['LANGUAGE'] = 'C'
        env['LANG'] = 'C'
        env['LC_ALL'] = 'C'
        out = subprocess.Popen(cmd, stdout=subprocess.PIPE, env=env).communicate()[0]
        return out

    try:


def get_version():
    with open(version_file, 'r') as f:
        exec(compile(f.read(), version_file, 'exec'))
    return locals()['__version__']
    if torch.cuda.is_available():
        net = ResNetArcFace(block='IRBlock', layers=(2, 2, 2, 2), use_se=True).cuda().eval()
        img = torch.rand((1, 1, 128, 128), dtype=torch.float32).cuda()
        output = net(img)
        assert output.shape == (1, 512)

        # -------------------- without SE block ----------------------- #
        net = ResNetArcFace(block='IRBlock', layers=(2, 2, 2, 2), use_se=False).cuda().eval()
        output = net(img)
        assert output.shape == (1, 512)


def test_basicblock():
    """Test the BasicBlock in arcface_arch."""
    block = BasicBlock(1, 3, stride=1, downsample=None).cuda()
    img = torch.rand((1, 1, 12, 12), dtype=torch.float32).cuda()
    output = block(img)
    assert output.shape == (1, 3, 12, 12)

    # ----------------- use the downsample module --------------- #
    downsample = torch.nn.UpsamplingNearest2d(scale_factor=0.5).cuda()
    block = BasicBlock(1, 3, stride=2, downsample=downsample).cuda()
    img = torch.rand((1, 1, 12, 12), dtype=torch.float32).cuda()
    output = block(img)
    assert output.shape == (1, 3, 6, 6)


def test_bottleneck():
    """Test the Bottleneck in arcface_arch."""
    block = Bottleneck(1, 1, stride=1, downsample=None).cuda()
    img = torch.rand((1, 1, 12, 12), dtype=torch.float32).cuda()
    output = block(img)
    assert output.shape == (1, 4, 12, 12)

    # ----------------- use the downsample module --------------- #
    downsample = torch.nn.UpsamplingNearest2d(scale_factor=0.5).cuda()
    block = Bottleneck(1, 1, stride=2, downsample=downsample).cuda()
    img = torch.rand((1, 1, 12, 12), dtype=torch.float32).cuda()
    output = block(img)
    assert output.shape == (1, 4, 6, 6)
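# Editor's note: the Bottleneck asserts above expect 4 output channels because the
# standard ResNet bottleneck design multiplies `planes` by an expansion factor of 4,
# so planes=1 yields 1 * 4 = 4 channels (assuming the usual expansion=4 convention).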
def test_ffhq_degradation_dataset():

    with open('tests/data/test_ffhq_degradation_dataset.yml', mode='r') as f:
        opt = yaml.load(f, Loader=yaml.FullLoader)

    dataset = FFHQDegradationDataset(opt)
    assert dataset.io_backend_opt['type'] == 'disk'  # io backend
    assert len(dataset) == 1  # whether to read correct meta info
    assert dataset.kernel_list == ['iso', 'aniso']  # correct initialization of the degradation configurations
    assert dataset.color_jitter_prob == 1

    # test __getitem__
    result = dataset.__getitem__(0)
    # check returned keys
    expected_keys = ['gt', 'lq', 'gt_path']
    assert set(expected_keys).issubset(set(result.keys()))
    # check shape and contents
    assert result['gt'].shape == (3, 512, 512)
    assert result['lq'].shape == (3, 512, 512)
    assert result['gt_path'] == 'tests/data/gt/00000000.png'

    # ------------------ test with probability = 0 -------------------- #
    opt['color_jitter_prob'] = 0
    opt['color_jitter_pt_prob'] = 0
    opt['gray_prob'] = 0
    opt['io_backend'] = dict(type='disk')
    dataset = FFHQDegradationDataset(opt)
    assert dataset.io_backend_opt['type'] == 'disk'  # io backend
    assert len(dataset) == 1  # whether to read correct meta info
    assert dataset.kernel_list == ['iso', 'aniso']  # correct initialization of the degradation configurations
    assert dataset.color_jitter_prob == 0

    # test __getitem__
    result = dataset.__getitem__(0)
    # check returned keys
    expected_keys = ['gt', 'lq', 'gt_path']
    assert set(expected_keys).issubset(set(result.keys()))
    # check shape and contents
    assert result['gt'].shape == (3, 512, 512)
    assert result['lq'].shape == (3, 512, 512)
    assert result['gt_path'] == 'tests/data/gt/00000000.png'

    # ------------------ test lmdb backend -------------------- #
    opt['dataroot_gt'] = 'tests/data/ffhq_gt.lmdb'
    opt['io_backend'] = dict(type='lmdb')

    dataset = FFHQDegradationDataset(opt)
    assert dataset.io_backend_opt['type'] == 'lmdb'  # io backend
    assert len(dataset) == 1  # whether to read correct meta info
    assert dataset.kernel_list == ['iso', 'aniso']  # correct initialization of the degradation configurations
    assert dataset.color_jitter_prob == 0

    # test __getitem__
    result = dataset.__getitem__(0)
    # check returned keys
    expected_keys = ['gt', 'lq', 'gt_path']
    assert set(expected_keys).issubset(set(result.keys()))
    # check shape and contents
    assert result['gt'].shape == (3, 512, 512)
    assert result['lq'].shape == (3, 512, 512)
    assert result['gt_path'] == '00000000'

    # ------------------ test with crop_components -------------------- #
    opt['crop_components'] = True
    opt['gt_gray'] = True
    opt['io_backend'] = dict(type='lmdb')

    dataset = FFHQDegradationDataset(opt)
    assert dataset.crop_components is True

    # test __getitem__
    result = dataset.__getitem__(0)
    # check returned keys
    expected_keys = ['gt', 'lq', 'gt_path', 'loc_left_eye', 'loc_right_eye', 'loc_mouth']
    assert set(expected_keys).issubset(set(result.keys()))
    # check shape and contents
    assert result['gt'].shape == (3, 512, 512)
    assert result['lq'].shape == (3, 512, 512)
    assert result['gt_path'] == '00000000'
    assert result['loc_left_eye'].shape == (4, )
    assert result['loc_right_eye'].shape == (4, )
    assert result['loc_mouth'].shape == (4, )

    # ------------------ lmdb backend should have paths ending with lmdb -------------------- #
    with pytest.raises(ValueError):
        condition2 = torch.rand((1, 512, 16, 16), dtype=torch.float32).cuda()
        condition3 = torch.rand((1, 512, 32, 32), dtype=torch.float32).cuda()
        conditions = [condition1, condition1, condition2, condition2, condition3, condition3]
        output = net([style], conditions)
        assert output[0].shape == (1, 3, 32, 32)
        assert output[1] is None

        # -------------------- with return_latents ----------------------- #
        output = net([style], conditions, return_latents=True)
        assert output[0].shape == (1, 3, 32, 32)
        assert len(output[1]) == 1
        # check latent
        assert output[1][0].shape == (8, 512)

        # -------------------- with randomize_noise = False ----------------------- #
        output = net([style], conditions, randomize_noise=False)
        assert output[0].shape == (1, 3, 32, 32)
        assert output[1] is None

        # -------------------- with truncation = 0.5 and mixing ----------------------- #
        output = net([style, style], conditions, truncation=0.5, truncation_latent=style)
        assert output[0].shape == (1, 3, 32, 32)
        assert output[1] is None


def test_gfpganv1():

            narrow=1,
            sft_half=True).cuda().eval()
        img = torch.rand((1, 3, 32, 32), dtype=torch.float32).cuda()
        output = net(img)
        assert output[0].shape == (1, 3, 32, 32)
        assert len(output[1]) == 3
        # check out_rgbs for intermediate loss
        assert output[1][0].shape == (1, 3, 8, 8)
        assert output[1][1].shape == (1, 3, 16, 16)
        assert output[1][2].shape == (1, 3, 32, 32)

        # -------------------- with different_w = True ----------------------- #
        net = GFPGANv1(

            narrow=1,
            sft_half=True).cuda().eval()
        img = torch.rand((1, 3, 32, 32), dtype=torch.float32).cuda()
        output = net(img)
        assert output[0].shape == (1, 3, 32, 32)
        assert len(output[1]) == 3
        # check out_rgbs for intermediate loss
        assert output[1][0].shape == (1, 3, 8, 8)
        assert output[1][1].shape == (1, 3, 16, 16)
        assert output[1][2].shape == (1, 3, 32, 32)


def test_facialcomponentdiscriminator():

    if torch.cuda.is_available():
        net = FacialComponentDiscriminator().cuda().eval()
        img = torch.rand((1, 3, 32, 32), dtype=torch.float32).cuda()
        output = net(img)
        assert len(output) == 2
        assert output[0].shape == (1, 1, 8, 8)
        assert output[1] is None

        # -------------------- return intermediate features ----------------------- #
        output = net(img, return_feats=True)
        assert len(output) == 2
        assert output[0].shape == (1, 1, 8, 8)
        assert len(output[1]) == 2
        assert output[1][0].shape == (1, 128, 16, 16)
        assert output[1][1].shape == (1, 256, 8, 8)


def test_stylegan2generatorcsft():

        condition2 = torch.rand((1, 512, 16, 16), dtype=torch.float32).cuda()
        condition3 = torch.rand((1, 512, 32, 32), dtype=torch.float32).cuda()
        conditions = [condition1, condition1, condition2, condition2, condition3, condition3]
        output = net([style], conditions)
        assert output[0].shape == (1, 3, 32, 32)
        assert output[1] is None

        # -------------------- with return_latents ----------------------- #
        output = net([style], conditions, return_latents=True)
        assert output[0].shape == (1, 3, 32, 32)
        assert len(output[1]) == 1
        # check latent
        assert output[1][0].shape == (8, 512)

        # -------------------- with randomize_noise = False ----------------------- #
        output = net([style], conditions, randomize_noise=False)
        assert output[0].shape == (1, 3, 32, 32)
        assert output[1] is None

        # -------------------- with truncation = 0.5 and mixing ----------------------- #
        output = net([style, style], conditions, truncation=0.5, truncation_latent=style)
        assert output[0].shape == (1, 3, 32, 32)
        assert output[1] is None


def test_gfpganv1clean():

            sft_half=True).cuda().eval()

        img = torch.rand((1, 3, 32, 32), dtype=torch.float32).cuda()
        output = net(img)
        assert output[0].shape == (1, 3, 32, 32)
        assert len(output[1]) == 3
        # check out_rgbs for intermediate loss
        assert output[1][0].shape == (1, 3, 8, 8)
        assert output[1][1].shape == (1, 3, 16, 16)
        assert output[1][2].shape == (1, 3, 32, 32)

        # -------------------- with different_w = True ----------------------- #
        net = GFPGANv1Clean(

            narrow=1,
            sft_half=True).cuda().eval()
        img = torch.rand((1, 3, 32, 32), dtype=torch.float32).cuda()
        output = net(img)
        assert output[0].shape == (1, 3, 32, 32)
        assert len(output[1]) == 3
        # check out_rgbs for intermediate loss
        assert output[1][0].shape == (1, 3, 8, 8)
        assert output[1][1].shape == (1, 3, 16, 16)
        assert output[1][2].shape == (1, 3, 32, 32)
def test_gfpgan_model():
    with open('tests/data/test_gfpgan_model.yml', mode='r') as f:
        opt = yaml.load(f, Loader=yaml.FullLoader)

    # build model
    model = GFPGANModel(opt)
    # test attributes
    assert model.__class__.__name__ == 'GFPGANModel'
    assert isinstance(model.net_g, GFPGANv1)  # generator
    assert isinstance(model.net_d, StyleGAN2Discriminator)  # discriminator
    # facial component discriminators
    assert isinstance(model.net_d_left_eye, FacialComponentDiscriminator)
    assert isinstance(model.net_d_right_eye, FacialComponentDiscriminator)
    assert isinstance(model.net_d_mouth, FacialComponentDiscriminator)
    # identity network
    assert isinstance(model.network_identity, ResNetArcFace)
    # losses
    assert isinstance(model.cri_pix, L1Loss)
    assert isinstance(model.cri_perceptual, PerceptualLoss)
    assert isinstance(model.cri_gan, GANLoss)
    assert isinstance(model.cri_l1, L1Loss)
    # optimizer
    assert isinstance(model.optimizers[0], torch.optim.Adam)
    assert isinstance(model.optimizers[1], torch.optim.Adam)

    # prepare data
    gt = torch.rand((1, 3, 512, 512), dtype=torch.float32)

    loc_mouth = torch.rand((1, 4), dtype=torch.float32)
    data = dict(gt=gt, lq=lq, loc_left_eye=loc_left_eye, loc_right_eye=loc_right_eye, loc_mouth=loc_mouth)
    model.feed_data(data)
    # check data shape
    assert model.lq.shape == (1, 3, 512, 512)
    assert model.gt.shape == (1, 3, 512, 512)
    assert model.loc_left_eyes.shape == (1, 4)
    assert model.loc_right_eyes.shape == (1, 4)
    assert model.loc_mouths.shape == (1, 4)

    # ----------------- test optimize_parameters -------------------- #
    model.feed_data(data)
    model.optimize_parameters(1)
    assert model.output.shape == (1, 3, 512, 512)
    assert isinstance(model.log_dict, dict)
    # check returned keys
    expected_keys = [
        'l_g_pix', 'l_g_percep', 'l_g_style', 'l_g_gan', 'l_g_gan_left_eye', 'l_g_gan_right_eye', 'l_g_gan_mouth',
        'l_g_comp_style_loss', 'l_identity', 'l_d', 'real_score', 'fake_score', 'l_d_r1', 'l_d_left_eye',
        'l_d_right_eye', 'l_d_mouth'
    ]
    assert set(expected_keys).issubset(set(model.log_dict.keys()))

    # ----------------- remove pyramid_loss_weight -------------------- #
    model.feed_data(data)
    model.optimize_parameters(100000)  # larger than remove_pyramid_loss = 50000
    assert model.output.shape == (1, 3, 512, 512)
    assert isinstance(model.log_dict, dict)
    # check returned keys
    expected_keys = [
        'l_g_pix', 'l_g_percep', 'l_g_style', 'l_g_gan', 'l_g_gan_left_eye', 'l_g_gan_right_eye', 'l_g_gan_mouth',
        'l_g_comp_style_loss', 'l_identity', 'l_d', 'real_score', 'fake_score', 'l_d_r1', 'l_d_left_eye',
        'l_d_right_eye', 'l_d_mouth'
    ]
    assert set(expected_keys).issubset(set(model.log_dict.keys()))

    # ----------------- test save -------------------- #
    with tempfile.TemporaryDirectory() as tmpdir:

        model.save(0, 1)

    # ----------------- test the test function -------------------- #
    model.test()
    assert model.output.shape == (1, 3, 512, 512)
    # delete net_g_ema
    model.__delattr__('net_g_ema')
    model.test()
    assert model.output.shape == (1, 3, 512, 512)
    assert model.net_g.training is True  # should go back to training mode after testing

    # ----------------- test nondist_validation -------------------- #
    # construct dataloader

        scale=4,
        phase='val')
    dataset = PairedImageDataset(dataset_opt)
    dataloader = torch.utils.data.DataLoader(dataset=dataset, batch_size=1, shuffle=False, num_workers=0)
    assert model.is_train is True
    with tempfile.TemporaryDirectory() as tmpdir:
        model.opt['path']['visualization'] = tmpdir
        model.nondist_validation(dataloader, 1, None, save_img=True)
        assert model.is_train is True
        # check metric_results
        assert 'psnr' in model.metric_results
        assert isinstance(model.metric_results['psnr'], float)

    # validation
    with tempfile.TemporaryDirectory() as tmpdir:

        model.opt['path']['visualization'] = tmpdir
        model.opt['val']['pbar'] = True
        model.nondist_validation(dataloader, 1, None, save_img=True)
        # check metric_results
        assert 'psnr' in model.metric_results
        assert isinstance(model.metric_results['psnr'], float)

        # if opt['val']['suffix'] is None
        model.opt['val']['suffix'] = None
        model.opt['name'] = 'demo'
        model.opt['path']['visualization'] = tmpdir
        model.nondist_validation(dataloader, 1, None, save_img=True)
        # check metric_results
        assert 'psnr' in model.metric_results
        assert isinstance(model.metric_results['psnr'], float)
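# Editor's sketch: the methods exercised above compose into a minimal training loop.
# `train_loader` and `val_loader` are placeholders and the save/validation cadence is
# arbitrary; this is not code from the test file.
#
#   for current_iter, data in enumerate(train_loader, start=1):
#       model.feed_data(data)
#       model.optimize_parameters(current_iter)
#       if current_iter % 5000 == 0:
#           model.save(epoch=-1, current_iter=current_iter)
#           model.nondist_validation(val_loader, current_iter, None, save_img=True)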
        net = StyleGAN2GeneratorClean(
            out_size=32, num_style_feat=512, num_mlp=8, channel_multiplier=1, narrow=0.5).cuda().eval()
        style = torch.rand((1, 512), dtype=torch.float32).cuda()
        output = net([style], input_is_latent=False)
        assert output[0].shape == (1, 3, 32, 32)
        assert output[1] is None

        # -------------------- with return_latents ----------------------- #
        output = net([style], input_is_latent=True, return_latents=True)
        assert output[0].shape == (1, 3, 32, 32)
        assert len(output[1]) == 1
        # check latent
        assert output[1][0].shape == (8, 512)

        # -------------------- with randomize_noise = False ----------------------- #
        output = net([style], randomize_noise=False)
        assert output[0].shape == (1, 3, 32, 32)
        assert output[1] is None

        # -------------------- with truncation = 0.5 and mixing ----------------------- #
        output = net([style, style], truncation=0.5, truncation_latent=style)
        assert output[0].shape == (1, 3, 32, 32)
        assert output[1] is None

        # ------------------ test make_noise ----------------------- #
        out = net.make_noise()
        assert len(out) == 7
        assert out[0].shape == (1, 1, 4, 4)
        assert out[1].shape == (1, 1, 8, 8)
        assert out[2].shape == (1, 1, 8, 8)
        assert out[3].shape == (1, 1, 16, 16)
        assert out[4].shape == (1, 1, 16, 16)
        assert out[5].shape == (1, 1, 32, 32)
        assert out[6].shape == (1, 1, 32, 32)

        # ------------------ test get_latent ----------------------- #
        out = net.get_latent(style)
        assert out.shape == (1, 512)

        # ------------------ test mean_latent ----------------------- #
        out = net.mean_latent(2)
        assert out.shape == (1, 512)
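# Editor's note: the make_noise count above follows from out_size=32. With
# log_size = log2(32) = 5, the generator uses (log_size - 2) * 2 + 1 = 7 noise maps:
# one at 4x4 and two per resolution at 8x8, 16x16 and 32x32, matching the asserted shapes.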

    arch='clean',
    channel_multiplier=2,
    bg_upsampler=None)
# test attribute
assert isinstance(restorer.gfpgan, GFPGANv1Clean)
assert isinstance(restorer.face_helper, FaceRestoreHelper)

# initialize with the original model
restorer = GFPGANer(
    # ...
    arch='original',
    channel_multiplier=1,
    bg_upsampler=None)
# test attribute
assert isinstance(restorer.gfpgan, GFPGANv1)
assert isinstance(restorer.face_helper, FaceRestoreHelper)

# ------------------ test enhance ---------------- #
img = cv2.imread('tests/data/gt/00000000.png', cv2.IMREAD_COLOR)
result = restorer.enhance(img, has_aligned=False, paste_back=True)
assert result[0][0].shape == (512, 512, 3)
assert result[1][0].shape == (512, 512, 3)
assert result[2].shape == (1024, 1024, 3)

# with has_aligned=True
result = restorer.enhance(img, has_aligned=True, paste_back=False)
assert result[0][0].shape == (512, 512, 3)
assert result[1][0].shape == (512, 512, 3)
assert result[2] is None
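
The enhance checks above exercise the triple returned by GFPGANer.enhance: a list of cropped faces, a list of restored faces, and the pasted-back full image. A minimal usage sketch, assuming the v1.3 weights downloaded in setup() and a local test image (both paths are assumptions):

import cv2
from gfpgan import GFPGANer

restorer = GFPGANer(
    model_path='gfpgan/weights/GFPGANv1.3.pth',  # assumed weight location
    upscale=2,
    arch='clean',
    channel_multiplier=2,
    bg_upsampler=None)

img = cv2.imread('tests/data/gt/00000000.png', cv2.IMREAD_COLOR)
cropped_faces, restored_faces, restored_img = restorer.enhance(
    img, has_aligned=False, paste_back=True)
# each face crop is 512x512x3; restored_img is the full frame scaled by `upscale`
# (it is None when paste_back=False)
cv2.imwrite('output/restored.png', restored_img)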

  2) A bounding box that can be fed readily to tf.image.crop_to_bounding_box
     to undo the padding.
  """
  # Input checking.
  assert np.ndim(x) == 4
  assert align > 0, 'align must be a positive number.'

  height, width = x.shape[-3:-1]
  height_to_pad = (align - height % align) if height % align != 0 else 0
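
The fragment stops before the padding itself. A self-contained numpy sketch of the same idea — pad height and width up to the next multiple of align and return the box that crops the padding off again — where the split of the padding between the two sides and the padding mode are assumptions:

import numpy as np

def pad_to_align(x, align):
    # x is a batch of images shaped (batch, height, width, channels)
    assert np.ndim(x) == 4
    assert align > 0, 'align must be a positive number.'

    height, width = x.shape[-3:-1]
    height_to_pad = (align - height % align) if height % align != 0 else 0
    width_to_pad = (align - width % align) if width % align != 0 else 0

    # box that undoes the padding: (offset_height, offset_width, target_height, target_width)
    bbox_to_crop = (height_to_pad // 2, width_to_pad // 2, height, width)

    padded = np.pad(
        x,
        [[0, 0],
         [height_to_pad // 2, height_to_pad - height_to_pad // 2],
         [width_to_pad // 2, width_to_pad - width_to_pad // 2],
         [0, 0]],
        mode='edge')
    return padded, bbox_to_crop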

  height, width, channel = image.shape[-3:]
  patch_height, patch_width = height // block_height, width // block_width

  assert height == (
      patch_height * block_height
  ), 'block_height=%d should evenly divide height=%d.' % (block_height, height)
  assert width == (
      patch_width * block_width
  ), 'block_width=%d should evenly divide width=%d.' % (block_width, width)

  patch_size = patch_height * patch_width
  paddings = 2 * [[0, 0]]
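
These assertions only hold when the block grid tiles the image exactly. A minimal numpy sketch of such a split into a block_height x block_width grid of equally sized patches (the helper name and returned layout are assumptions, not the upstream API):

import numpy as np

def split_into_patches(image, block_height, block_width):
    # image is (height, width, channels); both spatial sizes must divide evenly
    height, width, channel = image.shape[-3:]
    patch_height, patch_width = height // block_height, width // block_width
    assert height == patch_height * block_height
    assert width == patch_width * block_width

    patches = image.reshape(block_height, patch_height, block_width, patch_width, channel)
    patches = patches.transpose(0, 2, 1, 3, 4)
    return patches.reshape(-1, patch_height, patch_width, channel)

blocks = split_into_patches(np.zeros((64, 96, 3)), block_height=4, block_width=6)
assert blocks.shape == (24, 16, 16, 3)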
42 "(2^times_to_interpolate + 1) frames, fps of 30.", 43 ) 44 def predict(self, frame1, frame2, times_to_interpolate): 45 INPUT_EXT = ['.png', '.jpg', '.jpeg'] 46 assert os.path.splitext(str(frame1))[-1] in INPUT_EXT and os.path.splitext(str(frame2))[-1] in INPUT_EXT, \ 47 "Please provide png, jpg or jpeg images." 48 49 # make sure 2 images are the same size 50 img1 = Image.open(str(frame1))

import os
import platform
import shlex
import stat
import subprocess
import sys
from contextlib import contextmanager
from importlib import import_module

        shell_cmd,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        text=True,
        shell=True,
        check=True,
    )

    stdout_lines = result.stdout.strip().split("\n")
    stderr_lines = result.stderr.strip().split("\n")

    # Print stdout, skipping ignored lines
    for line in stdout_lines:
        if not any(line.startswith(ign) for ign in ignored_lines_start):
            print(line)

        return None


def download_file(url, file_name):
    with requests.get(url, stream=True) as response:
        response.raise_for_status()
        total_size = int(response.headers.get("content-length", 0))
        with open(file_name, "wb") as file, tqdm(
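
download_file is cut off mid-statement above. One plausible completion of the streaming requests + tqdm pattern it starts — the chunk size and progress-bar options are assumptions:

import requests
from tqdm import tqdm

def download_file(url, file_name):
    with requests.get(url, stream=True) as response:
        response.raise_for_status()
        total_size = int(response.headers.get("content-length", 0))
        with open(file_name, "wb") as file, tqdm(
            total=total_size, unit="B", unit_scale=True, desc=str(file_name)
        ) as progress:
            for chunk in response.iter_content(chunk_size=8192):
                file.write(chunk)
                progress.update(len(chunk))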

    else:
        tag_url = (
            f"https://api.github.com/repos/{repo_owner}/{repo_name}/releases/latest"
        )
    response = requests.get(tag_url)
    if response.status_code == 404:
        # print_formatted(
        #     f"Tag version '{apply_color(version,'cyan')}' not found for {owner}/{repo} repository."
118 """ 119 # Input kernel length must be odd 120 k_l = len(kernel_1d) 121 122 assert k_l % 2 != 0 123 # Convolution is repeat-padded 124 extended = np.pad(array, k_l // 2, mode="wrap") 125 # Output has same size as input (padded, valid-mode convolution)

    else:
        base_url += f"?&{random_seed}"
    try:
        log.debug(f"Getting unsplash image from {base_url}")
        response = requests.get(base_url)
        response.raise_for_status()

        image = Image.open(io.BytesIO(response.content))

    url_values = urllib.parse.urlencode(data)
    url = f"http://{base_url}:{port}/view?{url_values}"
    log.debug(f"Fetching image from {url}")
    with urllib.request.urlopen(url) as response:
        return io.BytesIO(response.read())

    history_url = f"http://{base_url}:{port}/history"
    log.debug(f"Fetching history from {history_url}")
    output = torch.zeros(0)
    with urllib.request.urlopen(history_url) as response:
        output = self.load_batch_frames(response, offset, count, frames)

    if output.size(0) == 0:

    except ValueError:
        try:
            expression = expression.replace("^", "**")
            result = eval(expression)
        except Exception as e:
            # Handle any other exceptions and provide a meaningful error message
            raise ValueError(

    CATEGORY = "mtb/IO"

    def load(self, url):
        # get the image from the url
        image = Image.open(requests.get(url, stream=True).raw)
        image = ImageOps.exif_transpose(image)
        return (pil2tensor(image),)

import json
import subprocess
import uuid
from pathlib import Path
from typing import List, Optional

import comfy.model_management as model_management
import folder_paths
193 "-y", 194 out_path, 195 ] 196 log.debug(f"Executing {command}") 197 subprocess.run(command) 198 199 temp_playlist_path.unlink() 200
225 "gif", 226 "-y", 227 out_path, 228 ] 229 process = subprocess.Popen(command, stdin=subprocess.PIPE) 230 for frame in frames: 231 model_management.throw_exception_if_processing_interrupted() 232 Image.fromarray(frame).save(process.stdin, "PNG")
263 "-y", 264 out_path, 265 ] 266 267 process = subprocess.Popen(command, stdin=subprocess.PIPE) 268 269 for frame in frames: 270 model_management.throw_exception_if_processing_interrupted()
372 "gif", 373 "-y", 374 out_path, 375 ] 376 process = subprocess.Popen(command, stdin=subprocess.PIPE) 377 for image in pil_images: 378 model_management.throw_exception_if_processing_interrupted() 379 image.save(process.stdin, "PNG")

import os
import requests
from rich.console import Console
from tqdm import tqdm
import subprocess
import sys

try:

    try:
        import gdown
    except ImportError:
        print("Installing gdown")
        subprocess.check_call(
            [
                sys.executable,
                "-m",
                "pip",
                "install",
                "git+https://github.com/melMass/gdown@main",
            ]
        )
        import gdown

    if "/folders/" in download_url:

        # download from google drive
        gdown.download(download_url, destination, quiet=False, resume=True)
        return

    response = requests.get(download_url, stream=True)
    total_size = int(response.headers.get("content-length", 0))

    destination_path = os.path.join(destination, filename)

from rich.console import Console
from rich.progress import Progress

import numpy as np
import subprocess


def write_prores_444_video(output_file, frames: List[np.ndarray], fps):
49 "-y", # Overwrite output file if it already exists 50 output_file, 51 ] 52 53 process = subprocess.Popen(command, stdin=subprocess.PIPE) 54 55 for frame in frames: 56 process.stdin.write(frame.tobytes())

import os
import shlex
import shutil
import socket
import subprocess
import sys
import uuid
from pathlib import Path

            yield info[4][0]

    def _test_url(self, url):
        try:
            response = requests.get(url)
            return response.status_code == 200
        except Exception:
            return False

    from comfy.cli_args import args

    ip_checker = IPChecker()
    base_url = args.listen
    if base_url == "0.0.0.0":
        log.debug("Server set to 0.0.0.0, we will try to resolve the host IP")
        base_url = ip_checker.get_working_ip(
            f"http://{{}}:{args.port}/history"

        np.linspace(1, 0, num=ramp_h)[None], (1, 0)
    )

    # Assume tiles are square
    assert ramp_h == ramp_w
    # top-left corner
    corner = np.rot90(corner_mask(ramp_h), 2)
    mask[:ramp_h, :ramp_w] = corner

    width = img_size[2] + pad_left + pad_right
    stride_h, stride_w = stride_size

    # stride must be even
    assert (stride_h % 2 == 0) and (stride_w % 2 == 0)
    # stride must be greater than or equal to half the tile size
    assert (stride_h >= tile_h / 2) and (stride_w >= tile_w / 2)
    # stride must be smaller than or equal to the tile size
    assert (stride_h <= tile_h) and (stride_w <= tile_w)

    merged = np.zeros((img_size[0], height, width))
    mask = generate_mask((tile_h, tile_w), stride_size)

    stride_h, stride_w = stride_size
    img_h, img_w = img.shape[0], img.shape[1]

    # stride must be even
    assert (stride_h % 2 == 0) and (stride_w % 2 == 0)
    # stride must be greater than or equal to half the tile size
    assert (stride_h >= tile_h / 2) and (stride_w >= tile_w / 2)
    # stride must be smaller than or equal to the tile size
    assert (stride_h <= tile_h) and (stride_w <= tile_w)

    # find total height & width padding sizes
    pad_h, pad_w = 0, 0
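
The same three stride constraints appear in both the merge and the split paths: an even stride that is at least half the tile but no larger than the tile means every pixel is covered by at least one and at most four tiles, which keeps the overlap (and therefore the blending mask) bounded. A small hypothetical helper showing how much padding those constraints imply for a given image size:

def padding_for_tiling(img_h, img_w, tile, stride):
    # mirrors the assertions above: even stride, tile/2 <= stride <= tile
    assert stride % 2 == 0
    assert tile / 2 <= stride <= tile

    def pad(size):
        if size <= tile:
            return tile - size
        overshoot = (size - tile) % stride
        return 0 if overshoot == 0 else stride - overshoot

    return pad(img_h), pad(img_w)

# e.g. a 1000x1500 image with 512-pixel tiles and a stride of 384
print(padding_for_tiling(1000, 1500, tile=512, stride=384))  # (280, 164)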