39 import random 40 import re 41 import requests 42 import socket 43 import subprocess 44 import sys 45 import datetime 46 import time
# Freeze PIP modules
def packages(versions=False):
    """List the pip packages installed for the running interpreter.

    Args:
        versions: when True, return full ``name==version`` strings;
            when False (default), return bare package names.

    Returns:
        list[str]: one entry per line of ``pip freeze`` output.
    """
    import subprocess
    import sys
    frozen = subprocess.check_output([sys.executable, '-s', '-m', 'pip', 'freeze'])
    result = []
    for entry in frozen.split():
        text = entry.decode()
        result.append(text if versions else text.split('==')[0])
    return result
332 # Freeze PIP modules 333 def packages(versions=False): 334 import sys 335 import subprocess 336 return [( r.decode().split('==')[0] if not versions else r.decode() ) for r in subprocess.check_output([sys.executable, '-s', '-m', 'pip', 'freeze']).split()] 337 338 def install_package(package, uninstall_first: Union[List[str], str] = None): 339 if os.getenv("WAS_BLOCK_AUTO_INSTALL", 'False').lower() in ('true', '1', 't'):
345 if isinstance(uninstall_first, str): 346 uninstall_first = [uninstall_first] 347 348 cstr(f"Uninstalling {', '.join(uninstall_first)}..") 349 subprocess.check_call([sys.executable, '-s', '-m', 'pip', 'uninstall', *uninstall_first]) 350 cstr("Installing package...").msg.print() 351 subprocess.check_call([sys.executable, '-s', '-m', 'pip', '-q', 'install', package]) 352
347 348 cstr(f"Uninstalling {', '.join(uninstall_first)}..") 349 subprocess.check_call([sys.executable, '-s', '-m', 'pip', 'uninstall', *uninstall_first]) 350 cstr("Installing package...").msg.print() 351 subprocess.check_call([sys.executable, '-s', '-m', 'pip', '-q', 'install', package]) 352 353 # Tensor to PIL 354 def tensor2pil(image):
450 filename = url.split('/')[-1] 451 if not path: 452 path = '.' 453 save_path = os.path.join(path, filename) 454 response = requests.get(url, stream=True) 455 if response.status_code == requests.codes.ok: 456 file_size = int(response.headers.get('Content-Length', 0)) 457 with open(save_path, 'wb') as file:
474 # Fetch the NSP Pantry 475 if pantry_path is None: 476 pantry_path = DEFAULT_NSP_PANTRY_PATH 477 if not os.path.exists(pantry_path): 478 response = urlopen('https://raw.githubusercontent.com/WASasquatch/noodle-soup-prompts/main/nsp_pantry.json') 479 tmp_pantry = json.loads(response.read()) 480 # Dump JSON locally 481 pantry_serialized = json.dumps(tmp_pantry, indent=4)
499 tcount = new_text.count(tkey) 500 # Apply random results for each noodle counted 501 for _ in range(tcount): 502 new_text = new_text.replace( 503 tkey, random.choice(nspterminology[term]), 1) 504 seed += 1 505 random.seed(seed) 506
517 lines = file.readlines() 518 if lines: 519 random_line = None 520 while not random_line: 521 line = random.choice(lines).strip() 522 if not line.startswith('#') and not line.startswith('//'): 523 random_line = line 524 text = text.replace(key, random_line)
552 lines = file.readlines() 553 if lines: 554 random_line = None 555 while not random_line: 556 line = random.choice(lines).strip() 557 if not line.startswith('#') and not line.startswith('//'): 558 random_line = line 559 text = text.replace(key, random_line)
591 random.seed(seed) 592 593 def replace_match(match): 594 options = match.group(1).split('|') 595 return random.choice(options) 596 597 parse_prompt = re.sub(r'\<(.*?)\>', replace_match, prompt) 598 while re.search(r'\<(.*?)\>', parse_prompt):
1764 1765 particles = Image.new('RGBA', (width, height), (0, 0, 0, 0)) 1766 draw = ImageDraw.Draw(particles) 1767 for i in range(5000): 1768 x = random.randint(0, width) 1769 y = random.randint(0, height) 1770 r = random.randint(0, 255) 1771 g = random.randint(0, 255)
1765 particles = Image.new('RGBA', (width, height), (0, 0, 0, 0)) 1766 draw = ImageDraw.Draw(particles) 1767 for i in range(5000): 1768 x = random.randint(0, width) 1769 y = random.randint(0, height) 1770 r = random.randint(0, 255) 1771 g = random.randint(0, 255) 1772 b = random.randint(0, 255)
1766 draw = ImageDraw.Draw(particles) 1767 for i in range(5000): 1768 x = random.randint(0, width) 1769 y = random.randint(0, height) 1770 r = random.randint(0, 255) 1771 g = random.randint(0, 255) 1772 b = random.randint(0, 255) 1773 draw.point((x, y), fill=(r, g, b, 255))
1767 for i in range(5000): 1768 x = random.randint(0, width) 1769 y = random.randint(0, height) 1770 r = random.randint(0, 255) 1771 g = random.randint(0, 255) 1772 b = random.randint(0, 255) 1773 draw.point((x, y), fill=(r, g, b, 255)) 1774 particles = particles.filter(ImageFilter.GaussianBlur(radius=1))
1768 x = random.randint(0, width) 1769 y = random.randint(0, height) 1770 r = random.randint(0, 255) 1771 g = random.randint(0, 255) 1772 b = random.randint(0, 255) 1773 draw.point((x, y), fill=(r, g, b, 255)) 1774 particles = particles.filter(ImageFilter.GaussianBlur(radius=1)) 1775 particles.putalpha(128)
1776 1777 particles2 = Image.new('RGBA', (width, height), (0, 0, 0, 0)) 1778 draw = ImageDraw.Draw(particles2) 1779 for i in range(5000): 1780 x = random.randint(0, width) 1781 y = random.randint(0, height) 1782 r = random.randint(0, 255) 1783 g = random.randint(0, 255)
1777 particles2 = Image.new('RGBA', (width, height), (0, 0, 0, 0)) 1778 draw = ImageDraw.Draw(particles2) 1779 for i in range(5000): 1780 x = random.randint(0, width) 1781 y = random.randint(0, height) 1782 r = random.randint(0, 255) 1783 g = random.randint(0, 255) 1784 b = random.randint(0, 255)
1778 draw = ImageDraw.Draw(particles2) 1779 for i in range(5000): 1780 x = random.randint(0, width) 1781 y = random.randint(0, height) 1782 r = random.randint(0, 255) 1783 g = random.randint(0, 255) 1784 b = random.randint(0, 255) 1785 draw.point((x, y), fill=(r, g, b, 255))
1779 for i in range(5000): 1780 x = random.randint(0, width) 1781 y = random.randint(0, height) 1782 r = random.randint(0, 255) 1783 g = random.randint(0, 255) 1784 b = random.randint(0, 255) 1785 draw.point((x, y), fill=(r, g, b, 255)) 1786 particles2 = particles2.filter(ImageFilter.GaussianBlur(radius=1))
1780 x = random.randint(0, width) 1781 y = random.randint(0, height) 1782 r = random.randint(0, 255) 1783 g = random.randint(0, 255) 1784 b = random.randint(0, 255) 1785 draw.point((x, y), fill=(r, g, b, 255)) 1786 particles2 = particles2.filter(ImageFilter.GaussianBlur(radius=1)) 1787 particles2.putalpha(128)
4555 4556 for _ in range(black_mix): 4557 for x in range(width): 4558 for y in range(height): 4559 if random.randint(0,1) == 1: 4560 black_noise.putpixel((x, y), (0, 0, 0, 255)) 4561 4562 randomized_image = Image.alpha_composite(randomized_image, black_noise)
5149 if image == None: 5150 cstr(f"No valid image was found for the next ID. Did you remove images from the source directory?").error.print() 5151 return (None, None) 5152 else: 5153 newindex = int(random.random() * len(fl.image_paths)) 5154 image, filename = fl.get_image_by_id(newindex) 5155 if image == None: 5156 cstr(f"No valid image was found for the next ID. Did you remove images from the source directory?").error.print()
6106 num_pixels = int(density * img_gray.size[0] * img_gray.size[1]) 6107 6108 noise_pixels = [] 6109 for i in range(num_pixels): 6110 x = random.randint(0, img_gray.size[0]-1) 6111 y = random.randint(0, img_gray.size[1]-1) 6112 noise_pixels.append((x, y)) 6113
6107 6108 noise_pixels = [] 6109 for i in range(num_pixels): 6110 x = random.randint(0, img_gray.size[0]-1) 6111 y = random.randint(0, img_gray.size[1]-1) 6112 noise_pixels.append((x, y)) 6113 6114 for x, y in noise_pixels:
6111 y = random.randint(0, img_gray.size[1]-1) 6112 noise_pixels.append((x, y)) 6113 6114 for x, y in noise_pixels: 6115 value = random.randint(0, 255) 6116 img_gray.putpixel((x, y), value) 6117 6118 img_noise = img_gray.convert('RGB')
7416 return (image, mask, filename) 7417 7418 def download_image(self, url): 7419 try: 7420 response = requests.get(url) 7421 response.raise_for_status() 7422 img = Image.open(BytesIO(response.content)) 7423 return img
def text_random_line(self, text, seed):
    """Seed the RNG with ``seed`` and return one random line of ``text``.

    Args:
        text: newline-separated text to pick from.
        seed: value passed to ``random.seed`` for reproducible picks.

    Returns:
        tuple[str]: the chosen line, as a 1-tuple.
    """
    random.seed(seed)
    candidates = text.split("\n")
    picked = random.choice(candidates)
    return (picked, )
10246 for term in dictionary.keys(): 10247 tkey = f'{replacement_key}{term}{replacement_key}' 10248 tcount = new_text.count(tkey) 10249 for _ in range(tcount): 10250 new_text = new_text.replace(tkey, random.choice(dictionary[term]), 1) 10251 if seed > 0 or seed < 0: 10252 seed = seed + 1 10253 random.seed(seed)
10925 return (self.search_lexica_art(search_seed), ) 10926 10927 def search_lexica_art(self, query=None): 10928 if not query: 10929 query = random.choice(["portrait","landscape","anime","superhero","animal","nature","scenery"]) 10930 url = f"https://lexica.art/api/v1/search?q={query}" 10931 try: 10932 response = requests.get(url)
10928 if not query: 10929 query = random.choice(["portrait","landscape","anime","superhero","animal","nature","scenery"]) 10930 url = f"https://lexica.art/api/v1/search?q={query}" 10931 try: 10932 response = requests.get(url) 10933 data = response.json() 10934 images = data.get("images", []) 10935 if not images:
10933 data = response.json() 10934 images = data.get("images", []) 10935 if not images: 10936 return "404 not found error" 10937 random_image = random.choice(images) 10938 prompt = random_image.get("prompt") 10939 except Exception: 10940 cstr("Unable to establish connection to Lexica API.").error.print()
11366 11367 sam_file = os.path.join(sam_dir, model_filename) 11368 if not os.path.exists(sam_file): 11369 cstr("Selected SAM model not found. Downloading...").msg.print() 11370 r = requests.get(model_url, allow_redirects=True) 11371 open(sam_file, 'wb').write(r.content) 11372 11373 from segment_anything import build_sam_vit_h, build_sam_vit_l, build_sam_vit_b
11927 11928 # Return random number 11929 if number_type: 11930 if number_type == 'integer': 11931 number = random.randint(minimum, maximum) 11932 elif number_type == 'float': 11933 number = random.uniform(minimum, maximum) 11934 elif number_type == 'bool':
11929 if number_type: 11930 if number_type == 'integer': 11931 number = random.randint(minimum, maximum) 11932 elif number_type == 'float': 11933 number = random.uniform(minimum, maximum) 11934 elif number_type == 'bool': 11935 number = random.random() 11936 else:
11931 number = random.randint(minimum, maximum) 11932 elif number_type == 'float': 11933 number = random.uniform(minimum, maximum) 11934 elif number_type == 'bool': 11935 number = random.random() 11936 else: 11937 return 11938
11996 }, 11997 "id": 1 11998 } 11999 12000 response = requests.post(url, headers=headers, data=json.dumps(payload)) 12001 if response.status_code == 200: 12002 data = response.json() 12003 if "result" in data:
13625 @classmethod 13626 def INPUT_TYPES(cls): 13627 return { 13628 "required": { 13629 "latent_suffix": ("STRING", {"default": str(random.randint(999999, 99999999))+"_cache", "multiline":False}), 13630 "image_suffix": ("STRING", {"default": str(random.randint(999999, 99999999))+"_cache", "multiline":False}), 13631 "conditioning_suffix": ("STRING", {"default": str(random.randint(999999, 99999999))+"_cache", "multiline":False}), 13632 },
13626 def INPUT_TYPES(cls): 13627 return { 13628 "required": { 13629 "latent_suffix": ("STRING", {"default": str(random.randint(999999, 99999999))+"_cache", "multiline":False}), 13630 "image_suffix": ("STRING", {"default": str(random.randint(999999, 99999999))+"_cache", "multiline":False}), 13631 "conditioning_suffix": ("STRING", {"default": str(random.randint(999999, 99999999))+"_cache", "multiline":False}), 13632 }, 13633 "optional": {
13627 return { 13628 "required": { 13629 "latent_suffix": ("STRING", {"default": str(random.randint(999999, 99999999))+"_cache", "multiline":False}), 13630 "image_suffix": ("STRING", {"default": str(random.randint(999999, 99999999))+"_cache", "multiline":False}), 13631 "conditioning_suffix": ("STRING", {"default": str(random.randint(999999, 99999999))+"_cache", "multiline":False}), 13632 }, 13633 "optional": { 13634 "output_path": ("STRING", {"default": os.path.join(WAS_SUITE_ROOT, 'cache'), "multiline": False}),
14252 '\033[93m"The biggest risk is not taking any risk. In a world that is changing quickly, the only strategy that is guaranteed to fail is not taking risks."\033[0m\033[3m - Mark Zuckerberg', 14253 '\033[93m"The journey of a thousand miles begins with one step."\033[0m\033[3m - Lao Tzu', 14254 '\033[93m"Every strike brings me closer to the next home run."\033[0m\033[3m - Babe Ruth', 14255 ] 14256 print(f'\n\t\033[3m{random.choice(art_quotes)}\033[0m\n')
355 outputs = self_attention_outputs[1:-1] 356 present_key_value = self_attention_outputs[-1] 357 358 if mode=='multimodal': 359 assert encoder_hidden_states is not None, "encoder_hidden_states must be given for cross-attention layers" 360 361 cross_attention_outputs = self.crossattention( 362 attention_output,
50 51 52 def forward(self, image, caption, mode): 53 54 assert mode in ['image', 'text', 'multimodal'], "mode parameter must be image, text, or multimodal" 55 text = self.tokenizer(caption, return_tensors="pt").to(image.device) 56 57 if mode=='image':
def blip_decoder(pretrained='', **kwargs):
    """Build a BLIP_Decoder model, optionally initializing it from a checkpoint.

    Args:
        pretrained: checkpoint path/URL; when empty (default) the freshly
            constructed model is returned untouched.
        **kwargs: forwarded verbatim to the ``BLIP_Decoder`` constructor.

    Returns:
        The ``BLIP_Decoder`` instance, with checkpoint weights loaded when
        ``pretrained`` was given.

    Raises:
        AssertionError: if the checkpoint did not provide every model key.
    """
    model = BLIP_Decoder(**kwargs)
    if pretrained:
        model, msg = load_checkpoint(model, pretrained)
        # Fail loudly (and informatively) if any model parameter was not
        # covered by the checkpoint; the bare assert previously gave no detail.
        assert len(msg.missing_keys) == 0, \
            f"checkpoint is missing keys: {msg.missing_keys}"
    return model
def blip_feature_extractor(pretrained='', **kwargs):
    """Build a BLIP_Base feature-extractor, optionally loading a checkpoint.

    Args:
        pretrained: checkpoint path/URL; when empty (default) the freshly
            constructed model is returned untouched.
        **kwargs: forwarded verbatim to the ``BLIP_Base`` constructor.

    Returns:
        The ``BLIP_Base`` instance, with checkpoint weights loaded when
        ``pretrained`` was given.

    Raises:
        AssertionError: if the checkpoint did not provide every model key.
    """
    model = BLIP_Base(**kwargs)
    if pretrained:
        model, msg = load_checkpoint(model, pretrained)
        # Fail loudly (and informatively) if any model parameter was not
        # covered by the checkpoint; the bare assert previously gave no detail.
        assert len(msg.missing_keys) == 0, \
            f"checkpoint is missing keys: {msg.missing_keys}"
    return model
198 199 200 def create_vit(vit, image_size, use_grad_checkpointing=False, ckpt_layer=0, drop_path_rate=0): 201 202 assert vit in ['base', 'large'], "vit parameter must be base or large" 203 if vit=='base': 204 vision_width = 768 205 visual_encoder = VisionTransformer(img_size=image_size, patch_size=16, embed_dim=vision_width, depth=12,