def fix(self, line: str, *, strict: bool = True) -> FixResult:
    # Fast path: the line already parses as valid JSON.
    try:
        json.loads(line, strict=strict)
        return FixResult(success=True, line=line, origin=True)
    except Exception:
        pass

    ok, new_line = self.fixwithtry(line, strict=strict)
    return FixResult(success=ok, line=new_line, origin=False)
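A minimal usage sketch of this repair entry point; the JSONFixer class name and the sample inputs are assumptions, and only fix()'s signature and the FixResult fields (success, line, origin) come from the excerpt above.

# Hypothetical driver; only fix()'s signature and FixResult's
# success/line/origin fields are taken from the excerpt.
fixer = JSONFixer()  # class name assumed
for raw in ('{"a": 1}', '{"a": 1,'):
    result = fixer.fix(raw)
    print(result.success, result.origin, result.line)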
285 """ 286 287 288 def errmsg_inv(e: ValueError) -> Dict[str, Any]: 289 assert isinstance(e, PyJSONDecodeError) 290 parser = e.__dict__.get("parser", "") 291 errmsg = e.msg 292 localerr = errors.get_decode_error(parser, errmsg)
    Returns a tensor with the same dimensions as the input, i.e.,
    [1, T, D] or [T, D].
    """
    shape_len = len(seq_embeds.shape)
    assert 2 <= shape_len <= 3
    len_seq = seq_embeds.size(-2)
    assert len_seq <= self.max_seq_len
    pos_embeds = self.pos_idx_to_embed[0:len_seq, :]
    # Adapt pre-computed positional embeddings to the input.
    if shape_len == 3:
    Returns a tensor with the same dimensions as the input, i.e.,
    [1, T, D] or [T, D].
    """
    shape_len = len(seq_embeds.shape)
    assert 2 <= shape_len <= 3
    len_seq = seq_embeds.size(-2)
    assert len_seq <= self.num_pos
    # [T, D]
    pos_embeds = self.embeddings(
        torch.arange(len_seq).to(seq_embeds.device))
def forward(self, x, size):
    B, N, C = x.shape
    H, W = size
    assert N == H * W

    # The depthwise conv expects [B, C, H, W]; restore the spatial layout first.
    x = self.dw(x.transpose(1, 2).view(B, C, H, W))
    size = (x.size(-2), x.size(-1))
def forward(self, x, size):
    H, W = size
    B, L, C = x.shape
    assert L == H * W, "input feature has wrong size"

    x = x.view(B, H, W, C)
self.num_heads = num_heads
self.num_groups = num_groups
self.num_stages = len(self.embed_dims)
self.enable_checkpoint = enable_checkpoint
assert self.num_stages == len(self.num_heads) == len(self.num_groups)

num_stages = len(embed_dims)
dpr = [x.item() for x in torch.linspace(
)
class Florence2VisionModel(Florence2PreTrainedModel):
    def __init__(self, config: Florence2VisionConfig):
        super().__init__(config)
        assert config.model_type == 'davit', 'only DaViT is supported for now'
        self.vision_tower = DaViT.from_config(config=config)

        self.post_init()
)
class Florence2VisionModelWithProjection(Florence2PreTrainedModel):
    def __init__(self, config: Florence2VisionConfig):
        super().__init__(config)
        assert config.model_type == 'davit', 'only DaViT is supported for now'
        self.vision_tower = DaViT.from_config(config=config)

        self._build_image_projection_layers(config)
if self.image_pos_embed is not None:
    x = x.view(batch_size * T, -1, x.shape[-1])
    num_tokens = x.shape[-2]
    h, w = int(num_tokens ** 0.5), int(num_tokens ** 0.5)
    assert h * w == num_tokens, 'only support square feature maps for now'
    x = x.view(batch_size * T, h, w, x.shape[-1])
    pos_embed = self.image_pos_embed(x)
    x = x + pos_embed
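As an aside, the same square-grid recovery can be written with math.isqrt, which is exact for integers, whereas int(num_tokens ** 0.5) can round down for very large perfect squares; a hedged sketch with an illustrative helper name:

import math

def tokens_to_grid(num_tokens):
    # Exact integer square root; avoids float rounding for large counts.
    side = math.isqrt(num_tokens)
    assert side * side == num_tokens, 'only square feature maps are supported'
    return side, side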
)
class Florence2ForConditionalGeneration(Florence2PreTrainedModel):
    def __init__(self, config: Florence2Config):
        super().__init__(config)
        assert config.vision_config.model_type == 'davit', 'only DaViT is supported for now'
        del config.vision_config.model_type
        self.vision_tower = DaViT.from_config(config=config.vision_config)
        # remove unused layers
if self.image_pos_embed is not None:
    x = x.view(batch_size * T, -1, x.shape[-1])
    num_tokens = x.shape[-2]
    h, w = int(num_tokens ** 0.5), int(num_tokens ** 0.5)
    assert h * w == num_tokens, 'only support square feature maps for now'
    x = x.view(batch_size * T, h, w, x.shape[-1])
    pos_embed = self.image_pos_embed(x)
    x = x + pos_embed
        widget_name_zh = widget_name
        for k, v in ZH_Replace_Map.items():
            widget_name_zh = widget_name_zh.replace(k, v)
        nodes[key]["outputs"][widget_name] = widget_name_zh
    except Exception:
        # Skip entries that cannot be translated rather than aborting.
        pass

with open(translation_config, "w", encoding="utf-8") as f:
    f.write(json.dumps(nodes, indent=4, ensure_ascii=False))
import importlib
import json
import os
import shutil
import subprocess
import sys

import torch

try:
print(f"pip install {wheel_url}")
modelscope_url = f"https://www.modelscope.cn/api/v1/models/wailovet/MinusZoneAIModels/repo?Revision=master&FilePath=llama-cpp-python-win%2F{cuda_version}%2F{wheel_name}"
if mz_prompt_utils.Utils.testDownloadSpeed(wheel_url):
    ret = subprocess.run([
        sys.executable, "-m",
        "pip", "install", wheel_url], check=True)
elif mz_prompt_utils.Utils.testDownloadSpeed(modelscope_url):
    import tempfile
    whl_download_file = os.path.join(
        tempfile.gettempdir(), wheel_name)
    mz_prompt_utils.Utils.download_file(
        modelscope_url, whl_download_file)
    print(f"pip install {whl_download_file}")
    ret = subprocess.run([
        sys.executable, "-m",
        "pip", "install", whl_download_file], check=True)
else:

    # Fallback plan
            tempfile.gettempdir(), wheel_name)
        mz_prompt_utils.Utils.download_file(
            modelscope_url, whl_download_file)
        print(f"pip install {whl_download_file}")
        ret = subprocess.run([
            sys.executable, "-m",
            "pip", "install", whl_download_file], check=True)
    else:
        ret = subprocess.run([
            sys.executable, "-m",
            "pip", "install", wheel_url], check=True)

if ret.returncode != 0:
    # Note: check=True above already raises on a nonzero exit,
    # so this guard is effectively redundant.
    raise ValueError("Failed to install llama_cpp.")
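The fallback chain above condenses into a reusable pattern; a sketch under the assumption that test_speed and download stand in for the repo's own mz_prompt_utils.Utils.testDownloadSpeed and download_file helpers:

import os
import subprocess
import sys
import tempfile

def install_wheel_with_fallback(primary_url, mirror_url, wheel_name,
                                test_speed, download):
    # Prefer whichever source is reachable; fall back to pip on the
    # primary URL as a last resort.
    if test_speed(primary_url):
        target = primary_url
    elif test_speed(mirror_url):
        # Fetch the wheel to a temp file first, then install from disk.
        target = os.path.join(tempfile.gettempdir(), wheel_name)
        download(mirror_url, target)
    else:
        target = primary_url
    subprocess.run([sys.executable, "-m", "pip", "install", target],
                   check=True)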
import json
import os
import subprocess
import sys

from . import mz_prompt_utils
from . import mz_llama_cpp
from . import mz_llama_core_nodes
try:
    from openai import OpenAI
    import openai
except ImportError:
    # Install on demand, then retry the import.
    subprocess.check_call(
        [sys.executable, "-m", "pip", "install", "openai"])
    from openai import OpenAI
    import openai
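This import-or-install pattern (used again for argostranslate below) generalizes; a minimal sketch with an assumed helper name:

import importlib
import subprocess
import sys

def ensure_package(module_name, pip_name=None):
    # Import a module, installing it via pip on first failure.
    try:
        return importlib.import_module(module_name)
    except ImportError:
        subprocess.check_call(
            [sys.executable, "-m", "pip", "install", pip_name or module_name])
        return importlib.import_module(module_name)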
import json
import os
import shutil
import subprocess
import sys
import threading
import time
class Utils:
    @staticmethod
    def Md5(s):
        return hashlib.md5(s.encode('utf-8')).hexdigest()

    @staticmethod
    def check_frames_path(frames_path):
try:
    import argostranslate
    from argostranslate import translate, package
except ImportError:
    subprocess.run([
        sys.executable, "-m",
        "pip", "install", "argostranslate"], check=True)

    try:
        import argostranslate
def get_device():
    return comfy.model_management.get_torch_device()

def download_small_file(url, filepath):
    response = requests.get(url)
    response.raise_for_status()  # fail fast instead of writing an error page
    os.makedirs(os.path.dirname(filepath), exist_ok=True)
    with open(filepath, "wb") as f:
        f.write(response.content)
    return filepath

def download_file(url, filepath, threads=8, retries=6):
    # Probe the total size via the Content-Length header before chunking.
    get_size_tmp = requests.get(url, stream=True)
    total_size = int(get_size_tmp.headers.get("content-length", 0))

    print(f"Downloading {url} to {filepath} with size {total_size} bytes")
        continue
    # print(f"Downloading {cache_filepath} with headers bytes={start + existing_size}-{end}")

    # Streaming, so we can iterate over the response.
    response = requests.get(url, stream=True, headers=headers)

    def download_file_thread(response, cache_filepath):
        block_size = 1024
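For reference, a single-threaded sketch of the Range-header resume pattern the threaded downloader builds on; the helper name is illustrative, and only the bytes=start- header format is standard HTTP (RFC 7233):

import os
import requests

def resume_download(url, path, block_size=1024):
    # Request only the bytes we do not have yet.
    existing = os.path.getsize(path) if os.path.exists(path) else 0
    headers = {"Range": f"bytes={existing}-"}
    response = requests.get(url, stream=True, headers=headers)
    response.raise_for_status()
    with open(path, "ab") as f:
        for chunk in response.iter_content(block_size):
            f.write(chunk)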
        cache_json[key] = value

        with open(cache_json_file, "w", encoding="utf-8") as f:
            json.dump(cache_json, f, indent=4)
    except Exception:
        # Cache writes are best-effort; ignore failures.
        pass

def file_sha256(file_path):
    # Get the file's modification time
source_model_zoo_json = []
try:
    with open(source_model_zoo_file, "r", encoding="utf-8") as f:
        source_model_zoo_json = json.load(f)
except Exception:
    # Fall back to an empty zoo if the file is missing or malformed.
    pass

# Utils.print_log(f"source_model_zoo_json: {json.dumps(source_model_zoo_json, indent=4)}")
if tags_filter is not None:
import json
import os
import shutil
import subprocess
import traceback


def transformers_captioner(args_dict, myfunc):
if not os.path.exists(model_path):
    # Equivalent to: GIT_LFS_SKIP_SMUDGE=1 git clone https://www.modelscope.cn/AI-ModelScope/Florence-2-large.git
    original_env = os.environ.get("GIT_LFS_SKIP_SMUDGE")
    os.environ["GIT_LFS_SKIP_SMUDGE"] = "1"
    subprocess.run(
        ["git", "clone", "https://www.modelscope.cn/AI-ModelScope/Florence-2-large.git", model_path],
        check=True)
    if original_env is not None:
        os.environ["GIT_LFS_SKIP_SMUDGE"] = original_env
    else:
        # The variable was unset before; unset it again rather than leaving "1" behind.
        os.environ.pop("GIT_LFS_SKIP_SMUDGE", None)
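The save-set-restore dance around GIT_LFS_SKIP_SMUDGE is a natural fit for a context manager; a sketch with an assumed name temp_env:

import os
import subprocess
from contextlib import contextmanager

@contextmanager
def temp_env(name, value):
    # Set an environment variable for the duration of a block, then
    # restore it (or unset it if it was absent), even on exceptions.
    original = os.environ.get(name)
    os.environ[name] = value
    try:
        yield
    finally:
        if original is not None:
            os.environ[name] = original
        else:
            os.environ.pop(name, None)

# Usage:
# with temp_env("GIT_LFS_SKIP_SMUDGE", "1"):
#     subprocess.run(["git", "clone", repo_url, model_path], check=True)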