import os
import random
import sys
from typing import Sequence, Mapping, Any, Union

import torch

def get_value_at_index(obj: Union[Sequence, Mapping], index: int) -> Any:
    """Return the value at `index` from a node's output sequence, or, for nodes
    that return a mapping, the value at `index` inside its "result" entry."""
    try:
        return obj[index]
    except KeyError:
        return obj["result"][index]

def find_path(name: str, path: str = None) -> str:
    """Search for `name` starting at `path` (defaults to the current working
    directory) and walking up through parent directories; return its full path
    if found, otherwise None."""
    if path is None:
        path = os.getcwd()

    # Check the current directory first.
    if name in os.listdir(path):
        path_name = os.path.join(path, name)
        print(f"{name} found: {path_name}")
        return path_name

    # Stop once the filesystem root is reached.
    parent_directory = os.path.dirname(path)
    if parent_directory == path:
        return None

    # Otherwise keep searching one level up.
    return find_path(name, parent_directory)

def add_comfyui_directory_to_sys_path() -> None:
    """Add the ComfyUI directory to sys.path so its modules can be imported."""
    comfyui_path = find_path("ComfyUI")
    if comfyui_path and os.path.isdir(comfyui_path):
        sys.path.append(comfyui_path)
        print(f"'{comfyui_path}' added to sys.path")

def add_extra_model_paths() -> None:
    """Parse the optional extra_model_paths.yaml file and register its model paths."""
    try:
        from main import load_extra_path_config
    except ImportError:
        print("Could not import load_extra_path_config from main.py; falling back to utils.extra_config.")
        from utils.extra_config import load_extra_path_config

    extra_model_paths = find_path("extra_model_paths.yaml")
    if extra_model_paths:
        load_extra_path_config(extra_model_paths)
    else:
        print("Could not find the extra_model_paths config file.")

add_comfyui_directory_to_sys_path()
add_extra_model_paths()

async def import_custom_nodes() -> None:
    """Set up a minimal PromptServer and PromptQueue, then load all custom node
    definitions so they are registered in NODE_CLASS_MAPPINGS."""
    import asyncio
    import execution
    from nodes import init_extra_nodes
    import server

    # Custom nodes expect a server instance bound to an event loop and a prompt queue.
    loop = asyncio.get_event_loop()
    server_instance = server.PromptServer(loop)
    execution.PromptQueue(server_instance)

    # Load the extra (custom) node definitions.
    await init_extra_nodes()


from nodes import NODE_CLASS_MAPPINGS

async def main():
    await import_custom_nodes()
    with torch.inference_mode(), torch.autocast("cuda", dtype=torch.bfloat16):
        # Load the VAE used to encode the input image and decode the result.
        vaeloader = NODE_CLASS_MAPPINGS["VAELoader"]()
        vaeloader_39 = vaeloader.load_vae(vae_name="ae.safetensors")

        # Load the input image and rescale it to roughly one megapixel.
        loadimage = NODE_CLASS_MAPPINGS["LoadImage"]()
        loadimage_133 = loadimage.load_image(image="testImage_1.jpg")

        imagescaletototalpixels = NODE_CLASS_MAPPINGS["ImageScaleToTotalPixels"]()
        imagescaletototalpixels_187 = imagescaletototalpixels.upscale(
            upscale_method="bicubic",
            megapixels=1,
            image=get_value_at_index(loadimage_133, 0),
        )

        # Encode the scaled image into the latent space.
        vaeencode = NODE_CLASS_MAPPINGS["VAEEncode"]()
        vaeencode_124 = vaeencode.encode(
            pixels=get_value_at_index(imagescaletototalpixels_187, 0),
            vae=get_value_at_index(vaeloader_39, 0),
        )
        # Load the two text encoders used for Flux conditioning.
        dualcliploader = NODE_CLASS_MAPPINGS["DualCLIPLoader"]()
        dualcliploader_184 = dualcliploader.load_clip(
            clip_name1="model.safetensors",
            clip_name2="t5xxl_fp8_e4m3fn.safetensors",
            type="flux",
            device="default",
        )

        # Positive prompt describing the desired edit, plus an empty negative prompt.
        cliptextencode = NODE_CLASS_MAPPINGS["CLIPTextEncode"]()
        cliptextencode_181 = cliptextencode.encode(
            text="Turn this into a photorealistic Na\u2019vi character from Avatar, with blue bioluminescent skin, large eyes, and set in the glowing jungle of Pandora.",
            clip=get_value_at_index(dualcliploader_184, 0),
        )
        cliptextencode_182 = cliptextencode.encode(
            text="", clip=get_value_at_index(dualcliploader_184, 0)
        )
        # Load the Flux Kontext checkpoint and apply the Avatar LoRA on top of it.
        checkpointloadersimple = NODE_CLASS_MAPPINGS["CheckpointLoaderSimple"]()
        checkpointloadersimple_188 = checkpointloadersimple.load_checkpoint(
            ckpt_name="flux1-kontext-dev.safetensors"
        )

        loraloadermodelonly = NODE_CLASS_MAPPINGS["LoraLoaderModelOnly"]()
        loraloadermodelonly_186 = loraloadermodelonly.load_lora_model_only(
            lora_name="Avataar_LoRA_000003000.safetensors",
            strength_model=1,
            model=get_value_at_index(checkpointloadersimple_188, 0),
        )

        # Instantiate the remaining nodes once, outside the generation loop.
        referencelatent = NODE_CLASS_MAPPINGS["ReferenceLatent"]()
        fluxguidance = NODE_CLASS_MAPPINGS["FluxGuidance"]()
        ksampler = NODE_CLASS_MAPPINGS["KSampler"]()
        vaedecode = NODE_CLASS_MAPPINGS["VAEDecode"]()
        saveimage = NODE_CLASS_MAPPINGS["SaveImage"]()
        for _ in range(1):
            # Attach the encoded input image as the reference latent for the
            # Kontext edit, then set the Flux guidance strength.
            referencelatent_176 = referencelatent.append(
                conditioning=get_value_at_index(cliptextencode_181, 0),
                latent=get_value_at_index(vaeencode_124, 0),
            )

            fluxguidance_179 = fluxguidance.append(
                guidance=4.5, conditioning=get_value_at_index(referencelatent_176, 0)
            )

            # Sample the edited latent, decode it with the VAE, and save the image.
            ksampler_178 = ksampler.sample(
                seed=42,
                steps=25,
                cfg=1,
                sampler_name="euler",
                scheduler="simple",
                denoise=1,
                model=get_value_at_index(loraloadermodelonly_186, 0),
                positive=get_value_at_index(fluxguidance_179, 0),
                negative=get_value_at_index(cliptextencode_182, 0),
                latent_image=get_value_at_index(vaeencode_124, 0),
            )

            vaedecode_177 = vaedecode.decode(
                samples=get_value_at_index(ksampler_178, 0),
                vae=get_value_at_index(vaeloader_39, 0),
            )

            saveimage_180 = saveimage.save_images(
                filename_prefix="ComfyUI", images=get_value_at_index(vaedecode_177, 0)
            )

if __name__ == "__main__":
    import asyncio

    asyncio.run(main())