import math
import random

import gradio as gr
import numpy as np
import torch
import spaces
from PIL import Image
from diffusers import (
    FlowMatchEulerDiscreteScheduler,
    QwenImageEditPlusPipeline,
    QwenImageTransformer2DModel,
)
from optimization import optimize_pipeline_
# from qwenimage.pipeline_qwenimage_edit_plus import QwenImageEditPlusPipeline
# from qwenimage.transformer_qwenimage import QwenImageTransformer2DModel
# from qwenimage.qwen_fa3_processor import QwenDoubleStreamAttnProcessorFA3
# --- Model Loading ---
dtype = torch.bfloat16
device = "cuda" if torch.cuda.is_available() else "cpu"
# Scheduler configuration for Lightning
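# With use_dynamic_shifting enabled, the scheduler interpolates the time shift
# between base_shift and max_shift from the image sequence length; setting both
# to log(3) pins the exponential shift to a constant 3x at every resolution,
# as recommended for the Lightning LoRA.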
scheduler_config = {
    "base_image_seq_len": 256,
    "base_shift": math.log(3),
    "invert_sigmas": False,
    "max_image_seq_len": 8192,
    "max_shift": math.log(3),
    "num_train_timesteps": 1000,
    "shift": 1.0,
    "shift_terminal": None,
    "stochastic_sampling": False,
    "time_shift_type": "exponential",
    "use_beta_sigmas": False,
    "use_dynamic_shifting": True,
    "use_exponential_sigmas": False,
    "use_karras_sigmas": False,
}
# Initialize scheduler with Lightning config
scheduler = FlowMatchEulerDiscreteScheduler.from_config(scheduler_config)
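# Load the base edit pipeline with the Lightning scheduler in bfloat16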
pipe = QwenImageEditPlusPipeline.from_pretrained(
    "Qwen/Qwen-Image-Edit-2509",
    scheduler=scheduler,
    torch_dtype=dtype,
).to(device)
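# Lightning LoRA: a distillation adapter that makes 4 sampling steps sufficient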
pipe.load_lora_weights(
    "lightx2v/Qwen-Image-Lightning",
    weight_name="Qwen-Image-Lightning-4steps-V2.0.safetensors",
    adapter_name="fast",
)
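# Fusion LoRA; the weight file "溶图.safetensors" is named after the Chinese
# term for "image fusion" (blending a subject into a background scene)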
pipe.load_lora_weights(
    "dx8152/Qwen-Image-Edit-2509-Fusion",
    weight_name="溶图.safetensors",
    adapter_name="fusion",
)
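# Activate both adapters at full strength, bake them into the base weights,
# then drop the LoRA modules so inference carries no adapter overhead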
pipe.set_adapters(["fast", "fusion"], adapter_weights=[1.0, 1.0])
pipe.fuse_lora(adapter_names=["fast"])
pipe.fuse_lora(adapter_names=["fusion"])
pipe.unload_lora_weights()
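# Optional FA3 attention processor and pipeline optimization path (disabled here):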
# pipe.transformer.__class__ = QwenImageTransformer2DModel
# pipe.transformer.set_attn_processor(QwenDoubleStreamAttnProcessorFA3())
# optimize_pipeline_(pipe, image=[Image.new("RGB", (1024, 1024)), Image.new("RGB", (1024, 1024))], prompt="prompt")
MAX_SEED = np.iinfo(np.int32).max
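# ZeroGPU: a GPU is attached only for the duration of each infer() call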
@spaces.GPU
def infer(
    image_subject,
    image_background=None,  # accepted from the UI but not used by the pipeline call
    prompt="",
    seed=42,
    randomize_seed=True,
    true_guidance_scale=1,
    num_inference_steps=4,
    height=None,
    width=None,
    progress=gr.Progress(track_tqdm=True),
):
    if randomize_seed:
        seed = random.randint(0, MAX_SEED)
    generator = torch.Generator(device=device).manual_seed(seed)
    result = pipe(
        image=image_subject,
        prompt=prompt,
        # height/width come from the UI but are not forwarded; the pipeline
        # derives the output size from the input image
        # height=height,
        # width=width,
        num_inference_steps=num_inference_steps,
        generator=generator,
        true_cfg_scale=true_guidance_scale,
        num_images_per_prompt=1,
    ).images[0]
    # The ImageSlider output expects a (before, after) pair
    return [image_subject, result], seed
# --- UI ---
css = '''#col-container { max-width: 800px; margin: 0 auto; }
.dark .progress-text{color: white !important}
#examples{max-width: 800px; margin: 0 auto; }'''
with gr.Blocks(theme=gr.themes.Citrus(), css=css) as demo:
    with gr.Column(elem_id="col-container"):
        gr.Markdown("## Qwen Image Edit — Fusion")
        gr.Markdown(
            """
            Qwen Image Edit 2509 ✨
            Using [dx8152's Qwen-Image-Edit-2509 Fusion LoRA](https://huggingface.co/dx8152/Qwen-Image-Edit-2509-Fusion) and the [lightx2v Qwen-Image-Lightning LoRA](https://huggingface.co/lightx2v/Qwen-Image-Lightning) for 4-step inference 💨
            """
        )
        with gr.Row():
            with gr.Column():
                with gr.Row():
                    image_subject = gr.Image(label="Input Image", type="pil")
                    image_background = gr.Image(label="Background Image", type="pil", visible=False)
                prompt = gr.Textbox(label="Prompt")
                run_button = gr.Button("Fuse", variant="primary")
                with gr.Accordion("Advanced Settings", open=False):
                    seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0)
                    randomize_seed = gr.Checkbox(label="Randomize Seed", value=True)
                    true_guidance_scale = gr.Slider(label="True Guidance Scale", minimum=1.0, maximum=10.0, step=0.1, value=1.0)
                    num_inference_steps = gr.Slider(label="Inference Steps", minimum=1, maximum=40, step=1, value=4)
                    height = gr.Slider(label="Height", minimum=256, maximum=2048, step=8, value=1024)
                    width = gr.Slider(label="Width", minimum=256, maximum=2048, step=8, value=1024)
            with gr.Column():
                result = gr.ImageSlider(label="Output Image", interactive=False)
                prompt_preview = gr.Textbox(label="Processed Prompt", interactive=False, visible=False)
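    # cache_examples="lazy" renders each example the first time it is selected
    # rather than at startup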
    gr.Examples(
        examples=[
            ["fusion_car.png"],
            ["fusion_shoes.png"],
        ],
        inputs=[image_subject],
        outputs=[result, seed],
        fn=infer,
        cache_examples="lazy",
        elem_id="examples",
    )
    inputs = [
        image_subject, image_background, prompt,
        seed, randomize_seed, true_guidance_scale, num_inference_steps, height, width,
    ]
    outputs = [result, seed]
    run_event = run_button.click(
        fn=infer,
        inputs=inputs,
        outputs=outputs,
    )
demo.launch(share=True)