linoyts HF Staff commited on
Commit
aae18e9
·
verified ·
1 Parent(s): b27dae8

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +18 -18
app.py CHANGED
@@ -76,6 +76,7 @@ MAX_SEED = np.iinfo(np.int32).max
76
  @spaces.GPU
77
  def infer(
78
  image_subject,image_background,
 
79
  seed=42,
80
  randomize_seed=True,
81
  true_guidance_scale=1,
@@ -85,13 +86,12 @@ def infer(
85
  progress=gr.Progress(track_tqdm=True)
86
  ):
87
 
88
- prompt=""
89
  if randomize_seed:
90
  seed = random.randint(0, MAX_SEED)
91
  generator = torch.Generator(device=device).manual_seed(seed)
92
 
93
  result = pipe(
94
- image=image_subject,
95
  prompt=prompt,
96
  # height=height,
97
  # width=width,
@@ -117,16 +117,16 @@ with gr.Blocks(theme=gr.themes.Citrus(), css=css) as demo:
117
  gr.Markdown("## Qwen Image Edit — Fusion")
118
  gr.Markdown("""
119
  Qwen Image Edit 2509 ✨
120
- Using [Alissonerdx's Qwen-Edit-2509 Face Swap LoRA](https://huggingface.co/Alissonerdx/Qwen-Edit-2509-Face-Swap) and [Phr00t/Qwen-Image-Edit-Rapid-AIO](https://huggingface.co/Phr00t/Qwen-Image-Edit-Rapid-AIO) for 4-step inference 💨
121
  """
122
  )
123
 
124
  with gr.Row():
125
  with gr.Column():
126
  with gr.Row():
127
- image_subject = gr.Image(label="subject Image", type="pil")
128
- image_background = gr.Image(label="background Image", type="pil")
129
-
130
  run_button = gr.Button("Fuse", variant="primary")
131
  with gr.Accordion("Advanced Settings", open=False):
132
  seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0)
@@ -142,20 +142,20 @@ with gr.Blocks(theme=gr.themes.Citrus(), css=css) as demo:
142
  result = gr.Image(label="Output Image", interactive=False)
143
  prompt_preview = gr.Textbox(label="Processed Prompt", interactive=False, visible=False)
144
 
145
- gr.Examples(
146
- examples=[
147
- ["wednesday.png", "pexels-alipazani-2613260.jpg"],
148
-
149
- ],
150
- inputs=[image_subject,image_background],
151
- outputs=[result,seed],
152
- fn=infer,
153
- cache_examples="lazy",
154
- elem_id="examples"
155
- )
156
 
157
  inputs = [
158
- image_subject,image_background,
159
  seed, randomize_seed, true_guidance_scale, num_inference_steps, height, width
160
  ]
161
  outputs = [result, seed]
 
76
  @spaces.GPU
77
  def infer(
78
  image_subject,image_background,
79
+ prompt,
80
  seed=42,
81
  randomize_seed=True,
82
  true_guidance_scale=1,
 
86
  progress=gr.Progress(track_tqdm=True)
87
  ):
88
 
 
89
  if randomize_seed:
90
  seed = random.randint(0, MAX_SEED)
91
  generator = torch.Generator(device=device).manual_seed(seed)
92
 
93
  result = pipe(
94
+ image=image_subject["composite"],
95
  prompt=prompt,
96
  # height=height,
97
  # width=width,
 
117
  gr.Markdown("## Qwen Image Edit — Fusion")
118
  gr.Markdown("""
119
  Qwen Image Edit 2509 ✨
120
+ Using [dx8152's Qwen-Image-Edit-2509 Fusion LoRA](https://huggingface.co/dx8152/Qwen-Image-Edit-2509-Fusion) and [lightx2v Qwen-Image-Lightning LoRA](https://huggingface.co/lightx2v/Qwen-Image-Lightning) for 4-step inference 💨
121
  """
122
  )
123
 
124
  with gr.Row():
125
  with gr.Column():
126
  with gr.Row():
127
+ image_subject = gr.ImageEditor(label="input image", type="pil")
128
+ image_background = gr.Image(label="background Image", type="pil", visible=False)
129
+ prompt = gr.Textbox(label="prompt")
130
  run_button = gr.Button("Fuse", variant="primary")
131
  with gr.Accordion("Advanced Settings", open=False):
132
  seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0)
 
142
  result = gr.Image(label="Output Image", interactive=False)
143
  prompt_preview = gr.Textbox(label="Processed Prompt", interactive=False, visible=False)
144
 
145
+ # gr.Examples(
146
+ # examples=[
147
+ # ["wednesday.png", "pexels-alipazani-2613260.jpg"],
148
+
149
+ # ],
150
+ # inputs=[image_subject,image_background],
151
+ # outputs=[result,seed],
152
+ # fn=infer,
153
+ # cache_examples="lazy",
154
+ # elem_id="examples"
155
+ # )
156
 
157
  inputs = [
158
+ image_subject,image_background, prompt,
159
  seed, randomize_seed, true_guidance_scale, num_inference_steps, height, width
160
  ]
161
  outputs = [result, seed]