linoyts HF Staff commited on
Commit
dea1d11
·
verified ·
1 Parent(s): 58f7c88

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +18 -8
app.py CHANGED
@@ -14,9 +14,18 @@ dtype = torch.bfloat16
14
  device = "cuda" if torch.cuda.is_available() else "cpu"
15
 
16
 
17
- vlm_pipe = ModularPipeline.from_pretrained("briaai/FIBO-VLM-prompt-to-JSON", trust_remote_code=True).to(device)
 
18
  pipe = BriaFiboPipeline.from_pretrained("briaai/FIBO", trust_remote_code=True, torch_dtype=dtype).to(device)
19
 
 
 
 
 
 
 
 
 
20
  @spaces.GPU(duration=300)
21
  def infer(
22
  prompt,
@@ -26,7 +35,7 @@ def infer(
26
  seed=42,
27
  randomize_seed=False,
28
  width=1024,
29
- height=768,
30
  guidance_scale=5,
31
  num_inference_steps=50,
32
  mode="generate",
@@ -35,12 +44,7 @@ def infer(
35
  seed = random.randint(0, MAX_SEED)
36
 
37
  with torch.inference_mode():
38
- if negative_prompt:
39
- neg_output = vlm_pipe(prompt=negative_prompt)
40
- neg_json_prompt = neg_output.values["json_prompt"]
41
- else:
42
- neg_json_prompt = ""
43
-
44
  if mode == "refine":
45
  json_prompt_str = (
46
  json.dumps(prompt_in_json)
@@ -52,6 +56,12 @@ def infer(
52
  output = vlm_pipe(prompt=prompt)
53
  json_prompt = output.values["json_prompt"]
54
 
 
 
 
 
 
 
55
  image = pipe(
56
  prompt=json_prompt,
57
  num_inference_steps=num_inference_steps,
 
14
  device = "cuda" if torch.cuda.is_available() else "cpu"
15
 
16
 
17
+ vlm_pipe = ModularPipeline.from_pretrained("briaai/FIBO-VLM-prompt-to-JSON", trust_remote_code=True)
18
+ # vlm_pipe = ModularPipeline.from_pretrained("briaai/FIBO-gemini-prompt-to-JSON", trust_remote_code=True)
19
  pipe = BriaFiboPipeline.from_pretrained("briaai/FIBO", trust_remote_code=True, torch_dtype=dtype).to(device)
20
 
21
def get_default_negative_prompt(existing_json: dict) -> str:
    """Return a default negative JSON-prompt string for photographic styles.

    When the structured prompt describes a photograph, steer generation
    away from illustration / non-realistic output; for every other style
    no negative prompt is needed.

    Args:
        existing_json: the parsed JSON prompt produced by the VLM pipeline;
            only the optional ``style_medium`` key is inspected.

    Returns:
        The pseudo-JSON negative prompt string for photographic styles,
        or ``""`` when no default applies.

    NOTE(review): the original wrapped this in ``@spaces.GPU()``, but the
    function is pure CPU string work and is called from inside the already
    GPU-decorated ``infer``; the redundant nested decorator is removed to
    avoid needless ZeroGPU queueing.
    """
    # `or ""` also guards an explicit None value for the key, which would
    # crash `.lower()` if only the .get default were relied on.
    style_medium = (existing_json.get("style_medium") or "").lower()
    if style_medium in {"photograph", "photography", "photo"}:
        # Runtime string preserved byte-for-byte from the original.
        return """{'style_medium':'digital illustration','artistic_style':'non-realistic'}"""
    return ""
29
  @spaces.GPU(duration=300)
30
  def infer(
31
  prompt,
 
35
  seed=42,
36
  randomize_seed=False,
37
  width=1024,
38
+ height=1024,
39
  guidance_scale=5,
40
  num_inference_steps=50,
41
  mode="generate",
 
44
  seed = random.randint(0, MAX_SEED)
45
 
46
  with torch.inference_mode():
47
+
 
 
 
 
 
48
  if mode == "refine":
49
  json_prompt_str = (
50
  json.dumps(prompt_in_json)
 
56
  output = vlm_pipe(prompt=prompt)
57
  json_prompt = output.values["json_prompt"]
58
 
59
+ if negative_prompt:
60
+ neg_output = vlm_pipe(prompt=negative_prompt)
61
+ neg_json_prompt = neg_output.values["json_prompt"]
62
+ else:
63
+ neg_json_prompt = get_default_negative_prompt(json.loads(json_prompt))
64
+
65
  image = pipe(
66
  prompt=json_prompt,
67
  num_inference_steps=num_inference_steps,