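"""Distributed style-transfer inference with FLUX and OmniConsistency.

Each process launched via `accelerate` picks one LoRA from --lora_paths,
loads it together with the OmniConsistency module, and renders every image
found under --images_path using the caption stored per image in --prompt_dir.
"""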
import argparse
import glob
import json
import os

import torch
from accelerate import PartialState
from PIL import Image

from src_inference.lora_helper import set_single_lora
from src_inference.pipeline import FluxPipeline

def clear_cache(transformer):
    """Clear the cached condition key/value banks of every attention processor."""
    for _, attn_processor in transformer.attn_processors.items():
        attn_processor.bank_kv.clear()

class style_processor:
    def __init__(self, flux_path, lora_path, omni_path, device):
        # Initialize the base FLUX pipeline on the assigned device
        self.device = device
        self.base_path = flux_path  # path to the base FLUX model
        self.pipe = FluxPipeline.from_pretrained(
            self.base_path, torch_dtype=torch.bfloat16
        ).to(self.device)
        # Build a style prefix from the LoRA file name, e.g.
        # "<style_name>_rank128_bf16.safetensors" -> "<Style Name> style, "
        self.style_prompt = (
            os.path.basename(lora_path)
            .replace("_rank128_bf16.safetensors", "")
            .replace("_", " ")
            .title()
            + " style, "
        )
        # Load the OmniConsistency module into the transformer
        set_single_lora(
            self.pipe.transformer,
            omni_path,
            lora_weights=[1],
            cond_size=512,
        )
        # Load the external style LoRA (directory plus actual file name,
        # rather than a hardcoded placeholder weight name)
        self.pipe.unload_lora_weights()
        self.pipe.load_lora_weights(
            os.path.dirname(lora_path), weight_name=os.path.basename(lora_path)
        )

    def process(self, image_path, prompt):
        # Accept either a file path or an already-loaded PIL image
        if isinstance(image_path, str):
            spatial_image = [Image.open(image_path).convert("RGB")]
        elif isinstance(image_path, Image.Image):
            spatial_image = [image_path]
        else:
            raise ValueError(f"Invalid image type: {type(image_path)}")
        subject_images = []
        width, height = spatial_image[0].size
        image = self.pipe(
            prompt,
            height=height,
            width=width,
            guidance_scale=3.5,
            num_inference_steps=25,
            max_sequence_length=512,
            generator=torch.Generator("cpu").manual_seed(5),
            spatial_images=spatial_image,
            subject_images=subject_images,
            cond_size=512,
        ).images[0]
        # Clear the attention cache after each generation
        clear_cache(self.pipe.transformer)
        return image

def get_images_from_path(path):
    """Return all .jpg/.png files in a directory, or [path] if it is an image file."""
    if os.path.isdir(path):
        return glob.glob(os.path.join(path, "*.jpg")) + glob.glob(
            os.path.join(path, "*.png")
        )
    elif os.path.isfile(path) and path.endswith((".jpg", ".png")):
        return [path]
    else:
        return []

def parse_args():
    parser = argparse.ArgumentParser(description="Style processor")
    parser.add_argument("--flux_path", type=str, required=True)
    parser.add_argument("--lora_paths", type=str, required=True, nargs="+")
    parser.add_argument("--omni_path", type=str, required=True)
    parser.add_argument("--output_dir", type=str, required=True)
    parser.add_argument("--prompt_dir", type=str, required=True)
    parser.add_argument("--images_path", type=str, required=True)
    return parser.parse_args()

if __name__ == "__main__":
    args = parse_args()
    flux_path = args.flux_path
    lora_paths = args.lora_paths
    omni_path = args.omni_path
    output_dir = args.output_dir
    prompt_dir = args.prompt_dir
    images_path = args.images_path

    # One process per LoRA: each rank picks its own style and output folder
    distributed_state = PartialState()
    device = distributed_state.device
    rank = distributed_state.process_index  # more robust than parsing the device string
    lora = lora_paths[rank]
    output_lora_path = os.path.join(output_dir, os.path.basename(lora))
    os.makedirs(output_lora_path, exist_ok=True)

    processor = style_processor(flux_path, lora, omni_path, device)
    image_paths = get_images_from_path(images_path)
    for image_path in image_paths:
        image_output_path = os.path.join(output_lora_path, os.path.basename(image_path))
        if os.path.exists(image_output_path):
            print(f"File {image_output_path} already exists, skipping.")
            continue
        try:
            # The caption for each image is stored as "<image file name>.json" in prompt_dir
            with open(
                os.path.join(prompt_dir, os.path.basename(image_path) + ".json")
            ) as f:
                prompt = json.load(f)["caption"]
            output = processor.process(image_path, processor.style_prompt + prompt)
            output.save(image_output_path)
        except Exception as e:
            print(f"Error processing {image_path}: {e}")