# data_pipeline/style.py
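"""Distributed style-transfer batch script.

Each accelerate process loads the FLUX pipeline with the OmniConsistency
module plus one style LoRA (selected by process rank), then re-renders every
input image using its caption prefixed with the style name.
"""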
import argparse
import glob
import json
import os

import torch
from accelerate import PartialState
from PIL import Image

from src_inference.lora_helper import set_single_lora
from src_inference.pipeline import FluxPipeline


def clear_cache(transformer):
    # Drop the cached conditioning key/value banks held by each attention
    # processor so the next generation starts from a clean state.
    for _, attn_processor in transformer.attn_processors.items():
        attn_processor.bank_kv.clear()


class StyleProcessor:
    def __init__(self, flux_path, lora_path, omni_path, device):
        # Load the base FLUX pipeline in bfloat16 on this process's device.
        self.device = device
        self.base_path = flux_path
        self.pipe = FluxPipeline.from_pretrained(
            self.base_path, torch_dtype=torch.bfloat16
        ).to(self.device)
        # Derive the style prefix from the LoRA filename, e.g.
        # "Ghibli_rank128_bf16.safetensors" -> "Ghibli style, ".
        style_name = os.path.basename(lora_path).replace(
            "_rank128_bf16.safetensors", ""
        ).replace("_", " ").title()
        self.style_prompt = f"{style_name} style, "
        # Attach the OmniConsistency module to the FLUX transformer.
        set_single_lora(
            self.pipe.transformer,
            omni_path,
            lora_weights=[1],
            cond_size=512,
        )
        # Swap in the external style LoRA; lora_path points at the
        # .safetensors file itself.
        self.pipe.unload_lora_weights()
        self.pipe.load_lora_weights(
            os.path.dirname(lora_path), weight_name=os.path.basename(lora_path)
        )

    def process(self, image_path, prompt):
        # Accept either a filesystem path or an already-loaded PIL image.
        if isinstance(image_path, str):
            spatial_image = [Image.open(image_path).convert("RGB")]
        elif isinstance(image_path, Image.Image):
            spatial_image = [image_path]
        else:
            raise ValueError(f"Invalid image type: {type(image_path)}")
        subject_images = []
        width, height = spatial_image[0].size
        # Generate at the source image's own resolution.
        image = self.pipe(
            prompt,
            height=height,
            width=width,
            guidance_scale=3.5,
            num_inference_steps=25,
            max_sequence_length=512,
            generator=torch.Generator("cpu").manual_seed(5),
            spatial_images=spatial_image,
            subject_images=subject_images,
            cond_size=512,
        ).images[0]
        # Clear the attention KV cache after each generation.
        clear_cache(self.pipe.transformer)
        return image


def get_images_from_path(path):
    # Collect .jpg/.png files from a directory, or return a single image path.
    if os.path.isdir(path):
        return glob.glob(os.path.join(path, "*.jpg")) + glob.glob(
            os.path.join(path, "*.png")
        )
    elif os.path.isfile(path) and (path.endswith(".jpg") or path.endswith(".png")):
        return [path]
    else:
        return []


def parse_args():
    parser = argparse.ArgumentParser(description="Style processor")
    parser.add_argument("--flux_path", type=str, required=True)
    parser.add_argument("--lora_paths", type=str, required=True, nargs="+")
    parser.add_argument("--omni_path", type=str, required=True)
    parser.add_argument("--output_dir", type=str, required=True)
    parser.add_argument("--prompt_dir", type=str, required=True)
    parser.add_argument("--images_path", type=str, required=True)
    return parser.parse_args()


if __name__ == "__main__":
    args = parse_args()
    flux_path = args.flux_path
    lora_paths = args.lora_paths
    omni_path = args.omni_path
    output_dir = args.output_dir
    prompt_dir = args.prompt_dir
    images_path = args.images_path

    # One process per style LoRA: each rank picks its own LoRA and writes to
    # its own output subdirectory.
    distributed_state = PartialState()
    device = distributed_state.device
    rank = distributed_state.process_index
    lora = lora_paths[rank]
    output_lora_path = os.path.join(output_dir, os.path.basename(lora))
    os.makedirs(output_lora_path, exist_ok=True)

    processor = StyleProcessor(flux_path, lora, omni_path, device)
    image_paths = get_images_from_path(images_path)
    for image_path in image_paths:
        image_output_path = os.path.join(output_lora_path, os.path.basename(image_path))
        if os.path.exists(image_output_path):
            print(f"File {image_output_path} already exists, skipping.")
            continue
        try:
            # The caption lives in a sidecar JSON named "<image filename>.json".
            with open(
                os.path.join(prompt_dir, os.path.basename(image_path) + ".json")
            ) as f:
                prompt = json.load(f)["caption"]
            output = processor.process(image_path, processor.style_prompt + prompt)
            output.save(image_output_path)
        except Exception as e:
            print(f"Error processing {image_path}: {e}")