import argparse
import glob
import json
import os

import torch
from accelerate import PartialState
from PIL import Image
from tqdm import tqdm
from transformers import AutoModel, AutoTokenizer

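# Caption images with a vision-language model across multiple GPUs: each
# Accelerate process loads the VLM once, captions its own shard of the input
# images, and writes one JSON file per image into the output directory.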
class CaptionProcessor:
    def __init__(self, vlm_name, device):
        # Load the captioning VLM and its tokenizer; trust_remote_code is
        # required because the model ships its own chat interface.
        self.vlm = AutoModel.from_pretrained(
            vlm_name,
            trust_remote_code=True,
            attn_implementation="flash_attention_2",
            torch_dtype=torch.bfloat16,
        )
        self.vlm_tokenizer = AutoTokenizer.from_pretrained(
            vlm_name, trust_remote_code=True
        )

        # Inference only: switch to eval mode and move to this process's device.
        self.vlm = self.vlm.eval().to(device)
        self.prompt = """
1. Describe the image briefly. Avoid phrases such as "In the" or "The image/scene shows/contains/is a" in the caption; describe the contents directly.
2. Imagine this picture is the first frame of a 5-second video. Describe that video and add dynamics, including the movement of objects and subjects as well as the overall camera movement. Avoid phrases such as "In the" or "The video/scene shows/contains/is a" in the description; describe the contents directly.
3. Please output in JSON format: {"caption": "...", "video_description": "..."}
"""
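    # The model is expected to reply with JSON along these lines (the values
    # here are purely illustrative):
    #   {"caption": "a red bicycle leans against a brick wall",
    #    "video_description": "the camera pans slowly right as leaves drift past"}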
    def str_2_json(self, text):
        # Extract the first {...} block from the model's reply and parse it.
        start_idx = text.find("{")
        if start_idx == -1:
            return None

        end_idx = text.rfind("}")
        if end_idx == -1 or end_idx <= start_idx:
            return None

        json_str = text[start_idx : end_idx + 1]

        try:
            return json.loads(json_str)
        except json.JSONDecodeError:
            return None
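    # Example: str_2_json('reply {"caption": "x", "video_description": "y"}')
    # returns {"caption": "x", "video_description": "y"}; anything without a
    # parseable {...} block yields None.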
    def process(self, image):
        msgs = [{"role": "user", "content": [image, self.prompt]}]

        # Requires the remote-code model to expose a chat() method that accepts
        # PIL images alongside text in the message content.
        answer = self.vlm.chat(
            msgs=msgs, tokenizer=self.vlm_tokenizer, enable_thinking=False, stream=False
        )

        dict_answer = self.str_2_json(answer)
        if dict_answer is None:
            # Keep the raw reply when no valid JSON could be parsed from it.
            return {"response": answer}

        return dict_answer

def get_images_from_path(path):
    if os.path.isdir(path):
        return glob.glob(os.path.join(path, "*.jpg")) + glob.glob(
            os.path.join(path, "*.png")
        )
    elif os.path.isfile(path) and (path.endswith(".jpg") or path.endswith(".png")):
        return [path]
    else:
        return []
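# Note: get_images_from_path() only collects top-level *.jpg / *.png files from
# a directory; it does not search recursively and skips other extensions such
# as *.jpeg or *.webp.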
def parse_args():
    parser = argparse.ArgumentParser(description="Caption processor")
    parser.add_argument("--vlm_name", type=str, required=True)
    parser.add_argument("--output_dir", type=str, required=True)
    parser.add_argument("--paths", type=str, required=True, nargs="+")
    return parser.parse_args()
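# Example launch (hypothetical script and model names; the checkpoint must load
# with trust_remote_code, support flash_attention_2, and expose a chat() API):
#   accelerate launch caption_images.py \
#       --vlm_name <hf-repo-or-local-path> \
#       --output_dir captions \
#       --paths data/images extra_image.jpg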
if __name__ == "__main__":
    distributed_state = PartialState()
    args = parse_args()
    output_dir = args.output_dir
    os.makedirs(output_dir, exist_ok=True)

    vlm_name = args.vlm_name
    paths = args.paths

    # Gather every image referenced by the --paths arguments.
    all_paths = []
    for path in paths:
        images = get_images_from_path(path)
        all_paths.extend(images)
    print("found", len(all_paths), "images")

    processor = CaptionProcessor(
        vlm_name,
        distributed_state.device,
    )
    # Shard the image list across processes; each GPU captions its own slice.
    with distributed_state.split_between_processes(
        all_paths, apply_padding=False
    ) as batched_paths:
        print("GPU", distributed_state.device, "found", len(batched_paths), "images")

        for path in tqdm(batched_paths, desc="Processing images"):
            try:
                json_path = os.path.join(output_dir, os.path.basename(path) + ".json")
                if os.path.exists(json_path):
                    # Skip images that already have a caption file.
                    print(f"File {json_path} already exists, skipping.")
                    continue

                image = Image.open(path)
                output = None

                # Up to three attempts; process() returns a dict even when JSON
                # parsing fails, so this loop normally exits on the first pass.
                for _ in range(3):
                    output = processor.process(image)
                    if output is not None:
                        break

                if output is None:
                    raise RuntimeError("Failed to process image after 3 attempts")

                with open(
                    json_path,
                    "w",
                    encoding="utf-8",
                ) as f:
                    json.dump(output, f, ensure_ascii=False, indent=2)
            except Exception as e:
                print(f"Error processing {path}: {e}")