from langchain_core.runnables import RunnablePassthrough
from langchain_core.output_parsers import PydanticOutputParser
from langchain_core.prompts import ChatPromptTemplate
from dotenv import load_dotenv
from custom_wrapper import OpenRouterChat
from pydantic import BaseModel, Field
from typing import List
import os
import json
import cv2
import base64
from PIL import Image
import io
import numpy as np
from scipy.io.wavfile import write
load_dotenv()
OPENROUTER_API_KEY = os.getenv("OPENROUTER_API_KEY")
# Output schema: declares every field accessed downstream
# (arr, environment_description, reasoning)
class AudioArrayOutput(BaseModel):
    arr: List[float] = Field(description="Array for the audio waves")
    environment_description: str = Field(description="Description of the environment")
    reasoning: str = Field(description="Reasoning behind the audio generation")
llm = OpenRouterChat(
    api_key=OPENROUTER_API_KEY,
    model="meta-llama/llama-3.2-90b-vision-instruct",
    temperature=0.7,
    max_tokens=2048,
)
parser = PydanticOutputParser(pydantic_object=AudioArrayOutput)
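# The parser does double duty: it validates the model's JSON reply against
# AudioArrayOutput, and it supplies the format instructions that get injected
# into the prompt template below.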
def extract_first_frame(video_path):
    """Extract the first frame from a video file"""
    try:
        cap = cv2.VideoCapture(video_path)
        if not cap.isOpened():
            raise ValueError(f"Cannot open video file: {video_path}")
        success, frame = cap.read()
        cap.release()
        if not success:
            raise ValueError("Cannot read the first frame from video")
        return frame
    except Exception as e:
        print(f"Error extracting first frame: {e}")
        return None
def image_to_base64(image):
    """Convert OpenCV image to base64 string"""
    try:
        # Convert BGR to RGB
        image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        # Convert to PIL Image
        pil_image = Image.fromarray(image_rgb)
        # Encode as JPEG, then base64
        buffered = io.BytesIO()
        pil_image.save(buffered, format="JPEG", quality=85)
        img_str = base64.b64encode(buffered.getvalue()).decode()
        return img_str
    except Exception as e:
        print(f"Error converting image to base64: {e}")
        return None
def save_audio_from_array(audio_array, sample_rate=44100, output_path="generated_audio.wav"):
    """Save audio array as WAV file"""
    try:
        audio_np = np.array(audio_array, dtype=np.float32)
        # Normalize to [-1, 1] and convert to int16 PCM
        if np.max(np.abs(audio_np)) > 0:  # Avoid division by zero
            audio_np = audio_np / np.max(np.abs(audio_np))
        audio_np = np.clip(audio_np, -1.0, 1.0)
        audio_np = np.int16(audio_np * 32767)
        write(output_path, sample_rate, audio_np)
        return output_path
    except Exception as e:
        print(f"Error saving audio: {e}")
        return None
# Prompt constrained to the JSON structure expected by the parser
prompt = ChatPromptTemplate.from_template("""
You are an expert sound designer and acoustic AI generator.
Analyze the provided image and generate a footstep sound array.

Image Data (base64): {image_data}

**CRITICAL INSTRUCTIONS:**
- Output ONLY valid JSON format, nothing else
- No explanations, no code, no markdown formatting
- No additional text before or after the JSON

Generate a JSON object with exactly these three fields:
1. "arr": Array of 50-80 float values between -1.0 and 1.0
2. "environment_description": Brief description of the environment
3. "reasoning": Brief explanation of sound design choices

{format_instructions}

Output ONLY the JSON:
""")
chain = (
    {"image_data": RunnablePassthrough(), "format_instructions": lambda x: parser.get_format_instructions()}
    | prompt
    | llm
    | parser
)
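# Data flow: the raw base64 string passes through unchanged as {image_data},
# the parser's format instructions fill {format_instructions}, the rendered
# prompt goes to the LLM, and the parser validates the JSON reply.
# Note: the base64 image is interpolated into the prompt as plain text here.
# Vision models usually receive images as multimodal message content instead;
# whether text-embedded base64 works depends on the OpenRouterChat wrapper.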
def analyze_image_and_generate_audio(image_base64):
    try:
        # Ensure audio directory exists
        os.makedirs("./audio", exist_ok=True)
        result = chain.invoke(image_base64)
        # Dump the raw structured result for debugging
        with open("ss.txt", "w") as f:
            f.write(str(result))
        print("Generated array:", result.arr)
        print("Array length:", len(result.arr))
        # Validate the audio array
        if not result.arr or len(result.arr) < 10:
            print("Warning: Generated audio array is too short or empty")
            # Create a fallback array (one-second 440 Hz sine sampled at 50 points)
            fallback_array = np.sin(2 * np.pi * 440 * np.linspace(0, 1, 50)).tolist()
            audio_path = save_audio_from_array(audio_array=fallback_array, output_path="./audio/footstep_from_image.wav")
        else:
            audio_path = save_audio_from_array(audio_array=result.arr, output_path="./audio/footstep_from_image.wav")
        print("🎧 Environment Description:", result.environment_description)
        print("🧠 Reasoning:", result.reasoning)
        print(f"✅ Audio saved at: {audio_path}")
        return audio_path
    except Exception as e:
        print("Error during LLM audio generation:", e)
        # Create a simple fallback audio
        fallback_array = np.sin(2 * np.pi * 440 * np.linspace(0, 1, 50)).tolist()
        audio_path = save_audio_from_array(audio_array=fallback_array, output_path="./audio/fallback_footstep.wav")
        return audio_path
def process_video_for_footstep_audio(video_path):
    try:
        print("🎥 Extracting first frame...")
        first_frame = extract_first_frame(video_path)
        if first_frame is None:
            print("❌ Failed to extract frame from video")
            # Return a fallback audio path
            return create_fallback_audio()
        image_base64 = image_to_base64(first_frame)
        if image_base64 is None:
            print("❌ Failed to convert image to base64")
            return create_fallback_audio()
        print("🤖 Generating footstep audio from LLM...")
        audio_path = analyze_image_and_generate_audio(image_base64)
        # Verify the file exists
        if audio_path and os.path.exists(audio_path):
            print(f"✅ Audio generated successfully at: {audio_path}")
            return audio_path
        else:
            print("❌ Generated audio file not found")
            return create_fallback_audio()
    except Exception as e:
        print(f"❌ Error in process_video_for_footstep_audio: {e}")
        import traceback
        traceback.print_exc()
        return create_fallback_audio()
def create_fallback_audio():
    """Create a simple fallback audio file"""
    try:
        os.makedirs("./audio", exist_ok=True)
        fallback_path = "./audio/fallback_footstep.wav"
        # Create a simple footstep-like sound
        sample_rate = 44100
        duration = 1.0
        t = np.linspace(0, duration, int(sample_rate * duration))
        # Combination of low-frequency thump and noise burst
        footstep = (
            np.sin(2 * np.pi * 80 * t) * np.exp(-8 * t)  # Low-frequency thump
            + np.random.normal(0, 0.1, len(t)) * np.exp(-15 * t)  # Noise burst
        )
        footstep = footstep / np.max(np.abs(footstep)) * 0.8
        write(fallback_path, sample_rate, np.int16(footstep * 32767))
        print(f"✅ Created fallback audio at: {fallback_path}")
        # Return the file that was just written
        return fallback_path
    except Exception as e:
        print(f"❌ Failed to create fallback audio: {e}")
        return None
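

# Minimal usage sketch. "sample_video.mp4" is a hypothetical placeholder
# path, not a file shipped with this script; point it at any local video.
if __name__ == "__main__":
    generated = process_video_for_footstep_audio("sample_video.mp4")
    print("Result:", generated)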