#!/usr/bin/env python3
"""
Audio-Enhanced Video Highlights Generator
Combines SmolVLM2 visual analysis with Whisper audio transcription
Supports 99+ languages including Telugu, Hindi, English
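
Example usage (flags as defined in main(); the script filename is assumed):
    python audio_enhanced_highlights.py input.mp4 -o highlights.mp4 \
        --interval 10 --min-score 7.0 --max-highlights 5 --whisper-model base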
"""

import os
import sys
import cv2
import argparse
import json
import re
import subprocess
import threading
import tempfile
from pathlib import Path
from typing import List, Dict
import logging

# Add src directory to path for imports
sys.path.append(str(Path(__file__).parent / "src"))

try:
    from src.smolvlm2_handler import SmolVLM2Handler
except ImportError:
    print("❌ SmolVLM2Handler not found. Make sure to install dependencies first.")
    sys.exit(1)

try:
    import whisper
    WHISPER_AVAILABLE = True
    print("✅ Whisper available for audio transcription")
except ImportError:
    WHISPER_AVAILABLE = False
    print("⚠️ Whisper not available. Install with: pip install openai-whisper (audio analysis will be disabled)")

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

class AudioVisualAnalyzer:
    """Comprehensive analyzer combining visual and audio analysis"""
    
    def __init__(self, whisper_model_size="base", timeout_seconds=90, enable_visual=True, visual_only_mode=False):
        """Initialize with SmolVLM2 and optionally Whisper models"""
        print("πŸ”§ Initializing Visual Analyzer...")
        
        self.enable_visual = enable_visual
        self.visual_only_mode = visual_only_mode
        
        # Initialize SmolVLM2 for visual analysis
        if self.enable_visual:
            print("πŸ”₯ Loading SmolVLM2...")
            self.vlm_handler = SmolVLM2Handler()
        else:
            print("πŸ”‡ Visual analysis disabled")
            self.vlm_handler = None
        self.timeout_seconds = timeout_seconds
        
        # Skip Whisper loading in visual-only mode to save memory/resources
        if self.visual_only_mode:
            print("πŸ‘οΈ Visual-only mode enabled - skipping audio processing to optimize performance")
            self.whisper_model = None
        elif WHISPER_AVAILABLE:
            print(f"πŸ“₯ Loading Whisper model ({whisper_model_size})...")
            self.whisper_model = whisper.load_model(whisper_model_size)
            print("βœ… Whisper model loaded successfully")
        else:
            self.whisper_model = None
            print("⚠️ Whisper not available - audio analysis disabled")
    
    def extract_audio_segments(self, video_path: str, segments: List[Dict]) -> List[str]:
        """Extract audio for specific video segments"""
        audio_files = []
        temp_dir = tempfile.mkdtemp()
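        # NOTE: callers remove the individual WAV files after use; the temp
        # directory itself is not removed here (a known simplification).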
        
        for i, segment in enumerate(segments):
            start_time = segment['start_time']
            duration = segment['duration']
            
            audio_path = os.path.join(temp_dir, f"segment_{i}.wav")
            
            # Extract audio segment using FFmpeg
            cmd = [
                'ffmpeg', '-i', video_path,
                '-ss', str(start_time),
                '-t', str(duration),
                '-vn',  # No video
                '-acodec', 'pcm_s16le',  # Uncompressed audio
                '-ar', '16000',  # 16kHz sample rate for Whisper
                '-ac', '1',  # Mono
                '-f', 'wav',  # Force WAV format
                '-y',  # Overwrite
                audio_path
            ]
            
            try:
                subprocess.run(cmd, check=True, capture_output=True, text=True)
                if os.path.exists(audio_path) and os.path.getsize(audio_path) > 0:
                    audio_files.append(audio_path)
                    logger.info(f"📄 Extracted audio segment {i+1}: {duration:.1f}s")
                else:
                    logger.warning(f"⚠️ Audio segment {i+1} is empty or missing")
                    audio_files.append(None)
            except subprocess.CalledProcessError as e:
                logger.warning(f"⚠️ No audio stream in segment {i+1} (this is normal for silent videos)")
                audio_files.append(None)
        
        return audio_files
    
    def transcribe_audio_segment(self, audio_path: str) -> Dict:
        """Transcribe audio segment with Whisper"""
        if self.whisper_model is None or not audio_path or not os.path.exists(audio_path):
            return {"text": "", "language": "unknown", "confidence": 0.0}
        
        try:
            result = self.whisper_model.transcribe(
                audio_path,
                language=None,  # Auto-detect language
                task="transcribe"
            )
            
            return {
                "text": result.get("text", "").strip(),
                "language": result.get("language", "unknown"),
                "confidence": 1.0  # Whisper doesn't provide confidence scores
            }
        except Exception as e:
            logger.error(f"❌ Audio transcription failed: {e}")
            return {"text": "", "language": "unknown", "confidence": 0.0}
    
    def analyze_visual_content(self, frame_path: str) -> Dict:
        """Analyze visual content using SmolVLM2 with robust error handling"""
        # If visual analysis is disabled, return audio-focused fallback
        if not self.enable_visual or self.vlm_handler is None:
            logger.info("πŸ“Ή Visual analysis disabled, using audio-only mode")
            return {"description": "Audio-only analysis mode - visual analysis disabled", "score": 7.0}
        
        max_retries = 2
        retry_count = 0
        
        while retry_count < max_retries:
            try:
                def generate_with_timeout():
                    prompt = ("Analyze this video frame for interesting, engaging, or highlight-worthy content. "
                             "IMPORTANT: Start your response with 'Score: X/10' where X is a number from 1-10. "
                             "Then explain what makes it noteworthy. Focus on action, emotion, important moments, or visually striking elements. "
                             "Rate based on: Action/movement (high scores), People talking/interacting (medium-high), "
                             "Static scenes (low-medium), Boring/empty scenes (low scores).")
                    return self.vlm_handler.generate_response(frame_path, prompt)
                
                # Run with timeout protection
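                # A daemon thread is used because model inference cannot be
                # interrupted cooperatively; join(timeout) abandons the worker,
                # so a timed-out generation may keep running in the background.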
                thread_result = [None]
                exception_result = [None]
                
                def target():
                    try:
                        thread_result[0] = generate_with_timeout()
                    except Exception as e:
                        exception_result[0] = e
                
                thread = threading.Thread(target=target)
                thread.daemon = True
                thread.start()
                thread.join(self.timeout_seconds)
                
                if thread.is_alive():
                    logger.warning(f"⏰ Visual analysis timed out after {self.timeout_seconds}s (attempt {retry_count + 1})")
                    retry_count += 1
                    if retry_count >= max_retries:
                        logger.info("πŸ”‡ Switching to audio-only mode due to visual timeout")
                        return {"description": "Visual analysis timed out - using audio-only mode", "score": 7.0}
                    continue
                
                if exception_result[0]:
                    error_msg = str(exception_result[0])
                    if "probability tensor" in error_msg or "inf" in error_msg or "nan" in error_msg:
                        logger.warning(f"⚠️ Model inference error, retrying (attempt {retry_count + 1}): {error_msg}")
                        retry_count += 1
                        if retry_count >= max_retries:
                            return {"description": "Model inference failed after retries", "score": 6.0}
                        continue
                    else:
                        raise exception_result[0]
                    
                response = thread_result[0]
                if not response or len(response.strip()) == 0:
                    logger.warning(f"⚠️ Empty response, retrying (attempt {retry_count + 1})")
                    retry_count += 1
                    if retry_count >= max_retries:
                        return {"description": "No meaningful response after retries", "score": 6.0}
                    continue
                
                # Extract score from response
                score = self.extract_score_from_text(response)
                return {"description": response, "score": score}
                
            except Exception as e:
                error_msg = str(e)
                logger.warning(f"⚠️ Visual analysis error (attempt {retry_count + 1}): {error_msg}")
                retry_count += 1
                if retry_count >= max_retries:
                    return {"description": f"Analysis failed after {max_retries} attempts: {error_msg}", "score": 6.0}
        
        # Fallback if all retries failed
        return {"description": "Analysis failed after all retry attempts", "score": 6.0}
    
    def extract_score_from_text(self, text: str) -> float:
        """Extract numeric score from analysis text"""
        
        # Look for patterns like "Score: 8/10", "8/10", "score: 7", etc.
        patterns = [
            r'score:\s*(\d+(?:\.\d+)?)\s*/\s*10',  # "Score: 8/10" (our new format)
            r'(\d+(?:\.\d+)?)\s*/\s*10',  # "8/10" or "7.5/10"
            r'(?:score|rating|rate)(?:\s*[:=]\s*)(\d+(?:\.\d+)?)',  # "score: 8" or "rating=7.5"
            r'(\d+(?:\.\d+)?)\s*(?:out of|/)\s*10',  # "8 out of 10"
            r'(?:^|\s)(\d+(?:\.\d+)?)(?:\s*[/]\s*10)?(?:\s|$)',  # Bare numbers (last resort; may pick up unrelated digits)
        ]
        
        for pattern in patterns:
            matches = re.findall(pattern, text.lower())
            if matches:
                try:
                    score = float(matches[0])
                    return min(max(score, 1.0), 10.0)  # Clamp between 1-10
                except ValueError:
                    continue
        
        return 6.0  # Default score if no pattern found
    
    def calculate_combined_score(self, visual_score: float, audio_text: str, audio_lang: str) -> float:
        """Calculate combined score from visual and audio analysis"""
        # Start with visual score
        combined_score = visual_score
        
        # Audio content scoring
        if audio_text:
            audio_bonus = 0.0
            text_lower = audio_text.lower()
            
            # Positive indicators
            excitement_words = ['amazing', 'incredible', 'wow', 'fantastic', 'awesome', 'perfect', 'excellent']
            action_words = ['goal', 'win', 'victory', 'success', 'breakthrough', 'achievement']
            emotion_words = ['happy', 'excited', 'thrilled', 'surprised', 'shocked', 'love']
            
            # Telugu positive indicators (basic)
            telugu_positive = ['అద్భుతం', 'చాలా బాగుంది', 'డాడ్', 'సూపర్']
            
            # Count positive indicators
            for word_list in [excitement_words, action_words, emotion_words, telugu_positive]:
                for word in word_list:
                    if word in text_lower:
                        audio_bonus += 0.5
            
            # Length bonus for substantial content
            if len(audio_text) > 50:
                audio_bonus += 0.3
            elif len(audio_text) > 20:
                audio_bonus += 0.1
            
            # Language diversity bonus
            if audio_lang in ['te', 'telugu']:  # Telugu content
                audio_bonus += 0.2
            elif audio_lang in ['hi', 'hindi']:  # Hindi content
                audio_bonus += 0.2
            
            combined_score += audio_bonus
        
        # Clamp final score
        return min(max(combined_score, 1.0), 10.0)
    
    def analyze_segment(self, video_path: str, segment: Dict, temp_frame_path: str) -> Dict:
        """Analyze a single video segment with both visual and audio"""
        start_time = segment['start_time']
        duration = segment['duration']
        
        logger.info(f"πŸ” Analyzing segment at {start_time:.1f}s ({duration:.1f}s duration)")
        
        # Visual analysis
        visual_analysis = self.analyze_visual_content(temp_frame_path)
        
        # Skip audio analysis in visual-only mode to save resources
        if self.visual_only_mode:
            logger.info("πŸ‘οΈ Visual-only mode: skipping audio analysis")
            audio_analysis = {"text": "", "language": "unknown", "confidence": 0.0}
            # Use pure visual score for highlights
            combined_score = visual_analysis['score']
        else:
            # Audio analysis
            audio_files = self.extract_audio_segments(video_path, [segment])
            audio_analysis = {"text": "", "language": "unknown", "confidence": 0.0}
            
            if audio_files and audio_files[0]:
                audio_analysis = self.transcribe_audio_segment(audio_files[0])
                # Cleanup temporary audio file
                try:
                    os.unlink(audio_files[0])
                except OSError:
                    pass
            
            # Combined scoring
            combined_score = self.calculate_combined_score(
                visual_analysis['score'],
                audio_analysis['text'],
                audio_analysis['language']
            )
        
        return {
            'start_time': start_time,
            'duration': duration,
            'visual_score': visual_analysis['score'],
            'visual_description': visual_analysis['description'],
            'audio_text': audio_analysis['text'],
            'audio_language': audio_analysis['language'],
            'combined_score': combined_score,
            'selected': False
        }

def extract_frames_at_intervals(video_path: str, interval_seconds: float = 10.0) -> List[Dict]:
    """Extract frames at regular intervals from video"""
    cap = cv2.VideoCapture(video_path)
    if not cap.isOpened():
        raise ValueError(f"Cannot open video file: {video_path}")
    
    fps = cap.get(cv2.CAP_PROP_FPS)
    total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    if fps <= 0:
        cap.release()
        raise ValueError(f"Invalid or zero FPS reported for video: {video_path}")
    duration = total_frames / fps
    
    logger.info(f"πŸ“Ή Video: {duration:.1f}s, {fps:.1f} FPS, {total_frames} frames")
    
    segments = []
    current_time = 0
    
    while current_time < duration:
        segment_duration = min(interval_seconds, duration - current_time)
        segments.append({
            'start_time': current_time,
            'duration': segment_duration,
            'frame_number': int(current_time * fps)
        })
        current_time += interval_seconds
    
    cap.release()
    return segments

def save_frame_at_time(video_path: str, time_seconds: float, output_path: str) -> bool:
    """Save a frame at specific time with robust frame extraction"""
    cap = cv2.VideoCapture(video_path)
    if not cap.isOpened():
        return False
    
    try:
        fps = cap.get(cv2.CAP_PROP_FPS)
        total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
        frame_number = int(time_seconds * fps)
        
        # Ensure frame number is within valid range
        frame_number = min(frame_number, total_frames - 1)
        frame_number = max(frame_number, 0)
        
        # Try to extract frame with fallback options
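        # (seeking can be imprecise with some codecs/containers, so fall back
        #  to neighboring frames: n, then n+1/n-1, then n+2/n-2)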
        for attempt in range(3):
            try:
                # Try exact frame first
                test_frame = frame_number + attempt
                if test_frame >= total_frames:
                    test_frame = frame_number - attempt
                if test_frame < 0:
                    test_frame = frame_number
                
                cap.set(cv2.CAP_PROP_POS_FRAMES, test_frame)
                ret, frame = cap.read()
                
                if ret and frame is not None and frame.size > 0:
                    # Validate frame data
                    if len(frame.shape) == 3 and frame.shape[2] == 3:  # Valid color frame
                        success = cv2.imwrite(output_path, frame)
                        if success:
                            cap.release()
                            return True
                    
            except Exception as e:
                logger.warning(f"Frame extraction attempt {attempt + 1} failed: {e}")
                continue
        
        cap.release()
        return False
        
    except Exception as e:
        logger.error(f"Critical error in frame extraction: {e}")
        cap.release()
        return False

def create_highlights_video(video_path: str, selected_segments: List[Dict], output_path: str):
    """Create highlights video from selected segments"""
    if not selected_segments:
        logger.error("❌ No segments selected for highlights")
        return False
    
    # Create temporary files for each segment
    temp_files = []
    temp_dir = tempfile.mkdtemp()
    
    for i, segment in enumerate(selected_segments):
        temp_file = os.path.join(temp_dir, f"segment_{i}.mp4")
        
        cmd = [
            'ffmpeg', '-i', video_path,
            '-ss', str(segment['start_time']),
            '-t', str(segment['duration']),
            '-c', 'copy',  # Copy streams without re-encoding
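            # (stream copy can only cut at keyframes, so segment boundaries
            #  may shift slightly toward the nearest keyframe)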
            '-y', temp_file
        ]
        
        try:
            subprocess.run(cmd, check=True, capture_output=True)
            temp_files.append(temp_file)
            logger.info(f"βœ… Created segment {i+1}/{len(selected_segments)}")
        except subprocess.CalledProcessError as e:
            logger.error(f"❌ Failed to create segment {i+1}: {e}")
            continue
    
    if not temp_files:
        logger.error("❌ No valid segments created")
        return False
    
    # Create concat file
    concat_file = os.path.join(temp_dir, "concat.txt")
    with open(concat_file, 'w') as f:
        for temp_file in temp_files:
            f.write(f"file '{temp_file}'\n")
    
    # Concatenate segments
    cmd = [
        'ffmpeg', '-f', 'concat', '-safe', '0',
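        # ("-safe 0" lets the concat demuxer accept the absolute paths
        #  written into concat.txt)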
        '-i', concat_file,
        '-c', 'copy',
        '-y', output_path
    ]
    
    try:
        subprocess.run(cmd, check=True, capture_output=True)
        logger.info(f"βœ… Highlights video created: {output_path}")
        
        # Cleanup
        for temp_file in temp_files:
            try:
                os.unlink(temp_file)
            except:
                pass
        try:
            os.unlink(concat_file)
            os.rmdir(temp_dir)
        except:
            pass
        
        return True
    except subprocess.CalledProcessError as e:
        logger.error(f"❌ Failed to create highlights video: {e}")
        return False

def main():
    parser = argparse.ArgumentParser(description="Audio-Enhanced Video Highlights Generator")
    parser.add_argument("video_path", help="Path to input video file")
    parser.add_argument("--output", "-o", default="audio_enhanced_highlights.mp4", 
                       help="Output highlights video path")
    parser.add_argument("--interval", "-i", type=float, default=10.0,
                       help="Analysis interval in seconds (default: 10.0)")
    parser.add_argument("--min-score", "-s", type=float, default=7.0,
                       help="Minimum score for highlights (default: 7.0)")
    parser.add_argument("--max-highlights", "-m", type=int, default=5,
                       help="Maximum number of highlights (default: 5)")
    parser.add_argument("--whisper-model", "-w", default="base",
                       choices=["tiny", "base", "small", "medium", "large"],
                       help="Whisper model size (default: base)")
    parser.add_argument("--timeout", "-t", type=int, default=30,
                       help="Timeout for each analysis in seconds (default: 30)")
    parser.add_argument("--save-analysis", action="store_true",
                       help="Save detailed analysis to JSON file")
    
    args = parser.parse_args()
    
    # Validate input
    if not os.path.exists(args.video_path):
        print(f"❌ Video file not found: {args.video_path}")
        sys.exit(1)
    
    print("🎬 Audio-Enhanced Video Highlights Generator")
    print(f"πŸ“ Input: {args.video_path}")
    print(f"πŸ“ Output: {args.output}")
    print(f"⏱️  Analysis interval: {args.interval}s")
    print(f"🎯 Minimum score: {args.min_score}")
    print(f"πŸ† Max highlights: {args.max_highlights}")
    print(f"πŸŽ™οΈ  Whisper model: {args.whisper_model}")
    print()
    
    try:
        # Initialize analyzer
        analyzer = AudioVisualAnalyzer(
            whisper_model_size=args.whisper_model,
            timeout_seconds=args.timeout
        )
        
        # Extract segments for analysis
        segments = extract_frames_at_intervals(args.video_path, args.interval)
        print(f"πŸ“Š Analyzing {len(segments)} segments...")
        
        analyzed_segments = []
        temp_frame_path = "temp_frame.jpg"
        
        for i, segment in enumerate(segments):
            print(f"\nπŸ” Segment {i+1}/{len(segments)} (t={segment['start_time']:.1f}s)")
            
            # Save frame for visual analysis
            if save_frame_at_time(args.video_path, segment['start_time'], temp_frame_path):
                # Analyze segment
                analysis = analyzer.analyze_segment(args.video_path, segment, temp_frame_path)
                analyzed_segments.append(analysis)
                
                print(f"  πŸ‘οΈ  Visual: {analysis['visual_score']:.1f}/10")
                print(f"  πŸŽ™οΈ  Audio: '{analysis['audio_text'][:50]}...' ({analysis['audio_language']})")
                print(f"  🎯 Combined: {analysis['combined_score']:.1f}/10")
            else:
                print(f"  ❌ Failed to extract frame")
        
        # Cleanup temp frame
        try:
            os.unlink(temp_frame_path)
        except OSError:
            pass
        
        if not analyzed_segments:
            print("❌ No segments analyzed successfully")
            sys.exit(1)
        
        # Select best segments
        analyzed_segments.sort(key=lambda x: x['combined_score'], reverse=True)
        selected_segments = [s for s in analyzed_segments if s['combined_score'] >= args.min_score]
        selected_segments = selected_segments[:args.max_highlights]
        
        print(f"\nπŸ† Selected {len(selected_segments)} highlights:")
        for i, segment in enumerate(selected_segments):
            print(f"{i+1}. t={segment['start_time']:.1f}s, score={segment['combined_score']:.1f}")
            if segment['audio_text']:
                print(f"   Audio: \"{segment['audio_text'][:100]}...\"")
        
        if not selected_segments:
            print(f"❌ No segments met minimum score of {args.min_score}")
            sys.exit(1)
        
        # Create highlights video
        print(f"\n🎬 Creating highlights video...")
        success = create_highlights_video(args.video_path, selected_segments, args.output)
        
        if success:
            print(f"βœ… Audio-enhanced highlights created: {args.output}")
            
            # Save analysis if requested
            if args.save_analysis:
                analysis_file = args.output.replace('.mp4', '_analysis.json')
                with open(analysis_file, 'w') as f:
                    json.dump({
                        'input_video': args.video_path,
                        'output_video': args.output,
                        'settings': {
                            'interval': args.interval,
                            'min_score': args.min_score,
                            'max_highlights': args.max_highlights,
                            'whisper_model': args.whisper_model,
                            'timeout': args.timeout
                        },
                        'segments': analyzed_segments,
                        'selected_segments': selected_segments
                    }, f, indent=2)
                print(f"πŸ“Š Analysis saved: {analysis_file}")
        else:
            print("❌ Failed to create highlights video")
            sys.exit(1)
    
    except KeyboardInterrupt:
        print("\n⏹️  Operation cancelled by user")
        sys.exit(1)
    except Exception as e:
        print(f"❌ Error: {e}")
        sys.exit(1)

if __name__ == "__main__":
    main()