🎬 CRITICAL FIX: Export real photo instead of depth map!
PROBLEM FIXED:
- Video exports were showing the colored depth visualization (grayscale/heatmap)
- Users wanted to see the actual photo with cinematic camera effects
- All 14 camera effects (Zoom, Pan, Dolly, Ken Burns, etc.) were applied to the depth map
SOLUTION:
✅ Changed video export to use original_image instead of depth_colored (see the sketch below)
✅ All camera effects now animate the REAL PHOTO
✅ Beautiful cinematic videos with the actual image content
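For reference, a minimal sketch of the new frame source, assuming the uploaded photo is stored in st.session_state['original_image'] as an RGB NumPy array (as the diff below suggests). The function name is hypothetical, and the 1920x1080 mapping for the "1080p" option is an assumption — that branch is not shown in the diff:

```python
import cv2
import numpy as np

def prepare_export_frame_source(original_image: np.ndarray, video_resolution: str) -> np.ndarray:
    """Resize the real photo (not the depth map) to the chosen export resolution."""
    if video_resolution == "1080p":
        width, height = 1920, 1080   # assumed mapping; this branch is not visible in the diff
    elif video_resolution == "Square 1080p":
        width, height = 1080, 1080
    else:
        # Fall back to the photo's own dimensions
        height, width = original_image.shape[:2]
    # Every camera effect now animates this resized copy of the real photo
    return cv2.resize(original_image, (width, height))
```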
NOW YOU GET:
🎨 Real photo with smooth zoom effects
🎬 Actual image with Ken Burns panning
📸 Original colors with all camera movements
✅ Professional video export with your real photos!
All 14 effects fixed (a sketch of the shared crop-and-resize technique follows the list):
- Zoom In/Out - Real photo zooms
- Pan (Left/Right/Up/Down) - Real photo panning
- Dolly In/Out - Real photo dollying
- Ken Burns - Real photo with zoom + pan
- Rotate CW/CCW - Real photo rotation
- Tilt Up/Down - Real photo perspective
- Orbit - Real photo orbital motion
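The zoom and dolly variants all share the same crop-and-resize idea; here is a minimal standalone sketch (the function name is hypothetical, the math mirrors the diff below):

```python
import cv2
import numpy as np

def zoom_in_frame(image_resized: np.ndarray, progress: float,
                  width: int, height: int) -> np.ndarray:
    """Crop a shrinking centered window and scale it back up, so the photo appears to zoom in."""
    scale = 1.0 + (progress * 0.5)                      # up to 1.5x over the clip
    new_w, new_h = int(width / scale), int(height / scale)
    center_x, center_y = width // 2, height // 2
    x1, y1 = center_x - new_w // 2, center_y - new_h // 2
    x2, y2 = x1 + new_w, y1 + new_h
    cropped = image_resized[max(0, y1):min(height, y2), max(0, x1):min(width, x2)]
    return cv2.resize(cropped, (width, height))
```

The pan effects follow the same per-frame pattern but simply shift the resized photo with np.roll instead of cropping it.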
🤖 Generated with [Claude Code](https://claude.com/claude-code)
Co-Authored-By: Claude <noreply@anthropic.com>
@@ -182,7 +182,9 @@ if 'depth_colored' in st.session_state:
 import cv2
 import tempfile
 
-
+# CRITICAL FIX: Use original image instead of depth map for video export!
+# This ensures we export the real photo with camera effects, not the colored depth visualization
+original_image = st.session_state['original_image']
 
 # Get dimensions
 if video_resolution == "1080p":
@@ -192,10 +194,10 @@ if 'depth_colored' in st.session_state:
 elif video_resolution == "Square 1080p":
     width, height = 1080, 1080
 else:
-    height, width =
+    height, width = original_image.shape[:2]
 
-# Resize depth map
-
+# Resize original image (not depth map!)
+image_resized = cv2.resize(original_image, (width, height))
 
 # Create video
 total_frames = video_duration * video_fps
@@ -209,14 +211,14 @@ if 'depth_colored' in st.session_state:
 for frame_num in range(total_frames):
     progress = frame_num / total_frames
 
-    # Apply effect
+    # Apply effect - NOW USING REAL PHOTO instead of depth map!
     if video_effect == "Zoom In":
         scale = 1.0 + (progress * 0.5)
         center_x, center_y = width // 2, height // 2
         new_w, new_h = int(width / scale), int(height / scale)
         x1, y1 = center_x - new_w // 2, center_y - new_h // 2
         x2, y2 = x1 + new_w, y1 + new_h
-        cropped =
+        cropped = image_resized[max(0, y1):min(height, y2), max(0, x1):min(width, x2)]
         frame = cv2.resize(cropped, (width, height))
 
     elif video_effect == "Zoom Out":
@@ -225,7 +227,7 @@ if 'depth_colored' in st.session_state:
         new_w, new_h = int(width / scale), int(height / scale)
         x1, y1 = center_x - new_w // 2, center_y - new_h // 2
         x2, y2 = x1 + new_w, y1 + new_h
-        cropped =
+        cropped = image_resized[max(0, y1):min(height, y2), max(0, x1):min(width, x2)]
         frame = cv2.resize(cropped, (width, height))
 
     elif video_effect == "Ken Burns (Zoom + Pan)":
@@ -238,7 +240,7 @@ if 'depth_colored' in st.session_state:
         new_w, new_h = int(width / scale), int(height / scale)
         x1, y1 = center_x - new_w // 2, center_y - new_h // 2
         x2, y2 = x1 + new_w, y1 + new_h
-        cropped =
+        cropped = image_resized[max(0, y1):min(height, y2), max(0, x1):min(width, x2)]
         frame = cv2.resize(cropped, (width, height))
 
     elif video_effect == "Dolly In":
@@ -248,7 +250,7 @@ if 'depth_colored' in st.session_state:
         new_w, new_h = int(width / scale), int(height / scale)
         x1, y1 = center_x - new_w // 2, center_y - new_h // 2
         x2, y2 = x1 + new_w, y1 + new_h
-        cropped =
+        cropped = image_resized[max(0, y1):min(height, y2), max(0, x1):min(width, x2)]
         frame = cv2.resize(cropped, (width, height))
 
     elif video_effect == "Dolly Out":
@@ -257,24 +259,24 @@ if 'depth_colored' in st.session_state:
         new_w, new_h = int(width / scale), int(height / scale)
         x1, y1 = center_x - new_w // 2, center_y - new_h // 2
         x2, y2 = x1 + new_w, y1 + new_h
-        cropped =
+        cropped = image_resized[max(0, y1):min(height, y2), max(0, x1):min(width, x2)]
         frame = cv2.resize(cropped, (width, height))
 
     elif video_effect == "Pan Left":
         offset = int(width * progress * 0.3)
-        frame = np.roll(
+        frame = np.roll(image_resized, -offset, axis=1)
 
     elif video_effect == "Pan Right":
        offset = int(width * progress * 0.3)
-        frame = np.roll(
+        frame = np.roll(image_resized, offset, axis=1)
 
     elif video_effect == "Pan Up":
         offset = int(height * progress * 0.3)
-        frame = np.roll(
+        frame = np.roll(image_resized, -offset, axis=0)
 
     elif video_effect == "Pan Down":
         offset = int(height * progress * 0.3)
-        frame = np.roll(
+        frame = np.roll(image_resized, offset, axis=0)
 
     elif video_effect == "Tilt Up":
         # Tilt up: perspective transformation
@@ -287,7 +289,7 @@ if 'depth_colored' in st.session_state:
             [width, height]
         ])
         matrix = cv2.getPerspectiveTransform(pts1, pts2)
-        frame = cv2.warpPerspective(
+        frame = cv2.warpPerspective(image_resized, matrix, (width, height))
 
     elif video_effect == "Tilt Down":
         tilt_factor = progress * 0.3
@@ -299,19 +301,19 @@ if 'depth_colored' in st.session_state:
             [width, height - int(height * tilt_factor)]
         ])
         matrix = cv2.getPerspectiveTransform(pts1, pts2)
-        frame = cv2.warpPerspective(
+        frame = cv2.warpPerspective(image_resized, matrix, (width, height))
 
     elif video_effect == "Rotate CW":
         angle = progress * 360
         center = (width // 2, height // 2)
         rotation_matrix = cv2.getRotationMatrix2D(center, -angle, 1.0)
-        frame = cv2.warpAffine(
+        frame = cv2.warpAffine(image_resized, rotation_matrix, (width, height))
 
     elif video_effect == "Rotate CCW":
         angle = progress * 360
         center = (width // 2, height // 2)
         rotation_matrix = cv2.getRotationMatrix2D(center, angle, 1.0)
-        frame = cv2.warpAffine(
+        frame = cv2.warpAffine(image_resized, rotation_matrix, (width, height))
 
     elif video_effect == "Orbit":
         # Orbit: rotate + slight zoom
@@ -319,10 +321,10 @@ if 'depth_colored' in st.session_state:
         scale = 1.0 + (np.sin(progress * np.pi) * 0.2)
         center = (width // 2, height // 2)
         rotation_matrix = cv2.getRotationMatrix2D(center, angle, scale)
-        frame = cv2.warpAffine(
+        frame = cv2.warpAffine(image_resized, rotation_matrix, (width, height))
 
     else:
-        frame =
+        frame = image_resized.copy()
 
     # Convert RGB to BGR for cv2
     frame_bgr = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
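The hunks above only change the frame source and the per-effect code; the writer setup is not part of this diff. A rough end-to-end sketch of how the exported clip could be assembled, assuming mp4v encoding via cv2.VideoWriter and a temporary output file (the app's actual codec and output handling are not shown here):

```python
import cv2
import numpy as np
import tempfile

def export_zoom_clip(image_resized: np.ndarray, video_duration: int = 5, video_fps: int = 30) -> str:
    """Write a simple zoom-in clip of the real photo and return the temp file path."""
    height, width = image_resized.shape[:2]
    total_frames = video_duration * video_fps
    out_path = tempfile.NamedTemporaryFile(suffix=".mp4", delete=False).name
    fourcc = cv2.VideoWriter_fourcc(*"mp4v")            # assumed codec choice
    writer = cv2.VideoWriter(out_path, fourcc, video_fps, (width, height))
    for frame_num in range(total_frames):
        progress = frame_num / total_frames
        scale = 1.0 + (progress * 0.5)
        new_w, new_h = int(width / scale), int(height / scale)
        x1 = width // 2 - new_w // 2
        y1 = height // 2 - new_h // 2
        cropped = image_resized[y1:y1 + new_h, x1:x1 + new_w]
        frame = cv2.resize(cropped, (width, height))
        # Frames are RGB in the app; OpenCV's writer expects BGR
        writer.write(cv2.cvtColor(frame, cv2.COLOR_RGB2BGR))
    writer.release()
    return out_path
```

In the Streamlit app the returned path would then feed the video preview and download button; that wiring is outside this diff.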