| """ | |
| DimensioDepth - Add Dimension to Everything | |
| Advanced AI Depth Estimation with 3D Visualization | |
| Powered by Depth-Anything V2 | Runs on Hugging Face Spaces | |
| """ | |
| import streamlit as st | |
| import numpy as np | |
| import cv2 | |
| from PIL import Image | |
| from pathlib import Path | |
| import sys | |

# Page config
st.set_page_config(
    page_title="DimensioDepth - AI Depth Estimation",
    page_icon="🎨",
    layout="wide"
)

# Add backend to path
sys.path.append(str(Path(__file__).parent / "backend"))

# Import backend utilities
from backend.utils.image_processing import (
    depth_to_colormap,
    create_side_by_side
)
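
# NOTE: backend/utils/image_processing.py is not shown here. From how it is used
# below, depth_to_colormap is assumed to take a depth map normalized to [0, 1]
# plus a cv2 colormap constant, and return an RGB uint8 image. A minimal sketch
# of that assumed behavior (illustrative only, not the actual backend code):
#
#     def depth_to_colormap(depth, colormap=cv2.COLORMAP_INFERNO):
#         depth_u8 = np.clip(depth * 255, 0, 255).astype(np.uint8)
#         colored_bgr = cv2.applyColorMap(depth_u8, colormap)
#         return cv2.cvtColor(colored_bgr, cv2.COLOR_BGR2RGB)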

# Try to import REAL AI model
@st.cache_resource  # cache the loaded model so it is not re-initialized on every Streamlit rerun
def load_model():
    try:
        print("[*] Attempting to import TransformersDepthEstimator...")
        from backend.utils.transformers_depth import TransformersDepthEstimator
        print("[*] Import successful! Loading REAL AI Depth-Anything V2 BASE model...")
        print("[*] This will download ~372MB on first run (one-time download)")
        depth_estimator = TransformersDepthEstimator(model_size="base")
        print("[+] REAL AI MODE ACTIVE - BASE MODEL!")
        print("[+] Quality: SUPERB (best available)")
        return depth_estimator, True, "BASE (372MB)"
    except Exception as e:
        print("[!] FULL ERROR TRACEBACK:")
        import traceback
        traceback.print_exc()
        print(f"[!] Error type: {type(e).__name__}")
        print(f"[!] Error message: {str(e)}")
        print("[*] Falling back to DEMO MODE")
        return None, False, "Demo Mode"

depth_estimator, USE_REAL_AI, MODEL_SIZE = load_model()
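
# Assumed interface for the estimator (inferred from how it is used below, not
# from the TransformersDepthEstimator source): predict(image) takes an RGB numpy
# array (H, W, 3) and returns a 2-D float depth map normalized to [0, 1].
# A rough sketch of how such a wrapper is commonly built on the transformers
# pipeline API -- the exact model id the backend uses is an assumption:
#
#     from transformers import pipeline
#     pipe = pipeline("depth-estimation",
#                     model="depth-anything/Depth-Anything-V2-Base-hf")
#     raw = np.array(pipe(Image.fromarray(image))["depth"], dtype=np.float32)
#     depth = (raw - raw.min()) / (raw.max() - raw.min() + 1e-8)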

def estimate_depth(image, colormap_style):
    """Estimate depth from an input image using REAL AI or DEMO MODE"""
    try:
        # Convert PIL to numpy if needed
        if isinstance(image, Image.Image):
            image = np.array(image)

        # Generate depth map
        if USE_REAL_AI:
            depth = depth_estimator.predict(image)
            mode_text = "REAL AI (Depth-Anything V2)"
        else:
            from backend.utils.demo_depth import generate_smart_depth
            depth = generate_smart_depth(image)
            mode_text = "DEMO MODE (Synthetic)"

        # Convert colormap style to cv2 constant
        colormap_dict = {
            "Inferno": cv2.COLORMAP_INFERNO,
            "Viridis": cv2.COLORMAP_VIRIDIS,
            "Plasma": cv2.COLORMAP_PLASMA,
            "Turbo": cv2.COLORMAP_TURBO,
            "Magma": cv2.COLORMAP_MAGMA,
            "Hot": cv2.COLORMAP_HOT,
            "Ocean": cv2.COLORMAP_OCEAN,
            "Rainbow": cv2.COLORMAP_RAINBOW
        }

        # Create colored depth map
        depth_colored = depth_to_colormap(depth, colormap_dict[colormap_style])

        # Create grayscale depth map
        depth_gray = (depth * 255).astype(np.uint8)
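        # Replicate the single depth channel into 3 channels (RGB) for display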
        depth_gray = cv2.cvtColor(depth_gray, cv2.COLOR_GRAY2RGB)

        return depth_colored, depth_gray, mode_text, image.shape, depth.shape
    except Exception as e:
        st.error(f"Error during depth estimation: {str(e)}")
        import traceback
        traceback.print_exc()
        return None, None, None, None, None

# Header
st.title("🎨 DimensioDepth - Add Dimension to Everything")
st.markdown("### Transform 2D images into stunning 3D depth visualizations")

# Status banner
if USE_REAL_AI:
    st.success(f"🚀 REAL AI MODE ACTIVE! - Powered by Depth-Anything V2 {MODEL_SIZE} - SUPERB Quality!")
else:
    st.info("Running in DEMO MODE - Ultra-fast synthetic depth estimation")

st.markdown("---")

# Main interface
col1, col2 = st.columns(2)

with col1:
    st.subheader("Input")
    uploaded_file = st.file_uploader("Upload Your Image", type=['png', 'jpg', 'jpeg'])
    colormap_style = st.selectbox(
        "Colormap Style",
        ["Inferno", "Viridis", "Plasma", "Turbo", "Magma", "Hot", "Ocean", "Rainbow"]
    )
    process_btn = st.button("🚀 Generate Depth Map", type="primary")

with col2:
    st.subheader("Output")
    depth_placeholder = st.empty()

# Processing
# Initialize so the video export section below can check this safely even when
# no depth map has been generated in the current rerun (avoids a NameError).
depth_colored = None

if uploaded_file is not None and process_btn:
    # Load image
    image = Image.open(uploaded_file)

    with col1:
        st.image(image, caption="Original Image", use_column_width=True)

    with st.spinner("Generating depth map..."):
        depth_colored, depth_gray, mode_text, input_shape, output_shape = estimate_depth(image, colormap_style)

    if depth_colored is not None:
        with col2:
            tab1, tab2 = st.tabs(["Colored", "Grayscale"])
            with tab1:
                st.image(depth_colored, caption="Depth Map (Colored)", use_column_width=True)
            with tab2:
                st.image(depth_gray, caption="Depth Map (Grayscale)", use_column_width=True)

        # Info
        st.success("✅ Depth Estimation Complete!")
        st.info(f"""
**Mode**: {mode_text}
**Input Size**: {input_shape[1]}x{input_shape[0]}
**Output Size**: {output_shape[1]}x{output_shape[0]}
**Colormap**: {colormap_style}
{f'**Powered by**: Depth-Anything V2 {MODEL_SIZE}' if USE_REAL_AI else '**Processing**: Ultra-fast (<50ms) synthetic depth'}
""")

# Video Export Section
st.markdown("---")
st.subheader("🎬 Video Export")

if uploaded_file is not None and depth_colored is not None:
    with st.expander("Export Depth Map as Video"):
        col_vid1, col_vid2 = st.columns(2)
        with col_vid1:
            video_duration = st.slider("Duration (seconds)", 1, 10, 3)
            video_fps = st.selectbox("FPS", [24, 30, 60], index=1)
        with col_vid2:
            video_resolution = st.selectbox("Resolution", ["Original", "1080p", "720p", "Square 1080p"])
            video_effect = st.selectbox("Effect", ["Zoom In", "Zoom Out", "Pan Left", "Pan Right", "Rotate"])
        if st.button("🎬 Export Video", type="primary"):
            with st.spinner("Generating video..."):
                try:
                    import tempfile

                    # Get dimensions
                    if video_resolution == "1080p":
                        width, height = 1920, 1080
                    elif video_resolution == "720p":
                        width, height = 1280, 720
                    elif video_resolution == "Square 1080p":
                        width, height = 1080, 1080
                    else:
                        height, width = depth_colored.shape[:2]

                    # Resize depth map
                    depth_resized = cv2.resize(depth_colored, (width, height))

                    # Create video
                    total_frames = video_duration * video_fps
                    with tempfile.NamedTemporaryFile(delete=False, suffix='.mp4') as tmp_file:
                        output_path = tmp_file.name

                    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
                    out = cv2.VideoWriter(output_path, fourcc, video_fps, (width, height))
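                    # NOTE: 'mp4v' (MPEG-4 Part 2) is the codec most pip OpenCV
                    # builds ship with; some browsers can download but not preview
                    # such files. If in-browser playback matters, re-encoding to
                    # H.264 (e.g. with ffmpeg) is a common workaround -- left
                    # as-is here.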
                    for frame_num in range(total_frames):
                        progress = frame_num / total_frames

                        # Apply effect
                        if video_effect == "Zoom In":
                            scale = 1.0 + (progress * 0.5)  # Zoom from 1x to 1.5x
                            center_x, center_y = width // 2, height // 2
                            new_w, new_h = int(width / scale), int(height / scale)
                            x1, y1 = center_x - new_w // 2, center_y - new_h // 2
                            x2, y2 = x1 + new_w, y1 + new_h
                            cropped = depth_resized[max(0, y1):min(height, y2), max(0, x1):min(width, x2)]
                            frame = cv2.resize(cropped, (width, height))
                        elif video_effect == "Zoom Out":
                            scale = 1.5 - (progress * 0.5)  # Zoom from 1.5x to 1x
                            center_x, center_y = width // 2, height // 2
                            new_w, new_h = int(width / scale), int(height / scale)
                            x1, y1 = center_x - new_w // 2, center_y - new_h // 2
                            x2, y2 = x1 + new_w, y1 + new_h
                            cropped = depth_resized[max(0, y1):min(height, y2), max(0, x1):min(width, x2)]
                            frame = cv2.resize(cropped, (width, height))
                        elif video_effect == "Pan Left":
                            offset = int(width * progress * 0.3)
                            frame = np.roll(depth_resized, -offset, axis=1)
                        elif video_effect == "Pan Right":
                            offset = int(width * progress * 0.3)
                            frame = np.roll(depth_resized, offset, axis=1)
                        elif video_effect == "Rotate":
                            angle = progress * 360
                            center = (width // 2, height // 2)
                            rotation_matrix = cv2.getRotationMatrix2D(center, angle, 1.0)
                            frame = cv2.warpAffine(depth_resized, rotation_matrix, (width, height))
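                            # cv2.warpAffine fills the uncovered corners with black
                            # by default, so the rotation effect shows dark edges.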
                        else:
                            frame = depth_resized.copy()

                        # Convert RGB to BGR for cv2
                        frame_bgr = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
                        out.write(frame_bgr)

                    out.release()

                    # Read video and provide download
                    with open(output_path, 'rb') as f:
                        video_bytes = f.read()
                    # Remove the temp file once its bytes are in memory
                    Path(output_path).unlink(missing_ok=True)

                    st.success(f"✅ Video generated! {total_frames} frames at {video_fps} FPS")
                    st.download_button(
                        label="📥 Download Video",
                        data=video_bytes,
                        file_name=f"depth_video_{video_effect.lower().replace(' ', '_')}.mp4",
                        mime="video/mp4"
                    )
                except Exception as e:
                    st.error(f"Error generating video: {str(e)}")
                    import traceback
                    traceback.print_exc()

# Info section
st.markdown("---")
st.markdown("""
## 💡 About DimensioDepth

### Features:
- ✅ Real AI depth estimation with Depth-Anything V2
- ✅ Multiple colormap styles for visualization
- ✅ Fast processing (~800ms on CPU, ~200ms on GPU)
- ✅ SUPERB quality depth maps
- ✅ **NEW!** Video export with camera effects

### Use Cases:
- 🎨 **Creative & Artistic**: Depth-enhanced photos, 3D effects
- 🎬 **VFX & Film**: Depth map generation for compositing
- 🔬 **Research**: Computer vision, depth perception studies
- 📱 **Content Creation**: Engaging 3D effects for social media

Made with ❤️ for the AI community
""")