#!/usr/bin/env python3
"""
Download Depth-Anything V2 ONNX models from HuggingFace

This script downloads optimized ONNX versions of Depth-Anything V2 models
for fast inference without a PyTorch dependency.
"""
import sys
from pathlib import Path

from huggingface_hub import hf_hub_download

# Model configurations
MODELS = {
    "small": {
        "repo_id": "depth-anything/Depth-Anything-V2-Small",
        "filename": "depth_anything_v2_vits.onnx",
        "size": "~100MB",
        "speed": "Fast (25M params)"
    },
    "large": {
        "repo_id": "depth-anything/Depth-Anything-V2-Large",
        "filename": "depth_anything_v2_vitl.onnx",
        "size": "~1.3GB",
        "speed": "Slower (335M params)"
    }
}

def download_model(model_type: str, cache_dir: str = "./models/cache"):
    """
    Download a Depth-Anything V2 ONNX model.

    Args:
        model_type: Either 'small' or 'large'
        cache_dir: Directory in which to cache models
    """
    if model_type not in MODELS:
        print(f"❌ Error: Unknown model type '{model_type}'")
        print(f"Available models: {', '.join(MODELS.keys())}")
        return False

    model_info = MODELS[model_type]
    cache_path = Path(cache_dir)
    cache_path.mkdir(parents=True, exist_ok=True)

    print(f"\n📥 Downloading {model_type} model...")
    print(f"  Repo:  {model_info['repo_id']}")
    print(f"  File:  {model_info['filename']}")
    print(f"  Size:  {model_info['size']}")
    print(f"  Speed: {model_info['speed']}")
    try:
        # Note: official ONNX exports may not be published in these repos yet.
        # If the download fails, you can either:
        # 1. Convert the PyTorch models to ONNX yourself (see convert_to_onnx.py)
        # 2. Use a community ONNX conversion
        # 3. Host your own converted models
        model_path = hf_hub_download(
            repo_id=model_info['repo_id'],
            filename=model_info['filename'],
            cache_dir=str(cache_path)
        )
        print(f"\n✓ Model saved to: {model_path}")
        return True
    except Exception as e:
        print(f"\n❌ Error downloading model: {e}")
        print("\n⚠️ IMPORTANT NOTE:")
        print("Official ONNX models may not be available on HuggingFace yet.")
        print("You'll need to convert the PyTorch models to ONNX format.")
        print("\nTo convert models yourself:")
        print("1. Install: pip install torch transformers")
        print("2. Download the PyTorch model")
        print("3. Export to ONNX using torch.onnx.export()")
        print("\nAlternatively, check these resources:")
        print("- https://github.com/LiheYoung/Depth-Anything")
        print("- Community ONNX conversions on HuggingFace")
        return False
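
# Optional helper: probe a repo before attempting a download. A minimal
# sketch using huggingface_hub.list_repo_files; this function is our own
# addition, not part of the original script.
def onnx_available(repo_id: str, filename: str) -> bool:
    """Return True if `filename` exists in the given HuggingFace repo."""
    from huggingface_hub import list_repo_files
    try:
        return filename in list_repo_files(repo_id)
    except Exception:
        return False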

def create_conversion_script():
    """Create a helper script for converting a PyTorch model to ONNX"""
    script_content = '''#!/usr/bin/env python3
"""
Convert a Depth-Anything PyTorch model to ONNX
"""
import torch
from transformers import AutoModelForDepthEstimation


class DepthWrapper(torch.nn.Module):
    """Unwrap the transformers ModelOutput so torch.onnx.export
    sees a plain tensor output."""

    def __init__(self, model):
        super().__init__()
        self.model = model

    def forward(self, pixel_values):
        return self.model(pixel_values).predicted_depth


def convert_to_onnx(model_name, output_path):
    """Convert a model to ONNX format"""
    print(f"Loading PyTorch model: {model_name}")
    model = AutoModelForDepthEstimation.from_pretrained(model_name)
    model.eval()
    wrapper = DepthWrapper(model)

    # Dummy input: 518x518 is the model's native inference resolution
    dummy_input = torch.randn(1, 3, 518, 518)

    print(f"Exporting to ONNX: {output_path}")
    torch.onnx.export(
        wrapper,
        dummy_input,
        output_path,
        input_names=['input'],
        output_names=['output'],
        dynamic_axes={
            'input': {0: 'batch', 2: 'height', 3: 'width'},
            # predicted_depth is (batch, height, width), i.e. 3D
            'output': {0: 'batch', 1: 'height', 2: 'width'}
        },
        opset_version=17
    )
    print(f"✓ Conversion complete: {output_path}")


if __name__ == "__main__":
    # Example: the Depth-Anything V1 small checkpoint; swap in a V2
    # checkpoint id if one is available on HuggingFace.
    convert_to_onnx(
        "LiheYoung/depth-anything-small-hf",
        "depth_anything_v2_vits.onnx"
    )
'''
    script_path = Path("convert_to_onnx.py")
    script_path.write_text(script_content)
    script_path.chmod(0o755)
    print(f"\n✓ Created conversion script: {script_path}")
    print("  Run with: python convert_to_onnx.py")

def main():
    """Main download function"""
    print("=" * 60)
    print("Depth-Anything V2 Model Downloader")
    print("=" * 60)

    # Create the models directory
    models_dir = Path("./models/cache")
    models_dir.mkdir(parents=True, exist_ok=True)

    # Download models based on command-line args
    models_to_download = sys.argv[1:] if len(sys.argv) > 1 else ['small']
    if 'all' in models_to_download:
        models_to_download = list(MODELS.keys())

    for model_type in models_to_download:
        download_model(model_type)

    # Create the conversion helper
    print("\n" + "=" * 60)
    create_conversion_script()

    print("\n" + "=" * 60)
    print("Next Steps:")
    print("=" * 60)
    print("1. Convert PyTorch models to ONNX (see convert_to_onnx.py)")
    print("2. Place the ONNX models in ./models/cache/")
    print("3. Update .env with the correct model paths")
    print("4. Start the server: uvicorn api.main:app --reload")
    print("=" * 60)

if __name__ == "__main__":
    main()