#!/bin/bash
# Exit immediately if any download or setup step fails.
set -e

echo "Downloading optimized models for HF Spaces..."

# Create directories
mkdir -p pretrained_models

# Install huggingface-hub (with CLI extras) if not already installed
pip install "huggingface_hub[cli]"

# Download only the essential files for wav2vec2-base-960h (the smaller model).
# Note: all patterns are passed to a single --include flag; repeating --include
# would overwrite the earlier patterns instead of combining them.
echo "Downloading wav2vec2-base-960h (audio processing)..."
huggingface-cli download facebook/wav2vec2-base-960h \
    --include "*.json" "*.bin" "tokenizer*" \
    --local-dir ./pretrained_models/wav2vec2-base-960h

# For the large models, use streaming instead of a full download.
echo "Setting up model configuration for streaming..."

# Create a model config file that enables streaming/lazy loading at runtime.
cat > ./pretrained_models/model_config.json << EOF
{
  "models": {
    "omnivatar": {
      "repo_id": "OmniAvatar/OmniAvatar-14B",
      "use_streaming": true,
      "cache_dir": "./cache"
    },
    "wan_t2v": {
      "repo_id": "Wan-AI/Wan2.1-T2V-14B",
      "use_streaming": true,
      "cache_dir": "./cache"
    }
  }
}
EOF

echo "Model setup completed with streaming configuration!"
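
# Optional sanity check (a minimal sketch): confirm the generated config is
# valid JSON and list the repos configured for streaming. This only assumes a
# Python 3 interpreter is available (already implied by the pip install above);
# how the application actually consumes model_config.json is app-specific and
# not defined by this script.
python3 - << 'PYEOF'
import json

with open("./pretrained_models/model_config.json") as f:
    cfg = json.load(f)

# Print each configured repo and whether it will be streamed/lazily loaded.
for name, spec in cfg["models"].items():
    print(f"{name}: {spec['repo_id']} (streaming={spec['use_streaming']})")
PYEOF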