# Base image with GPU support and TensorFlow pre-installed
FROM tensorflow/tensorflow:2.15.0-gpu

# Install system dependencies
RUN apt-get update && apt-get install -y \
    git \
    wget \
    && rm -rf /var/lib/apt/lists/*

# Install system-level dependencies for OpenCV
RUN apt-get update && apt-get install -y \
    libglib2.0-0 \
    libsm6 \
    libxext6 \
    libxrender-dev \
    libgl1-mesa-glx \
    && rm -rf /var/lib/apt/lists/*

# Install Python packages; --ignore-installed keeps pip from trying to
# uninstall the pre-installed blinker package, which it cannot remove
RUN pip install --no-cache-dir --ignore-installed \
    torch \
    torchvision \
    transformers \
    requests \
    Flask \
    Pillow \
    huggingface_hub \
    tensorflow==2.15.0 \
    tensorflow_hub \
    opencv-python

# Set the Hugging Face cache to a guaranteed-writable directory
ENV TRANSFORMERS_CACHE=/app/cache
ENV HF_HOME=/app/cache
RUN mkdir -p /app/cache && chmod -R 777 /app/cache
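# (chmod 777 keeps the cache usable even if the Space runs the container as a non-root user)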

# Set up CUDA environment variables
ENV CUDA_VISIBLE_DEVICES=0
ENV NVIDIA_VISIBLE_DEVICES=all
ENV NVIDIA_DRIVER_CAPABILITIES=compute,utility
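# (NVIDIA_VISIBLE_DEVICES and NVIDIA_DRIVER_CAPABILITIES are read by the NVIDIA container
# runtime to decide which GPUs and driver libraries to expose; CUDA_VISIBLE_DEVICES=0 then
# restricts the frameworks to the first exposed GPU)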

# Create directories for the models
RUN mkdir -p /models/blip /models/clip

# Python script to download models using tensorflow_hub and huggingface_hub
RUN echo "import os\n\ | |
import tensorflow_hub as hub\n\n\ | |
# Download MoveNet model from TensorFlow Hub (loaded directly in app, not saved)\n\ | |
movenet_model = hub.load('https://tfhub.dev/google/movenet/singlepose/lightning/4')\n\n\ | |
# Download BLIP model and tokenizer using huggingface_hub\n\ | |
from transformers import BlipForConditionalGeneration, BlipProcessor\n\ | |
BlipForConditionalGeneration.from_pretrained('Salesforce/blip-image-captioning-large').save_pretrained('/models/blip')\n\ | |
BlipProcessor.from_pretrained('Salesforce/blip-image-captioning-large').save_pretrained('/models/blip')\n\n\ | |
# Download CLIP model and processor using huggingface_hub\n\ | |
from transformers import CLIPModel, CLIPProcessor\n\ | |
CLIPModel.from_pretrained('openai/clip-vit-large-patch14').save_pretrained('/models/clip')\n\ | |
CLIPProcessor.from_pretrained('openai/clip-vit-large-patch14').save_pretrained('/models/clip')" > download_models.py | |

# Run the script to download the models at build time
RUN python download_models.py
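# (running this during the build bakes the model weights into the image, so the container
# does not need to download them again at startup)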

# Copy the inference script (app.py) into the container
COPY app.py /app/app.py

# Expose port 7860, the default application port for Spaces, which the Flask app listens on
EXPOSE 7860

# Run the Flask app
CMD ["python", "/app/app.py"]