Add Whisper service Dockerfile with GPU support
services/whisper-service/Dockerfile (new file, 47 lines)
@@ -0,0 +1,47 @@
FROM nvidia/cuda:11.8.0-runtime-ubuntu22.04

# Set environment variables
ENV DEBIAN_FRONTEND=noninteractive
ENV PYTHONUNBUFFERED=1

# Install system dependencies
RUN apt-get update && apt-get install -y \
    python3 \
    python3-pip \
    python3-dev \
    ffmpeg \
    git \
    wget \
    && rm -rf /var/lib/apt/lists/*

WORKDIR /app

# Copy requirements first for better Docker layer caching
COPY requirements.txt .

# Install Python dependencies
RUN pip3 install --no-cache-dir -r requirements.txt

# Copy source code
COPY src/ ./src/

# Create necessary directories
RUN mkdir -p /app/audio/processed /app/models /app/logs

# Download Whisper model cache (optional - will download on first use if not present)
# RUN python3 -c "from faster_whisper import WhisperModel; WhisperModel('large-v2', device='cpu')"

# Run as non-root user
RUN useradd -m -u 1001 whisper
RUN chown -R whisper:whisper /app
USER whisper

# Health check
HEALTHCHECK --interval=30s --timeout=15s --start-period=60s --retries=3 \
    CMD python3 src/healthcheck.py || exit 1

# Expose port
EXPOSE 8000

# Start the service
CMD ["python3", "src/api.py"]
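
The commented-out RUN line shows how the Whisper model could be baked into the image at build time; otherwise faster-whisper downloads it on first use. A rough sketch of how the service might load the model on the GPU at runtime, using the faster-whisper API (the "large-v2" size follows the commented line; the /app/models cache directory and the audio path are assumptions):

from faster_whisper import WhisperModel

# Load the model onto the GPU provided by the CUDA base image.
model = WhisperModel(
    "large-v2",
    device="cuda",
    compute_type="float16",
    download_root="/app/models",  # assumed cache location (created above)
)

# Transcribe a file; segments are generated lazily as decoding proceeds.
segments, info = model.transcribe("/app/audio/processed/example.wav")
print(f"Detected language: {info.language}")
for segment in segments:
    print(f"[{segment.start:.2f} -> {segment.end:.2f}] {segment.text}")

Note that the CUDA runtime base image only provides the libraries inside the container; GPU access still has to be granted when the container is started, for example with docker run --gpus all on a host that has the NVIDIA Container Toolkit installed.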