File size: 1,428 Bytes
de2a663
 
7158cbf
f9981ad
a03e81c
 
7158cbf
a03e81c
7158cbf
 
ec3e257
 
 
7158cbf
ec3e257
 
 
 
7158cbf
a03e81c
0bb966a
 
a03e81c
 
ec3e257
 
 
 
 
 
 
 
 
 
 
de2a663
22e6a76
 
 
ec3e257
de2a663
7158cbf
ec3e257
 
de2a663
 
ec3e257
de2a663
a03e81c
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
# Base image with Python. Pinned to a minor version; for fully reproducible
# production builds, pin by digest (python:3.10-slim@sha256:...).
FROM python:3.10-slim

# Upgrade pip without leaving a wheel cache in the layer (hadolint DL3042).
RUN pip install --no-cache-dir --upgrade pip

# System-level dependencies.
# --no-install-recommends keeps the image lean (hadolint DL3015); the apt
# list cache is removed in the same layer so it never persists in the image.
RUN apt-get update && apt-get install -y --no-install-recommends \
    build-essential \
    curl \
    git \
    software-properties-common \
    && rm -rf /var/lib/apt/lists/*

# Install Ollama (replace with the latest stable version if needed)
# Check ollama.com for the latest install script if this one is outdated
# NOTE(review): piping curl into sh executes unverified remote code, and
# under the default /bin/sh a curl failure is masked (no pipefail, DL4006).
# Prefer downloading the script, verifying a checksum, then running it.
RUN curl -fsSL https://ollama.com/install.sh | sh

# Set up user for security (Hugging Face Spaces often run as user `user`)
RUN useradd -m -u 1000 user
USER user
WORKDIR /home/user

# Runtime environment: where Ollama stores pulled models, and the user-level
# bin dir so pip-installed console scripts (streamlit, etc.) are on PATH.
# Grouped into one ENV instruction; trailing whitespace removed.
ENV OLLAMA_MODELS=/home/user/models \
    PATH="/home/user/.local/bin:$PATH"

# Install Python dependencies.
# requirements.txt is copied on its own first so this (slow) install layer
# stays cached as long as the manifest is unchanged, even when source changes.
# Runs as `user`, so packages land in ~/.local (already on PATH).
COPY --chown=user:user requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt

# Copy your application code (after deps, so source edits don't bust the
# dependency cache). NOTE(review): add a .dockerignore to keep .git, caches,
# and any .env files out of the build context.
COPY --chown=user:user . .

# *** Pull Ollama Models (This is where your equivalent commands go) ***
# The `ollama serve` command must be running in the background for
# `ollama pull` to work, so model pulls and process startup are both
# handled inside start.sh at container start time.

# Copy the entrypoint script and make it executable.
COPY --chown=user:user start.sh .
RUN chmod +x start.sh

# Expose Streamlit (8501) and Ollama (11434) ports — documentation only;
# ports still need to be published at `docker run` time.
EXPOSE 8501
EXPOSE 11434

# Healthcheck for Hugging Face Spaces.
# --start-period gives Ollama and Streamlit time to boot before failed
# probes count toward --retries; without it the container can be marked
# unhealthy while it is still starting up.
HEALTHCHECK --interval=30s --timeout=5s --start-period=60s --retries=3 \
  CMD curl --fail http://localhost:8501/_stcore/health || exit 1

# Start both Ollama and Streamlit via the script. Exec form keeps the
# script as PID 1 so it receives SIGTERM from `docker stop`; start.sh
# should `exec` its final foreground process for clean signal handling.
ENTRYPOINT ["./start.sh"]