ngupta949 committed
Commit de2a663 · verified · Parent: ec3e257

Update Dockerfile

Files changed (1): Dockerfile (+12, -15)
Dockerfile CHANGED
@@ -1,8 +1,5 @@
-# Use a base image that has Python and can support Ollama (e.g., Ubuntu or a CUDA image for GPU)
-# For GPU support, use nvidia/cuda base image (e.g., nvidia/cuda:12.1.1-devel-ubuntu22.04)
-# For CPU only, a regular Ubuntu or Python image should work.
-# Example for CPU (adjust as needed):
-FROM ubuntu:22.04
+# Base image with Python
+FROM python:3.10-slim
 
 # Set environment variables
 ENV OLLAMA_HOST=0.0.0.0:11434 # Ollama's API will be accessible on all interfaces
@@ -36,25 +33,25 @@ COPY --chown=user:user . .
 # The `ollama serve` command must be running in the background for `ollama pull` to work.
 # We'll use a trick with a shell script for this.
 
-# Create a script to start Ollama and pull models
+# Create a startup script
 RUN echo '#!/bin/bash' > /home/user/start_ollama_and_app.sh && \
-    echo 'nohup ollama serve &' >> /home/user/start_ollama_and_app.sh && \
+    echo 'echo "Starting Ollama..."' >> /home/user/start_ollama_and_app.sh && \
+    echo 'ollama serve &' >> /home/user/start_ollama_and_app.sh && \
     echo 'sleep 5' >> /home/user/start_ollama_and_app.sh && \
     echo 'ollama pull nomic-embed-text' >> /home/user/start_ollama_and_app.sh && \
     echo 'ollama pull llama2' >> /home/user/start_ollama_and_app.sh && \
     echo 'ollama pull mistral' >> /home/user/start_ollama_and_app.sh && \
     echo 'ollama pull tinyllama' >> /home/user/start_ollama_and_app.sh && \
-    echo 'sleep 5' >> /home/user/start_ollama_and_app.sh && \
-    echo 'streamlit run app.py --server.port 7860 --server.address 0.0.0.0' >> /home/user/start_ollama_and_app.sh && \
+    echo 'echo "Launching Streamlit..."' >> /home/user/start_ollama_and_app.sh && \
+    echo 'streamlit run src/streamlit_app.py --server.port=8501 --server.address=0.0.0.0' >> /home/user/start_ollama_and_app.sh && \
     chmod +x /home/user/start_ollama_and_app.sh
 
-# Expose the port your Streamlit app listens on (as defined in README.md)
+# Expose Streamlit and Ollama ports
 EXPOSE 8501
-
-# Expose Ollama's API port (if your app directly connects to it)
 EXPOSE 11434
 
-HEALTHCHECK CMD curl --fail http://localhost:8501/_stcore/health
-
-ENTRYPOINT ["streamlit", "run", "src/streamlit_app.py", "--server.port=8501", "--server.address=0.0.0.0"]
+# Healthcheck for Hugging Face Spaces
+HEALTHCHECK CMD curl --fail http://localhost:8501/_stcore/health || exit 1
+
+# Start both Ollama and Streamlit via the script
+ENTRYPOINT ["/home/user/start_ollama_and_app.sh"]
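
The generated script waits a fixed `sleep 5` between `ollama serve &` and the first `ollama pull`, which is a race: on a slow cold start the pulls can fire before the server is listening. A more robust version polls Ollama's API until it answers. A minimal sketch of the script's contents, assuming Ollama's default endpoint on localhost:11434 and that `curl` and `ollama` are on PATH (written out directly here for readability; in the Dockerfile it could be COPYed in or emitted with a heredoc):

    #!/bin/bash
    # Sketch of /home/user/start_ollama_and_app.sh with a readiness poll
    # instead of a fixed sleep. Assumes `ollama` and `curl` are available.
    echo "Starting Ollama..."
    ollama serve &

    # GET /api/tags lists local models; use it as a readiness probe,
    # giving up after ~30 seconds instead of hanging forever.
    for i in {1..30}; do
        if curl --silent --fail http://localhost:11434/api/tags > /dev/null; then
            break
        fi
        sleep 1
    done

    # Pull the models the app expects (same list as in the Dockerfile).
    for model in nomic-embed-text llama2 mistral tinyllama; do
        ollama pull "$model"
    done

    echo "Launching Streamlit..."
    exec streamlit run src/streamlit_app.py --server.port=8501 --server.address=0.0.0.0

Using `exec` lets Streamlit replace the shell as the container's main process, so stop signals from the runtime reach it directly.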
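Two assumptions baked into this Dockerfile are worth double-checking, since neither is visible in the unchanged lines of the diff: `python:3.10-slim` does not ship `curl` (which the HEALTHCHECK needs), and the `ollama` binary must be installed somewhere earlier in the file. If they are in fact missing, something like the following (using Ollama's official install script) would cover both:

    # Assumption: curl and ollama are not installed by the unchanged lines.
    RUN apt-get update && \
        apt-get install -y --no-install-recommends curl ca-certificates && \
        rm -rf /var/lib/apt/lists/*
    RUN curl -fsSL https://ollama.com/install.sh | sh

Also note that the first boot downloads several GB of model weights before Streamlit ever binds port 8501, while the healthcheck is already probing it. Docker's `--start-period` option gives the container a grace window; the values here are illustrative:

    HEALTHCHECK --start-period=300s --interval=30s --timeout=5s --retries=3 \
        CMD curl --fail http://localhost:8501/_stcore/health || exit 1

Finally, Dockerfile comments are only recognized at the start of a line, so the trailing `# Ollama's API will be accessible on all interfaces` on the ENV line is passed to the instruction as arguments rather than ignored; moving that comment onto its own line is safer.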