Hashii1729 committed on
Commit
c86d940
·
1 Parent(s): 9a2f1ba

Enhance Dockerfile and start script: add debug information for Ollama installation and improve error handling for server startup and model pulling

Browse files
Files changed (2) hide show
  1. Dockerfile +8 -1
  2. start.sh +39 -2
Dockerfile CHANGED
@@ -35,6 +35,13 @@ ENV OLLAMA_ORIGINS=*
35
  # --- Create Ollama home with permissions ---
36
  RUN mkdir -p $OLLAMA_HOME && chown -R appuser:appuser /app
37
 
 
 
 
 
 
 
 
38
  # --- Expose ports for FastAPI & Ollama ---
39
  EXPOSE 7860
40
  EXPOSE 11434
@@ -43,4 +50,4 @@ EXPOSE 11434
43
  USER appuser
44
 
45
  # --- Start everything ---
46
- CMD ["bash", "/app/start.sh"]
 
35
  # --- Create Ollama home with permissions ---
36
  RUN mkdir -p $OLLAMA_HOME && chown -R appuser:appuser /app
37
 
38
+ # --- Debug: Check Ollama installation ---
39
+ RUN echo "=== Ollama Installation Check ===" && \
40
+ which ollama || echo "ollama not in PATH" && \
41
+ ls -la /usr/local/bin/ollama || echo "ollama not in /usr/local/bin/" && \
42
+ ls -la /usr/bin/ollama || echo "ollama not in /usr/bin/" && \
43
+ echo "================================="
44
+
45
  # --- Expose ports for FastAPI & Ollama ---
46
  EXPOSE 7860
47
  EXPOSE 11434
 
50
  USER appuser
51
 
52
  # --- Start everything ---
53
+ CMD ["/app/start.sh"]
start.sh CHANGED
@@ -1,7 +1,33 @@
1
  #!/bin/bash
2
 
 
 
 
 
 
 
 
 
 
 
 
3
  # Start Ollama server in background
4
- ollama serve &
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
5
 
6
  # Wait for Ollama to start
7
  echo "Waiting for Ollama server to start..."
@@ -13,7 +39,18 @@ echo "Ollama server started!"
13
 
14
  # Pull the LLaVA model for vision analysis
15
  echo "Pulling LLaVA model for vision analysis..."
16
- ollama pull llava:7b
 
 
 
 
 
 
 
 
 
 
 
17
 
18
  echo "Model pulled successfully!"
19
 
 
1
  #!/bin/bash
2
 
3
+ # Debug information
4
+ echo "=== Debug Information ==="
5
+ echo "Current user: $(whoami)"
6
+ echo "Current directory: $(pwd)"
7
+ echo "Home directory: $HOME"
8
+ echo "Ollama home: $OLLAMA_HOME"
9
+ echo "PATH: $PATH"
10
+ echo "Which ollama: $(which ollama)"
11
+ echo "Ollama version: $(ollama --version 2>&1 || echo 'Failed to get version')"
12
+ echo "========================="
13
+
14
  # Start Ollama server in background
15
+ echo "Starting Ollama server..."
16
+
17
+ # Try different ways to start Ollama in case of PATH issues
18
+ if command -v ollama >/dev/null 2>&1; then
19
+ echo "Using ollama from PATH"
20
+ ollama serve &
21
+ elif [ -f "/usr/local/bin/ollama" ]; then
22
+ echo "Using ollama from /usr/local/bin/"
23
+ /usr/local/bin/ollama serve &
24
+ elif [ -f "/usr/bin/ollama" ]; then
25
+ echo "Using ollama from /usr/bin/"
26
+ /usr/bin/ollama serve &
27
+ else
28
+ echo "ERROR: Could not find ollama binary!"
29
+ exit 1
30
+ fi
31
 
32
  # Wait for Ollama to start
33
  echo "Waiting for Ollama server to start..."
 
39
 
40
  # Pull the LLaVA model for vision analysis
41
  echo "Pulling LLaVA model for vision analysis..."
42
+
43
+ # Use the same approach for pulling models
44
+ if command -v ollama >/dev/null 2>&1; then
45
+ ollama pull llava:7b
46
+ elif [ -f "/usr/local/bin/ollama" ]; then
47
+ /usr/local/bin/ollama pull llava:7b
48
+ elif [ -f "/usr/bin/ollama" ]; then
49
+ /usr/bin/ollama pull llava:7b
50
+ else
51
+ echo "ERROR: Could not find ollama binary for pulling model!"
52
+ exit 1
53
+ fi
54
 
55
  echo "Model pulled successfully!"
56