black44 committed on
Commit
ce895e3
·
verified ·
1 Parent(s): 2dacdf9

Update Dockerfile

Browse files
Files changed (1) hide show
  1. Dockerfile +11 -7
Dockerfile CHANGED
@@ -7,14 +7,14 @@ ENV PYTHONDONTWRITEBYTECODE=1 \
7
  HF_HOME=/app/.cache \
8
  TRANSFORMERS_VERBOSITY=error
9
 
10
- # Create directories with proper permissions (FIXED TYPO HERE)
11
  WORKDIR /app
12
  RUN mkdir -p /app/.cache /app/models && \
13
- chmod -R 777 /app/.cache /app/models # <-- Fixed path from /app.models to /app/models
14
 
15
- # Install system dependencies
16
  RUN apt-get update && apt-get install -y --no-install-recommends \
17
- libsndfile1 ffmpeg && \
18
  rm -rf /var/lib/apt/lists/*
19
 
20
  # Install Python dependencies
@@ -22,17 +22,21 @@ COPY requirements.txt .
22
  RUN pip install --no-cache-dir -U pip && \
23
  pip install --no-cache-dir -r requirements.txt
24
 
25
- # Pre-download ALL models (TTS + sentiment)
26
  RUN python -c "from transformers import AutoTokenizer, AutoProcessor, BarkModel; \
27
  model_name = 'suno/bark-small'; \
 
28
  tokenizer = AutoTokenizer.from_pretrained(model_name); \
29
  processor = AutoProcessor.from_pretrained(model_name); \
30
  model = BarkModel.from_pretrained(model_name); \
31
  tokenizer.save_pretrained('/app/models/suno-bark'); \
32
  processor.save_pretrained('/app/models/suno-bark'); \
33
- model.save_pretrained('/app/models/suno-bark')" && \
34
- python -c "from transformers import AutoModelForSequenceClassification, AutoTokenizer; \
 
 
35
  model_name = 'cardiffnlp/twitter-xlm-roberta-base-sentiment'; \
 
36
  tokenizer = AutoTokenizer.from_pretrained(model_name); \
37
  model = AutoModelForSequenceClassification.from_pretrained(model_name); \
38
  tokenizer.save_pretrained('/app/models/sentiment'); \
 
7
  HF_HOME=/app/.cache \
8
  TRANSFORMERS_VERBOSITY=error
9
 
10
+ # Create directories with proper permissions
11
  WORKDIR /app
12
  RUN mkdir -p /app/.cache /app/models && \
13
+ chmod -R 777 /app/.cache /app/models
14
 
15
+ # Install system dependencies (ADDED ca-certificates)
16
  RUN apt-get update && apt-get install -y --no-install-recommends \
17
+ libsndfile1 ffmpeg ca-certificates && \
18
  rm -rf /var/lib/apt/lists/*
19
 
20
  # Install Python dependencies
 
22
  RUN pip install --no-cache-dir -U pip && \
23
  pip install --no-cache-dir -r requirements.txt
24
 
25
+ # Pre-download TTS model
26
  RUN python -c "from transformers import AutoTokenizer, AutoProcessor, BarkModel; \
27
  model_name = 'suno/bark-small'; \
28
+ print(f'Downloading {model_name}...'); \
29
  tokenizer = AutoTokenizer.from_pretrained(model_name); \
30
  processor = AutoProcessor.from_pretrained(model_name); \
31
  model = BarkModel.from_pretrained(model_name); \
32
  tokenizer.save_pretrained('/app/models/suno-bark'); \
33
  processor.save_pretrained('/app/models/suno-bark'); \
34
+ model.save_pretrained('/app/models/suno-bark')"
35
+
36
+ # Pre-download sentiment model (SEPARATED INTO ITS OWN STEP)
37
+ RUN python -c "from transformers import AutoModelForSequenceClassification, AutoTokenizer; \
38
  model_name = 'cardiffnlp/twitter-xlm-roberta-base-sentiment'; \
39
+ print(f'Downloading {model_name}...'); \
40
  tokenizer = AutoTokenizer.from_pretrained(model_name); \
41
  model = AutoModelForSequenceClassification.from_pretrained(model_name); \
42
  tokenizer.save_pretrained('/app/models/sentiment'); \