Updated Dockerfile for Hugging Face Spaces deployment
- Dockerfile +1 -1
- models/nllb.py +7 -0
Dockerfile
CHANGED
@@ -47,7 +47,7 @@ ENV LD_LIBRARY_PATH=${CUDA_HOME}/lib64:${LD_LIBRARY_PATH}
 # Set environment variable for NeMo cache directory
 ENV NEMO_NLP_TMP=/app/.cache
 
-# Create cache directory
+# Create cache directory and set permissions
 RUN mkdir -p /app/.cache && chmod -R 777 /app/.cache
 
 # Copy the setup script and requirements file into the container
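Hugging Face Spaces containers typically run as a non-root user, which is why the Dockerfile pre-creates /app/.cache and opens it up with chmod 777 before any model download happens. A minimal runtime sanity check along the same lines (not part of this repo; the check_cache_writable helper below is hypothetical) could look like this:

import os
import tempfile

def check_cache_writable(paths=("/app/.cache", "/app/.cache/huggingface")):
    """Confirm each cache directory exists and is writable by the current user."""
    for path in paths:
        os.makedirs(path, exist_ok=True)       # mirror the Dockerfile's mkdir -p
        with tempfile.NamedTemporaryFile(dir=path):
            pass                               # raises PermissionError if the chmod step was missed
        print(f"cache OK: {path}")

if __name__ == "__main__":
    check_cache_writable()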
models/nllb.py
CHANGED
@@ -19,8 +19,15 @@ def nllb():
     #device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
     device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
     # Load the tokenizer and model
+    # Set Hugging Face cache directory
     os.environ['HF_HOME'] = '/app/.cache/huggingface'
     os.environ['TRANSFORMERS_CACHE'] = '/app/.cache/huggingface'
+
+    # Create cache directory if it doesn't exist and set permissions
+    os.makedirs('/app/.cache/huggingface', exist_ok=True)
+    os.chmod('/app/.cache/huggingface', 0o777)
+
+    # Load models
     tokenizer = AutoTokenizer.from_pretrained("facebook/nllb-200-distilled-1.3B")
     model = AutoModelForSeq2SeqLM.from_pretrained("facebook/nllb-200-distilled-1.3B").to(device)
     # write done to the file named status.txt
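For reference, once nllb() has loaded the tokenizer and model from the cache configured above, translation with NLLB follows the usual transformers pattern: set a source language on the tokenizer and force the target-language token at generation time. A short usage sketch, not taken from this repo (the eng_Latn/fra_Latn codes and the example sentence are illustrative):

import torch
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
tokenizer = AutoTokenizer.from_pretrained("facebook/nllb-200-distilled-1.3B", src_lang="eng_Latn")
model = AutoModelForSeq2SeqLM.from_pretrained("facebook/nllb-200-distilled-1.3B").to(device)

inputs = tokenizer("The cache directory must be writable.", return_tensors="pt").to(device)
output_ids = model.generate(
    **inputs,
    forced_bos_token_id=tokenizer.convert_tokens_to_ids("fra_Latn"),  # token id of the target language
    max_length=64,
)
print(tokenizer.batch_decode(output_ids, skip_special_tokens=True)[0])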