Update src/streamlit_app.py
src/streamlit_app.py CHANGED (+1 / -1)
```diff
@@ -28,7 +28,7 @@ def load_llm():
         cache_dir='./hf_cache'
     )
 
-    with st.spinner("Chargement du modèle LLM en mémoire
+    with st.spinner("Chargement du modèle LLM en mémoire...."):
         # ✅ MODIFICATION : n_gpu_layers=0 car nous utilisons le CPU gratuit.
         llm = Llama(model_path=model_path, n_gpu_layers=0, n_ctx=4096, verbose=False, chat_format="llama-3")
     return llm
```
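For context, here is a minimal sketch of what the full `load_llm()` helper might look like after this change. It assumes the GGUF weights are fetched with `huggingface_hub.hf_hub_download` into `./hf_cache` (matching the `cache_dir` context line above) and that the loader is wrapped in `st.cache_resource`; the `repo_id` and `filename` values are placeholders, not taken from the commit. The spinner message is French for "Loading the LLM model into memory....".

```python
# Hypothetical reconstruction of load_llm(); repo_id/filename are placeholders.
import streamlit as st
from huggingface_hub import hf_hub_download
from llama_cpp import Llama

@st.cache_resource  # load the model only once per Streamlit process
def load_llm():
    # Download the GGUF weights into a local cache directory
    # (assumed call, consistent with the cache_dir='./hf_cache' context line).
    model_path = hf_hub_download(
        repo_id="YOUR_USER/YOUR_MODEL-GGUF",  # placeholder
        filename="model.Q4_K_M.gguf",         # placeholder
        cache_dir='./hf_cache'
    )

    with st.spinner("Chargement du modèle LLM en mémoire...."):
        # n_gpu_layers=0: run entirely on CPU (free Spaces hardware);
        # n_ctx=4096: context window; chat_format="llama-3": prompt template.
        llm = Llama(model_path=model_path, n_gpu_layers=0, n_ctx=4096,
                    verbose=False, chat_format="llama-3")
    return llm
```

With `st.cache_resource`, subsequent reruns of the app reuse the already-loaded `Llama` instance instead of reloading the model on every interaction.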