Update app.py
app.py
CHANGED
@@ -8,6 +8,8 @@ from llama_index.core import Document
 from llama_index.llms.llama_cpp import LlamaCPP
 from llama_index.core.node_parser import SemanticSplitterNodeParser
 
+from llama_index.core.base.llms.base import BaseLLM
+
 # ✅ For LOCAL embedding via transformers
 from transformers import AutoTokenizer, AutoModel
 import torch
@@ -74,6 +76,8 @@ async def chunk_text(data: ChunkRequest):
     def get_text_embedding(self, text: str):
         return get_embedding(text)
 
+assert isinstance(llm, BaseLLM), "❌ This LLM is not compatible with Settings.llm"
+
 # ✅ New configuration (⚠️ stop using ServiceContext)
 Settings.llm = llm
 Settings.embed_model = SimpleEmbedding()
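For context, the diff exposes only a few lines of app.py. Below is a minimal, self-contained sketch of how the pieces could fit together. Several details are assumptions, not taken from the commit: the embedding checkpoint name and mean-pooling strategy inside get_embedding, the LlamaCPP model path, and the choice to have SimpleEmbedding subclass llama_index's documented BaseEmbedding (the diff shows only a bare get_text_embedding method, not the class's base).

import torch
from transformers import AutoTokenizer, AutoModel
from llama_index.core import Settings
from llama_index.core.base.llms.base import BaseLLM
from llama_index.core.embeddings import BaseEmbedding
from llama_index.llms.llama_cpp import LlamaCPP

# Assumed checkpoint -- the diff does not show which model app.py loads.
EMBED_MODEL_NAME = "sentence-transformers/all-MiniLM-L6-v2"
tokenizer = AutoTokenizer.from_pretrained(EMBED_MODEL_NAME)
model = AutoModel.from_pretrained(EMBED_MODEL_NAME)

def get_embedding(text: str) -> list[float]:
    # Assumed pooling: mean over the last hidden state, one vector per text.
    inputs = tokenizer(text, return_tensors="pt", truncation=True)
    with torch.no_grad():
        output = model(**inputs)
    return output.last_hidden_state.mean(dim=1).squeeze().tolist()

class SimpleEmbedding(BaseEmbedding):
    # llama_index's documented way to plug in a custom embedding is to
    # subclass BaseEmbedding and implement the underscore-prefixed hooks;
    # the commit only shows a public get_text_embedding, so this is a guess.
    def _get_text_embedding(self, text: str) -> list[float]:
        return get_embedding(text)

    def _get_query_embedding(self, query: str) -> list[float]:
        return get_embedding(query)

    async def _aget_query_embedding(self, query: str) -> list[float]:
        return get_embedding(query)

# Placeholder path; the diff does not show how llm is constructed.
llm = LlamaCPP(model_path="models/model.gguf")

# The guard added by this commit: fail fast if llm is not a llama_index LLM.
assert isinstance(llm, BaseLLM), "❌ This LLM is not compatible with Settings.llm"

# Settings replaces the deprecated ServiceContext for global configuration.
Settings.llm = llm
Settings.embed_model = SimpleEmbedding()

The isinstance guard presumably exists to turn an opaque startup failure into an explicit one: Settings.llm only accepts llama_index LLM objects, so checking against BaseLLM before assignment surfaces an incompatible wrapper immediately rather than later at query time.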