add models

- .gitattributes +2 -0
- .gitignore +2 -2
- .github/workflows/deploy.yaml +34 -0
- Dockerfile +29 -0
- app.py +6 -15
- rag_model.py +168 -0
- requirements._extendedtxt +42 -0
- requirements.txt +6 -5
.gitattributes
ADDED
@@ -0,0 +1,2 @@
+models/Nous-Hermes-2-Mistral-7B-DPO.Q4_K_M.gguf filter=lfs diff=lfs merge=lfs -text
+models/phi-2.Q4_K_M.gguf filter=lfs diff=lfs merge=lfs -text
.gitignore
CHANGED
@@ -1,6 +1,6 @@
 # Files and folders to ignore
 llamavenv/
-models/
-
+#models/
+#*.gguf
 __pycache__/
 *.pyc
.github/workflows/deploy.yaml
ADDED
@@ -0,0 +1,34 @@
+name: 🚀 Deploy to Hugging Face Spaces
+
+on:
+  push:
+    branches:
+      - main
+
+jobs:
+  deploy:
+    runs-on: ubuntu-latest
+
+    steps:
+      - name: 📥 Checkout repository
+        uses: actions/checkout@v4
+        with:
+          fetch-depth: 0  # 🔧 Important: full clone
+
+      - name: 📦 Setup Git LFS
+        run: |
+          git lfs install
+          git lfs pull  # ✅ Download the LFS objects before pushing
+
+      - name: 🛠️ Setup Git config
+        run: |
+          git config --global user.email "[email protected]"
+          git config --global user.name "GitHub Actions Bot"
+
+      - name: 🔑 Authenticate and push to Hugging Face
+        env:
+          HF_TOKEN: ${{ secrets.HF_TOKEN }}
+        run: |
+          git remote add hf https://rkonan:${HF_TOKEN}@huggingface.co/spaces/rkonan/chatbot-fr
+          git fetch hf
+          git push hf +HEAD:main
Dockerfile
ADDED
@@ -0,0 +1,29 @@
+# Base Python image with CPU support
+FROM python:3.10-slim
+
+# System dependencies required by llama-cpp-python
+RUN apt-get update && apt-get install -y \
+    build-essential \
+    cmake \
+    libopenblas-dev \
+    libsqlite3-dev \
+    git \
+    && rm -rf /var/lib/apt/lists/*
+
+# Set the working directory
+WORKDIR /code
+
+# Copy the dependency file
+COPY requirements.txt .
+
+# Install the Python dependencies
+RUN pip install --no-cache-dir -r requirements.txt
+
+# Copy all the code into the image
+COPY . .
+
+# Set the port (for FastAPI or Streamlit)
+EXPOSE 7860
+
+# Startup command (adapt to your app: Streamlit, FastAPI, ...)
+CMD ["streamlit", "run", "app.py", "--server.port=7860", "--server.address=0.0.0.0"]
app.py
CHANGED
@@ -1,17 +1,16 @@
 import streamlit as st
 from llama_cpp import Llama
 import os
+from rag_model import RAGEngine
 
 st.set_page_config(page_title="Chatbot RAG local",page_icon="🤖")
 
 @st.cache_resource
-def
-    model_path="models/
-
-    raise FileNotFoundError(f"Modèle non trouvé: {model_path}")
-    return Llama(model_path=model_path,n_ctx=2048,n_threads=4)
+def load_rag_engine():
+    rag = RAGEngine(model_path="models/Nous-Hermes-2-Mistral-7B-DPO.Q4_K_M.gguf")
+    return rag
 
-
+rag=load_rag_engine()
 
 st.title("🤖 Chatbot LLM Local (CPU)")
 
@@ -19,14 +18,6 @@ user_input=st.text_area("Posez votre question :", height=100)
 
 if st.button("Envoyer") and user_input.strip():
     with st.spinner("Génération en cours..."):
-
-        #full_prompt = f"Question: {user_input.strip()}\nAnswer:"
-        #output = llm(full_prompt, max_tokens=100, stop=["Question:", "Answer:", "\n\n"])
-        output = llm(full_prompt, max_tokens=150, stop=["### Instruction:"])
-        #output = llm(full_prompt, max_tokens=80)
-        #response = output["choices"][0]["text"]
-        response = output["choices"][0]["text"].strip()
-        response = response.split("### Instruction:")[0].strip()
-
+        response = rag.ask(user_input,mode="docling")
         st.markdown("**Réponse :**")
         st.success(response)
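The new app.py renders the whole answer at once via rag.ask(). For token-by-token display in the UI, a variant along the following lines could be wired in later; this is only a sketch and assumes RAGEngine.ask_stream is changed to yield text chunks instead of printing them to stdout.

# Sketch only: assumes RAGEngine.ask_stream is modified to `yield` each text chunk
# (the committed version prints chunks to stdout and returns nothing).
import streamlit as st
from rag_model import RAGEngine

@st.cache_resource
def load_rag_engine():
    return RAGEngine(model_path="models/Nous-Hermes-2-Mistral-7B-DPO.Q4_K_M.gguf")

rag = load_rag_engine()
user_input = st.text_area("Posez votre question :", height=100)

if st.button("Envoyer") and user_input.strip():
    # st.write_stream consumes a generator and renders the tokens as they arrive
    st.write_stream(rag.ask_stream(user_input, mode="docling"))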
rag_model.py
ADDED
@@ -0,0 +1,168 @@
+import os
+import pickle
+import textwrap
+import logging
+from typing import Dict, List
+
+import faiss
+import numpy as np
+from llama_cpp import Llama
+from llama_index.core import VectorStoreIndex
+from llama_index.core.schema import TextNode
+from llama_index.vector_stores.faiss import FaissVectorStore
+from llama_index.embeddings.huggingface import HuggingFaceEmbedding
+from sentence_transformers.util import cos_sim
+
+# === Logger configuration ===
+logger = logging.getLogger("RAGEngine")
+logger.setLevel(logging.INFO)
+handler = logging.StreamHandler()
+formatter = logging.Formatter("[%(asctime)s] %(levelname)s - %(message)s")
+handler.setFormatter(formatter)
+logger.addHandler(handler)
+
+MAX_TOKENS = 512
+
+class RAGEngine:
+    def __init__(self, model_path: str, vector_modes: List[str] = ["docling"], model_threads: int = 4):
+        logger.info("📦 Initialisation du moteur RAG...")
+        self.llm = Llama(model_path=model_path, n_ctx=2048, n_threads=model_threads)
+        self.embed_model = HuggingFaceEmbedding(model_name="sentence-transformers/all-MiniLM-L6-v2")
+        self.indexes: Dict[str, Dict] = {}
+
+        for mode in vector_modes:
+            vectordir = f"vectordb_{mode}" if mode != "sentence" else "vectordb"
+            index_file = os.path.join(vectordir, "index.faiss")
+            chunks_file = os.path.join(vectordir, "chunks.pkl")
+
+            logger.info(f"📂 Chargement des données vectorielles pour le mode '{mode}' depuis {vectordir}")
+            with open(chunks_file, "rb") as f:
+                chunk_texts = pickle.load(f)
+            nodes = [TextNode(text=chunk) for chunk in chunk_texts]
+
+            faiss_index = faiss.read_index(index_file)
+            vector_store = FaissVectorStore(faiss_index=faiss_index)
+            index = VectorStoreIndex(nodes=nodes, embed_model=self.embed_model, vector_store=vector_store)
+
+            self.indexes[mode] = {
+                "nodes": nodes,
+                "index": index
+            }
+
+        logger.info("✅ Moteur RAG initialisé avec succès.")
+
+    def reformulate_question(self, question: str) -> str:
+        logger.info("🔁 Reformulation de la question (sans contexte)...")
+        prompt = f"""Tu es un assistant expert chargé de clarifier des questions floues.
+
+Transforme la question suivante en une question claire, explicite et complète, sans ajouter d'informations extérieures.
+
+Question floue : {question}
+Question reformulée :"""
+        output = self.llm(prompt, max_tokens=128, stop=["\n"], stream=False)
+        reformulated = output["choices"][0]["text"].strip()
+        logger.info(f"📝 Reformulée : {reformulated}")
+        return reformulated
+
+    def reformulate_with_context(self, question: str, context_sample: str) -> str:
+        logger.info("🔁 Reformulation de la question avec contexte...")
+        prompt = f"""Tu es un assistant expert en machine learning. Ton rôle est de reformuler les questions utilisateur en tenant compte du contexte ci-dessous, extrait d’un rapport technique sur un projet de reconnaissance de maladies de plantes.
+
+Ta mission est de transformer une question vague ou floue en une question précise et adaptée au contenu du rapport. Ne donne pas une interprétation hors sujet. Ne reformule pas en termes de produits commerciaux.
+
+Contexte :
+{context_sample}
+
+Question initiale : {question}
+Question reformulée :"""
+        output = self.llm(prompt, max_tokens=128, stop=["\n"], stream=False)
+        reformulated = output["choices"][0]["text"].strip()
+        logger.info(f"📝 Reformulée avec contexte : {reformulated}")
+        return reformulated
+
+    def get_adaptive_top_k(self, question: str) -> int:
+        q = question.lower()
+        if len(q.split()) <= 7:
+            top_k = 8
+        elif any(w in q for w in ["liste", "résume", "quels sont", "explique", "comment"]):
+            top_k = 10
+        else:
+            top_k = 8
+        logger.info(f"🔢 top_k déterminé automatiquement : {top_k}")
+        return top_k
+
+    def rerank_nodes(self, question: str, retrieved_nodes, top_k: int = 3):
+        logger.info(f"🔍 Re-ranking des {len(retrieved_nodes)} chunks pour la question : « {question} »")
+        q_emb = self.embed_model.get_query_embedding(question)
+        scored_nodes = []
+
+        for node in retrieved_nodes:
+            chunk_text = node.get_content()
+            chunk_emb = self.embed_model.get_text_embedding(chunk_text)
+            score = cos_sim(q_emb, chunk_emb).item()
+            scored_nodes.append((score, node))
+
+        ranked_nodes = sorted(scored_nodes, key=lambda x: x[0], reverse=True)
+
+        logger.info("📊 Chunks les plus pertinents :")
+        for i, (score, node) in enumerate(ranked_nodes[:top_k]):
+            chunk_preview = textwrap.shorten(node.get_content().replace("\n", " "), width=100)
+            logger.info(f"#{i+1} | Score: {score:.4f} | {chunk_preview}")
+
+        return [n for _, n in ranked_nodes[:top_k]]
+
+    def retrieve_context(self, question: str, mode: str, top_k: int = 3):
+        logger.info(f"📥 Récupération du contexte pour le mode « {mode} »...")
+        retriever = self.indexes[mode]["index"].as_retriever(similarity_top_k=top_k)
+        retrieved_nodes = retriever.retrieve(question)
+        reranked_nodes = self.rerank_nodes(question, retrieved_nodes, top_k)
+        context = "\n\n".join(n.get_content()[:500] for n in reranked_nodes)
+        return context, reranked_nodes
+
+    def ask(self, question_raw: str, mode: str = "docling") -> str:
+        logger.info(f"💬 Question reçue : {question_raw}")
+        if len(question_raw.split()) <= 3:
+            context_sample, _ = self.retrieve_context(question_raw, mode, 3)
+            reformulated = self.reformulate_with_context(question_raw, context_sample)
+        else:
+            reformulated = self.reformulate_question(question_raw)
+
+        print(f"📝 Question reformulée : {reformulated}")
+        question = reformulated
+        top_k = self.get_adaptive_top_k(question)
+        context, _ = self.retrieve_context(question, mode, top_k)
+
+        prompt = f"""### Instruction: En te basant uniquement sur le contexte ci-dessous, réponds à la question de manière précise et en français.
+
+Si la réponse ne peut pas être déduite du contexte, indique : "Information non présente dans le contexte."
+
+Contexte :
+{context}
+
+Question : {question}
+### Réponse:"""
+
+        output = self.llm(prompt, max_tokens=MAX_TOKENS, stop=["### Instruction:"], stream=False)
+        response = output["choices"][0]["text"].strip().split("###")[0]
+        logger.info(f"🧠 Réponse générée : {response[:120]}{'...' if len(response) > 120 else ''}")
+        return response
+
+    def ask_stream(self, question: str, mode: str = "docling"):
+        logger.info(f"💬 [Stream] Question reçue : {question}")
+        top_k = self.get_adaptive_top_k(question)
+        context, _ = self.retrieve_context(question, mode, top_k)
+
+        prompt = f"""### Instruction: En te basant uniquement sur le contexte ci-dessous, réponds à la question de manière précise et en français.
+
+Si la réponse ne peut pas être déduite du contexte, indique : "Information non présente dans le contexte."
+
+Contexte :
+{context}
+
+Question : {question}
+### Réponse:"""
+
+        logger.info("📡 Début du streaming de la réponse...")
+        stream = self.llm(prompt, max_tokens=MAX_TOKENS, stop=["### Instruction:"], stream=True)
+        for chunk in stream:
+            print(chunk["choices"][0]["text"], end="", flush=True)
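For a quick check of the new RAGEngine outside the Streamlit app, a minimal sketch like the one below could be used. It assumes the GGUF model under models/ and a vectordb_docling/ directory containing index.faiss and chunks.pkl already exist; the question string is purely illustrative.

# Sketch: exercise the RAG engine without the Streamlit UI.
# Assumes models/Nous-Hermes-2-Mistral-7B-DPO.Q4_K_M.gguf and
# vectordb_docling/{index.faiss, chunks.pkl} exist next to this script.
from rag_model import RAGEngine

if __name__ == "__main__":
    engine = RAGEngine(
        model_path="models/Nous-Hermes-2-Mistral-7B-DPO.Q4_K_M.gguf",
        vector_modes=["docling"],
    )
    # Illustrative question; any French question about the indexed report works the same way.
    answer = engine.ask("Quelles sont les principales étapes du projet ?", mode="docling")
    print(answer)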
requirements._extendedtxt
ADDED
@@ -0,0 +1,42 @@
+# === CORE RAG with LlamaIndex ===
+llama-index==0.13.0
+llama-index-core==0.13.0
+llama-index-cli==0.5.0
+llama-index-embeddings-huggingface==0.6.0
+llama-index-vector-stores-faiss==0.5.0
+llama-index-readers-file==0.5.0
+
+# === DOCLING INTEGRATION ===
+docling==2.43.0
+docling-core==2.44.1
+docling-ibm-models==3.9.0
+docling-parse==4.1.0
+llama-index-readers-llama-parse==0.5.0
+
+# === Embeddings & Models ===
+sentence-transformers==5.0.0
+transformers==4.54.1
+torch==2.7.1
+
+# === Vector Store ===
+faiss-cpu==1.11.0.post1
+
+# === LLM Backend (local or OpenAI) ===
+llama_cpp_python==0.3.14
+openai==1.98.0
+
+# === Document Parsing (optional, recommended) ===
+PyMuPDF==1.26.3        # PDF parsing
+pypdf==5.9.0           # Alternative PDF
+python-docx==1.2.0     # DOCX
+python-pptx==1.0.2     # PowerPoint
+markdown-it-py==3.0.0  # Markdown
+
+# === App / Interface ===
+streamlit==1.47.1
+
+# === Utilities ===
+pandas==2.2.3
+tqdm==4.67.1
+pydantic==2.11.7
+typing-extensions==4.14.1
requirements.txt
CHANGED
@@ -1,6 +1,7 @@
-
-llama-cpp-python
-faiss-cpu
+llama-cpp-python>=0.2.60
 sentence-transformers
-
-
+llama-index>=0.10.20
+faiss-cpu
+streamlit
+numpy
+scikit-learn