Commit: f1fc29e
Parent(s): 763e3e7
chatbot updated
Files changed:
- app.py (+18 -46)
- chatbot/chatbot.py (+0 -250)
- chatbot/requirements.txt (+0 -2)
app.py
CHANGED
@@ -34,46 +34,12 @@ import json
 # -----------------------------------------------------------------------------
 # Chatbot integration
 #
-#
-
-
-
-
-
-
-from backend.services.codingo_chatbot import get_response as _codingo_get_response
-
-
-def get_chatbot_response(user_input: str) -> str:
-    from llama_cpp import Llama
-
-    # Load model once
-    global llm
-    if 'llm' not in globals():
-        llm = Llama(
-            model_path="/tmp/llama_models/tinyllama-1.1b-chat-v1.0.Q4_K_M.gguf",
-            n_ctx=2048,
-            n_threads=8,
-            n_gpu_layers=20  # GPU acceleration if available
-        )
-
-    # Format prompt in TinyLlama's chat style
-    prompt = f"<|system|>\nYou are LUNA, a helpful assistant for the Codingo recruitment platform.\n<|user|>\n{user_input}\n<|assistant|>\n"
-
-    # Generate response with safe parameters
-    output = llm(
-        prompt,
-        max_tokens=256,
-        temperature=0.3,  # lower temperature for stability
-        top_p=0.9,
-        repeat_penalty=1.1,
-        stop=["</s>"]
-    )
-
-    reply = output["choices"][0]["text"].strip()
-    if not reply:
-        reply = "I'm here to help you with Codingo. Could you please rephrase your question?"
-    return reply
+# Import the chatbot module functions
+from backend.services.codingo_chatbot import (
+    get_response as _codingo_get_response,
+    init_embedder_and_db,
+    init_llm
+)
 
 # Initialize Flask app
 app = Flask(
@@ -199,8 +165,8 @@ def chatbot_endpoint():
         if not user_input:
             return jsonify({"error": "Empty message"}), 400
 
-        #
-        reply = get_chatbot_response(user_input)
+        # Use the imported function from codingo_chatbot module
+        reply = _codingo_get_response(user_input)
         return jsonify({"response": reply})
 
     except Exception as exc:
@@ -372,11 +338,17 @@ if __name__ == '__main__':
     # Pre-initialize chatbot on startup for faster first response
     print("Initializing chatbot...")
    try:
-
-
-        print("
+        # Initialize the embedder and database
+        init_embedder_and_db()
+        print("Embedder and database initialized")
+
+        # Initialize the LLM (this will download the model if needed)
+        init_llm()
+        print("LLM initialized successfully")
     except Exception as e:
-        print(f"Chatbot initialization
+        print(f"Chatbot initialization error: {e}")
+        import traceback
+        traceback.print_exc()
 
     # Use port from environment or default to 7860
     port = int(os.environ.get('PORT', 7860))
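
Note: the backend/services/codingo_chatbot module that app.py now delegates to is not part of this commit. Below is a minimal sketch of the interface the diff implies, assuming the module wraps the same llama-cpp-python / TinyLlama setup that the deleted inline code used. Only the three imported names (get_response, init_embedder_and_db, init_llm) are confirmed by the import in app.py; the bodies are illustrative assumptions.

    # backend/services/codingo_chatbot.py -- hypothetical sketch, not part of this commit.
    # Bodies below are assumptions modeled on the deleted inline code in app.py.
    _llm = None         # lazily-initialized llama-cpp model handle
    _embedder = None    # lazily-initialized sentence-transformers embedder
    _collection = None  # lazily-initialized Chroma collection

    def init_embedder_and_db() -> None:
        """Assumed: build the embedder and in-memory vector DB once at startup."""
        global _embedder, _collection
        if _embedder is not None and _collection is not None:
            return
        from sentence_transformers import SentenceTransformer
        import chromadb
        _embedder = SentenceTransformer("all-MiniLM-L6-v2")  # model name taken from the deleted chatbot.py
        _collection = chromadb.Client().create_collection("chatbot")

    def init_llm() -> None:
        """Assumed: load the GGUF model via llama-cpp-python (download step elided)."""
        global _llm
        if _llm is None:
            from llama_cpp import Llama
            _llm = Llama(
                model_path="/tmp/llama_models/tinyllama-1.1b-chat-v1.0.Q4_K_M.gguf",  # path from the old app.py
                n_ctx=2048,
            )

    def get_response(user_input: str) -> str:
        """Assumed single entry point used by the chatbot endpoint."""
        init_embedder_and_db()
        init_llm()
        prompt = (
            "<|system|>\nYou are LUNA, a helpful assistant for the Codingo "
            f"recruitment platform.\n<|user|>\n{user_input}\n<|assistant|>\n"
        )
        output = _llm(prompt, max_tokens=256, temperature=0.3, stop=["</s>"])
        reply = output["choices"][0]["text"].strip()
        return reply or "I'm here to help you with Codingo. Could you please rephrase your question?"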
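
Once the app is up, the endpoint can be smoke-tested from Python. The "/chatbot" route path and "message" payload key are assumptions here; the diff shows only the handler body and the {"response": ...} reply shape.

    # Hypothetical smoke test for the chatbot endpoint.
    import requests

    resp = requests.post(
        "http://localhost:7860/chatbot",   # 7860 is the PORT default in app.py
        json={"message": "What is Codingo?"},
        timeout=120,                       # first call may wait on model load
    )
    print(resp.status_code, resp.json())   # expect {"response": "..."} on success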
chatbot/chatbot.py
DELETED
@@ -1,250 +0,0 @@
-# codingo/chatbot/chatbot.py
-"""Interactive chatbot using Microsoft Phi-2 for efficient, quality responses"""
-
-import os
-import shutil
-from typing import List
-import torch
-import re
-
-os.environ.setdefault("HF_HOME", "/tmp/huggingface")
-os.environ.setdefault("TRANSFORMERS_CACHE", "/tmp/huggingface/transformers")
-os.environ.setdefault("HUGGINGFACE_HUB_CACHE", "/tmp/huggingface/hub")
-
-_model = None
-_tokenizer = None
-_chatbot_embedder = None
-_chatbot_collection = None
-_knowledge_chunks = []
-
-_current_dir = os.path.dirname(os.path.abspath(__file__))
-_knowledge_base_path = os.path.join(_current_dir, "chatbot.txt")
-_chroma_db_dir = "/tmp/chroma_db"
-
-# Phi-2: 2.7B params, great performance, fits easily on T4
-MODEL_NAME = "microsoft/phi-2"
-
-def _init_model():
-    global _model, _tokenizer
-    if _model is not None and _tokenizer is not None:
-        return
-
-    print("Loading Phi-2 model...")
-    from transformers import AutoModelForCausalLM, AutoTokenizer
-
-    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
-    print(f"Using device: {device}")
-
-    # Load tokenizer
-    tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME, trust_remote_code=True)
-    tokenizer.pad_token = tokenizer.eos_token
-
-    # Load model
-    model = AutoModelForCausalLM.from_pretrained(
-        MODEL_NAME,
-        torch_dtype=torch.float16,
-        device_map="auto",
-        trust_remote_code=True
-    )
-    model.eval()
-
-    _model = model
-    _tokenizer = tokenizer
-    print("Phi-2 loaded successfully!")
-
-def _init_vector_store():
-    global _chatbot_embedder, _chatbot_collection, _knowledge_chunks
-    if _chatbot_embedder is not None and _chatbot_collection is not None:
-        return
-
-    print("Initializing vector store...")
-    from langchain.text_splitter import RecursiveCharacterTextSplitter
-    from sentence_transformers import SentenceTransformer
-    import chromadb
-    from chromadb.config import Settings
-
-    # Load knowledge base
-    try:
-        with open(_knowledge_base_path, encoding="utf-8") as f:
-            raw_text = f.read()
-        print(f"Loaded knowledge base: {len(raw_text)} characters")
-    except FileNotFoundError:
-        print("Knowledge base not found!")
-        raw_text = "Codingo is an AI recruitment platform."
-
-    # Split into chunks
-    splitter = RecursiveCharacterTextSplitter(chunk_size=400, chunk_overlap=50)
-    docs = [doc.strip() for doc in splitter.split_text(raw_text) if doc.strip()]
-    _knowledge_chunks = docs  # Store for reference
-
-    # Create embeddings
-    embedder = SentenceTransformer("all-MiniLM-L6-v2")
-    embeddings = embedder.encode(docs, show_progress_bar=False)
-
-    # Create ChromaDB collection (in-memory)
-    client = chromadb.Client(Settings(anonymized_telemetry=False, is_persistent=False))
-
-    try:
-        client.delete_collection("chatbot")
-    except:
-        pass
-
-    collection = client.create_collection("chatbot")
-    ids = [f"doc_{i}" for i in range(len(docs))]
-    collection.add(documents=docs, embeddings=embeddings.tolist(), ids=ids)
-
-    _chatbot_embedder = embedder
-    _chatbot_collection = collection
-    print(f"Vector store ready with {len(docs)} chunks!")
-
-def extract_faq_answer(query: str, docs: List[str]) -> str:
-    """Try to find direct FAQ answers"""
-    query_lower = query.lower()
-
-    for doc in docs:
-        # Look for Q&A patterns
-        if "Q:" in doc and "A:" in doc:
-            lines = doc.split('\n')
-            for i, line in enumerate(lines):
-                if line.strip().startswith('Q:'):
-                    question = line[2:].strip().lower()
-                    # Check similarity
-                    if any(word in question for word in query_lower.split() if len(word) > 3):
-                        # Find the answer
-                        for j in range(i+1, min(i+5, len(lines))):
-                            if lines[j].strip().startswith('A:'):
-                                return lines[j][2:].strip()
-    return None
-
-def get_chatbot_response(query: str) -> str:
-    try:
-        if not query or not query.strip():
-            return "Hello! I'm LUNA AI, your Codingo assistant. I can help you with questions about our AI recruitment platform, job matching, CV tips, and more!"
-
-        print(f"\nProcessing: '{query}'")
-
-        # Clear GPU cache
-        if torch.cuda.is_available():
-            torch.cuda.empty_cache()
-
-        # Initialize
-        _init_vector_store()
-        _init_model()
-
-        # Search for relevant context
-        query_embedding = _chatbot_embedder.encode([query])[0]
-        results = _chatbot_collection.query(
-            query_embeddings=[query_embedding.tolist()],
-            n_results=3
-        )
-
-        retrieved_docs = results.get("documents", [[]])[0] if results else []
-        print(f"Found {len(retrieved_docs)} relevant chunks")
-
-        # Try to find FAQ answer first
-        faq_answer = extract_faq_answer(query, retrieved_docs)
-        if faq_answer:
-            print("Found FAQ match!")
-            return faq_answer
-
-        # Build context from retrieved docs
-        context = "\n".join(retrieved_docs[:2]) if retrieved_docs else ""
-
-        # Create an instruction-following prompt for Phi-2
-        prompt = f"""Instruct: You are LUNA AI, a helpful assistant for Codingo recruitment platform.
-Use the following information to answer the user's question:
-
-{context}
-
-User Question: {query}
-
-Output: Based on the information provided, """
-
-        # Tokenize with appropriate length
-        inputs = _tokenizer(
-            prompt,
-            return_tensors="pt",
-            truncation=True,
-            max_length=800,
-            padding=True
-        )
-        inputs = {k: v.to(_model.device) for k, v in inputs.items()}
-
-        # Generate response
-        with torch.no_grad():
-            outputs = _model.generate(
-                **inputs,
-                max_new_tokens=200,
-                temperature=0.7,
-                do_sample=True,
-                top_p=0.9,
-                repetition_penalty=1.15,
-                pad_token_id=_tokenizer.pad_token_id,
-                eos_token_id=_tokenizer.eos_token_id,
-                early_stopping=True
-            )
-
-        # Decode response
-        full_response = _tokenizer.decode(outputs[0], skip_special_tokens=True)
-
-        # Extract only the generated part
-        response = full_response.split("Output:")[-1].strip()
-
-        # Clean up common artifacts
-        response = response.replace("Based on the information provided,", "").strip()
-
-        # Remove the original prompt if it appears
-        if prompt in response:
-            response = response.replace(prompt, "").strip()
-
-        # Ensure quality response
-        if len(response) < 20 or response.lower() == query.lower():
-            # Generate a contextual response
-            query_lower = query.lower()
-
-            if "hello" in query_lower or "hi" in query_lower:
-                return "Hello! Welcome to Codingo! I'm LUNA AI, here to help you navigate our AI-powered recruitment platform. You can ask me about creating profiles, job matching, improving your CV, or any of our features!"
-
-            elif "what" in query_lower and "codingo" in query_lower:
-                return "Codingo is an innovative AI-driven recruitment platform that transforms how companies hire and how candidates find jobs. We use advanced algorithms to match skills with opportunities, provide instant CV feedback, and streamline the entire hiring process."
-
-            elif "how" in query_lower and ("work" in query_lower or "use" in query_lower):
-                return "Here's how Codingo works: As a candidate, you create a profile, upload your resume, and add portfolio links. Our AI then analyzes your skills and matches you with suitable jobs. You'll receive personalized recommendations and CV improvement tips. For employers, we offer smart candidate filtering and automated screening insights!"
-
-            elif "feature" in query_lower or "special" in query_lower:
-                return "What makes Codingo special is our combination of AI-powered job matching, real-time CV analysis, bias-aware algorithms, and focus on tech professionals. We support various roles from developers to designers, making the hiring process smarter, faster, and fairer for everyone."
-
-            else:
-                # Use context to create a response
-                if retrieved_docs:
-                    return f"Let me help you with that! {retrieved_docs[0][:250]}..."
-                else:
-                    return "I'd be happy to help you learn more about Codingo! Could you please ask about specific features like job matching, CV tips, supported job types, or how our platform works?"
-
-        print(f"Generated: {response[:100]}...")
-        return response
-
-    except Exception as e:
-        print(f"Error: {e}")
-        import traceback
-        traceback.print_exc()
-        return "I apologize for the technical issue. Please feel free to ask me about Codingo's features, job matching process, or how to get started!"
-
-# Test the chatbot
-if __name__ == "__main__":
-    print("Testing Codingo Chatbot...")
-    test_queries = [
-        "Hello!",
-        "What is Codingo?",
-        "How does it work?",
-        "What job types do you support?",
-        "How can I improve my match score?",
-        "Is Codingo free?",
-        "Tell me about CV tips"
-    ]
-
-    for q in test_queries:
-        response = get_chatbot_response(q)
-        print(f"\nUser: {q}")
-        print(f"LUNA: {response}")
-        print("-" * 80)
chatbot/requirements.txt
DELETED
@@ -1,2 +0,0 @@
-flask
-flask-cors