husseinelsaadi committed
Commit 987f59c · 1 Parent(s): f1fc29e
backend/services/codingo_chatbot.py CHANGED
@@ -6,22 +6,7 @@ This module encapsulates the logic for Codingo's website chatbot. It
 loads a knowledge base from ``chatbot/chatbot.txt``, builds a vector
 database using Chroma and SentenceTransformers, and uses a local LLM
 powered by ``llama‑cpp‑python`` to generate answers constrained to the
-retrieved context. The code is written to initialise all heavy
-resources lazily on first use and to cache them for subsequent
-requests. This prevents repeated model downloads and avoids
-recomputing embeddings for every chat query.
-
-The underlying LLM is the TinyLlama 1.1B chat model distributed via
-Hugging Face in GGUF format. When the model file is not present
-locally it is downloaded automatically using ``huggingface_hub``.
-Depending on the environment the model will run on GPU if CUDA is
-available or fall back to CPU otherwise. See the ``init_llm``
-function for details.
-
-Note: This module deliberately contains no references to OpenAI. It
-relies solely on open‑source libraries available on PyPI (such as
-``llama‑cpp‑python`` and ``chromadb``) so that it can be used on
-Hugging Face Spaces without requiring proprietary API keys.
+retrieved context.
 """
 
 from __future__ import annotations
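The deleted docstring above describes behaviour the module still relies on: every heavy resource (embedder, vector DB, LLM) is created lazily on first use, guarded by a lock, and cached in module-level globals. A minimal sketch of that double-checked-locking pattern, with a hypothetical stand-in for the heavy object:

    import threading

    _lock = threading.Lock()
    _resource = None  # cached singleton (model, DB handle, ...)


    def get_resource():
        """Create the shared resource on first call, then reuse it."""
        global _resource
        if _resource is not None:      # fast path: already initialised
            return _resource
        with _lock:                    # serialise first-time initialisation
            if _resource is not None:  # re-check: another thread may have won
                return _resource
            _resource = object()       # stand-in for an expensive load
        return _resource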
@@ -41,80 +26,54 @@ from huggingface_hub import hf_hub_download
 try:
     from llama_cpp import Llama  # type: ignore
 except Exception as exc:  # pragma: no cover - import may fail until dependency installed
-    # Provide a helpful error if llama_cpp isn't installed.
     raise ImportError(
         "llama_cpp is required for the chatbot. Please add 'llama-cpp-python' "
         "to your requirements.txt"
     ) from exc
 
-# ---------------------------------------------------------------------------
 # Configuration
-#
-# Compute the absolute path to the chatbot knowledge base. We derive this
-# relative to this file so that the module works regardless of the working
-# directory. The project structure places ``chatbot.txt`` at
-# ``Codingo12/chatbot/chatbot.txt``.
 PROJECT_ROOT = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
 CHATBOT_TXT_PATH = os.path.join(PROJECT_ROOT, "chatbot", "chatbot.txt")
-
-# Directory where Chroma will persist its database. This location is
-# writable on both local machines and Hugging Face Spaces. It is
-# intentionally distinct from the web app instance path to avoid
-# permission issues.
 CHROMA_DB_DIR = os.path.join("/tmp", "chatbot_chroma")
 
-# Settings for the TinyLlama model. These can be overridden via
-# environment variables if desired (for example to switch to a
-# different quantisation or to test with a smaller model). See
-# https://huggingface.co/TheBloke/TinyLlama-1.1B-Chat-v1.0-GGUF for
-# available filenames.
-LLAMA_REPO = os.getenv(
-    "LLAMA_REPO",
-    "TheBloke/TinyLlama-1.1B-Chat-v1.0-GGUF",
-)
-LLAMA_FILE = os.getenv(
-    "LLAMA_FILE",
-    "tinyllama-1.1b-chat-v1.0.Q4_K_M.gguf",
-)
-
-# Local directory where the GGUF model file will be stored. Using
-# ``/tmp`` avoids writing into the read‑only repository filesystem on
-# Hugging Face Spaces. The directory will be created as needed.
+# TinyLlama model settings
+LLAMA_REPO = os.getenv("LLAMA_REPO", "TheBloke/TinyLlama-1.1B-Chat-v1.0-GGUF")
+LLAMA_FILE = os.getenv("LLAMA_FILE", "tinyllama-1.1b-chat-v1.0.Q4_K_M.gguf")
 LLAMA_LOCAL_DIR = os.path.join("/tmp", "llama_models")
 
-# Generation parameters. These values mirror those used in the
-# provided Jupyter notebook. They can be tweaked via environment
-# variables if necessary to trade off quality against speed.
-MAX_TOKENS = int(os.getenv("LLAMA_MAX_TOKENS", "256"))
-TEMPERATURE = float(os.getenv("LLAMA_TEMPERATURE", "0.7"))
+# Generation parameters - adjusted for better responses
+MAX_TOKENS = int(os.getenv("LLAMA_MAX_TOKENS", "512"))
+TEMPERATURE = float(os.getenv("LLAMA_TEMPERATURE", "0.3"))
 TOP_P = float(os.getenv("LLAMA_TOP_P", "0.9"))
-REPEAT_PENALTY = float(os.getenv("LLAMA_REPEAT_PENALTY", "1.15"))
+REPEAT_PENALTY = float(os.getenv("LLAMA_REPEAT_PENALTY", "1.1"))
 
-# Thread lock to guard lazy initialisation in multi‑threaded Flask
-# environments. Without this lock multiple concurrent requests may
-# attempt to download the model or populate the database at the same
-# time, leading to redundant work or race conditions.
+# Thread lock and globals
 _init_lock = threading.Lock()
-
-# Global singletons for embedder, vector collection and LLM. These
-# variables are populated on first use and reused thereafter.
 _embedder: SentenceTransformer | None = None
 _collection: chromadb.Collection | None = None
 _llm: Llama | None = None
 
 
 def _load_chatbot_text() -> str:
-    """Read the chatbot knowledge base from disk.
-
-    If the file is missing, a small default description of Codingo is
-    returned. This ensures the chatbot still provides a sensible
-    answer rather than crashing.
-    """
+    """Read the chatbot knowledge base from disk."""
     try:
         with open(CHATBOT_TXT_PATH, encoding="utf-8") as f:
-            return f.read()
+            content = f.read()
+            # Clean up the content to avoid meta-descriptions
+            # Remove any lines that look like instructions about the chatbot
+            lines = content.split('\n')
+            cleaned_lines = []
+            for line in lines:
+                # Skip lines that describe what the chatbot does
+                if any(phrase in line.lower() for phrase in [
+                    'the chatbot', 'this bot', 'the bot provides',
+                    'chatbot provides', 'chatbot is used for',
+                    'official chatbot of'
+                ]):
+                    continue
+                cleaned_lines.append(line)
+            return '\n'.join(cleaned_lines)
     except FileNotFoundError:
-        # Fallback content if the knowledge base file is missing
         return (
             "Codingo is an AI‑powered recruitment platform designed to "
            "streamline job applications, candidate screening and hiring. "
@@ -124,42 +83,30 @@ def _load_chatbot_text() -> str:
 
 
 def init_embedder_and_db() -> None:
-    """Initialise the SentenceTransformer embedder and Chroma vector DB.
-
-    This function is idempotent: if the embedder and collection are
-    already initialised it returns immediately. Otherwise it reads
-    ``chatbot.txt``, splits it into overlapping chunks, computes
-    embeddings and persists them to a Chroma collection. The
-    resulting ``SentenceTransformer`` and collection objects are saved
-    in global variables for later reuse.
-    """
+    """Initialize the SentenceTransformer embedder and Chroma vector DB."""
     global _embedder, _collection
     if _embedder is not None and _collection is not None:
         return
     with _init_lock:
         if _embedder is not None and _collection is not None:
             return
-        # Ensure persistence directory exists
+
         os.makedirs(CHROMA_DB_DIR, exist_ok=True)
-
-        # Read knowledge base
         text = _load_chatbot_text()
-
-        # Split into chunks; use double newlines to prefer splitting on
-        # paragraph boundaries. Overlap helps the model maintain
-        # context across neighbouring chunks.
+
+        # Split into chunks
         splitter = RecursiveCharacterTextSplitter(
-            chunk_size=300,
+            chunk_size=500,  # Increased for better context
             chunk_overlap=100,
-            separators=["\n\n"],
+            separators=["\n\n", "\n", ". ", " "],
         )
         docs: List[str] = [doc.strip() for doc in splitter.split_text(text) if doc.strip()]
-
-        # Initialise embedder (MiniLM). We specify device via env.
+
+        # Initialize embedder
        embedder = SentenceTransformer("sentence-transformers/all-MiniLM-L6-v2")
        embeddings = embedder.encode(docs, show_progress_bar=False, batch_size=32)
-
-        # Initialise Chroma client
+
+        # Initialize Chroma
        client = chromadb.Client(
            Settings(
                persist_directory=CHROMA_DB_DIR,
@@ -167,15 +114,11 @@ def init_embedder_and_db() -> None:
                 is_persistent=True,
             )
         )
-
-        # Create or get collection. This returns an existing collection if
-        # already present on disk.
+
+        # Create or get collection
         collection = client.get_or_create_collection("codingo_chatbot")
-
-        # Populate collection only if empty. A naive call to
-        # ``collection.get(limit=1)`` may raise if the collection does
-        # not exist yet, so we catch any exception and treat it as an
-        # empty DB. Distances are stored as cosine similarity.
+
+        # Populate if empty
         need_populate = False
         try:
             existing = collection.get(limit=1)
@@ -183,137 +126,145 @@ def init_embedder_and_db() -> None:
             need_populate = True
         except Exception:
             need_populate = True
+
         if need_populate:
             ids = [f"doc_{i}" for i in range(len(docs))]
             collection.add(documents=docs, embeddings=embeddings.tolist(), ids=ids)
+
         _embedder = embedder
         _collection = collection
 
 
 def init_llm() -> None:
-    """Initialise the llama‑cpp model for response generation.
-
-    This function lazily downloads the GGUF model from Hugging Face if
-    necessary and instantiates a ``llama_cpp.Llama`` object. The
-    resulting instance is stored in the global ``_llm`` variable. To
-    control GPU usage set the ``CUDA_VISIBLE_DEVICES`` environment
-    variable or override ``LLAMA_N_GPU_LAYERS``. By default we use one
-    GPU layer when CUDA is available, otherwise the model runs on CPU.
-    """
+    """Initialize the llama‑cpp model for response generation."""
     global _llm
     if _llm is not None:
         return
     with _init_lock:
         if _llm is not None:
             return
-        # Ensure the model directory exists
+
         os.makedirs(LLAMA_LOCAL_DIR, exist_ok=True)
-        # Download model if not already present
         local_path = os.path.join(LLAMA_LOCAL_DIR, LLAMA_FILE)
+
         if not os.path.exists(local_path):
-            # The file will be downloaded to LLAMA_LOCAL_DIR. Use
-            # ``local_dir_use_symlinks=False`` to avoid creating
-            # symlinks that may break on certain filesystems.
             local_path = hf_hub_download(
                 repo_id=LLAMA_REPO,
                 filename=LLAMA_FILE,
                 local_dir=LLAMA_LOCAL_DIR,
                 local_dir_use_symlinks=False,
             )
-        # Determine GPU usage. We default to one GPU layer if CUDA
-        # appears available. Users can override via LLAMA_N_GPU_LAYERS.
+
+        # GPU configuration
         try:
-            import torch  # type: ignore
+            import torch
             use_cuda = torch.cuda.is_available()
         except Exception:
             use_cuda = False
-        n_gpu_layers_env = os.getenv("LLAMA_N_GPU_LAYERS")
-        if n_gpu_layers_env:
-            try:
-                n_gpu_layers = int(n_gpu_layers_env)
-            except ValueError:
-                n_gpu_layers = 0
-        else:
-            n_gpu_layers = 1 if use_cuda else 0
-        # Construct the Llama instance. The context window is set
-        # generously to 2048 tokens; adjust via LLAMA_N_CTX if needed.
+
+        n_gpu_layers = int(os.getenv("LLAMA_N_GPU_LAYERS", "35" if use_cuda else "0"))
         n_ctx = int(os.getenv("LLAMA_N_CTX", "2048"))
-        # Use half the available CPU cores for inference threads to
-        # balance responsiveness and resource use.
-        try:
-            n_threads = max(1, os.cpu_count() // 2)
-        except Exception:
-            n_threads = 2
+        n_threads = max(1, os.cpu_count() // 2) if os.cpu_count() else 4
+
         _llm = Llama(
             model_path=local_path,
             n_ctx=n_ctx,
             n_threads=n_threads,
             n_gpu_layers=n_gpu_layers,
+            verbose=False,  # Reduce logging
         )
 
 
 def _build_prompt(query: str, context: str) -> str:
-    """Construct the full prompt for the TinyLlama chat model.
-
-    The prompt format follows the conventions used by the model as
-    illustrated in the provided notebook. We include a system message
-    instructing the model to answer only using the given context and to
-    politely decline if the information is unavailable.
-    """
+    """Construct a natural prompt for the TinyLlama chat model."""
+    # Use a more direct, conversational system prompt
     system_prompt = (
-        "You are the official chatbot of Codingo. "
-        "Answer ONLY by using the CONTEXT. "
-        "If the information is not available for you, say it politely."
-    )
-    prompt = (
-        f"<|system|>\n{system_prompt}</s>\n"
-        f"<|user|>\n{query}\n\nCONTEXTE:\n{context}</s>\n"
-        f"<|assistant|>\n"
+        "You are LUNA, a friendly AI assistant for the Codingo recruitment platform. "
+        "Answer questions naturally and conversationally. Use the provided information "
+        "to give helpful, direct answers. Keep responses concise and relevant."
     )
+
+    # Build the prompt with context integrated naturally
+    if context:
+        prompt = (
+            f"<|system|>\n{system_prompt}</s>\n"
+            f"<|user|>\nContext: {context}\n\n"
+            f"Question: {query}</s>\n"
+            f"<|assistant|>\n"
+        )
+    else:
+        prompt = (
+            f"<|system|>\n{system_prompt}</s>\n"
+            f"<|user|>\n{query}</s>\n"
+            f"<|assistant|>\n"
+        )
+
     return prompt
 
 
-def get_response(query: str, k: int = 3, score_threshold: float = 2.0) -> str:
-    """Return a chatbot response for the given query.
-
-    This function performs the following steps:
-
-    1. Ensures the embedder, vector database and LLM are initialised.
-    2. Embeds the user's query and retrieves the top ``k`` most
-       similar documents from the Chroma collection.
-    3. Filters out documents whose cosine distance exceeds
-       ``score_threshold`` (larger distances indicate less similarity).
-    4. Builds a prompt containing the user query and the concatenated
-       relevant context.
-    5. Feeds the prompt to the TinyLlama model and returns its
-       response, trimming trailing whitespace.
-
-    If no relevant context is found, a fallback message is returned.
-    """
+def get_response(query: str, k: int = 3, score_threshold: float = 1.5) -> str:
+    """Return a chatbot response for the given query."""
     if not query or not query.strip():
-        return "Please type a question about the Codingo platform."
+        return "Hi! I'm LUNA, your Codingo assistant. How can I help you today?"
+
     init_embedder_and_db()
     init_llm()
+
     assert _embedder is not None and _collection is not None and _llm is not None
-    # Embed query and search collection
+
+    # Handle greetings directly
+    greetings = ['hi', 'hello', 'hey', 'good morning', 'good afternoon', 'good evening']
+    if query.lower().strip() in greetings:
+        return "Hello! I'm LUNA, your AI assistant for Codingo. How can I help you with our recruitment platform today?"
+
+    # Embed query and search
     query_vector = _embedder.encode([query])[0]
     results = _collection.query(query_embeddings=[query_vector.tolist()], n_results=k)
+
     docs = results.get("documents", [[]])[0] if results else []
     distances = results.get("distances", [[]])[0] if results else []
-    # Filter by score
+
+    # Filter by score (lower threshold for better matching)
     relevant: List[str] = [d for d, s in zip(docs, distances) if s < score_threshold]
+
     if not relevant:
-        return "Sorry, I don't have enough information to answer that question."
-    context = "\n\n".join(relevant)
+        # Provide a helpful response even without specific context
+        return (
+            "I don't have specific information about that in my knowledge base. "
+            "However, I can tell you that Codingo is an AI-powered recruitment platform "
+            "that helps with job applications, candidate screening, and hiring. "
+            "Would you like to know more about our features?"
+        )
+
+    # Join context with better formatting
+    context = " ".join(relevant[:2])  # Use top 2 most relevant chunks
     prompt = _build_prompt(query, context)
-    # Generate completion
+
+    # Generate response with better parameters
    output = _llm(
        prompt,
        max_tokens=MAX_TOKENS,
        temperature=TEMPERATURE,
        top_p=TOP_P,
        repeat_penalty=REPEAT_PENALTY,
-        stop=["</s>"]
+        stop=["</s>", "<|user|>", "<|system|>"],
+        echo=False,
    )
+
+    # Extract and clean the response
    text = output["choices"][0]["text"].strip()
-    return text or "I'm here to answer your questions about Codingo. What would you like to know?"
+
+    # Remove any meta-descriptions that might have leaked through
+    lines = text.split('\n')
+    cleaned_lines = []
+    for line in lines:
+        if any(phrase in line.lower() for phrase in [
+            'the chatbot', 'this bot', 'the bot provides',
+            'in response to', 'overall,'
+        ]):
+            continue
+        cleaned_lines.append(line)
+
+    text = '\n'.join(cleaned_lines).strip()
+
+    return text or "I'm here to help you with Codingo. Could you please rephrase your question?"
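For reference, the new _build_prompt renders the TinyLlama (Zephyr-style) chat template as shown below; the query and context are invented purely to illustrate the token layout:

    from backend.services.codingo_chatbot import _build_prompt

    print(_build_prompt(
        "How does job matching work?",
        "Smart Job Matching: Our AI analyzes your skills ...",
    ))
    # Expected shape of the output:
    # <|system|>
    # You are LUNA, a friendly AI assistant for the Codingo recruitment platform. ...</s>
    # <|user|>
    # Context: Smart Job Matching: Our AI analyzes your skills ...
    #
    # Question: How does job matching work?</s>
    # <|assistant|>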
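get_response is the only function the rest of the application needs to call; a minimal sketch of wiring it into a Flask route (the endpoint name and JSON shape are hypothetical, chosen only to show the call):

    from flask import Flask, jsonify, request

    from backend.services.codingo_chatbot import get_response

    app = Flask(__name__)

    @app.route("/api/chatbot", methods=["POST"])  # hypothetical endpoint
    def chatbot_endpoint():
        data = request.get_json(silent=True) or {}
        message = data.get("message", "")
        # get_response lazily initialises the embedder, Chroma DB and LLM
        # on the first call, then reuses the cached singletons.
        reply = get_response(message)
        return jsonify({"response": reply})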
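One behavioural change worth noting: the default score_threshold drops from 2.0 to 1.5. Chroma's query returns a distance per document, where a smaller value means a closer match, so the lower threshold discards weaker matches before they reach the prompt. A self-contained sketch of that filtering step, with invented numbers:

    # Hypothetical distances returned by collection.query for k=3 neighbours;
    # smaller distance = closer match.
    docs = ["chunk about job matching", "chunk about resumes", "chunk about pricing"]
    distances = [0.42, 1.31, 1.87]

    score_threshold = 1.5  # the new, stricter default
    relevant = [d for d, s in zip(docs, distances) if s < score_threshold]
    print(relevant)  # ['chunk about job matching', 'chunk about resumes']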
chatbot/chatbot.txt CHANGED
@@ -1,38 +1,59 @@
-INTRODUCTION: Codingo is an AI-driven recruitment platform designed to streamline job applications, candidate screening, and hiring. We aim to make hiring smarter, faster, and fairer through automation and intelligent recommendations.
-
-HOW THE PLATFORM WORKS: FOR CANDIDATES: Create a profile with personal and professional details. Upload your resume (PDF or DOC). Add portfolio links (GitHub, LinkedIn, personal website). AI matches you with job postings that suit your skills. Receive instant CV improvement tips. Apply to jobs directly or save for later.
-
-HOW THE PLATFORM WORKS: FOR EMPLOYERS: Post job descriptions with required skills and qualifications. Filter and narrow down candidate lists. Review top-matched applicants. Use the chatbot to answer candidate or hiring questions. Get automated screening insights.
-
-WHAT MAKES CODINGO SPECIAL: AI that understands both tech and language. Real-time CV feedback. Bias-aware algorithms. Built-in content moderation and chatbot filtering. Tailored for developers, designers, and digital professionals.
-
-CV & PORTFOLIO TIPS: Keep resumes under 2 pages. Use action words like "developed", "led", "optimized". Include quantifiable metrics. Tailor your resume to each job. Portfolio should include 3-5 projects with clear documentation.
-
-CHATBOT RULES & LIMITATIONS: Only answers Codingo-related questions. Refuses to respond to personal, political, or unrelated queries. Does not provide legal or medical advice. Typo tolerance for job-related keywords and tech terms. Responds based on platform content only.
-
-AI MATCHING ENGINE: Uses NLP to match skills. Cosine similarity for job description and profile comparison. Named Entity Recognition for understanding job titles. Intent detection for distinguishing job search vs application help.
-
-SUPPORTED JOB TYPES: Frontend Developer, Backend Developer, Full-Stack Developer, Data Scientist, UI/UX Designer, DevOps Engineer, Product Manager, QA Engineer, Mobile Developer, AI/ML Engineer.
-
-CONTENT POLICY: Professional, unbiased, safe, and aligned with the brand voice. No profanity, hate speech, or collection of personal data. No controversial topics in responses.
-
-SECURITY & PRIVACY: User data is encrypted and stored securely. No data used for advertising. You can delete your data anytime via profile settings.
-
-FREQUENTLY ASKED QUESTIONS:
-Q: How can I improve my match score?
-A: Update your profile with accurate skills and project links. Add keywords from job descriptions.
-
-Q: Can I use the chatbot to write my resume?
-A: The chatbot provides tips, but doesn't generate full resumes.
-
-Q: What if I see an error in my profile?
-A: Edit your profile at any time. For technical issues, contact support.
-
-Q: Is Codingo free?
-A: Profile creation and job applications are free. Premium features may be offered for advanced analytics.
-
-CONTACT & SUPPORT:
-Website: https://www.codingo.ai
-LinkedIn: https://linkedin.com/company/codingo
-Help Center: https://www.codingo.ai/help
+Codingo Platform Overview
+
+Codingo is an innovative AI-powered recruitment platform that revolutionizes the hiring process. We connect talented job seekers with forward-thinking employers through intelligent matching and automation.
+
+Key Features
+
+Smart Job Matching: Our AI analyzes your skills, experience, and preferences to match you with the most suitable job opportunities. The matching algorithm considers technical skills, soft skills, experience level, and career goals.
+
+Automated Resume Parsing: Upload your resume in PDF or DOCX format, and our system automatically extracts relevant information including skills, education, work experience, and contact details.
+
+AI-Powered Interviews: Practice with our AI interviewer LUNA, who conducts realistic interview simulations tailored to specific job roles. Get instant feedback on your responses and improve your interview skills.
+
+Real-Time Application Tracking: Monitor your application status in real-time. Know when recruiters view your profile and track your progress through different stages of the hiring process.
+
+For Job Seekers
+
+Creating Your Profile: Sign up with your email and create a comprehensive profile. Include your skills, experience, education, and career objectives for better matching.
+
+Improving Your Match Score: To get better job matches, ensure your profile is complete with all relevant skills listed. Use keywords from job descriptions you're interested in. Keep your experience section updated with recent projects and achievements.
+
+Applying for Jobs: Browse available positions, view detailed job descriptions, and apply with one click. Your parsed resume data is automatically included with your application.
+
+Interview Preparation: Use our AI interview feature to practice common questions for your target role. The system provides personalized feedback to help you improve.
+
+For Recruiters
+
+Posting Jobs: Create detailed job postings with required skills, experience level, and job descriptions. Our system helps you reach qualified candidates quickly.
+
+Candidate Screening: Our AI automatically screens applications and ranks candidates based on skill match, experience relevance, and other criteria you specify.
+
+Dashboard Analytics: Access comprehensive analytics about your job postings, including view counts, application rates, and candidate quality metrics.
+
+Efficient Hiring: Reduce time-to-hire by up to 60% with our automated screening and ranking system. Focus on interviewing only the most qualified candidates.
+
+Technical Information
+
+Supported File Formats: We accept PDF and DOCX formats for resume uploads. Maximum file size is 10MB.
+
+Skills Matching: Our platform recognizes over 5,000 technical and soft skills across various industries. The matching algorithm uses semantic understanding to identify related skills.
+
+Privacy and Security: All user data is encrypted and stored securely. We never share personal information without explicit consent.
+
+Platform Availability: Codingo is accessible via web browser on desktop and mobile devices. We recommend Chrome, Firefox, or Safari for the best experience.
+
+Getting Started
+
+New Users: Click "Sign Up" on the homepage. Choose whether you're a job seeker or recruiter. Complete your profile with accurate information.
+
+Job Search: Use filters to narrow down opportunities by location, salary range, experience level, and required skills.
+
+Application Tips: Tailor your profile to each application. Highlight relevant experience and skills that match the job requirements.
+
+Support and Help
+
+Contact Support: Reach out to our support team at [email protected] for technical issues or questions about the platform.
+
+FAQs: Visit our FAQ section for answers to common questions about using Codingo effectively.
+
+Best Practices: Regular profile updates increase visibility. Respond promptly to recruiter messages. Use professional language in all communications.