import os
import sys

# Hugging Face safe cache
os.environ["HF_HOME"] = "/tmp/huggingface"
os.environ["TRANSFORMERS_CACHE"] = "/tmp/huggingface/transformers"
os.environ["HUGGINGFACE_HUB_CACHE"] = "/tmp/huggingface/hub"

# Force Flask instance path to a writable temporary folder
safe_instance_path = "/tmp/flask_instance"

# Create the safe instance path before the Flask app is constructed
os.makedirs(safe_instance_path, exist_ok=True)

from flask import Flask, render_template, redirect, url_for, flash, request, jsonify
from flask_login import LoginManager, login_required, current_user
from werkzeug.utils import secure_filename

from datetime import datetime

# Adjust sys.path for import flexibility
current_dir = os.path.dirname(os.path.abspath(__file__))
sys.path.append(current_dir)

# Import and initialize DB
from backend.models.database import db, Job, Application, init_db
from backend.models.user import User
from backend.routes.auth import auth_bp, handle_resume_upload

# Import the resume parsing helper. This module contains lightweight
# heuristics for extracting information from PDF and DOCX files without
# relying on heavy external libraries. See
# ``codingo/backend/services/resume_parser.py`` for details.
from backend.services.resume_parser import parse_resume as _parse_resume_helper

from backend.routes.interview_api import interview_api

# Import additional utilities
import re
import json

# -----------------------------------------------------------------------------
# Chatbot integration
#
# Import the chatbot module functions
from backend.services.codingo_chatbot import (
    get_response as _codingo_get_response,
    init_embedder_and_db,
    init_llm
)

# Initialize Flask app
app = Flask(
    __name__,
    static_folder='backend/static',
    static_url_path='/static',
    template_folder='backend/templates',
    instance_path=safe_instance_path
)

app.config['SECRET_KEY'] = 'saadi'

# Cookie configuration for Hugging Face Spaces
app.config['SESSION_COOKIE_SAMESITE'] = 'None'
app.config['SESSION_COOKIE_SECURE'] = True
app.config['REMEMBER_COOKIE_SAMESITE'] = 'None'
app.config['REMEMBER_COOKIE_SECURE'] = True

# Configure the database connection
app.config['SQLALCHEMY_DATABASE_URI'] = os.getenv("DATABASE_URL")
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False

# Create necessary directories in writable locations
os.makedirs('/tmp/static/audio', exist_ok=True)
os.makedirs('/tmp/temp', exist_ok=True)

# Initialize DB with app
init_db(app)

# Flask-Login setup
login_manager = LoginManager()
login_manager.login_view = 'auth.login'
login_manager.init_app(app)


@login_manager.user_loader
def load_user(user_id):
    return db.session.get(User, int(user_id))


# Register blueprints
app.register_blueprint(auth_bp)
app.register_blueprint(interview_api, url_prefix="/api")


# Routes
@app.route('/')
def index():
    return render_template('index.html')


@app.route('/jobs')
def jobs():
    all_jobs = Job.query.order_by(Job.date_posted.desc()).all()
    return render_template('jobs.html', jobs=all_jobs)


@app.route('/job/<int:job_id>')
def job_detail(job_id):
    job = Job.query.get_or_404(job_id)
    return render_template('job_detail.html', job=job)


@app.route('/apply/<int:job_id>', methods=['GET', 'POST'])
@login_required
def apply(job_id):
    job = Job.query.get_or_404(job_id)
    if request.method == 'POST':
        file = request.files.get('resume')
        features, error, filepath = handle_resume_upload(file)
        if error:
            flash("Resume upload failed. Please try again.", "danger")
            return render_template('apply.html', job=job)

        def parse_entries(raw_value: str):
            entries = []
            if raw_value:
                for item in re.split(r'[\n,;]+', raw_value):
                    item = item.strip()
                    if item:
                        entries.append(item)
            return entries

        skills_input = request.form.get('skills', '')
        experience_input = request.form.get('experience', '')
        education_input = request.form.get('education', '')

        manual_features = {
            "skills": parse_entries(skills_input),
            "experience": parse_entries(experience_input),
            "education": parse_entries(education_input)
        }

        application = Application(
            job_id=job_id,
            user_id=current_user.id,
            name=current_user.username,
            email=current_user.email,
            resume_path=filepath,
            extracted_features=json.dumps(manual_features)
        )
        db.session.add(application)
        db.session.commit()

        flash('Your application has been submitted successfully!', 'success')
        return redirect(url_for('jobs'))

    return render_template('apply.html', job=job)


@app.route('/my_applications')
@login_required
def my_applications():
    applications = Application.query.filter_by(
        user_id=current_user.id
    ).order_by(Application.date_applied.desc()).all()
    return render_template('my_applications.html', applications=applications)


# Chatbot API endpoint
@app.route('/chatbot', methods=['POST'])
def chatbot_endpoint():
    """Handle chatbot queries from the frontend."""
    try:
        data = request.get_json(silent=True) or {}
        user_input = str(data.get('message', '')).strip()
        if not user_input:
            return jsonify({"error": "Empty message"}), 400

        # Use the imported function from codingo_chatbot module
        reply = _codingo_get_response(user_input)
        return jsonify({"response": reply})
    except Exception as exc:
        print(f"Chatbot endpoint error: {exc}", file=sys.stderr)
        return jsonify({"error": "I'm having trouble right now. Please try again."}), 500


@app.route('/parse_resume', methods=['POST'])
def parse_resume():
    """
    Parse an uploaded resume (PDF or DOCX) and return extracted information
    in JSON format.

    This endpoint is separate from the main application flow. It saves the
    uploaded file to a temporary location (via ``handle_resume_upload``) so
    that recruiters can review the original document later, then invokes a
    lightweight parser to extract the candidate's name, skills, education
    and experience. Errors during upload or parsing are reported back to
    the client.
    """
    file = request.files.get('resume')
    if not file or file.filename == '':
        return jsonify({"error": "No file uploaded"}), 400

    # Save the file using the existing helper. We ignore the
    # ``features`` return value because ``handle_resume_upload`` no
    # longer parses resumes itself; it simply stores the file and
    # returns the path on disk.
    features, error, filepath = handle_resume_upload(file)
    if error or not filepath:
        return jsonify({"error": "Error processing resume. Please try again."}), 400

    try:
        # Parse the stored file. Pass both the path and the original
        # filename so that the parser can fall back to the filename
        # when inferring the candidate's name.
        parsed = _parse_resume_helper(filepath, file.filename)
    except Exception as exc:
        # Log to stderr for debugging
        print(f"Resume parsing error: {exc}", file=sys.stderr)
        return jsonify({"error": "Failed to parse resume"}), 500

    # Normalise the response to ensure string values for the form
    response = {
        'name': parsed.get('name', ''),
        'skills': parsed.get('skills', ''),
        'education': parsed.get('education', ''),
        'experience': parsed.get('experience', '')
    }
    return jsonify(response), 200


@app.route("/interview/<int:job_id>")
@login_required
def interview_page(job_id):
    job = Job.query.get_or_404(job_id)
    application = Application.query.filter_by(
        user_id=current_user.id,
        job_id=job_id
    ).first()
    if not application or not application.extracted_features:
        flash("Please apply for this job and upload your resume first.", "warning")
        return redirect(url_for('job_detail', job_id=job_id))

    cv_data = json.loads(application.extracted_features)
    return render_template("interview.html", job=job, cv=cv_data)


@app.route('/post_job', methods=['GET', 'POST'])
@login_required
def post_job():
    if current_user.role not in ('recruiter', 'admin'):
        flash('You do not have permission to post jobs.', 'warning')
        return redirect(url_for('jobs'))

    if request.method == 'POST':
        role_title = request.form.get('role', '').strip()
        description = request.form.get('description', '').strip()
        seniority = request.form.get('seniority', '').strip()
        skills_input = request.form.get('skills', '').strip()
        company = request.form.get('company', '').strip()
        # New field: number of interview questions. Recruiters can specify
        # how many questions the interview should contain. Default to 3 if
        # the value is missing or invalid. See templates/post_job.html for
        # the corresponding input element.
        num_questions_raw = request.form.get('num_questions', '').strip()

        errors = []
        if not role_title:
            errors.append('Job title is required.')
        if not description:
            errors.append('Job description is required.')
        if not seniority:
            errors.append('Seniority level is required.')
        if not skills_input:
            errors.append('Skills are required.')
        if not company:
            errors.append('Company name is required.')

        # Validate number of questions; must be a positive integer. Store
        # errors if the input is provided but invalid. Missing values will
        # fall back to the default of 3.
        num_questions = 3
        if num_questions_raw:
            try:
                parsed_nq = int(num_questions_raw)
                if parsed_nq <= 0:
                    raise ValueError()
                num_questions = parsed_nq
            except ValueError:
                errors.append('Number of interview questions must be a positive integer.')

        if errors:
            for err in errors:
                flash(err, 'danger')
            return render_template('post_job.html')

        skills_list = [s.strip() for s in re.split(r'[\n,;]+', skills_input) if s.strip()]
        skills_json = json.dumps(skills_list)

        new_job = Job(
            role=role_title,
            description=description,
            seniority=seniority,
            skills=skills_json,
            company=company,
            recruiter_id=current_user.id,
            num_questions=num_questions
        )
        db.session.add(new_job)
        db.session.commit()

        flash('Job posted successfully!', 'success')
        return redirect(url_for('jobs'))

    return render_template('post_job.html')


@app.route('/dashboard')
@login_required
def dashboard():
    if current_user.role not in ('recruiter', 'admin'):
        flash('You do not have permission to access the dashboard.', 'warning')
        return redirect(url_for('index'))

    posted_jobs = Job.query.filter_by(recruiter_id=current_user.id).all()
    job_ids = [job.id for job in posted_jobs]

    candidates_with_scores = []
    if job_ids:
        candidate_apps = Application.query.filter(Application.job_id.in_(job_ids)).all()

        def compute_score(application):
            # Rate a candidate by the overlap between their extracted skills
            # and the job's required skills; fall back to a neutral rating
            # when either list is missing or malformed.
            try:
                candidate_features = json.loads(application.extracted_features) if application.extracted_features else {}
                candidate_skills = candidate_features.get('skills', [])
                job_skills = json.loads(application.job.skills) if application.job and application.job.skills else []
                if not job_skills:
                    return ('Medium', 2)
                candidate_set = {s.lower() for s in candidate_skills}
                job_set = {s.lower() for s in job_skills}
                common = candidate_set & job_set
                ratio = len(common) / len(job_set) if job_set else 0
                if ratio >= 0.75:
                    return ('Excellent', 4)
                elif ratio >= 0.5:
                    return ('Good', 3)
                elif ratio >= 0.25:
                    return ('Medium', 2)
                else:
                    return ('Poor', 1)
            except Exception:
                return ('Medium', 2)

        for app_record in candidate_apps:
            score_label, score_value = compute_score(app_record)
            candidates_with_scores.append({
                'application': app_record,
                'score_label': score_label,
                'score_value': score_value
            })

    candidates_with_scores.sort(key=lambda item: item['score_value'], reverse=True)
    return render_template('dashboard.html', candidates=candidates_with_scores)


if __name__ == '__main__':
    print("Starting Codingo application...")

    # Import torch to check GPU availability
    try:
        import torch
        if torch.cuda.is_available():
            print(f"GPU Available: {torch.cuda.get_device_name(0)}")
            print(f"GPU Memory: {torch.cuda.get_device_properties(0).total_memory / 1024**3:.2f} GB")
        else:
            print("No GPU available, using CPU")
    except ImportError:
        print("PyTorch not installed, chatbot will use CPU")

    with app.app_context():
        db.create_all()

    # Pre-initialize chatbot on startup for faster first response
    print("Initializing chatbot...")
    try:
        # Initialize the embedder and database
        init_embedder_and_db()
        print("Embedder and database initialized")

        # Initialize the LLM (this will download the model if needed)
        init_llm()
        print("LLM initialized successfully")
    except Exception as e:
        print(f"Chatbot initialization error: {e}")
        import traceback
        traceback.print_exc()

    # Use port from environment or default to 7860
    port = int(os.environ.get('PORT', 7860))
    app.run(debug=True, host='0.0.0.0', port=port)
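
# -----------------------------------------------------------------------------
# Local smoke test (sketch)
#
# The /chatbot and /parse_resume endpoints above can be exercised without a
# browser through Flask's built-in test client. The snippet below is a minimal
# sketch, not part of the application: it assumes DATABASE_URL points at a
# reachable database and that the chatbot backend has been initialized. Run it
# manually (for example from a Python shell) or adapt it into a pytest test.
#
#     with app.test_client() as client:
#         # Send a chat message and inspect the JSON reply
#         resp = client.post('/chatbot', json={'message': 'What jobs are open?'})
#         print(resp.status_code, resp.get_json())
#
#         # Upload a resume file for parsing (hypothetical local path)
#         with open('sample_resume.pdf', 'rb') as fh:
#             resp = client.post('/parse_resume', data={'resume': fh})
#             print(resp.status_code, resp.get_json())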