import re

import h5py
import pandas as pd
import pytesseract
import streamlit as st
from pdf2image import convert_from_bytes
from pdfminer.high_level import extract_text
from sklearn.preprocessing import LabelEncoder
from tensorflow.keras.models import load_model
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.preprocessing.text import Tokenizer


def cleanResume(resumeText):
    """Normalize resume text: strip URLs, handles, punctuation, and extra whitespace."""
    resumeText = re.sub(r'http\S+\s*', ' ', resumeText)   # remove URLs
    resumeText = re.sub(r'RT|cc', ' ', resumeText)         # remove RT and cc
    resumeText = re.sub(r'#\S+', '', resumeText)           # remove hashtags
    resumeText = re.sub(r'@\S+', ' ', resumeText)          # remove mentions
    resumeText = re.sub('[%s]' % re.escape(r"""!"#$%&'()*+,-./:;<=>?@[\]^_`{|}~"""), ' ', resumeText)  # remove punctuation
    resumeText = re.sub(r'[^\x00-\x7f]', ' ', resumeText)  # remove non-ASCII characters
    resumeText = re.sub(r'\s+', ' ', resumeText)           # collapse whitespace
    return resumeText


def pdf_to_text(file):
    """Extract text from an uploaded PDF, falling back to OCR for scanned documents."""
    text = extract_text(file)
    if not text.strip():
        # The PDF has no embedded text layer: rewind the upload buffer,
        # rasterize the pages, and OCR each page image.
        file.seek(0)
        images = convert_from_bytes(file.read())
        text = "\n".join(pytesseract.image_to_string(img) for img in images)
    return text


def fix_h5_model():
    """Strip the deprecated 'time_major' argument from the saved model config so
    newer versions of Keras can deserialize it."""
    with h5py.File("deeprank_model_v2.h5", "r+") as f:
        if "model_config" in f.attrs:
            model_config = f.attrs["model_config"]
            # Ensure model_config is a string before replacing
            if isinstance(model_config, bytes):
                model_config = model_config.decode("utf-8")
            updated_config = model_config.replace('"time_major": false', "")
            # Store the updated config back as bytes
            f.attrs.modify("model_config", updated_config.encode("utf-8"))


def load_deeprank_model():
    fix_h5_model()
    return load_model('deeprank_model_v2.h5')


def predict_category(resumes_data, selected_category, max_sequence_length, model, tokenizer, label):
    """Score every resume against every category and rank them by the selected category."""
    resumes_df = pd.DataFrame(resumes_data)
    resumes_text = resumes_df['ResumeText'].values

    # Convert the cleaned resume text into padded integer sequences for the model.
    tokenized_text = tokenizer.texts_to_sequences(resumes_text)
    padded_text = pad_sequences(tokenized_text, maxlen=max_sequence_length)
    predicted_probs = model.predict(padded_text)

    # One probability column per category, named after the original labels.
    for i, category in enumerate(label.classes_):
        resumes_df[category] = predicted_probs[:, i]

    # The highest probability for the selected category ranks first.
    resumes_df_sorted = resumes_df.sort_values(by=selected_category, ascending=False)
    ranks = [{'Rank': rank + 1, 'FileName': row['FileName']}
             for rank, (_, row) in enumerate(resumes_df_sorted.iterrows())]
    return ranks


def main():
    st.title("Resume Ranking App")
    st.write("Upload resumes and select a category to rank them based on their relevance.")

    model = load_deeprank_model()

    # Refit the tokenizer and label encoder on the training dataset so the
    # vocabulary and category indices line up with the saved model weights.
    df = pd.read_csv('UpdatedResumeDataSet.csv')
    df['cleaned'] = df['Resume'].apply(cleanResume)
    label = LabelEncoder()
    df['Category'] = label.fit_transform(df['Category'])

    tokenizer = Tokenizer()
    tokenizer.fit_on_texts(df['cleaned'].values)
    max_sequence_length = 500

    uploaded_files = st.file_uploader("Upload Resumes (PDFs)", type=["pdf"], accept_multiple_files=True)
    if uploaded_files:
        resumes_data = []
        for file in uploaded_files:
            text = cleanResume(pdf_to_text(file))
            resumes_data.append({'ResumeText': text, 'FileName': file.name})

        selected_category = st.selectbox("Select a category to rank by", list(label.classes_))

        if st.button("Rank Resumes"):
            if resumes_data and selected_category:
                ranks = predict_category(resumes_data, selected_category, max_sequence_length, model, tokenizer, label)
                st.write(pd.DataFrame(ranks))
            else:
                st.error("Please upload valid resumes and select a valid category.")


if __name__ == '__main__':
    main()
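# Usage note (a sketch, not part of the original script): assuming this file is
# saved as app.py and that deeprank_model_v2.h5 and UpdatedResumeDataSet.csv sit
# in the working directory, the app can be launched with:
#
#     streamlit run app.py
#
# The OCR fallback additionally requires the poppler system binaries (used by
# pdf2image) and the tesseract binary (used by pytesseract) to be installed.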