david-thrower committed · Commit 23e9332 · 1 Parent(s): 7067897

step 2 is working

Files changed (1)
  1. test-job-app.py +16 -8
test-job-app.py CHANGED
@@ -3,16 +3,17 @@ from transformers import AutoModelForCausalLM, AutoTokenizer
 import torch
 
 # Load the SmolLM model and tokenizer
-model_name = "HuggingFaceTB/SmolLM2-135M-Instruct"  # "HuggingFaceTB/SmolLM2-360m-Instruct"
+# model_name = "HuggingFaceTB/SmolLM2-1.7B-Instruct"
+model_name = "HuggingFaceTB/SmolLM2-360M-Instruct"
 model = AutoModelForCausalLM.from_pretrained(model_name)
 tokenizer = AutoTokenizer.from_pretrained(model_name)
 device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 model.to(device)
 
-def smol_lm_process(job_description):
-    # System Prompt: "Extract key qualifications, skills, and requirements from this job description. Output as bullet points. Remove benefits/salary fluff."
+def smol_lm_process(job_description, system_prompt):
+    # System prompt and job description
     prompt = f"""<|im_start|>system
-Extract key qualifications, skills, and requirements from this job description. Output as bullet points. Remove benefits/salary fluff.<|im_end|>
+{system_prompt}<|im_end|>
 <|im_start|>user
 {job_description}<|im_end|>
 <|im_start|>assistant
@@ -27,12 +28,20 @@ Extract key qualifications, skills, and requirements from this job description.
     return response
 
 def process_job_description(company_name, company_url, job_description):
-    role_requirements = smol_lm_process(job_description)
+    # Step 2: Extract key qualifications, skills, and requirements
+    system_prompt_requirements = "Extract key qualifications, skills, and requirements from this job description. Output as bullet points. Remove benefits/salary and fluff. ONLY INCLUDE INFORMATION THAT TELLS THE USER WHAT SKILLS THE EMPLOYER SEEKS."
+    role_requirements = smol_lm_process(job_description, system_prompt_requirements)
+
+    # Step 3: Create a concise summary of the job description
+    system_prompt_summary = "Create a concise 150-200 word summary of this job description. Remove company bragging and benefits information. FOCUS ON ASPECTS THAT POINT THE USER TO WHAT THE EMPLOYER WANTS FROM A CANDIDATE IN TERMS OF SKILLS, ACCOMPLISHMENTS, AND SO ON."
+    clean_job_description = smol_lm_process(job_description, system_prompt_summary)
+
     return {
         "Company Name": company_name,
         "Company URL": company_url,
-        "Job Description": job_description,
-        "Role Requirements": role_requirements
+        "Original Job Description": job_description,
+        "Role Requirements": role_requirements,
+        "Clean Job Description": clean_job_description
     }
 
 # Create the Gradio app
@@ -52,4 +61,3 @@ with demo:
 
 if __name__ == "__main__":
     demo.launch()
-
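The hunks above skip the unchanged middle of smol_lm_process (old lines 19-26), where the prompt is tokenized, passed to the model, and decoded into response. Below is a minimal, self-contained sketch of how that step and the new system_prompt parameter plausibly fit together, assuming standard transformers tokenize/generate/decode calls; the generation settings (max_new_tokens=512, greedy decoding) and the example inputs are illustrative assumptions, not values taken from this commit.

# Hypothetical sketch of the generation step that the diff leaves unchanged
# inside smol_lm_process; settings and sample inputs are illustrative only.
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch

model_name = "HuggingFaceTB/SmolLM2-360M-Instruct"
model = AutoModelForCausalLM.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)

def smol_lm_process(job_description, system_prompt):
    # ChatML-style prompt: system instruction followed by the user's job description
    prompt = f"""<|im_start|>system
{system_prompt}<|im_end|>
<|im_start|>user
{job_description}<|im_end|>
<|im_start|>assistant
"""
    inputs = tokenizer(prompt, return_tensors="pt").to(device)
    with torch.no_grad():
        outputs = model.generate(
            **inputs,
            max_new_tokens=512,  # illustrative cap, not a value from the commit
            do_sample=False,     # greedy decoding, also an assumption
        )
    # Keep only the tokens generated after the prompt
    response = tokenizer.decode(
        outputs[0][inputs["input_ids"].shape[-1]:],
        skip_special_tokens=True,
    )
    return response

# Example call, mirroring "Step 2" of process_job_description:
print(smol_lm_process(
    "We seek a Python developer with PyTorch and REST API experience.",
    "Extract key qualifications, skills, and requirements from this job description. Output as bullet points.",
))

Passing the system prompt in as an argument is what lets the same model call serve both the Step 2 requirement extraction and the Step 3 summary inside process_job_description.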