Deepakraj2006 committed on
Commit
13869de
·
verified ·
1 Parent(s): 24c5da1

Upload 2 files

Browse files
Files changed (2) hide show
  1. app.py +77 -0
  2. requirements.txt +2 -0
app.py ADDED
@@ -0,0 +1,77 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Import necessary packages
import os

import gradio as gr
from ibm_watsonx_ai import Credentials
from ibm_watsonx_ai import APIClient
from ibm_watsonx_ai.foundation_models import Model, ModelInference
from ibm_watsonx_ai.foundation_models.schema import TextChatParameters
from ibm_watsonx_ai.metanames import GenTextParamsMetaNames
8
+
9
# Model and project settings
model_id = "meta-llama/llama-3-2-11b-vision-instruct"  # Directly specifying the LLAMA3 model

# SECURITY FIX: the original file committed a literal IBM Cloud API key and
# project id to source control. Secrets must come from the environment, never
# from code — the leaked key should be rotated immediately. Set
# WATSONX_APIKEY and WATSONX_PROJECT_ID before launching the app.
watsonx_API = os.environ.get("WATSONX_APIKEY", "")
project_id = os.environ.get("WATSONX_PROJECT_ID", "")

# Set credentials to use the model
credentials = {
    "url": "https://au-syd.ml.cloud.ibm.com",  # Sydney region watsonx.ai endpoint
    "apikey": watsonx_API,
}

# Generation parameters for the chat endpoint
params = TextChatParameters(
    temperature=0.7,   # moderately creative rewriting
    max_tokens=512     # cap on generated tokens per reply
)

# Initialize the model client used by polish_resume below
model = ModelInference(
    model_id=model_id,
    credentials=credentials,
    project_id=project_id,
    params=params
)
34
+
35
# Function to polish the resume using the model, making polish_prompt optional
def polish_resume(position_name, resume_content, polish_prompt=""):
    """Rewrite/improve a resume for a target position via the watsonx.ai chat model.

    Args:
        position_name: Title of the position the resume targets.
        resume_content: Raw resume text supplied by the user.
        polish_prompt: Optional extra instructions; when blank, a generic
            improvement prompt is used instead.

    Returns:
        The model-generated polished resume text.
    """
    # Pick the prompt variant: user-directed polish vs. generic improvement.
    if polish_prompt and polish_prompt.strip():
        user_prompt = f"Given the resume content: '{resume_content}', polish it based on the following instructions: {polish_prompt} for the {position_name} position."
    else:
        user_prompt = f"Suggest improvements for the following resume content: '{resume_content}' to better align with the requirements and expectations of a {position_name} position. Return the polished version, highlighting necessary adjustments for clarity, relevance, and impact in relation to the targeted role."

    # Single-turn chat payload in the watsonx.ai message format.
    chat_messages = [
        {
            "role": "user",
            "content": [{"type": "text", "text": user_prompt}],
        }
    ]

    # Ask the model and pull the text out of the first (only) choice.
    response = model.chat(messages=chat_messages)
    return response['choices'][0]['message']['content']
61
+
62
# Create Gradio interface for the resume polish application, marking polish_prompt as optional
resume_polish_application = gr.Interface(
    fn=polish_resume,
    flagging_mode="never",  # Deactivate the flag function in gradio as it is not needed.
    inputs=[
        gr.Textbox(label="Position Name", placeholder="Enter the name of the position..."),
        gr.Textbox(label="Resume Content", placeholder="Paste your resume content here...", lines=20),
        gr.Textbox(label="Polish Instruction (Optional)", placeholder="Enter specific instructions or areas for improvement (optional)...", lines=2),
    ],
    outputs=gr.Textbox(label="Polished Content"),
    title="Resume Polish Application",
    # FIX: corrected grammar in the user-facing description
    # ("the position your want to apply" -> "the position you want to apply for").
    description="This application helps you polish your resume. Enter the position you want to apply for, your resume content, and specific instructions or areas for improvement (optional), then get a polished version of your content."
)

# Launch the application (share=True also exposes a public gradio.live URL)
resume_polish_application.launch(share=True)
requirements.txt ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ Gradio
2
+ ibm_watsonx_ai