Jebin2005 committed
Commit b97a14f · verified · 1 Parent(s): cd57b5d

Create app.py

Files changed (1)
  1. app.py +101 -0
app.py ADDED
@@ -0,0 +1,101 @@
+ from transformers import pipeline, AutoModelForCausalLM, AutoTokenizer
+ import gradio as gr
+ model_name = "TinyLlama/TinyLlama-1.1B-Chat-v1.0"
+ tokenizer = AutoTokenizer.from_pretrained(model_name)
+ model = AutoModelForCausalLM.from_pretrained(model_name)
+ text_generator = pipeline("text-generation", model=model, tokenizer=tokenizer)
+
+ def interview_chatbot(user_input, task):
+     """
+     Handles interview-specific chatbot tasks.
+
+     Parameters:
+     - user_input: str, the input text from the user.
+     - task: str, the type of task (e.g., "Behavioral Question", "Technical Question", "General Advice").
+
+     Returns:
+     - str: The generated response.
+     """
+     if task == "Behavioral Question":
+         prompt = f"You are an interview coach. Provide a strong response to the following behavioral question:\n{user_input}\nSuggested Response:"
+     elif task == "Technical Question":
+         prompt = f"You are a technical interview expert. Answer the following technical question clearly and concisely:\nQuestion: {user_input}\nAnswer:"
+     elif task == "General Advice":
+         prompt = f"You are an interview expert. Provide advice for the following situation:\n{user_input}\nAdvice:"
+     else:
+         return "Invalid task selected."
+
+     response = text_generator(
+         prompt,
+         max_length=200,
+         num_return_sequences=1,
+         pad_token_id=tokenizer.eos_token_id,
+         temperature=0.7,
+         top_p=0.9
+     )[0]["generated_text"]
+
+     return response[len(prompt):].strip()
+
+ def gradio_interface(user_input, task):
+     """
+     Interface function for Gradio integration.
+     """
+     if not user_input.strip():
+         return "Please enter some input."
+     return interview_chatbot(user_input, task)
+ with gr.Blocks(theme=gr.themes.Monochrome()) as interview_chat_ui:
+     gr.Markdown(
+         """
+         # 🌟 Interview Preparation Chatbot
+         Welcome to your personal interview preparation assistant! This chatbot can help you tackle:
+         - **Behavioral Questions**: Practice with confidence.
+         - **Technical Questions**: Get clear and concise explanations.
+         - **General Advice**: Learn how to ace your interviews.
+         """,
+         elem_id="main_header",
+     )
+
+     with gr.Row():
+         with gr.Column():
+             gr.Markdown(
+                 """### 🎯 Enter your query and select the task type:""",
+                 elem_id="sub_header",
+             )
+             user_input = gr.Textbox(
+                 lines=5,
+                 placeholder="Enter your question or situation here...",
+                 label="Your Input",
+                 elem_id="input_box",
+             )
+             task = gr.Radio(
+                 ["Behavioral Question", "Technical Question", "General Advice"],
+                 label="Select Task",
+                 elem_id="task_selector",
+             )
+             submit_button = gr.Button("✨ Get Response", elem_id="submit_button")
+
+         with gr.Column():
+             gr.Markdown(
+                 """### 💡 Chatbot Response:""",
+                 elem_id="response_header",
+             )
+             output = gr.Textbox(
+                 lines=10,
+                 label="Response",
+                 interactive=False,
+                 elem_id="output_box",
+             )
+
+     submit_button.click(gradio_interface, inputs=[user_input, task], outputs=output)
+     clear_button = gr.Button("🧹 Clear All", elem_id="clear_button")
+     clear_button.click(lambda: ("", ""), None, [user_input, output])
+
+     gr.Markdown(
+         """
+         ---
+         **Tip**: Practice regularly to build confidence and improve your interview skills! 🚀
+         """,
+         elem_id="footer_text",
+     )
+
+ interview_chat_ui.launch()
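
Note on the generation call: as committed, `temperature` and `top_p` are passed without `do_sample=True`, so recent transformers releases fall back to greedy decoding and warn that the sampling arguments are ignored, and `max_length=200` also counts the prompt tokens. Below is a minimal smoke-test sketch for the generation step outside the Gradio UI; it is not part of this commit, and `max_new_tokens`/`do_sample=True` are suggested adjustments rather than the app's current settings.

# smoke_test.py -- hypothetical helper, not included in this commit.
# Exercises the same TinyLlama pipeline used by app.py without launching Gradio.
from transformers import pipeline, AutoModelForCausalLM, AutoTokenizer

model_name = "TinyLlama/TinyLlama-1.1B-Chat-v1.0"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)
text_generator = pipeline("text-generation", model=model, tokenizer=tokenizer)

# Same prompt shape as the "Behavioral Question" branch in interview_chatbot.
prompt = (
    "You are an interview coach. Provide a strong response to the following "
    "behavioral question:\nTell me about a time you handled conflict.\nSuggested Response:"
)

result = text_generator(
    prompt,
    max_new_tokens=150,        # budget applies to new tokens only, independent of prompt length
    do_sample=True,            # required for temperature/top_p to take effect
    temperature=0.7,
    top_p=0.9,
    num_return_sequences=1,
    pad_token_id=tokenizer.eos_token_id,
)[0]["generated_text"]

# Strip the echoed prompt, mirroring interview_chatbot's return value.
print(result[len(prompt):].strip())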