updated working
app.py
CHANGED
@@ -1,4 +1,3 @@
-import torch
 import gradio as gr
 from transformers import AutoTokenizer, AutoModelForCausalLM
 
@@ -9,53 +8,81 @@ model, tokenizer = (
 )
 
 # Function to generate text (suggestions)
-def generate_text(prompt, output_length):
+def generate_text(prompt, output_length, temperature, top_k, top_p):
     inputs = tokenizer(prompt, return_tensors='pt').to(model.device)
     generated_output = model.generate(
         inputs['input_ids'],
         max_length=inputs['input_ids'].shape[-1] + output_length,  # Generate 10 more tokens
         no_repeat_ngram_size=2,
-        temperature=
-        top_k=
-        top_p=
+        temperature=temperature,
+        top_k=top_k,
+        top_p=top_p,
         do_sample=True,
     )
     output_text = tokenizer.decode(generated_output[0], skip_special_tokens=True)
     return output_text
 
-# 
-
-# Add a chat interface above the text boxes
-with gr.Blocks() as demo:
-    chatbox = gr.Chatbot(label="Chat", type="messages")
+# Set up the Gradio interface with custom CSS
+with gr.Blocks(css="""
+#response-text {
+    background-color: #e1bee7;  /* Light purple background */
+    border-radius: 8px;  /* Rounded corners */
+    padding: 10px;  /* Padding inside the textbox */
+    font-size: 16px;  /* Font size */
+    color: #4a148c;  /* Dark purple text color */
+}
+""") as demo:
+
+    # Add a title to the interface
+    gr.Markdown("# Hinglish Chat Prediction")
+
+    # Add a chat interface above the text boxes with reduced size
+    with gr.Row():
+        chatbox = gr.Chatbot(label="Chat", type="messages", height=350, value=[{"role": "assistant", "content": "Kya kar rahe ho"}])
 
     with gr.Row():
         # Create a column for the two text boxes
         with gr.Column(scale=3):
             # Input text box for user input (first column)
-            input_text = gr.Textbox(label="
+            input_text = gr.Textbox(label="Start chatting", interactive=True)
 
+        # Create a separate column for the buttons
+        with gr.Column(scale=1):
+            # Submit button placed above the replace button
+            submit_button = gr.Button("Submit", variant="primary", elem_id="submit-btn")
+
+    with gr.Row():
+        # Create a column for the two text boxes
+        with gr.Column(scale=3):
             # Response text box (second column)
-            response_text = gr.Textbox(label="
+            response_text = gr.Textbox(label="Suggestion", interactive=False, elem_id="response-text")
+
+        # Create a separate column for the buttons
+        with gr.Column(scale=1):
+            replace_button = gr.Button("Replace Text", variant="secondary", elem_id="replace-btn")
+            regenerate_button = gr.Button("Regenerate", variant="secondary", elem_id="regenerate-btn")
 
-
+    with gr.Row():
+        # Create a dropdown menu for text generation parameters
         with gr.Column(scale=1):
-
-
+            with gr.Accordion("Change Parameters", open=False):
+                output_length_slider = gr.Slider(1, 20, value=8, label="Output Length", step=1)
+                temperature_slider = gr.Slider(0.1, 1.0, value=0.8, label="Temperature (Controls randomness)")
+                top_k_slider = gr.Slider(1, 100, value=50, label="Top-k (Limits vocabulary size)", step=1)
+                top_p_slider = gr.Slider(0.1, 1.0, value=0.9, label="Top-p (Nucleus sampling)")
 
     # Set up the interaction between input and output
-    def validate_and_generate(prompt, output_length
+    def validate_and_generate(prompt, output_length, temperature, top_k, top_p):
         if prompt.strip():
-
+            print(f"Prompt: {prompt}")
+            return generate_text(prompt, output_length, temperature, top_k, top_p)
 
-    input_text.input(validate_and_generate, inputs=input_text, outputs=response_text)
+    input_text.input(validate_and_generate, inputs=[input_text, output_length_slider, temperature_slider, top_k_slider, top_p_slider], outputs=response_text)
     replace_button.click(lambda x: x, inputs=response_text, outputs=input_text)
 
-    def chat_interaction(prompt, history):
+    def chat_interaction(prompt, history, output_length, temperature, top_k, top_p):
         if prompt.strip():
-            response = generate_text(prompt, output_length
+            response = generate_text(prompt, output_length, temperature, top_k, top_p)
 
             # Exclude the input prompt text from the response
             response = response[len(prompt):].strip()
@@ -63,10 +90,21 @@ with gr.Blocks() as demo:
             history.append({"role": "assistant", "content": response})
 
             # Call validate_and_generate with the response
-            response_text_value = validate_and_generate(response, output_length
+            response_text_value = validate_and_generate(response, output_length, temperature, top_k, top_p)
            return history, response_text_value[len(response):].strip(), ""
+
+        return history, "", ""
+
+    def regenerate_text(input_text, history, output_length, temperature, top_k, top_p):
+        if input_text.strip():
+            return generate_text(input_text, output_length, temperature, top_k, top_p)
+        else:
+            last_message = history[-1]["content"]
+            return generate_text(last_message, output_length, temperature, top_k, top_p)[len(last_message):].strip()
 
-    input_text.submit(chat_interaction, inputs=[input_text, chatbox], outputs=[chatbox, response_text, input_text])
+    input_text.submit(chat_interaction, inputs=[input_text, chatbox, output_length_slider, temperature_slider, top_k_slider, top_p_slider], outputs=[chatbox, response_text, input_text])
+    submit_button.click(chat_interaction, inputs=[input_text, chatbox, output_length_slider, temperature_slider, top_k_slider, top_p_slider], outputs=[chatbox, response_text, input_text])
+    regenerate_button.click(regenerate_text, inputs=[input_text, chatbox, output_length_slider, temperature_slider, top_k_slider, top_p_slider], outputs=response_text)
 
     # Launch the interface
     demo.launch()
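The commit threads `output_length`, `temperature`, `top_k`, and `top_p` from new slider widgets through `validate_and_generate`, `chat_interaction`, and a new `regenerate_text` helper into `generate_text`, and drops `import torch`, which is safe as far as the shown lines go since the new file never references `torch` directly. The diff's collapsed context (old lines 4-8) holds the `model, tokenizer = (` assignment named in the hunk header. A minimal sketch of that surrounding context, with a placeholder checkpoint since the Space's real Hinglish model id is not visible in the diff:

```python
import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM

# Placeholder checkpoint: the actual model id lives in the diff's collapsed
# context, so "gpt2" here is only an assumption for local testing.
MODEL_ID = "gpt2"

# Matches the tuple-assignment shape shown in the hunk header.
model, tokenizer = (
    AutoModelForCausalLM.from_pretrained(MODEL_ID),
    AutoTokenizer.from_pretrained(MODEL_ID),
)
```

With that context in place, the updated signature can be exercised directly, e.g. `generate_text("Kya kar rahe", output_length=8, temperature=0.8, top_k=50, top_p=0.9)` using the slider defaults.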
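For readers tuning the new sliders: temperature, top-k, and top-p are standard filters applied to the next-token distribution inside `model.generate` when `do_sample=True`. A rough standalone illustration of what each knob does (a simplification, not the commit's code and not transformers' exact logits processors):

```python
import torch

def sample_next_token(logits: torch.Tensor, temperature=0.8, top_k=50, top_p=0.9):
    # Temperature rescales logits: values < 1.0 sharpen the distribution,
    # values > 1.0 flatten it toward uniform.
    logits = logits / temperature

    # Top-k: mask everything outside the k highest-scoring tokens.
    kth = torch.topk(logits, min(top_k, logits.size(-1))).values[..., -1, None]
    logits = logits.masked_fill(logits < kth, float("-inf"))

    # Top-p (nucleus): keep the smallest prefix of tokens, in probability
    # order, whose cumulative mass reaches p; mask the rest.
    sorted_logits, sorted_idx = torch.sort(logits, descending=True)
    probs = torch.softmax(sorted_logits, dim=-1)
    past_nucleus = probs.cumsum(dim=-1) - probs > top_p
    sorted_logits = sorted_logits.masked_fill(past_nucleus, float("-inf"))
    logits = sorted_logits.gather(-1, sorted_idx.argsort(-1))  # restore order

    # Draw one token id from the filtered, renormalized distribution.
    return torch.multinomial(torch.softmax(logits, dim=-1), num_samples=1)
```

With the slider defaults (temperature 0.8, top-k 50, top-p 0.9) the suggestion stays fairly conservative; raising temperature or top-p widens the candidate pool and makes completions more varied.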