import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer
# Load the model and tokenizer for GGUF
model_id = "Tech-Meld/Hajax_Chat_1.0-Q3_K_S-GGUF"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)
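# NOTE (assumption, not verified against this repo): repositories that ship only GGUF
# weights usually cannot be loaded by from_pretrained() without naming the exact .gguf
# file. If the load above fails, transformers' `gguf_file` argument is the usual fix:
#   tokenizer = AutoTokenizer.from_pretrained(model_id, gguf_file="<model-file>.gguf")
#   model = AutoModelForCausalLM.from_pretrained(model_id, gguf_file="<model-file>.gguf")
# where "<model-file>.gguf" is a placeholder for the actual file name in the repo.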
# --- Functions ---
def get_response(input_text, temperature, top_p, top_k, max_length):
    inputs = tokenizer(input_text, return_tensors="pt")
    outputs = model.generate(
        input_ids=inputs["input_ids"],
        attention_mask=inputs["attention_mask"],
        max_length=max_length,
        do_sample=True,  # sampling must be enabled, otherwise temperature/top_p/top_k are ignored
        temperature=temperature,
        top_p=top_p,
        top_k=top_k,
    )
    response = tokenizer.decode(outputs[0], skip_special_tokens=True)
    # Return both values the interface expects: the generated text and its analysis
    # summary (format_analysis is defined in the "Analysis Logic" section below).
    return response, format_analysis(response)
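# Quick standalone check outside the UI (hypothetical prompt and parameter values):
#   text, summary = get_response("Hello, how are you?", 0.7, 0.9, 50, 100)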
def analyze_text(text):
    num_tokens = len(tokenizer.tokenize(text))
    return {
        "Number of characters": len(text),
        "Number of words": len(text.split()),
        "Number of tokens": num_tokens,
    }
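# For example (token count depends on the tokenizer, so it is left as a placeholder):
#   analyze_text("Hello world") -> {"Number of characters": 11, "Number of words": 2, "Number of tokens": ...}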
# --- Interface ---
css = """
.gradio-container {
    background-color: #f0f0f0; /* Light background for the container */
}
.gradio-interface {
    background-color: rgba(255, 255, 255, 0.8); /* Translucent white background */
    border-radius: 15px; /* Rounded corners */
    padding: 20px;
    box-shadow: 0 0 10px rgba(0, 0, 0, 0.2); /* Subtle shadow */
}
.gradio-button {
    background-color: #4CAF50; /* Green button color */
    color: white;
    border: none;
    padding: 10px 20px;
    text-align: center;
    text-decoration: none;
    display: inline-block;
    font-size: 16px;
    margin: 4px 2px;
    cursor: pointer;
    border-radius: 5px; /* Rounded corners */
}
.gradio-button:hover {
    background-color: #3e8e41; /* Darker green on hover */
}
.gradio-text-area {
    resize: vertical; /* Allow vertical resizing for text areas */
}
"""
iface = gr.Interface(
    fn=get_response,
    inputs=[
        gr.Textbox(label="Your message:", lines=5, placeholder="Enter your message here...", show_label=True),
        gr.Slider(label="Temperature", minimum=0.1, maximum=1.0, step=0.1, value=0.7),
        gr.Slider(label="Top p", minimum=0.1, maximum=1.0, step=0.1, value=0.9),
        gr.Slider(label="Top k", minimum=1, maximum=100, step=1, value=50),
        gr.Slider(label="Max length", minimum=10, maximum=1000, step=10, value=250),
    ],
    outputs=[
        gr.TextArea(label="AI Response:", lines=10),
        gr.Label(label="Text Analysis", elem_id="analysis"),
    ],
    title="Chat with AI",
    description="Engage in a conversation with our advanced model. Customize the response using various parameters.",
    theme="default",  # Keep the default theme; visual tweaks come from the custom CSS above
    css=css,  # Apply the CSS styles defined above
    allow_flagging="never",
)
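# Optional (not part of the original app): enable Gradio's request queue so that long
# generations are not cut off by HTTP timeouts.
# iface.queue()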
# --- Dynamic Background ---
# The original code started a loop that picked a random RGB colour every second and
# tried to apply it via `iface.root.style.background_color` and `gr.Interface.update(...)`.
# Neither API exists: Gradio offers no way to mutate component styles from a background
# Python thread. The animated background is instead handled by the CSS appended above.
# --- Analysis Logic ---
def format_analysis(response):
    """Build the human-readable summary shown in the "Text Analysis" output."""
    analysis = analyze_text(response)
    return (
        f"Number of characters: {analysis['Number of characters']}\n"
        f"Number of words: {analysis['Number of words']}\n"
        f"Number of tokens: {analysis['Number of tokens']}"
    )
# The original code tried to push this summary into the interface with `iface.update(...)`
# and by overriding `iface.outputs[0].postprocess`, neither of which is a supported Gradio
# mechanism. Instead, get_response() returns the summary as its second value, and Gradio
# routes it to the "Text Analysis" output directly.
iface.launch(debug=True)