File size: 3,779 Bytes
4a48aba
 
1122788
 
4a48aba
d69301c
 
1122788
 
fa136e4
1122788
4a48aba
1122788
d69301c
1122788
d69301c
 
1122788
 
 
 
 
d69301c
4a48aba
 
1122788
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
4a48aba
 
1122788
 
 
 
 
 
 
 
 
 
 
 
fa136e4
d69301c
1122788
 
 
 
4a48aba
1122788
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer
import time
import random

# Load the model and tokenizer for GGUF
# NOTE(review): loading a GGUF repo through AutoModelForCausalLM.from_pretrained
# normally requires the `gguf_file=...` kwarg (and the `gguf` package installed);
# as written this load may fail at startup — confirm against the repo's files.
model_id = "Tech-Meld/Hajax_Chat_1.0-Q3_K_S-GGUF"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)

# --- Functions ---

def get_response(input_text, temperature, top_p, top_k, max_length):
    """Generate a model reply for *input_text*.

    Args:
        input_text: Prompt string fed to the tokenizer.
        temperature: Softmax temperature used during sampling.
        top_p: Nucleus-sampling probability mass.
        top_k: Number of highest-probability tokens kept per step.
        max_length: Maximum total sequence length (prompt + generated tokens).

    Returns:
        The decoded model output with special tokens stripped.
    """
    inputs = tokenizer(input_text, return_tensors="pt")
    outputs = model.generate(
        input_ids=inputs["input_ids"],
        attention_mask=inputs["attention_mask"],
        max_length=max_length,
        # Fix: without do_sample=True, generate() falls back to greedy decoding
        # and silently ignores temperature/top_p/top_k.
        do_sample=True,
        temperature=temperature,
        top_p=top_p,
        top_k=top_k,
    )
    response = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return response

def analyze_text(text):
    """Return simple statistics for *text*: character, word, and token counts."""
    char_count = len(text)
    word_count = len(text.split())
    token_count = len(tokenizer.tokenize(text))
    return {
        "Number of characters": char_count,
        "Number of words": word_count,
        "Number of tokens": token_count,
    }

# --- Interface ---

# Custom CSS injected into the Gradio page: light container background,
# translucent rounded "card" for the interface, green buttons with a darker
# hover state, and vertically resizable text areas.
css = """
.gradio-container {
  background-color: #f0f0f0; /* Light background for the container */
}

.gradio-interface {
  background-color: rgba(255, 255, 255, 0.8); /* Translucent white background */
  border-radius: 15px; /* Rounded corners */
  padding: 20px;
  box-shadow: 0 0 10px rgba(0, 0, 0, 0.2); /* Subtle shadow */
}

.gradio-button {
  background-color: #4CAF50; /* Green button color */
  color: white;
  border: none;
  padding: 10px 20px;
  text-align: center;
  text-decoration: none;
  display: inline-block;
  font-size: 16px;
  margin: 4px 2px;
  cursor: pointer;
  border-radius: 5px; /* Rounded corners */
}

.gradio-button:hover {
  background-color: #3e8e41; /* Darker green on hover */
}

.gradio-text-area {
  resize: vertical; /* Allow vertical resizing for text areas */
}
"""

def _chat_with_analysis(input_text, temperature, top_p, top_k, max_length):
    """Adapter for gr.Interface: return (response, analysis) for the two outputs."""
    response = get_response(input_text, temperature, top_p, top_k, max_length)
    return response, analyze_text(response)

iface = gr.Interface(
    # Fix: the interface declares TWO outputs but get_response returns only one
    # value; this wrapper returns both the reply and its analysis dict.
    fn=_chat_with_analysis,
    inputs=[
        gr.Textbox(label="Your message:", lines=5, placeholder="Enter your message here...", show_label=True),
        gr.Slider(label="Temperature", minimum=0.1, maximum=1.0, step=0.1, value=0.7),
        gr.Slider(label="Top p", minimum=0.1, maximum=1.0, step=0.1, value=0.9),
        gr.Slider(label="Top k", minimum=1, maximum=100, step=1, value=50),
        gr.Slider(label="Max length", minimum=10, maximum=1000, step=10, value=250),
    ],
    outputs=[
        gr.TextArea(label="AI Response:", lines=10),
        # NOTE(review): gr.Label is designed for label->confidence dicts; the raw
        # counts render but a gr.JSON component may be a better fit — confirm.
        gr.Label(label="Text Analysis", elem_id="analysis"),
    ],
    title="Chat with AI",
    description="Engage in a conversation with our advanced model. Customize the response using various parameters.",
    theme="default",
    css=css,  # Apply the CSS styles defined earlier
    # Fix: removed invalid `layout="vertical"` kwarg — gr.Interface has no
    # `layout` parameter in Gradio 3+ and it raises TypeError at construction.
    allow_flagging="never",
)

# --- Dynamic Background ---

def update_background():
    # NOTE(review): `iface.root.style` is not a documented Gradio attribute —
    # Interface objects expose no live DOM handle from Python, so this loop
    # would raise AttributeError if it ever ran; confirm against the installed
    # Gradio version. Dynamic styling belongs in CSS/JS on the client side.
    while True:
        r = random.randint(0, 255)
        g = random.randint(0, 255)
        b = random.randint(0, 255)
        iface.root.style.background_color = f"rgb({r}, {g}, {b})"  # Set dynamic background color
        time.sleep(1)  # Update every second

# Start a separate thread to update the background color
# NOTE(review): `gr.Interface.update` is not a thread launcher and does not take
# these arguments; as written no thread is started (threading.Thread(...,
# daemon=True).start() would be the mechanism) — verify intent.
gr.Interface.update(update_background, inputs=[], outputs=[], live=True)

# --- Analysis Logic ---

def update_analysis(response):
    # NOTE(review): `iface.update(analysis=..., live=...)` is not part of the
    # gr.Interface API — updates flow through event handlers' return values, so
    # this call would fail if invoked; confirm against the installed Gradio
    # version.
    analysis = analyze_text(response)
    analysis_str = f"Number of characters: {analysis['Number of characters']}\n" \
                  f"Number of words: {analysis['Number of words']}\n" \
                  f"Number of tokens: {analysis['Number of tokens']}"
    iface.update(analysis=analysis_str, live=True)  # Update analysis section with the generated data

# NOTE(review): `iface.outputs[0]` is the response TextArea, and replacing its
# postprocess with a function returning None would blank the displayed reply;
# the analysis should instead be returned by the interface fn — verify.
iface.outputs[0].postprocess = update_analysis  # Update analysis after every response

iface.launch(debug=True)