import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer

# Load the model and tokenizer (the phi-2 weights are several GB and are
# downloaded on first run)
model_id = "microsoft/phi-2"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)
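# Optional speed-up (an assumption, not part of the original app): with a CUDA
# GPU available, phi-2 can instead be loaded in half precision on the GPU.
# import torch
# model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda")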

# --- Functions ---

def get_response(input_text, temperature, top_p, top_k, max_length):
    """Generate a completion and return it together with basic text statistics."""
    inputs = tokenizer(input_text, return_tensors="pt")
    outputs = model.generate(
        input_ids=inputs["input_ids"],
        attention_mask=inputs["attention_mask"],
        max_length=int(max_length),
        do_sample=True,  # sampling must be enabled for temperature/top_p/top_k to apply
        temperature=temperature,
        top_p=top_p,
        top_k=int(top_k),
        pad_token_id=tokenizer.eos_token_id,  # phi-2 has no dedicated pad token
    )
    response = tokenizer.decode(outputs[0], skip_special_tokens=True)
    # The interface has two outputs (response text and analysis), so return both.
    return response, analyze_text(response)
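# Quick sanity check from a Python shell (hypothetical prompt; generation on
# CPU can take a while):
# text, stats = get_response("What is a GPU?", temperature=0.7, top_p=0.9, top_k=50, max_length=100)
# print(text)
# print(stats)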

def analyze_text(text):
    num_tokens = len(tokenizer.tokenize(text))
    return {
        "Number of characters": len(text),
        "Number of words": len(text.split()),
        "Number of tokens": num_tokens,
    }
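# Example (the token count depends on the phi-2 tokenizer):
# analyze_text("Hello world") -> {"Number of characters": 11, "Number of words": 2, "Number of tokens": 2}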

# --- Interface ---

css = """
.gradio-container {
  background-color: #000000; /* Black background for the container */
  font-family: 'Roboto', sans-serif; /* Use a font similar to Nvidia's style */
}

.gradio-interface {
  background-color: rgba(0, 0, 0, 0.8); /* Translucent black background */
  border: 3px solid #00FF00; /* Green border */
  border-radius: 15px; /* Rounded corners */
  padding: 20px;
  box-shadow: 0 0 10px rgba(0, 255, 0, 0.5); /* Green shadow */
}

.gradio-button {
  background-color: #00FF00; /* Green button color */
  color: white;
  border: none;
  padding: 10px 20px;
  text-align: center;
  text-decoration: none;
  display: inline-block;
  font-size: 16px;
  margin: 4px 2px;
  cursor: pointer;
  border-radius: 5px; /* Rounded corners */
}

.gradio-button:hover {
  background-color: #00CC00; /* Darker green on hover */
}

.gradio-text-area {
  resize: vertical; /* Allow vertical resizing for text areas */
  background-color: #111111; /* Dark gray background for text areas */
  color: #00FF00; /* Green text color for text areas */
  border: 1px solid #00FF00; /* Green border for text areas */
}

.gradio-slider {
  background-color: #111111; /* Dark gray background for sliders */
  color: #00FF00; /* Green text color for sliders */
}

.gradio-slider .slider-bar {
  background-color: #00FF00; /* Green slider bar */
}

.gradio-slider .slider-thumb {
  background-color: #00FF00; /* Green slider thumb */
}

h1 {
  color: #00FF00; /* Green heading color */
  text-align: center;
}

h2 {
  color: #00FF00; /* Green subheading color */
}

/* Dynamic background: slowly pulse the page colour. Gradio cannot change
   client-side styles from Python at runtime, so this is done in CSS. */
@keyframes bg-cycle {
  0%, 100% { background-color: #000000; }
  50%      { background-color: #002b00; } /* subtle green tint */
}
.gradio-container { animation: bg-cycle 10s ease-in-out infinite; }
"""

# Styling is applied through the css string above; components are tagged with
# elem_classes so the custom selectors match them.
iface = gr.Interface(
    fn=get_response,
    inputs=[
        gr.Textbox(label="Your message:", lines=5, placeholder="Ask me anything...",
                   show_label=True, elem_classes=["gradio-text-area"]),
        gr.Slider(label="Temperature", minimum=0.1, maximum=1.0, step=0.1, value=0.7,
                  elem_classes=["gradio-slider"]),
        gr.Slider(label="Top p", minimum=0.1, maximum=1.0, step=0.1, value=0.9,
                  elem_classes=["gradio-slider"]),
        gr.Slider(label="Top k", minimum=1, maximum=100, step=1, value=50,
                  elem_classes=["gradio-slider"]),
        gr.Slider(label="Max length", minimum=10, maximum=1000, step=10, value=250,
                  elem_classes=["gradio-slider"]),
    ],
    outputs=[
        gr.TextArea(label="AI Response:", lines=10, elem_classes=["gradio-text-area"]),
        # gr.JSON shows the analysis counts as-is; gr.Label would try to render
        # them as confidence scores.
        gr.JSON(label="Text Analysis", elem_id="analysis"),
    ],
    title="NVIDIA AI Chat",
    description="Engage in a conversation with our advanced AI model. Customize the response using various parameters.",
    theme="default",
    css=css,  # Apply the CSS styles defined earlier
    allow_flagging="never",
)

# --- Dynamic Background ---
# Gradio has no Python-side API for changing client styles while the app is
# running, so the background colour cycling is handled entirely by the
# bg-cycle CSS animation defined in the stylesheet above.

# --- Analysis Logic ---
# The text analysis is produced directly by get_response, which returns
# (response, analyze_text(response)) so that each interface output receives
# its own value; no post-processing hook is needed.

iface.launch(debug=True)
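# Optional launch settings (assumptions, not in the original app): bind to all
# interfaces for LAN access and request a temporary public share link.
# iface.launch(debug=True, server_name="0.0.0.0", share=True)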