import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer
import random

# Load the model and tokenizer
model_id = "microsoft/phi-2"  # Change to your desired model
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)
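# Note: phi-2 is a ~2.7B-parameter model; the first run downloads a few GB of
# weights, and generation on CPU will be noticeably slow.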

# --- Functions ---
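# get_response runs text generation, analyze_text computes simple statistics about a
# string, and update_analysis renders those statistics as HTML for the panel below
# the response box.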

def get_response(input_text, temperature, top_p, top_k, max_length):
    """Generate a completion for the prompt using the sampling settings from the UI."""
    inputs = tokenizer(input_text, return_tensors="pt")
    outputs = model.generate(
        input_ids=inputs["input_ids"],
        attention_mask=inputs["attention_mask"],
        max_length=int(max_length),  # sliders may return floats; generate expects ints
        do_sample=True,              # required for temperature/top_p/top_k to have any effect
        temperature=temperature,
        top_p=top_p,
        top_k=int(top_k),
        pad_token_id=tokenizer.eos_token_id,  # phi-2 defines no pad token; avoids a generate() warning
    )
    # Decode only the newly generated tokens so the prompt is not echoed back
    response = tokenizer.decode(outputs[0][inputs["input_ids"].shape[-1]:], skip_special_tokens=True)
    return response

def analyze_text(text):
    num_tokens = len(tokenizer.tokenize(text))
    return {
        "Number of characters": len(text),
        "Number of words": len(text.split()),
        "Number of tokens": num_tokens,
    }

def update_analysis(response):
    analysis = analyze_text(response)
    analysis_str = f"Number of characters: {analysis['Number of characters']}<br>" \
                  f"Number of words: {analysis['Number of words']}<br>" \
                  f"Number of tokens: {analysis['Number of tokens']}"
    return analysis_str

# --- Interface ---

with gr.Blocks() as iface:
    gr.Markdown(
        """
        # Hajax Chat
        """
    )

    input_text = gr.Textbox(
        label="Your message:", lines=5, placeholder="Ask me anything...", show_label=True
    )
    temperature = gr.Slider(label="Temperature", minimum=0.1, maximum=1.0, step=0.1, value=0.7)
    top_p = gr.Slider(label="Top p", minimum=0.1, maximum=1.0, step=0.1, value=0.9)
    top_k = gr.Slider(label="Top k", minimum=1, maximum=100, step=1, value=50)
    max_length = gr.Slider(label="Max length", minimum=10, maximum=1000, step=10, value=250)
    submit_button = gr.Button(value="Submit")
    response = gr.TextArea(label="Response:", lines=10)
    analysis_html = gr.HTML(elem_id="analysis")

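    # Event wiring: the Submit button triggers generation; any change to the response
    # text re-computes the character/word/token analysis shown below it.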
    submit_button.click(fn=get_response, inputs=[input_text, temperature, top_p, top_k, max_length], outputs=[response])
    response.change(fn=update_analysis, inputs=[response], outputs=[analysis_html])

    # --- Dynamic Background ---
    # Sketch of a periodic background-color refresh, assuming Gradio >= 4.40
    # (for gr.Timer). A <style> tag is rendered through a gr.HTML component and
    # replaced once per second; the ".gradio-container" selector is an assumption
    # about Gradio's page markup and may differ between versions.

    bg_style = gr.HTML()  # holds the <style> tag that recolors the page

    def update_background():
        r = random.randint(0, 255)
        g = 255  # Keep the green component constant
        b = random.randint(0, 255)
        return (
            f"<style>body, .gradio-container "
            f"{{ background-color: rgb({r}, {g}, {b}) !important; }}</style>"
        )

    timer = gr.Timer(1)  # fires once per second
    timer.tick(fn=update_background, outputs=[bg_style])

iface.launch(debug=True)
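# Tip: iface.launch(share=True) additionally creates a temporary public URL.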