mariusjabami committed
Commit 5113576 (verified)
1 Parent(s): e5afc30

Update app.py

Files changed (1)
  1. app.py +31 -45
app.py CHANGED
@@ -2,31 +2,30 @@ import gradio as gr
 from huggingface_hub import InferenceClient
 import time
 
-# Clients
-chat_client = InferenceClient("lambdaindie/lambdai")
-image_client = InferenceClient("stabilityai/stable-diffusion-2")
+client = InferenceClient("lambdaindie/lambdai")
 
-# CSS with JetBrains Mono forced
 css = """
+@import url('https://fonts.googleapis.com/css2?family=JetBrains+Mono&display=swap');
+
+* {
+    font-family: 'JetBrains Mono', monospace !important;
+}
+
 body {
-    font-family: 'JetBrains Mono', monospace;
     background-color: #111;
     color: #e0e0e0;
 }
-.gr-textbox textarea {
-    background-color: #181818 !important;
-    color: #fff !important;
-    font-family: 'JetBrains Mono', monospace;
-    border-radius: 8px;
-}
+
 .markdown-think {
     background-color: #1e1e1e;
     border-left: 4px solid #555;
     padding: 10px;
     margin-bottom: 8px;
     font-style: italic;
+    white-space: pre-wrap;
     animation: pulse 1.5s infinite ease-in-out;
 }
+
 @keyframes pulse {
     0% { opacity: 0.6; }
     50% { opacity: 1.0; }
@@ -43,14 +42,15 @@ def respond(message, history, system_message, max_tokens, temperature, top_p):
     if assistant:
         messages.append({"role": "assistant", "content": assistant})
 
-    thinking_prompt = messages + [
-        {"role": "user", "content": f"{message}\n\nThink step-by-step before answering."}
-    ]
+    thinking_prompt = messages + [{
+        "role": "user",
+        "content": f"{message}\n\nThinking step-by-step before answering."
+    }]
 
     reasoning = ""
     yield '<div class="markdown-think">Thinking...</div>'
 
-    for chunk in chat_client.chat_completion(
+    for chunk in client.chat_completion(
        thinking_prompt,
        max_tokens=max_tokens,
        stream=True,
@@ -59,7 +59,8 @@ def respond(message, history, system_message, max_tokens, temperature, top_p):
     ):
         token = chunk.choices[0].delta.content or ""
         reasoning += token
-        yield f'<div class="markdown-think">{reasoning.strip()}</div>'
+        styled_thought = f'<div class="markdown-think">{reasoning.strip()}</div>'
+        yield styled_thought
 
     time.sleep(0.5)
 
@@ -70,7 +71,7 @@ def respond(message, history, system_message, max_tokens, temperature, top_p):
     ]
 
     final_answer = ""
-    for chunk in chat_client.chat_completion(
+    for chunk in client.chat_completion(
        final_prompt,
        max_tokens=max_tokens,
        stream=True,
@@ -81,34 +82,19 @@ def respond(message, history, system_message, max_tokens, temperature, top_p):
         final_answer += token
         yield final_answer.strip()
 
-def generate_image(prompt):
-    return image_client.text_to_image(prompt, guidance_scale=7.5)
-
-# Interface
-with gr.Blocks(css=css) as demo:
-    gr.Markdown("# λmabdAI")
-
-    with gr.Tabs():
-        with gr.Tab("Chat"):
-            gr.ChatInterface(
-                fn=respond,
-                additional_inputs=[
-                    gr.Textbox(
-                        value="You are a concise, logical AI that explains its reasoning clearly before answering.",
-                        label="System Message"
-                    ),
-                    gr.Slider(64, 2048, value=512, step=1, label="Max Tokens"),
-                    gr.Slider(0.1, 2.0, value=0.7, step=0.1, label="Temperature"),
-                    gr.Slider(0.1, 1.0, value=0.95, step=0.05, label="Top-p")
-                ]
-            )
-
-        with gr.Tab("Image Generator"):
-            gr.Markdown("### Generate an image from a prompt")
-            prompt = gr.Textbox(label="Prompt")
-            output = gr.Image(type="pil")
-            btn = gr.Button("Generate")
-            btn.click(fn=generate_image, inputs=prompt, outputs=output)
+demo = gr.ChatInterface(
+    fn=respond,
+    title="λambdAI",
+    theme=gr.themes.Base(),
+    css=css,
+    additional_inputs=[
+        gr.Textbox(value="You are a concise, logical AI that explains its reasoning clearly before answering.",
+                   label="System Message"),
+        gr.Slider(64, 2048, value=512, step=1, label="Max Tokens"),
+        gr.Slider(0.1, 2.0, value=0.7, step=0.1, label="Temperature"),
+        gr.Slider(0.1, 1.0, value=0.95, step=0.05, label="Top-p")
+    ]
+)
 
 if __name__ == "__main__":
     demo.launch()
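Note: this commit routes both passes of respond() — the styled "thinking" stream and the final answer — through a single InferenceClient. A minimal sketch of that streaming pattern outside Gradio, assuming the lambdaindie/lambdai endpoint is reachable with a valid Hugging Face token; stream_reply is a hypothetical helper for illustration, not part of app.py:

from huggingface_hub import InferenceClient

client = InferenceClient("lambdaindie/lambdai")

def stream_reply(messages, max_tokens=512, temperature=0.7, top_p=0.95):
    # Accumulate delta tokens exactly as respond() does in both of its loops:
    # each streamed chunk carries the next fragment in choices[0].delta.content.
    text = ""
    for chunk in client.chat_completion(
        messages,
        max_tokens=max_tokens,
        stream=True,
        temperature=temperature,
        top_p=top_p,
    ):
        text += chunk.choices[0].delta.content or ""
        yield text

if __name__ == "__main__":
    # Drain the generator and print the completed reply.
    reply = ""
    for reply in stream_reply([{"role": "user", "content": "Hello"}]):
        pass
    print(reply)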
 
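On the new single-ChatInterface layout: gr.ChatInterface forwards each additional_inputs value to fn positionally after (message, history), which is why respond() takes system_message, max_tokens, temperature, top_p in that order. A runnable stub illustrating the wiring; the echoing respond below is a stand-in for the app's real function:

import gradio as gr

def respond(message, history, system_message, max_tokens, temperature, top_p):
    # history arrives as the chat log; the remaining args mirror additional_inputs.
    yield f"system={system_message!r}, max_tokens={max_tokens}: {message}"

demo = gr.ChatInterface(
    fn=respond,
    additional_inputs=[
        gr.Textbox(value="You are concise.", label="System Message"),
        gr.Slider(64, 2048, value=512, step=1, label="Max Tokens"),
        gr.Slider(0.1, 2.0, value=0.7, step=0.1, label="Temperature"),
        gr.Slider(0.1, 1.0, value=0.95, step=0.05, label="Top-p"),
    ],
)

if __name__ == "__main__":
    demo.launch()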