# lambdai / app.py
import gradio as gr
from huggingface_hub import InferenceClient
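# Client for the hosted lambdaindie/lambdai model, queried through huggingface_hub's InferenceClient.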
client = InferenceClient("lambdaindie/lambdai")
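# CSS for the animated "Thinking..." placeholder shown while the model is queried.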
css = """
.thinking-html {
display: flex;
align-items: center;
gap: 8px;
color: #666;
font-style: italic;
margin-bottom: 5px;
animation: pulse 1.5s infinite;
}
.loader {
width: 14px;
height: 14px;
border: 2px solid #ccc;
border-top: 2px solid #666;
border-radius: 50%;
animation: spin 1s linear infinite;
}
@keyframes spin {
to { transform: rotate(360deg); }
}
@keyframes pulse {
0% { opacity: 1; transform: scale(1); }
50% { opacity: 0.6; transform: scale(1.05); }
100% { opacity: 1; transform: scale(1); }
}
"""
def respond(message, history):
    # Show the spinner bubble while waiting for the completion.
    thinking = (
        "<div class='thinking-html'>"
        "<div class='loader'></div>"
        "Thinking..."
        "</div>"
    )
    yield history + [[message, thinking]]

    # Ask the model to reason step by step before giving its answer.
    prompt = f"Think step by step and explain your reasoning before answering:\n\n{message}"
    response = client.chat_completion([{"role": "user", "content": prompt}], stream=False)
    output = response.choices[0].message.content

    # Treat everything before the first blank line as the reasoning, the rest as the answer.
    if "\n\n" in output:
        reasoning, answer = output.split("\n\n", 1)
    else:
        reasoning, answer = "No reasoning provided.", output

    reasoning_md = f"> {reasoning.strip()}"
    final = f"{reasoning_md}\n\n{answer.strip()}"
    yield history + [[message, final]]
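# A minimal sketch, not part of the original app and not wired into the UI, of how
# the reply could be streamed token by token instead of waiting for the full
# completion. It assumes chat_completion(..., stream=True), whose chunks expose the
# newest text fragment at choices[0].delta.content; respond_streaming is a
# hypothetical helper name.
def respond_streaming(message, history):
    partial = ""
    for chunk in client.chat_completion(
        [{"role": "user", "content": message}], stream=True
    ):
        if chunk.choices:
            partial += chunk.choices[0].delta.content or ""
            yield history + [[message, partial]]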
with gr.Blocks(css=css) as demo:
    gr.Markdown("## Lambdai-v1-1B")
    chatbot = gr.Chatbot()
    msg = gr.Textbox(label="Message")
    send = gr.Button("Send")
    # Both the Send button and pressing Enter in the textbox trigger a response.
    send.click(respond, [msg, chatbot], chatbot)
    msg.submit(respond, [msg, chatbot], chatbot)
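# Serves the demo locally when run as `python app.py`; the same call starts the app on Spaces.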
demo.launch()