# Gradio chat demo for the lambdaindie/lambdai model.
# Gradio UI toolkit and the Hugging Face Inference API client.
import gradio as gr
from huggingface_hub import InferenceClient
# Module-level client bound to the lambdaindie/lambdai model; shared by all requests.
client = InferenceClient("lambdaindie/lambdai")
# Custom CSS injected into the Blocks app: styles the interim "Thinking..."
# bubble (.thinking-html) with a spinning loader (.loader) and a gentle pulse.
css = """
.thinking-html {
display: flex;
align-items: center;
gap: 8px;
color: #666;
font-style: italic;
margin-bottom: 5px;
animation: pulse 1.5s infinite;
}
.loader {
width: 14px;
height: 14px;
border: 2px solid #ccc;
border-top: 2px solid #666;
border-radius: 50%;
animation: spin 1s linear infinite;
}
@keyframes spin {
to { transform: rotate(360deg); }
}
@keyframes pulse {
0% { opacity: 1; transform: scale(1); }
50% { opacity: 0.6; transform: scale(1.05); }
100% { opacity: 1; transform: scale(1); }
}
"""
def respond(message, history):
    """Generator handler for the chatbot: yields an interim "Thinking..."
    bubble, then the model's final answer.

    Args:
        message: The user's message text from the Textbox.
        history: Current Chatbot history as a list of [user, bot] pairs.
            May be None on the very first turn.

    Yields:
        Updated history lists suitable for a gr.Chatbot component.
    """
    # Gradio can hand us None before any turn exists; normalize so the
    # concatenations below don't raise TypeError.
    history = history or []

    # Interim placeholder rendered with the custom CSS spinner.
    thinking = (
        "<div class='thinking-html'>"
        "<div class='loader'></div>"
        "Thinking..."
        "</div>"
    )
    yield history + [[message, thinking]]

    # Ask the model to reason before answering so the output splits into
    # a reasoning block and an answer block.
    prompt = f"Think step by step and explain your reasoning before answering:\n\n{message}"
    response = client.chat_completion([{"role": "user", "content": prompt}], stream=False)
    output = response['choices'][0]['message']['content']

    # Heuristic split: first blank line separates reasoning from answer.
    if "\n\n" in output:
        reasoning, answer = output.split("\n\n", 1)
    else:
        reasoning, answer = "No reasoning provided.", output

    # Render the reasoning as a Markdown blockquote above the answer.
    reasoning_md = f"> {reasoning.strip()}"
    final = f"{reasoning_md}\n\n{answer.strip()}"
    yield history + [[message, final]]
# Assemble the UI: a chatbot pane, a message box, and a send button.
# Both the button click and pressing Enter in the textbox route through
# respond(), streaming updates into the chatbot.
with gr.Blocks(css=css) as demo:
    gr.Markdown("## Lambdai-v1-1B")
    chatbot = gr.Chatbot()
    msg = gr.Textbox(label="Message")
    send = gr.Button("Send")
    send.click(respond, [msg, chatbot], chatbot)
    msg.submit(respond, [msg, chatbot], chatbot)

# Launch outside the layout context; the stray "|" that followed this call
# in the damaged source was a scrape artifact and has been removed.
demo.launch()