freddyaboulton (HF Staff) committed
Commit 8ce83a6 · verified · 1 Parent(s): 628a116

Upload folder using huggingface_hub

Files changed (3)
  1. README.md +7 -7
  2. run.ipynb +1 -0
  3. run.py +49 -0
README.md CHANGED
@@ -1,12 +1,12 @@
+
 ---
-title: Chatinterface Thoughts
-emoji: 👀
-colorFrom: red
-colorTo: pink
+title: chatinterface_thoughts
+emoji: 🔥
+colorFrom: indigo
+colorTo: indigo
 sdk: gradio
 sdk_version: 5.12.0
-app_file: app.py
+app_file: run.py
 pinned: false
+hf_oauth: true
 ---
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
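The notable front-matter changes: `app_file` now points at `run.py` (replacing `app.py`), and `hf_oauth: true` enables "Sign in with Hugging Face" OAuth for the Space. As a minimal sketch of what that flag unlocks (not part of this commit; the `greet` handler and component layout are illustrative, while `gr.LoginButton` and `gr.OAuthProfile` are Gradio's documented OAuth helpers):

```python
import gradio as gr

def greet(profile: gr.OAuthProfile | None) -> str:
    # Gradio injects the OAuth profile based on the type annotation;
    # it is None when no user is signed in.
    if profile is None:
        return "Not signed in."
    return f"Hello, {profile.username}!"

with gr.Blocks() as demo:
    gr.LoginButton()  # renders the "Sign in with Hugging Face" button
    status = gr.Markdown()
    demo.load(greet, inputs=None, outputs=status)

if __name__ == "__main__":
    demo.launch()
```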
 
run.ipynb ADDED
@@ -0,0 +1 @@
+ {"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: chatinterface_thoughts"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "from gradio import ChatMessage\n", "import time\n", "\n", "sleep_time = 0.5\n", "\n", "def simulate_thinking_chat(message, history):\n", " start_time = time.time()\n", " response = ChatMessage(\n", " content=\"\",\n", " metadata={\"title\": \"_Thinking_ step-by-step\", \"id\": 0, \"status\": \"pending\"}\n", " )\n", " yield response\n", "\n", " thoughts = [\n", " \"First, I need to understand the core aspects of the query...\",\n", " \"Now, considering the broader context and implications...\",\n", " \"Analyzing potential approaches to formulate a comprehensive answer...\",\n", " \"Finally, structuring the response for clarity and completeness...\"\n", " ]\n", "\n", " accumulated_thoughts = \"\"\n", " for thought in thoughts:\n", " time.sleep(sleep_time)\n", " accumulated_thoughts += f\"- {thought}\\n\\n\"\n", " response.content = accumulated_thoughts.strip()\n", " yield response\n", "\n", " response.metadata[\"status\"] = \"done\"\n", " response.metadata[\"duration\"] = time.time() - start_time\n", " yield response\n", "\n", " response = [\n", " response,\n", " ChatMessage(\n", " content=\"Based on my thoughts and analysis above, my response is: This dummy repro shows how thoughts of a thinking LLM can be progressively shown before providing its final answer.\"\n", " )\n", " ]\n", " yield response\n", "\n", "\n", "demo = gr.ChatInterface(\n", " simulate_thinking_chat,\n", " title=\"Thinking LLM Chat Interface \ud83e\udd14\",\n", " type=\"messages\",\n", ")\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
run.py ADDED
@@ -0,0 +1,49 @@
+import gradio as gr
+from gradio import ChatMessage
+import time
+
+sleep_time = 0.5
+
+def simulate_thinking_chat(message, history):
+    start_time = time.time()
+    response = ChatMessage(
+        content="",
+        metadata={"title": "_Thinking_ step-by-step", "id": 0, "status": "pending"}
+    )
+    yield response
+
+    thoughts = [
+        "First, I need to understand the core aspects of the query...",
+        "Now, considering the broader context and implications...",
+        "Analyzing potential approaches to formulate a comprehensive answer...",
+        "Finally, structuring the response for clarity and completeness..."
+    ]
+
+    accumulated_thoughts = ""
+    for thought in thoughts:
+        time.sleep(sleep_time)
+        accumulated_thoughts += f"- {thought}\n\n"
+        response.content = accumulated_thoughts.strip()
+        yield response
+
+    response.metadata["status"] = "done"
+    response.metadata["duration"] = time.time() - start_time
+    yield response
+
+    response = [
+        response,
+        ChatMessage(
+            content="Based on my thoughts and analysis above, my response is: This dummy repro shows how thoughts of a thinking LLM can be progressively shown before providing its final answer."
+        )
+    ]
+    yield response
+
+
+demo = gr.ChatInterface(
+    simulate_thinking_chat,
+    title="Thinking LLM Chat Interface 🤔",
+    type="messages",
+)
+
+if __name__ == "__main__":
+    demo.launch()
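The pattern above generalizes to a real model: stream reasoning tokens into a pending `ChatMessage` whose `metadata` carries the collapsible title and status, then yield the finished thought together with the visible answer. A minimal sketch under that assumption; `stream_tokens` is a hypothetical stand-in for an actual streaming LLM client and is not part of this commit:

```python
import time
import gradio as gr
from gradio import ChatMessage

def stream_tokens(prompt):
    # Hypothetical stand-in for a real streaming LLM client.
    for tok in ["Parsing the question...", "Weighing options...", "Drafting an answer..."]:
        time.sleep(0.3)
        yield tok

def respond(message, history):
    start = time.time()
    thought = ChatMessage(
        content="",
        metadata={"title": "_Thinking_", "id": 0, "status": "pending"},
    )
    for tok in stream_tokens(message):
        thought.content += f"- {tok}\n\n"
        yield thought  # progressively reveal the thought bubble
    thought.metadata["status"] = "done"
    thought.metadata["duration"] = time.time() - start
    # Final turn: the completed thought plus the answer as a second message.
    yield [thought, ChatMessage(content=f"Here is my answer to: {message}")]

demo = gr.ChatInterface(respond, type="messages")

if __name__ == "__main__":
    demo.launch()
```

The key design choice, as in the demo, is reusing one `ChatMessage` object across yields so the same bubble updates in place, with the `status`/`duration` metadata driving the "thinking" indicator.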