import asyncio
import os

import gradio as gr
from openai import AsyncAssistantEventHandler, AsyncOpenAI

# Read the OpenAI API key from the environment instead of hard-coding a secret
client = AsyncOpenAI(
    api_key=os.environ["OPENAI_API_KEY"]
)
assistantID = "asst_pMk1lyBSaVZPulq44RvIJUNe"
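
# Luxembourgish UI strings for the Gradio app; the title translates to
# "What have Luxembourgers commented on in recent years?"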
mytitle = "<h1 align=center>Wat hunn d'Lëtzebuerger an de leschte Jore kommentéiert ?</h1>"
mydescription="""
<h3 align='center'>Wat fir een Thema interesséiert Dech : 👍 👎 🤛 ☝️ </h3>
<table width=100%>
<tr>
<th width=50% bgcolor="Moccasin">Stell deng Fro op englesch, fir einfach Froe versteet d'AI och Lëtzebuergesch !</th>
<th bgcolor="Khaki">Äntwert vum OpenAI Code-Interpreter Assistent :</th>
</tr>
</table>
"""
myarticle ="""
<h3>Hannergrënn :</h3>
<p>Dës HuggingFace Space Demo gouf vum <a href="https://github.com/mbarnig">Marco Barnig</a> realiséiert. Als kënstlech Intelligenz gëtt, mëttels API, den <a href="https://platform.openai.com/docs/models">OpenAI Modell</a> gpt-4o-mini-2024-07-18 benotzt, deen als Kontext bis 128.000 Tokens ka benotzen, eng Äntwert op eng Fro vu maximal 16.384 Tokens ka ginn a bis zu 200.000 Tokens pro Minutt (TPM) ka beaarbechten. Fir dës Demo gouf nëmmen eng News-JSON-Datei mat enger Gréisst vun 30 MB benotzt. Et ass méiglech bis zu 20 Dateien op en OpenAI Code-Interpreter Assistent opzelueden. D'Äntwerte vun de Beispiller sinn am Cache gespäichert a ginn duerfir ouni Delai ugewisen.</p>
"""
myinput = gr.Textbox(lines=3, label="Wat interesséiert Dech ?")
myexamples = [
"Wat fir ee Kommentar krut déi meescht 👍 ?",
"Wat fir ee Kommentar krut déi meescht 👎 ?",
"Show me a random comment !",
"Please show a comment with 2 👍 and 2 👎 !"
]
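
# The first two Luxembourgish examples translate to "Which comment got the most
# 👍?" and "Which comment got the most 👎?"; per the myarticle text above, the
# answers to these examples are cached and shown without delay.
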
class EventHandler(AsyncAssistantEventHandler):
    """Accumulates the streamed assistant output in self.response_text."""

    def __init__(self) -> None:
        super().__init__()
        self.response_text = ""
        self.current_tool_call = None

    async def on_text_created(self, text) -> None:
        # text is an openai Text object; append its (usually empty) initial value
        self.response_text += text.value or ""

    async def on_text_delta(self, delta, snapshot):
        self.response_text += delta.value or ""

    async def on_text_done(self, text):
        pass

    async def on_tool_call_created(self, tool_call):
        self.response_text += f"\n[Tool Call]: {str(tool_call.type)}\n"

    async def on_tool_call_delta(self, delta, snapshot):
        # Announce each tool call only once, on its first delta
        if snapshot.id != self.current_tool_call:
            self.current_tool_call = snapshot.id
            self.response_text += f"\n[Tool Call Delta]: {str(delta.type)}\n"
        if delta.type == "code_interpreter" and delta.code_interpreter is not None:
            if delta.code_interpreter.input:
                self.response_text += str(delta.code_interpreter.input)
            if delta.code_interpreter.outputs:
                self.response_text += "\n\n[Output]:\n"
                for output in delta.code_interpreter.outputs:
                    if output.type == "logs":
                        self.response_text += f"\n{str(output.logs)}"

    async def on_tool_call_done(self, tool_call):
        pass

# Initialize session variables
session_data = {"assistant_id": assistantID, "thread_id": None}
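# Note: session_data is module-level state, so every visitor of the Space shares
# the same assistant thread; per-user sessions would need e.g. gr.State instead.
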
async def initialize_thread():
    # Create a thread and store its ID in session_data for later use
    thread = await client.beta.threads.create()
    session_data["thread_id"] = thread.id

async def generate_response(user_input):
    assistant_id = session_data["assistant_id"]
    thread_id = session_data["thread_id"]

    # Add the user's message to the thread
    await client.beta.threads.messages.create(
        thread_id=thread_id,
        role="user",
        content=user_input
    )

    # Create a run and stream its events through the custom handler
    event_handler = EventHandler()
    async with client.beta.threads.runs.stream(
        thread_id=thread_id,
        assistant_id=assistant_id,
        instructions="""
You are a Code Interpreter assistant that analyzes JSON files with RTL comments. The JSON files have the following format:
[
    {
        "context_id": "",
        "date_created": "",
        "text": "",
        "user_id": "",
        "referer": "",
        "status": "",
        "thumbs": [
            {
                "user_id": "",
                "score": "up",
                "date": ""
            },
            {
                "user_id": "",
                "score": "down",
                "date": ""
            }
        ]
    }
]
You will search dates ("date_created" of a comment and "date" of the related thumbs), calculate the total number of "thumbs":[{"score": "up"},{"score": "down"}] and answer questions about "context_id", "text" and "referer". A 👍 means "thumbs":[{"score": "up"}], a 👎 means "thumbs":[{"score": "down"}]. Please indicate in all responses the number of thumbs. Please provide your answers in Luxembourgish.
""",
        event_handler=event_handler,
    ) as stream:
        # Yield incremental updates while events arrive
        async for _ in stream:
            await asyncio.sleep(0.1)  # small delay to smooth the streaming
            yield event_handler.response_text

# Gradio interface function (generator)
async def gradio_chat_interface(user_input):
    # Create a new event loop if none exists (e.g. when called from a new thread)
    try:
        loop = asyncio.get_running_loop()
    except RuntimeError:
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)

    # Initialize the assistant thread on first use
    if session_data["thread_id"] is None:
        await initialize_thread()

    # Generate and yield responses
    async for response in generate_response(user_input):
        yield response

# Set up the Gradio interface with streaming output
interface = gr.Interface(
    fn=gradio_chat_interface,
    inputs=myinput,
    outputs="markdown",
    title=mytitle,
    description=mydescription,
    article=myarticle,
    live=False,
    allow_flagging="never",
    examples=myexamples
)

# Launch the Gradio app
interface.launch()
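
# To run this demo locally, set the OPENAI_API_KEY environment variable and make
# sure the assistant ID above points to an existing Code Interpreter assistant
# with the news JSON file attached, then start the script (e.g. `python app.py`
# if the file is saved as app.py).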