Update app.py
app.py
CHANGED
@@ -1,64 +1,76 @@
 import gradio as gr
-from huggingface_hub import InferenceClient
-
-"""
-For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
-"""
-client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
-
-
-def respond(
-    message,
-    history: list[tuple[str, str]],
-    system_message,
-    max_tokens,
-    temperature,
-    top_p,
-):
-    messages = [{"role": "system", "content": system_message}]
-
-    for val in history:
-        if val[0]:
-            messages.append({"role": "user", "content": val[0]})
-        if val[1]:
-            messages.append({"role": "assistant", "content": val[1]})
-
-    messages.append({"role": "user", "content": message})
-
-    response = ""
-
-    for message in client.chat_completion(
-        messages,
-        max_tokens=max_tokens,
-        stream=True,
-        temperature=temperature,
-        top_p=top_p,
-    ):
-        token = message.choices[0].delta.content
-
-        response += token
-        yield response
-
-
-"""
-For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
-"""
-demo = gr.ChatInterface(
-    respond,
-    additional_inputs=[
-        gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
-        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
-        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
-        gr.Slider(
-            minimum=0.1,
-            maximum=1.0,
-            value=0.95,
-            step=0.05,
-            label="Top-p (nucleus sampling)",
-        ),
-    ],
-)
-
+from transformers import pipeline
+import uuid
+
+# Load a local text-generation pipeline (downloads the full model weights)
+generator = pipeline("text-generation", model="mistralai/Mistral-7B-Instruct-v0.1")
+
+# Session state store: maps session IDs to per-user chat history
+user_sessions = {}
+
+def get_session(session_id):
+    if session_id not in user_sessions:
+        user_sessions[session_id] = {"chat": []}
+    return user_sessions[session_id]
+
+def build_prompt(history):
+    prompt = "You are a helpful assistant.\n"
+    for turn in history:
+        prompt += f"User: {turn[0]}\nAssistant: {turn[1]}\n"
+    return prompt
+
+def chat_fn(user_input, session_id):
+    session = get_session(session_id)
+    history = session["chat"]
+    # Append the pending user turn here; build_prompt formats completed turns only
+    prompt = build_prompt(history) + f"User: {user_input}\nAssistant:"
+    result = generator(prompt, max_new_tokens=128, do_sample=True, temperature=0.7)[0]["generated_text"]
+    reply = result[len(prompt):].strip().split("\n")[0]
+    history.append([user_input, reply])
+    return history, session_id
+
+def upload_handler(file):
+    try:
+        # gr.File passes a file path (or a tempfile wrapper with a .name attribute)
+        path = file.name if hasattr(file, "name") else file
+        with open(path, "r", encoding="utf-8") as f:
+            return f.read()
+    except Exception as e:
+        return f"Error: {str(e)}"
+
+def code_viewer(code):
+    # Simulate code output; nothing is actually executed
+    return "Code received. Execution is disabled on this free demo for safety."
+
+# Create a unique session ID
+def create_session():
+    return str(uuid.uuid4())
+
+with gr.Blocks(title="Multi-Tool AI Chatbot") as demo:
+    # Assign a fresh ID on each page load; a build-time default would be shared by every visitor
+    session_id_state = gr.State()
+    demo.load(fn=create_session, inputs=None, outputs=session_id_state)
+
+    with gr.Tab("Chatbot"):
+        chatbot = gr.Chatbot()
+        msg = gr.Textbox(label="Your message")
+        send = gr.Button("Send")
+        send.click(
+            fn=chat_fn,
+            inputs=[msg, session_id_state],
+            outputs=[chatbot, session_id_state]
+        )
+
+    with gr.Tab("File Reader"):
+        file_input = gr.File(label="Upload .txt file", file_types=[".txt"])
+        file_output = gr.Textbox(label="File content")
+        file_input.change(upload_handler, inputs=file_input, outputs=file_output)
+
+    with gr.Tab("Code Viewer"):
+        code_input = gr.Code(language="python", label="Enter Python code")
+        code_output = gr.Textbox(label="Output")
+        run_btn = gr.Button("View Output")
+        run_btn.click(fn=code_viewer, inputs=code_input, outputs=code_output)
 
 if __name__ == "__main__":
     demo.launch()
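
The new startup path is much heavier than the old one: pipeline("text-generation", model="mistralai/Mistral-7B-Instruct-v0.1") downloads and loads all ~7B parameters into local memory, which free CPU Spaces generally cannot hold. If loading locally is not an option, the hosted-inference approach from the removed code still applies to the new model. A minimal sketch, assuming the serverless Inference API serves this model (the generate_reply helper is illustrative, not part of this commit):

from huggingface_hub import InferenceClient

# Calls the hosted Inference API; no model weights are loaded in the Space.
client = InferenceClient("mistralai/Mistral-7B-Instruct-v0.1")

def generate_reply(prompt: str) -> str:
    # text_generation returns only the continuation, so the
    # result[len(prompt):] slicing in chat_fn would be unnecessary here.
    return client.text_generation(prompt, max_new_tokens=128, temperature=0.7)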
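Separately, the session and prompt logic can be smoke-tested without downloading any model by standing in a stub for the pipeline. A self-contained sketch (fake_generator is hypothetical scaffolding, and the helpers are inlined from app.py so the test runs without importing it and triggering the download):

# Stub mimicking the transformers pipeline's output shape.
def fake_generator(prompt, **kwargs):
    return [{"generated_text": prompt + " Hello from the stub."}]

user_sessions = {}

def get_session(session_id):
    if session_id not in user_sessions:
        user_sessions[session_id] = {"chat": []}
    return user_sessions[session_id]

def build_prompt(history):
    prompt = "You are a helpful assistant.\n"
    for turn in history:
        prompt += f"User: {turn[0]}\nAssistant: {turn[1]}\n"
    return prompt

def chat_fn(user_input, session_id, generator=fake_generator):
    history = get_session(session_id)["chat"]
    prompt = build_prompt(history) + f"User: {user_input}\nAssistant:"
    result = generator(prompt, max_new_tokens=128)[0]["generated_text"]
    reply = result[len(prompt):].strip().split("\n")[0]
    history.append([user_input, reply])
    return history, session_id

history, _ = chat_fn("Hi there", "session-1")
assert history == [["Hi there", "Hello from the stub."]]

# The second turn sees the first one in its prompt: per-session memory works.
history, _ = chat_fn("And again?", "session-1")
assert len(history) == 2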