Update app.py

app.py CHANGED
@@ -1,23 +1,13 @@
-#!/usr/bin/env python
-"""HF Space for the *Marin‑8B‑Instruct* research preview
------------------------------------------------------
-A lightweight Gradio interface that
-• streams chat completions from the `marin-community/marin-8b-instruct` model
-• lets testers submit structured feedback (UX rating + free‑text)
-• appends feedback to a local JSONL *and* merges it into a private Hub dataset
-The dataset is never overwritten: we always pull, merge, deduplicate, and push.
-"""
-
 from __future__ import annotations
 
-#
+# -- standard lib
 import json
 import os
 import time
 import uuid
 from threading import Thread
 
-#
+# -- third-party deps (declared in requirements.txt of the Space)
 import gradio as gr
 from gradio_modal import Modal
 from transformers import (
@@ -29,7 +19,7 @@ from datasets import Dataset, load_dataset, concatenate_datasets, DownloadMode
 from huggingface_hub import HfApi, login
 import spaces
 
-# ──────────────────────────── model
+# ──────────────────────────── model & constants ─────────────────────────────
 checkpoint = "marin-community/marin-8b-instruct"
 device = "cuda"  # the Space runner gives us a GPU
 
@@ -37,8 +27,8 @@ device = "cuda"  # the Space runner gives us a GPU
 tokenizer = AutoTokenizer.from_pretrained(checkpoint)
 model = AutoModelForCausalLM.from_pretrained(checkpoint).to(device)
 
-# feedback
-DATASET_REPO = "WillHeld/model-feedback"  #
+# feedback dataset details
+DATASET_REPO = "WillHeld/model-feedback"  # <-- change to your namespace if needed
 DATA_DIR = "./feedback_data"
 DATA_FILE = "feedback.jsonl"
 os.makedirs(DATA_DIR, exist_ok=True)
@@ -57,7 +47,7 @@ def save_feedback_locally(conversation: list[dict[str, str]],
         "feedback": feedback_text,
     }
     fp = os.path.join(DATA_DIR, DATA_FILE)
-    with open(fp, "a", encoding="utf…
+    with open(fp, "a", encoding="utf-8") as f:
        f.write(json.dumps(record, ensure_ascii=False) + "\n")
     return record["id"]
 
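The encoding fix above is the crux of this hunk: Python resolves codec names like "utf-8" by lookup, and a malformed name raises LookupError on the first write. For context, the whole writer fits the pattern in this minimal sketch; the `id` and `timestamp` field names beyond what is visible in the diff are assumptions:

import json, os, time, uuid

DATA_DIR = "./feedback_data"
DATA_FILE = "feedback.jsonl"
os.makedirs(DATA_DIR, exist_ok=True)

def save_feedback_locally(conversation, satisfaction, feedback_text):
    # One JSON object per line; "id" later serves as the dedup key on the Hub.
    record = {
        "id": str(uuid.uuid4()),
        "timestamp": time.time(),        # assumed field name
        "conversation": conversation,
        "satisfaction": satisfaction,
        "feedback": feedback_text,
    }
    fp = os.path.join(DATA_DIR, DATA_FILE)
    with open(fp, "a", encoding="utf-8") as f:  # note the ASCII hyphen in "utf-8"
        f.write(json.dumps(record, ensure_ascii=False) + "\n")
    return record["id"]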
@@ -70,7 +60,7 @@ def push_feedback_to_hub(hf_token: str | None = None) -> bool:  # noqa: C901
     1. Authenticate with `hf_token` (fall back to $HF_TOKEN env).
     2. Load *local* feedback just written in `feedback.jsonl`.
     3. Pull existing remote split (if any); concat & `unique("id")`.
-    4. Push the merged dataset back.
+    4. Push the merged dataset back. Never deletes remote shards → safe.
     """
 
     hf_token = hf_token or os.getenv("HF_TOKEN")
@@ -85,7 +75,7 @@ def push_feedback_to_hub(hf_token: str | None = None) -> bool:  # noqa: C901
         return False
 
     # local rows → Dataset
-    with open(fp, encoding="utf…
+    with open(fp, encoding="utf-8") as f:
         local_ds = Dataset.from_list([json.loads(l) for l in f])
 
     # try to pull remote
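Steps 3–4 of the docstring (pull, merge, dedupe, push) map onto the `datasets` API roughly as below. This is a sketch, not the Space's exact code: `merge_and_push`, the first-push fallback, and the filter-based dedup (rather than a literal `unique("id")`, which in `datasets` returns a list of values, not a deduplicated dataset) are illustrative choices:

from datasets import Dataset, concatenate_datasets, load_dataset

def merge_and_push(local_ds: Dataset, repo_id: str, token: str) -> None:
    # Pull the remote split if it exists; otherwise start from the local rows.
    try:
        remote_ds = load_dataset(repo_id, split="train", token=token)
        merged = concatenate_datasets([remote_ds, local_ds])
    except Exception:
        merged = local_ds  # first push: no remote split yet

    # Drop duplicate ids, keeping the first occurrence (remote rows win).
    seen = set()
    def first_occurrence(row):
        if row["id"] in seen:
            return False
        seen.add(row["id"])
        return True

    merged = merged.filter(first_occurrence)
    merged.push_to_hub(repo_id, private=True, token=token)  # additive, never deletes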
@@ -153,7 +143,7 @@ def generate_response(message: str,
     partial = ""
     for token in streamer:
         partial += token
-        yield partial, history  # 1st…
+        yield partial, history  # 1st out = msg, 2nd out = state
 
     # once finished, commit assistant reply to history
     history.append({"role": "assistant", "content": partial})
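The `streamer` iterated above is a `transformers.TextIteratorStreamer` fed by `model.generate` running on a worker thread. A self-contained sketch of that pattern; the function name and generation arguments are illustrative:

from threading import Thread
from transformers import TextIteratorStreamer

def stream_reply(model, tokenizer, chat, max_new_tokens=512):
    # Render the chat with the model's template into input ids.
    input_ids = tokenizer.apply_chat_template(
        chat, add_generation_prompt=True, return_tensors="pt"
    ).to(model.device)
    streamer = TextIteratorStreamer(tokenizer, skip_prompt=True,
                                    skip_special_tokens=True)
    # generate() blocks, so run it on a worker thread while we consume tokens.
    Thread(target=model.generate,
           kwargs=dict(inputs=input_ids, streamer=streamer,
                       max_new_tokens=max_new_tokens)).start()
    partial = ""
    for token in streamer:
        partial += token
        yield partial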
@@ -168,12 +158,12 @@ def submit_feedback(conversation_state: list[dict[str, str]],
     save_feedback_locally(conversation_state, satisfaction, feedback_text)
     pushed = push_feedback_to_hub()
     if pushed:
-        return "✅ Thanks!…
+        return "✅ Thanks! Your feedback is safely stored."
     return "⚠️ Saved locally; Hub push failed. Check server logs."
 
 # ──────────────────────────── UI layout ────────────────────────────────────
 
-with gr.Blocks(title="Marin…
+with gr.Blocks(title="Marin-8B Research Preview") as demo:
     # state object to surface chat history to the feedback form
     conversation_state = gr.State([])
 
@@ -186,7 +176,7 @@ with gr.Blocks(title="Marin‑8B Research Preview") as demo:
         gr.Slider(0.1, 2.0, value=0.7, step=0.1,
                   label="Temperature"),
         gr.Slider(0.1, 1.0, value=0.9, step=0.05,
-                  label="Top…
+                  label="Top-P")],
     additional_outputs=[conversation_state],
     type="messages",
 )
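These sliders are `additional_inputs` of a `gr.ChatInterface`; Gradio passes their values as extra positional arguments to the chat function, in list order. A stripped-down sketch, with an echo body standing in for the real model call:

import gradio as gr

def chat_fn(message, history, temperature, top_p):
    # extra args arrive in the same order as additional_inputs
    return f"(echo at T={temperature}, top_p={top_p}) {message}"

demo = gr.ChatInterface(
    chat_fn,
    additional_inputs=[
        gr.Slider(0.1, 2.0, value=0.7, step=0.1, label="Temperature"),
        gr.Slider(0.1, 1.0, value=0.9, step=0.05, label="Top-P"),
    ],
    type="messages",
)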
@@ -198,7 +188,7 @@ with gr.Blocks(title="Marin‑8B Research Preview") as demo:
     # feedback modal (hidden by default)
     with Modal(visible=False) as fb_modal:
         gr.Markdown("## Research Preview Feedback")
-        gr.Markdown("We appreciate your help improving Marin…
+        gr.Markdown("We appreciate your help improving Marin-8B! ✨")
 
         sat_radio = gr.Radio([
             "Very satisfied", "Satisfied", "Neutral",
@@ -211,8 +201,8 @@ with gr.Blocks(title="Marin‑8B Research Preview") as demo:
     status_box = gr.Textbox(label="Status", interactive=False)
 
     # interactions
-
-
+    # open the modal without custom JS → use Modal update
+    report_btn.click(lambda: Modal(visible=True), None, fb_modal)
 
     send_btn.click(
         submit_feedback,
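The new `report_btn.click` wiring works because, with `gradio_modal`, returning `Modal(visible=True)` as the event's output updates the hidden modal in place, so no custom JS is needed. The same pattern in isolation:

import gradio as gr
from gradio_modal import Modal

with gr.Blocks() as demo:
    report_btn = gr.Button("Share Feedback")
    with Modal(visible=False) as fb_modal:
        gr.Markdown("## Research Preview Feedback")
    # the lambda's return value becomes fb_modal's update
    report_btn.click(lambda: Modal(visible=True), None, fb_modal)

demo.launch()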