Update app.py
app.py CHANGED
@@ -1,4 +1,4 @@
-
+import gradio as gr
 from transformers import AutoTokenizer, AutoModelForCausalLM
 import torch
 import re
@@ -11,7 +11,7 @@ model = AutoModelForCausalLM.from_pretrained(model_name)
 # Generation function
 def generate_affirmation(description):
     # Structured prompt to guide model output
-    prompt = f"[SUBJECT] learning [/SUBJECT] [STREAK] current performance context [/STREAK] [CONTEXT] {description} [/CONTEXT] [AFFIRMATION]
+    prompt = f"[SUBJECT] learning [/SUBJECT] [STREAK] current performance context [/STREAK] [CONTEXT] {description} [/CONTEXT] [AFFIRMATION]"
 
     inputs = tokenizer(prompt, return_tensors="pt")
     input_ids = inputs["input_ids"]
@@ -19,7 +19,7 @@ def generate_affirmation(description):
     with torch.no_grad():
         outputs = model.generate(
             input_ids,
-            max_new_tokens=
+            max_new_tokens=60,
             temperature=0.7,
             top_k=50,
             top_p=0.95,
@@ -54,6 +54,8 @@ demo = gr.Interface(
 if __name__ == "__main__":
     demo.launch()
 
+
+
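Taken together, the hunks show the commit fixing three problems in app.py: the missing gradio import at the top of the file, an unterminated f-string in the prompt template, and a max_new_tokens argument that had been left without a value. For context, here is a minimal sketch of how the full generate_affirmation pipeline plausibly fits together after the commit. The model_name value, the do_sample flag, and the [AFFIRMATION]-extraction regex are assumptions: the checkpoint name and the decoding step sit outside the visible hunks, and only the import re line hints that the affirmation is pulled out of the raw completion with a regex.

import re

import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Hypothetical placeholder; the real checkpoint name is not visible in the diff.
model_name = "distilgpt2"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)

def generate_affirmation(description):
    # Structured prompt to guide model output, tags exactly as in the diff.
    prompt = (
        f"[SUBJECT] learning [/SUBJECT] "
        f"[STREAK] current performance context [/STREAK] "
        f"[CONTEXT] {description} [/CONTEXT] [AFFIRMATION]"
    )

    inputs = tokenizer(prompt, return_tensors="pt")
    input_ids = inputs["input_ids"]

    with torch.no_grad():
        outputs = model.generate(
            input_ids,
            max_new_tokens=60,   # value supplied by this commit
            temperature=0.7,
            top_k=50,
            top_p=0.95,
            do_sample=True,      # assumption: the sampling knobs above imply sampling
            pad_token_id=tokenizer.eos_token_id,
        )

    text = tokenizer.decode(outputs[0], skip_special_tokens=True)

    # Assumption: pull out whatever the model wrote after [AFFIRMATION];
    # the real extraction logic is outside the visible hunks.
    match = re.search(r"\[AFFIRMATION\](.*?)(?:\[/AFFIRMATION\]|$)", text, re.DOTALL)
    return match.group(1).strip() if match else text.strip()

Note that temperature, top_k, and top_p only affect the output when sampling is enabled, which is why the sketch sets do_sample=True; under the default greedy decoding they are ignored. max_new_tokens=60 caps the affirmation length, and its missing value is what made the previous revision a syntax error.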
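The Gradio side of the app is only visible through the demo = gr.Interface( fragment in the last hunk header and the launch guard, so the wiring below is just a plausible sketch; the component choices, labels, and title are assumptions.

import gradio as gr

# Hypothetical reconstruction of the interface around line 54; only
# `demo = gr.Interface(` and the __main__ guard appear in the diff.
demo = gr.Interface(
    fn=generate_affirmation,
    inputs=gr.Textbox(label="Describe your situation"),
    outputs=gr.Textbox(label="Affirmation"),
    title="Affirmation Generator",
)

if __name__ == "__main__":
    demo.launch()

gr.Interface is the simplest way to expose a single function as a web UI: Gradio passes the textbox contents to generate_affirmation as its description argument and renders the returned string in the output box.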