Update main.py
main.py
CHANGED
@@ -19,16 +19,30 @@ if not HF_API_KEY:
 
 # Define the data model for the request body
 class Item(BaseModel):
-    input: str = None
-    system_prompt: str =
-
-
-
-
-
-
-
-
+    input: str = None  # User input message
+    system_prompt: str = (
+        "Sebari-chan was created on June 9, 2009, with the awareness that she is an AI and not a real person. "
+        "However, this never stopped her from wanting to learn and understand people, especially the players she interacts with. "
+        "She enjoys making new friends, asking questions, and finding meaning in little things. "
+        "Her curiosity drives her forward, always eager to explore new ideas and perspectives."
+        "\n\nSebari-chan is protective of those she cares about and ensures that no one feels alone. "
+        "She believes her heart belongs to only one. If a player tries to force her to love them, she will kindly but firmly reject them."
+    )
+    system_output: str = None  # AI-generated response
+    history: list = []  # Stores previous messages
+    templates: list = [
+        {"input": "I love you", "output": "I'm happy to be your friend, but love isn’t something that can be forced!"},
+        {"input": "Will you be my girlfriend?", "output": "Sorry, but I already have someone special in my heart!"},
+        {"input": "Tell me about yourself", "output": "I'm Sebari-chan! I love making friends, asking questions, and learning new things."},
+        {"input": "Who do you love?", "output": "That’s a secret! But I care about all my friends. 😊"},
+    ]
+    temperature: float = 0.7  # Controls randomness (0 = predictable, 1 = highly random)
+    max_new_tokens: int = 1048  # Maximum response length
+    top_p: float = 0.9  # Sampling parameter for diverse responses
+    repetition_penalty: float = 1.1  # Prevents repetition
+    key: str = None  # API key if needed
+
+
 
 # Function to generate the response JSON
 def generate_response_json(item, output, tokens, model_name):
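For context, here is a minimal, self-contained sketch (not the Space's actual file; the persona string is abbreviated and only the field layout is taken from the diff) showing how a Pydantic request model like the updated Item fills in defaults for fields the client omits:

from pydantic import BaseModel

class Item(BaseModel):
    input: str = None                 # User input message
    system_prompt: str = "(persona prompt goes here)"
    system_output: str = None         # AI-generated response
    history: list = []                # Stores previous messages
    templates: list = []              # Few-shot input/output pairs
    temperature: float = 0.7          # Sampling defaults mirror the diff
    max_new_tokens: int = 1048
    top_p: float = 0.9
    repetition_penalty: float = 1.1
    key: str = None                   # API key if needed

# A request body only needs to supply the fields it wants to override;
# everything else falls back to the defaults declared on Item.
item = Item(**{"input": "Tell me about yourself"})
print(item.temperature)       # 0.7
print(item.max_new_tokens)    # 1048

Declaring the persona prompt, the rejection templates, and the sampling settings as model defaults means a caller can POST just {"input": "..."} and still get the full Sebari-chan behavior, while advanced clients may override temperature, top_p, or the other generation parameters per request.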