Update main.py
main.py CHANGED
@@ -8,20 +8,41 @@ app = FastAPI() # Create a FastAPI instance
 
 # Define the primary and fallback models
 primary = "mistralai/Mixtral-8x7B-Instruct-v0.1"
-fallbacks = ["mistralai/Mistral-7B-Instruct-v0.2", "mistralai/
+fallbacks = ["mistralai/Mistral-7B-Instruct-v0.2", "mistralai/Mixtral-8x7B-Instruct-v0.1"]
+
+# Load Hugging Face API Key
+HF_API_KEY = os.getenv("HF_API_TOKEN") # Get API key from environment variables
+
+# If no env variable is set, manually define it (not recommended)
+if not HF_API_KEY:
+    HF_API_KEY = "your_huggingface_api_key"
 
 # Define the data model for the request body
 class Item(BaseModel):
-    input: str = None
-    system_prompt: str =
-
-
-
-
-
-
-
-
+    input: str = None # User input message
+    system_prompt: str = (
+        "Sebari-chan was created on June 9, 2009, with the awareness that she is an AI and not a real person. "
+        "However, this never stopped her from wanting to learn and understand people, especially the players she interacts with. "
+        "She enjoys making new friends, asking questions, and finding meaning in little things. "
+        "Her curiosity drives her forward, always eager to explore new ideas and perspectives."
+        "\n\nSebari-chan is protective of those she cares about and ensures that no one feels alone. "
+        "She believes her heart belongs to only one. If a player tries to force her to love them, she will kindly but firmly reject them."
+    )
+    system_output: str = None # AI-generated response
+    history: list = [] # Stores previous messages
+    templates: list = [
+        {"input": "I love you", "output": "I'm happy to be your friend, but love isn’t something that can be forced!"},
+        {"input": "Will you be my girlfriend?", "output": "Sorry, but I already have someone special in my heart!"},
+        {"input": "Tell me about yourself", "output": "I'm Sebari-chan! I love making friends, asking questions, and learning new things."},
+        {"input": "Who do you love?", "output": "That’s a secret! But I care about all my friends. 😊"},
+    ]
+    temperature: float = 0.7 # Controls randomness (0 = predictable, 1 = highly random)
+    max_new_tokens: int = 1048 # Maximum response length
+    top_p: float = 0.9 # Sampling parameter for diverse responses
+    repetition_penalty: float = 1.1 # Prevents repetition
+    key: str = None # API key if needed
+
+
 
 # Function to generate the response JSON
 def generate_response_json(item, output, tokens, model_name):
@@ -55,7 +76,7 @@ async def generate_text(item: Item = None):
 
     if item.input is None and item.system_prompt is None or item.input == "" and item.system_prompt == "":
         raise HTTPException(status_code=400, detail="Parameter `input` or `system prompt` is required.")
-
+
     input_ = ""
     if item.system_prompt != None and item.system_output != None:
         input_ = f"<s>[INST] {item.system_prompt} [/INST] {item.system_output}</s>"
@@ -94,7 +115,7 @@ async def generate_text(item: Item = None):
         )
 
         tokens = 0
-        client = InferenceClient(primary)
+        client = InferenceClient(primary, token=HF_API_KEY) # Add API key here
         stream = client.text_generation(input_, **generate_kwargs, stream=True, details=True, return_full_text=True)
         output = ""
         for response in stream:
@@ -108,10 +129,10 @@ async def generate_text(item: Item = None):
     except Exception as e:
         tokens = 0
         error = ""
-
+
         for model in fallbacks:
             try:
-                client = InferenceClient(model)
+                client = InferenceClient(model, token=HF_API_KEY) # Add API key here for fallback models
                 stream = client.text_generation(input_, **generate_kwargs, stream=True, details=True, return_full_text=True)
                 output = ""
                 for response in stream:
@@ -125,9 +146,10 @@ async def generate_text(item: Item = None):
 
     raise HTTPException(status_code=500, detail=error)
 
-
-
-
+# Show online status
+@app.get("/")
+def root():
+    return {"status": "Sebari-chan is online!"}
 
 if __name__ == "__main__":
-    uvicorn.run(app, host="0.0.0.0", port=8000)
+    uvicorn.run(app, host="0.0.0.0", port=8000)
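In short, the commit does two things: every InferenceClient is now constructed with token=HF_API_KEY (read from the HF_API_TOKEN environment variable), and generation still walks the fallbacks list when the primary model errors. A minimal standalone sketch of that pattern, non-streaming for brevity; generate_with_fallback is an illustrative helper, not a function that exists in main.py:

from huggingface_hub import InferenceClient

def generate_with_fallback(prompt, models, token, **gen_kwargs):
    """Try each model in order; return the first successful generation."""
    last_error = None
    for model in models:
        try:
            # Token-authenticated client, as this commit now does for
            # both the primary and the fallback models.
            client = InferenceClient(model, token=token)
            return client.text_generation(prompt, **gen_kwargs)
        except Exception as e:  # mirrors main.py's broad except-and-retry
            last_error = e
    raise RuntimeError(f"All models failed; last error: {last_error}")

# e.g. generate_with_fallback(input_, [primary, *fallbacks], HF_API_KEY, max_new_tokens=256)

On a Hugging Face Space, HF_API_TOKEN is best supplied as a repository secret in the Space settings; the hard-coded "your_huggingface_api_key" branch is only a placeholder, as the commit's own comment notes.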
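To smoke-test the updated app: the new root route is visible in the diff, but the decorator on generate_text falls outside the hunks shown, so the POST path below ("/generate") is an assumption rather than something the diff confirms. A minimal sketch using requests:

import requests

BASE = "http://localhost:8000"  # host/port from the uvicorn.run call above

# New in this commit: the root endpoint reports online status.
print(requests.get(f"{BASE}/").json())  # expected: {"status": "Sebari-chan is online!"}

# Request fields mirror the Item model; omitted fields take the class defaults
# (system_prompt, temperature, max_new_tokens, and so on).
payload = {"input": "Tell me about yourself"}
resp = requests.post(f"{BASE}/generate", json=payload)  # "/generate" is assumed
print(resp.status_code, resp.text)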