Update app.py
Browse files
app.py
CHANGED
@@ -1,176 +1,22 @@
|
|
1 |
-
from fastapi import FastAPI
|
2 |
-
from fastapi.responses import HTMLResponse, StreamingResponse
|
3 |
import os
|
4 |
-
import
|
5 |
-
import requests
|
6 |
-
import random
|
7 |
|
8 |
app = FastAPI()
|
9 |
|
10 |
-
#
|
11 |
-
|
12 |
-
|
13 |
-
DISABLED = os.getenv("DISABLED") == 'True'
|
14 |
-
OPENAI_API_KEYS = os.getenv("OPENAI_API_KEYS", "").split(",")
|
15 |
-
NUM_THREADS = int(os.getenv("NUM_THREADS", 1))
|
16 |
|
17 |
-
|
18 |
-
|
19 |
-
|
20 |
-
<html lang="en">
|
21 |
-
<head>
|
22 |
-
<meta charset="UTF-8">
|
23 |
-
<meta name="viewport" content="width=device-width, initial-scale=1.0">
|
24 |
-
<title>GPT-4o Mini Chat</title>
|
25 |
-
<style>
|
26 |
-
body { font-family: Arial, sans-serif; background-color: #f4f4f4; margin: 0; padding: 20px; }
|
27 |
-
.container { max-width: 800px; margin: auto; background: white; padding: 20px; border-radius: 8px; box-shadow: 0 0 10px rgba(0, 0, 0, 0.1); }
|
28 |
-
h1 { color: #333; text-align: center; }
|
29 |
-
.chatbox { height: 400px; overflow-y: auto; border: 1px solid #ccc; padding: 10px; margin-bottom: 20px; }
|
30 |
-
.message { margin: 10px 0; padding: 8px; border-radius: 5px; }
|
31 |
-
.user { background: #e6f3ff; }
|
32 |
-
.assistant { background: #f0f0f0; }
|
33 |
-
form { display: flex; flex-direction: column; gap: 10px; }
|
34 |
-
input, select, button { padding: 8px; font-size: 16px; }
|
35 |
-
button { background-color: #007bff; color: white; border: none; border-radius: 5px; cursor: pointer; }
|
36 |
-
button:hover { background-color: #0056b3; }
|
37 |
-
button:disabled { background-color: #ccc; cursor: not-allowed; }
|
38 |
-
</style>
|
39 |
-
</head>
|
40 |
-
<body>
|
41 |
-
<div class="container">
|
42 |
-
<h1>GPT-4o Mini: Research Preview</h1>
|
43 |
-
<div id="chatbox" class="chatbox"></div>
|
44 |
-
<form id="chat-form" action="/chat" method="post">
|
45 |
-
<input type="text" id="input" name="input" placeholder="Type your message..." required>
|
46 |
-
<select name="top_p">
|
47 |
-
<option value="1.0">Top P: 1.0</option>
|
48 |
-
<option value="0.9">Top P: 0.9</option>
|
49 |
-
<option value="0.8">Top P: 0.8</option>
|
50 |
-
</select>
|
51 |
-
<select name="temperature">
|
52 |
-
<option value="1.0">Temperature: 1.0</option>
|
53 |
-
<option value="0.7">Temperature: 0.7</option>
|
54 |
-
<option value="0.3">Temperature: 0.3</option>
|
55 |
-
</select>
|
56 |
-
<button type="submit" id="submit-btn">Send</button>
|
57 |
-
</form>
|
58 |
-
</div>
|
59 |
-
<script>
|
60 |
-
const chatbox = document.getElementById("chatbox");
|
61 |
-
const form = document.getElementById("chat-form");
|
62 |
-
const input = document.getElementById("input");
|
63 |
-
const submitBtn = document.getElementById("submit-btn");
|
64 |
-
let history = JSON.parse(localStorage.getItem("chatHistory")) || [];
|
65 |
-
|
66 |
-
history.forEach(msg => addMessage(msg.role, msg.content));
|
67 |
-
|
68 |
-
form.addEventListener("submit", async (e) => {
|
69 |
-
e.preventDefault();
|
70 |
-
const userInput = input.value;
|
71 |
-
const topP = form.top_p.value;
|
72 |
-
const temperature = form.temperature.value;
|
73 |
-
|
74 |
-
addMessage("user", userInput);
|
75 |
-
input.value = "";
|
76 |
-
submitBtn.disabled = true;
|
77 |
-
|
78 |
-
const response = await fetch("/chat", {
|
79 |
-
method: "POST",
|
80 |
-
headers: { "Content-Type": "application/x-www-form-urlencoded" },
|
81 |
-
body: `input=${encodeURIComponent(userInput)}&top_p=${topP}&temperature=${temperature}`
|
82 |
-
});
|
83 |
-
|
84 |
-
const reader = response.body.getReader();
|
85 |
-
let assistantMessage = "";
|
86 |
-
const decoder = new TextDecoder();
|
87 |
-
|
88 |
-
while (true) {
|
89 |
-
const { done, value } = await reader.read();
|
90 |
-
if (done) break;
|
91 |
-
assistantMessage += decoder.decode(value);
|
92 |
-
updateLastMessage("assistant", assistantMessage);
|
93 |
-
}
|
94 |
-
|
95 |
-
history.push({ role: "user", content: userInput }, { role: "assistant", content: assistantMessage });
|
96 |
-
localStorage.setItem("chatHistory", JSON.stringify(history));
|
97 |
-
submitBtn.disabled = false;
|
98 |
-
});
|
99 |
-
|
100 |
-
function addMessage(role, content) {
|
101 |
-
const div = document.createElement("div");
|
102 |
-
div.className = `message ${role}`;
|
103 |
-
div.textContent = content;
|
104 |
-
chatbox.appendChild(div);
|
105 |
-
chatbox.scrollTop = chatbox.scrollHeight;
|
106 |
-
}
|
107 |
-
|
108 |
-
function updateLastMessage(role, content) {
|
109 |
-
const lastMsg = chatbox.lastElementChild;
|
110 |
-
if (lastMsg && lastMsg.className.includes(role)) {
|
111 |
-
lastMsg.textContent = content;
|
112 |
-
} else {
|
113 |
-
addMessage(role, content);
|
114 |
-
}
|
115 |
-
chatbox.scrollTop = chatbox.scrollHeight;
|
116 |
-
}
|
117 |
-
</script>
|
118 |
-
</body>
|
119 |
-
</html>
|
120 |
-
"""
|
121 |
-
|
122 |
-
@app.get("/", response_class=HTMLResponse)
|
123 |
-
async def home():
|
124 |
-
if DISABLED:
|
125 |
-
return "<h1 style='color:red;text-align:center'>This app has reached OpenAI's usage limit. Please check back tomorrow.</h1>"
|
126 |
-
return HTML_CONTENT
|
127 |
|
128 |
@app.post("/chat")
|
129 |
-
|
130 |
-
if
|
131 |
-
return
|
132 |
-
|
133 |
-
if not API_URL:
|
134 |
-
return StreamingResponse(iter(["Error: API_URL is not set in the environment."]), media_type="text/plain")
|
135 |
-
|
136 |
-
if not OPENAI_API_KEYS or OPENAI_API_KEYS == [""]:
|
137 |
-
return StreamingResponse(iter(["Error: No valid OPENAI_API_KEYS provided."]), media_type="text/plain")
|
138 |
-
|
139 |
-
payload = {
|
140 |
-
"model": MODEL,
|
141 |
-
"messages": [{"role": "user", "content": input}],
|
142 |
-
"temperature": temperature,
|
143 |
-
"top_p": top_p,
|
144 |
-
"n": 1,
|
145 |
-
"stream": True,
|
146 |
-
"presence_penalty": 0,
|
147 |
-
"frequency_penalty": 0,
|
148 |
-
}
|
149 |
-
|
150 |
-
OPENAI_API_KEY = random.choice(OPENAI_API_KEYS)
|
151 |
-
headers = {
|
152 |
-
"Content-Type": "application/json",
|
153 |
-
"Authorization": f"Bearer {OPENAI_API_KEY}",
|
154 |
-
}
|
155 |
-
|
156 |
-
def stream_response():
|
157 |
-
try:
|
158 |
-
response = requests.post(API_URL, headers=headers, json=payload, stream=True)
|
159 |
-
response.raise_for_status()
|
160 |
-
for chunk in response.iter_lines():
|
161 |
-
if chunk:
|
162 |
-
chunk_data = chunk.decode('utf-8')
|
163 |
-
if chunk_data.startswith("data: "):
|
164 |
-
chunk_json = json.loads(chunk_data[6:])
|
165 |
-
if "choices" in chunk_json and "delta" in chunk_json["choices"][0] and "content" in chunk_json["choices"][0]["delta"]:
|
166 |
-
yield chunk_json["choices"][0]["delta"]["content"]
|
167 |
-
except requests.exceptions.MissingSchema:
|
168 |
-
yield "Error: Invalid API_URL. Please provide a valid URL (e.g., https://api.openai.com/v1/chat/completions)."
|
169 |
-
except Exception as e:
|
170 |
-
yield f"Error: {str(e)}"
|
171 |
|
172 |
-
|
|
|
173 |
|
174 |
-
if __name__ == "__main__":
|
175 |
-
import uvicorn
|
176 |
-
uvicorn.run(app, host="0.0.0.0", port=7860)
|
|
|
1 |
+
from fastapi import FastAPI
|
|
|
2 |
import os
|
3 |
+
import ollama
|
|
|
|
|
4 |
|
5 |
app = FastAPI()

# Load the model (check whether Hugging Face Spaces is ready).
# NOTE(review): MODEL_NAME looks like a Hugging Face hub id, not an Ollama
# model tag — confirm it is actually available via `ollama pull`.
MODEL_NAME = "allenai/WildLlama-7b-user-assistant"
# Readiness flag: driven entirely by the HF_SPACE env var ("true"/"false",
# case-insensitive); defaults to not ready when the variable is unset.
OLLAMA_READY = os.getenv("HF_SPACE", "false").lower() == "true"
|
|
|
|
|
|
|
10 |
|
11 |
+
@app.get("/")
def home():
    """Health-check endpoint: reports liveness and model readiness."""
    status = {
        "message": "API is running!",
        "model_ready": OLLAMA_READY,
    }
    return status
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
14 |
|
15 |
@app.post("/chat")
def chat(prompt: str):
    """Send a single-turn user prompt to the local Ollama model.

    Args:
        prompt: The user's message. NOTE(review): a bare ``str`` parameter
            is bound by FastAPI as a *query* parameter, not a JSON body —
            confirm callers actually send ``?prompt=...``.

    Returns:
        ``{"response": <message dict>}`` on success, or
        ``{"error": <reason>}`` when the model is unavailable or the
        Ollama call fails.
    """
    if not OLLAMA_READY:
        return {"error": "Ollama is not available on HF Spaces"}

    # Guard the external call: a missing model or an unreachable Ollama
    # daemon previously propagated as an unhandled 500; report it using
    # the same error-dict shape the not-ready branch already uses.
    try:
        response = ollama.chat(
            model=MODEL_NAME,
            messages=[{"role": "user", "content": prompt}],
        )
    except Exception as exc:
        return {"error": f"Ollama call failed: {exc}"}

    return {"response": response["message"]}
|
22 |
|
|
|
|
|
|