Spaces: Runtime error

Update app.py
app.py CHANGED
@@ -20,21 +20,9 @@ def fetch_message():
     if not message:
         return jsonify({"error": "No input provided."}), 400
 
-    # Model parameters
-    parameters = {
-        "temperature": 0.7,  # Adjust creativity
-        "top_p": 0.9,  # Top-p sampling
-        "max_new_tokens": 200,  # Limit response length
-        "do_sample": True  # Enable sampling for varied responses
-    }
-
     # Process the message using the Hugging Face model
     try:
-        response = client.text_generation(
-            message,
-            parameters=parameters,
-            timeout=300  # 5 minutes timeout
-        )
+        response = client.text_generation(message)
         return jsonify({"response": response})
     except Exception as e:
         return jsonify({"error": str(e)}), 500
@@ -42,4 +30,5 @@ def fetch_message():
 if __name__ == "__main__":
     # Use PORT environment variable or default to 7860
     port = int(os.getenv("PORT", 7860))
-
+    ap
+    p.run(host="0.0.0.0", port=port)