Update app.py
app.py CHANGED
@@ -1,103 +1,50 @@
 import os
 import logging
-
+import gradio as gr
 import g4f
 from g4f.client import Client
-import os
-import logging
-from flask import Flask, render_template, request, jsonify
-import g4f
-from g4f.client import Client
-
-
-# Configure logging
 
 # Configure logging
 logging.basicConfig(level=logging.DEBUG)
 logger = logging.getLogger(__name__)
 
-# Create Flask app
-app = Flask(__name__)
-app.secret_key = os.environ.get("SESSION_SECRET", "default_secret_key")
-
 # Initialize g4f client
 client = Client()
 
-
-
-def index():
-    return render_template('index.html')
-
-
-@app.route('/api/chat', methods=['POST'])
-def chat():
+# Define the chat function for Gradio
+def chat_function(messages, model='gpt-4o-mini'):
     try:
-        data = request.json
-        messages = data.get('messages', [])
-        model = data.get('model', 'gpt-4o-mini')
-
        # Add system prompt
         system_prompt = {
-            "role":
-            "system",
-            "content":
-            "You are orion helpful AI assistant. You provide accurate, informative, and friendly responses while keeping them concise and relevant and you are make by Abdullah ali who is 13 years old "
+            "role": "system",
+            "content": "You are orion helpful AI assistant. You provide accurate, informative, and friendly responses while keeping them concise and relevant and you are make by Abdullah ali who is 13 years old."
         }
 
         # Insert system prompt at the beginning if not already present
         if not messages or messages[0].get('role') != 'system':
             messages.insert(0, system_prompt)
 
-        logger.debug(
-            f"Sending request to g4f with model: {model} and messages: {messages}"
-        )
+        logger.debug(f"Sending request to g4f with model: {model} and messages: {messages}")
 
         # Call the g4f API
-        response = client.chat.completions.create(model=model,
-                                                   messages=messages,
-                                                   web_search=False)
+        response = client.chat.completions.create(model=model, messages=messages, web_search=False)
 
         ai_response = response.choices[0].message.content
         logger.debug(f"Received response from g4f: {ai_response}")
 
-        return
+        return ai_response
     except Exception as e:
         logger.error(f"Error in chat endpoint: {str(e)}")
-        return jsonify({
-            'status': 'error',
-            'message': f"An error occurred: {str(e)}"
-        }), 500
-
-
-
-@app.route('/api/conversations/<conversation_id>', methods=['DELETE'])
-def delete_conversation(conversation_id):
-    try:
-        return jsonify({'status': 'success', 'message': f'Conversation {conversation_id} deleted'})
-    except Exception as e:
-        logger.error(f"Error deleting conversation: {str(e)}")
-        return jsonify({
-            'status': 'error',
-            'message': f"An error occurred: {str(e)}"
-        }), 500
-
-@app.route('/api/models', methods=['GET'])
-def get_models():
-    try:
-        # Return a list of available models
-        # You can customize this list based on what g4f supports
-        models = [{
-            "id": "gpt-4o-mini",
-            "name": "GPT-4o"
-        }]
-        return jsonify({'status': 'success', 'models': models})
-    except Exception as e:
-        logger.error(f"Error in models endpoint: {str(e)}")
-        return jsonify({
-            'status': 'error',
-            'message': f"An error occurred: {str(e)}"
-        }), 500
+        return f"An error occurred: {str(e)}"
 
+# Create the Gradio interface
+iface = gr.Interface(
+    fn=chat_function,
+    inputs=[gr.Textbox(label="Enter your message", placeholder="Type something..."), gr.Dropdown(label="Model", choices=["gpt-4o-mini"], value="gpt-4o-mini")],
+    outputs="text",
+    live=True,
+)
 
 if __name__ == "__main__":
-
+    # Launch the Gradio interface, which will be automatically hosted on Hugging Face Spaces
+    iface.launch(share=True)
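
Note on the new wiring: chat_function still expects a list of OpenAI-style message dicts (it calls messages[0].get('role')), but gr.Interface passes the gr.Textbox value in as a plain string, so a submission ends up in the except branch and returns an error string instead of a reply. Below is a minimal sketch, not part of the commit, of one way to bridge that gap, assuming single-turn chat is the intent; the wrapper name chat_text is made up here, and chat_function refers to the function defined in app.py above.

import gradio as gr

# Hypothetical wrapper (not in the commit): adapts the raw Textbox string
# into the message-dict list that chat_function expects.
def chat_text(user_text, model="gpt-4o-mini"):
    messages = [{"role": "user", "content": user_text}]
    return chat_function(messages, model)  # chat_function as defined in app.py above

iface = gr.Interface(
    fn=chat_text,
    inputs=[
        gr.Textbox(label="Enter your message", placeholder="Type something..."),
        gr.Dropdown(label="Model", choices=["gpt-4o-mini"], value="gpt-4o-mini"),
    ],
    outputs="text",  # no live=True, so g4f is only called on submit rather than while typing
)

if __name__ == "__main__":
    iface.launch()

Two smaller, related points: live=True re-runs the function whenever an input changes, which can fire a g4f request on every keystroke, and since a Hugging Face Space is already hosted publicly, launch() without share=True is generally sufficient there.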