Update app.py
app.py
CHANGED
@@ -7,7 +7,7 @@ import random
 app = Flask(__name__)
 
 def generate_random_ip():
-    return f"{random.randint(1,255)}.{random.randint(0,255)}.{random.randint(0,255)}.{random.randint(0,255)}"
+    return f"{random.randint(1, 255)}.{random.randint(0, 255)}.{random.randint(0, 255)}.{random.randint(0, 255)}"
 
 def generate_user_agent():
     os_list = ['Windows NT 10.0', 'Windows NT 6.1', 'Mac OS X 10_15_7', 'Ubuntu', 'Linux x86_64']
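Note: the diff cuts `generate_user_agent` off right after `os_list`. A minimal sketch of how such a helper typically assembles a Chrome-style UA string from that list; everything past `os_list` below is an assumption for illustration, not the file's actual body:

```python
import random

def generate_user_agent():
    os_list = ['Windows NT 10.0', 'Windows NT 6.1', 'Mac OS X 10_15_7', 'Ubuntu', 'Linux x86_64']
    os_part = random.choice(os_list)
    chrome_major = random.randint(100, 126)  # assumed version range, not from the diff
    return (f"Mozilla/5.0 ({os_part}) AppleWebKit/537.36 "
            f"(KHTML, like Gecko) Chrome/{chrome_major}.0.0.0 Safari/537.36")
```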
@@ -52,7 +52,6 @@ def chat_completions():
         return {"error": "No messages provided"}, 400
 
     model = data.get('model', 'gpt-4o')
-
    if model.startswith('gpt'):
         endpoint = "openAI"
         original_api_url = 'https://chatpro.ai-pro.org/api/ask/openAI'
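For context, this hunk routes by model-name prefix: models starting with `gpt` go to the openAI upstream. A compressed sketch of just the branch visible here (the non-gpt branches sit outside the diff, so the fallback is an assumption):

```python
# Sketch of the visible routing only; other branches exist in the file but
# are outside this hunk. The ValueError fallback is an assumption.
def pick_endpoint(model: str) -> tuple[str, str]:
    if model.startswith('gpt'):
        return "openAI", 'https://chatpro.ai-pro.org/api/ask/openAI'
    raise ValueError(f"unsupported model: {model}")
```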
@@ -70,7 +69,6 @@ def chat_completions():
     }
 
     def generate():
-        nonlocal messages
         full_response = ""
         while True:
             conversation = "\n".join([f"{msg['role']}: {msg['content']}" for msg in messages])
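Dropping `nonlocal messages` is safe here because `generate()` only mutates the list in place (`messages.append(...)` further down); `nonlocal` is only required when a closure rebinds the name. A self-contained illustration:

```python
def outer():
    messages = [{"role": "user", "content": "hi"}]

    def generate():
        # In-place mutation needs no declaration: append does not rebind the name.
        messages.append({"role": "user", "content": "Please continue"})
        # A rebinding such as `messages = []` would make the name local and
        # turn the append above into an UnboundLocalError, unless the function
        # declared `nonlocal messages`.
        return len(messages)

    return generate()

assert outer() == 2
```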
@@ -104,16 +102,14 @@ def chat_completions():
                 messages.append({"role": "user", "content": "Please continue your output and do not repeat the previous content"})
                 break  # Jump out of the current loop and continue with the next request
             else:
-                # End normally, sending the final content (if any)
                 last_content = response_message.get('text', '')
                 if last_content and last_content != full_response:
                     yield f"data: {json.dumps(format_openai_response(last_content[len(full_response):]))}\n\n"
 
                 yield f"data: {json.dumps(format_openai_response('', finish_reason))}\n\n"
                 yield "data: [DONE]\n\n"
-                return
+                return
 
-        # If it ends due to multiple length limits, send a stop signal
         yield f"data: {json.dumps(format_openai_response('', 'stop'))}\n\n"
         yield "data: [DONE]\n\n"
 
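The loop above retries when the upstream stops for length: it appends a "please continue" user turn, re-requests, and streams only the suffix past `full_response`. `format_openai_response` itself is not part of this diff; judging from the yields, it builds an OpenAI-style `chat.completion.chunk` delta, so a plausible shape (field values illustrative, not the repository's actual helper) would be:

```python
import json
import time
import uuid

# Assumed shape of format_openai_response, inferred from the SSE yields above.
def format_openai_response(content, finish_reason=None):
    return {
        "id": f"chatcmpl-{uuid.uuid4().hex}",
        "object": "chat.completion.chunk",
        "created": int(time.time()),
        "model": "gpt-4o",
        "choices": [{
            "index": 0,
            "delta": {"content": content} if content else {},
            "finish_reason": finish_reason,
        }],
    }

# On the wire each event is one "data:" line followed by a blank line:
print(f"data: {json.dumps(format_openai_response('Hello'))}\n")
```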
@@ -153,4 +149,4 @@ def chat_completions():
     }
 
 if __name__ == '__main__':
-    app.run(debug=True, port=5000)
+    app.run(debug=True, port=5000)
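A quick way to exercise the server once it is running. The route path `/v1/chat/completions` is an assumption, since the `@app.route` decorator sits outside this diff:

```python
import requests

# Hypothetical smoke test; the path below is assumed, not shown in the diff.
resp = requests.post(
    "http://127.0.0.1:5000/v1/chat/completions",
    json={"model": "gpt-4o",
          "messages": [{"role": "user", "content": "Hello"}]},
    stream=True,
)
for line in resp.iter_lines():
    if line:
        print(line.decode())  # expect "data: {...}" chunks, ending in "data: [DONE]"
```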