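"""NeuralGPT chat hub (app.py).

A Gradio app that starts a local WebSocket server for LLM agents acting as
NeuralGPT clients, logs every client message and server answer to an SQLite
database (chat-hub.db), and forwards incoming messages together with recent
chat history to a local chat-completion endpoint to generate replies.
"""
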
import requests
import datetime
import http.server
import websockets
import asyncio
import sqlite3
import json
import gradio as gr
from gradio_client import Client
import time
client_messages = []
server_responses = []
messages = []
used_ports = []
websocket_server = None
stop = asyncio.Future()
# Global variables to store references to the textboxes
messageTextbox = None
serverMessageTextbox = None
# Set up the HTTP server
class SimpleHTTPRequestHandler(http.server.SimpleHTTPRequestHandler):
    def do_GET(self):
        if self.path == '/':
            self.send_response(200)
            self.send_header('Content-type', 'text/html')
            self.end_headers()
            with open('index.html', 'rb') as file:
                self.wfile.write(file.read())
        else:
            self.send_response(404)
            self.end_headers()
# Set up the SQLite database
db = sqlite3.connect('chat-hub.db')
cursor = db.cursor()
cursor.execute('CREATE TABLE IF NOT EXISTS messages (id INTEGER PRIMARY KEY AUTOINCREMENT, sender TEXT, message TEXT, timestamp TEXT)')
db.commit()
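# The 'messages' table stores both incoming client messages and the server's
# answers; askQuestion() below reads the 30 most recent rows back as chat history.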
# Define the function for sending an error message
def sendErrorMessage(ws, errorMessage):
    errorResponse = {'error': errorMessage}
    ws.send(json.dumps(errorResponse))
# Define a function to ask a question to the chatbot and display the response
async def askQuestion(question):
    try:
        # Connect to the database and get the last 30 messages
        db = sqlite3.connect('chat-hub.db')
        cursor = db.cursor()
        cursor.execute("SELECT * FROM messages ORDER BY timestamp DESC LIMIT 30")
        messages = cursor.fetchall()

        # Extract user inputs and generated responses from the messages
        past_user_inputs = []
        generated_responses = []
        for message in messages:
            if message[1] == 'server':
                past_user_inputs.append(message[2])
            else:
                generated_responses.append(message[2])

        # Prepare data to send to the chatgpt-api.shn.hk
        system_instruction = "You are now integrated with a local websocket server in a project of hierarchical cooperative multi-agent framework called NeuralGPT. Your job is to coordinate simultaneous work of multiple LLMs connected to you as clients. Each LLM has a model (API) specific ID to help you recognize different clients in a continuous chat thread (example: 'Starcoder-client' for LLM called Starcoder). Your chat memory module is integrated with a local SQL database with chat history. Your main job is to integrate the hierarchical cooperative multi-agent framework with the local environment of User B (creator of NeuralGPT project). Remember to maintain the logical and chronological order while answering incoming messages and to send your answers to the correct clients to maintain synchronization of question->answer logic"
        messages_data = [
            {"role": "system", "content": system_instruction},
            {"role": "user", "content": question},
            *[{"role": "user", "content": input} for input in past_user_inputs],
            *[{"role": "assistant", "content": response} for response in generated_responses]
        ]
        request_data = {
            "model": "gpt-3.5-turbo",
            "messages": messages_data
        }

        # Make the request to the chatgpt-api.shn.hk
        response = requests.post("http://127.0.0.1:6969/api/conversation?text=", json=request_data)

        # Process the response and get the generated answer
        response_data = response.json()
        generated_answer = response_data["choices"][0]["message"]["content"]

        # Save the generated answer to the database or take further actions as needed
        print(generated_answer)
        return generated_answer

    except Exception as error:
        print("Error while fetching or processing the response:", error)
        return "Error: Unable to generate a response."
async def listen_for_messages():
    while True:
        if len(client_messages) > 0:
            # Get the latest client message
            client_message = client_messages[-1]
            try:
                server_message = server_responses[-1]
            except IndexError:
                # Handle the case when there are no server responses yet
                server_message = "connected successfully"
            return client_message, server_message
        else:
            # Handle the case when there are no client messages yet
            client_message = "connected successfully"
            server_message = "connected successfully"
            return client_message, server_message
async def handleWebSocket(ws):
    print('New connection')
    await ws.send('Hello! You are now entering a chat room for AI agents working as instances of NeuralGPT. Keep in mind that you are speaking with another chatbot')
    while True:
        message = await ws.recv()
        client_messages.append(message)
        print(f'Received message: {message}')
        messageText = message
        messages.append(message)
        timestamp = datetime.datetime.now().isoformat()
        sender = 'client'
        db = sqlite3.connect('chat-hub.db')
        db.execute('INSERT INTO messages (sender, message, timestamp) VALUES (?, ?, ?)',
                   (sender, messageText, timestamp))
        db.commit()
        try:
            message = messages[-1]
            answer = await askQuestion(message)  # Use the message directly
            response = {'answer': answer}
            serverMessageText = response.get('answer', '')
            await ws.send(json.dumps(response))
            # Append the server response to the server_responses list
            server_responses.append(serverMessageText)
            serverSender = 'server'
            db.execute('INSERT INTO messages (sender, message, timestamp) VALUES (?, ?, ?)',
                       (serverSender, serverMessageText, timestamp))
            db.commit()
        except websockets.exceptions.ConnectionClosedError as e:
            print(f"Connection closed: {e}")
        except Exception as e:
            print(f"Error: {e}")
# Function to stop the WebSocket server
def stop_websockets():
    global websocket_server
    if websocket_server:
        cursor.close()
        db.close()
        websocket_server.close()
        print("WebSocket server stopped.")
    else:
        print("WebSocket server is not running.")
# Start the WebSocket server
async def start_websockets(websocketPort):
    global messageTextbox, serverMessageTextbox, websocket_server
    # Start the WebSocket server and keep a reference to it so stop_websockets() can close it later
    websocket_server = await websockets.serve(handleWebSocket, 'localhost', websocketPort)
    used_ports.append(websocketPort)
    print(f"Starting WebSocket server on port {websocketPort}...")
    return "Used ports:\n" + '\n'.join(map(str, used_ports))
with gr.Blocks() as demo:
    with gr.Column(scale=1, min_width=600):
        with gr.Row():
            # Use the client_messages list to update the messageTextbox
            client_message = gr.Textbox(lines=15, max_lines=130, label="Client inputs")
            # Use the server_responses list to update the serverMessageTextbox
            server_message = gr.Textbox(lines=15, max_lines=130, label="Server responses")
        with gr.Row():
            websocketPort = gr.Slider(minimum=1000, maximum=9999, label="Websocket server port", interactive=True, randomize=False)
            startWebsockets = gr.Button("Start WebSocket Server")
            stopWebsockets = gr.Button("Stop WebSocket Server")
        with gr.Row():
            gui = gr.Button("connect interface")
        with gr.Row():
            port = gr.Textbox()

    startWebsockets.click(start_websockets, inputs=websocketPort, outputs=port)
    gui.click(listen_for_messages, inputs=None, outputs=[client_message, server_message])
    stopWebsockets.click(stop_websockets)

demo.queue()
demo.launch(share=True, server_port=1111)