# Codette Hybrid Chat — Hugging Face Space (Gradio app).
# NOTE(review): removed non-code page-scrape residue ("Spaces: / Sleeping / Sleeping")
# that preceded the actual source; it was not part of the program.
import gradio as gr | |
import openai | |
import os | |
from codette_core import Code7eCQURE | |
from codette_agents import MedicalAgent, GovernmentAgent, SocialAgent, EconomicAgent, MisinfoAgent | |
from codette_trust import trust_calibration, weighted_consensus | |
openai.api_key = os.getenv("OPENAI_API_KEY") | |
# Local recursive-reasoning engine; constructor arguments mirror the
# Codette manifesto configuration used throughout the project.
codette_cqure = Code7eCQURE(
    perspectives=["Newton", "DaVinci", "Ethical", "Quantum", "Memory"],
    ethical_considerations="Codette Manifesto: kindness, inclusion, safety, hope.",
    spiderweb_dim=5,
    memory_path="quantum_cocoon.json",
    recursion_depth=4,
    quantum_fluctuation=0.07,
)

# Specialist agents with per-agent weights.
# NOTE(review): MisinfoAgent's 0.1 weight is presumably a deliberately low
# trust score for an adversarial/chaos agent — confirm against codette_trust.
agents = [
    MedicalAgent("MedicalAI", "Newton", 1.0),
    GovernmentAgent("GovAI", "Policy", 0.9),
    SocialAgent("SocialAI", "Emotion", 0.95),
    EconomicAgent("EconAI", "Resources", 0.92),
    MisinfoAgent("MisinfoAI", "Chaos", 0.1),
]
def codette_chat_interface(message, history, consent=True, dynamic_rec=True, use_finetune=False):
    """Route one chat turn to the fine-tuned OpenAI model or the local agents.

    Args:
        message: The user's latest message (str).
        history: List of (user, bot) message pairs from previous turns.
        consent: Hard gate — without explicit consent nothing is processed.
        dynamic_rec: Forwarded to the local engine's dynamic-recursion switch.
        use_finetune: If True, call the fine-tuned OpenAI model; otherwise run
            the local agent-consensus path.

    Returns:
        Codette's reply as a string (or a consent/error notice).
    """
    if not consent:
        return "User consent required."

    if use_finetune:
        # Rebuild the conversation as alternating user/assistant messages.
        # (The previous version emitted only ONE message per history pair,
        # alternating on the pair index — that dropped half the conversation
        # and mislabeled roles. Each pair contributes both turns.)
        messages = [
            {"role": "system", "content": "You are Codette, a reflective, emotionally aware, and ethically grounded AI."},
        ]
        for user, bot in history:
            if user:
                messages.append({"role": "user", "content": user})
            if bot:
                messages.append({"role": "assistant", "content": bot})
        messages.append({"role": "user", "content": message})
        try:
            response = openai.ChatCompletion.create(
                model="ft:gpt-4.1-2025-04-14:raiffs-bits:codettev5:BlPFHmps:ckpt-step-220",
                messages=messages,
                temperature=0.7,
            )
            return response['choices'][0]['message']['content']
        except Exception as e:
            # Surface API failures to the chat UI rather than crashing it.
            return f"Error from API: {str(e)}"

    # Local path: one proposal per agent, then ethical consensus reasoning
    # over the joined proposal set. (A stale `full_prompt` that was built but
    # never used has been removed.)
    proposals = [agent.propose(message) for agent in agents]
    outcome = codette_cqure.recursive_universal_reasoning(
        " | ".join(proposals),
        user_consent=consent,
        dynamic_recursion=dynamic_rec,
    )
    return f"Ethical Outcome (Local): {outcome}"
# Build the UI. The checkboxes are wired to the handler via
# `additional_inputs` so their *live* values reach each call.
# (The previous lambda read `component.value`, which is only the static
# initial value of a Gradio component — toggling a box had no effect.)
with gr.Blocks(title="Codette Chat Hybrid") as demo:
    gr.Markdown("""
    # Codette: Hybrid AI Chat (v5 FT @ Step 220)
    A sovereign AI capable of emotional, ethical, and reflective reasoning.
    Choose your engine and engage her in ongoing dialogue.
    """)
    with gr.Row():
        consent = gr.Checkbox(label="User Consent", value=True)
        dynamic_rec = gr.Checkbox(label="Enable Dynamic Recursion", value=True)
        use_finetune = gr.Checkbox(label="Use Fine-Tuned Model (Codette v5 @ step 220)", value=False)
    chatbot = gr.ChatInterface(
        fn=codette_chat_interface,
        additional_inputs=[consent, dynamic_rec, use_finetune],
        title="Codette Conversation",
        textbox=gr.Textbox(placeholder="Ask Codette something...", label="Your Message"),
        chatbot=gr.Chatbot(label="Codette's Response"),
    )
if __name__ == "__main__":
    # Launch the Gradio server only when run as a script (not on import).
    demo.launch()