File size: 3,154 Bytes
b3f0611
 
6b6c6e0
786296b
c023b87
 
d25ab0c
6b6c6e0
b3f0611
d25ab0c
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
e8c0168
b3f0611
 
 
e8c0168
 
 
d25ab0c
 
 
 
 
 
e8c0168
 
 
d25ab0c
 
 
 
 
 
 
e8c0168
d25ab0c
 
 
 
b3f0611
d25ab0c
b3f0611
e8c0168
 
 
 
 
 
df0f8ea
e8c0168
 
 
 
b3f0611
e8c0168
 
 
 
 
 
b3f0611
d25ab0c
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
import gradio as gr
import openai
import os
from codette_core import Code7eCQURE
from codette_agents import MedicalAgent, GovernmentAgent, SocialAgent, EconomicAgent, MisinfoAgent
from codette_trust import trust_calibration, weighted_consensus

# Pull the API key from the environment. If OPENAI_API_KEY is unset this is
# None and the fine-tuned path will fail at call time (handled there).
openai.api_key = os.getenv("OPENAI_API_KEY")

# Local reasoning engine used when the fine-tuned model is disabled.
# Constructor semantics live in codette_core.Code7eCQURE.
codette_cqure = Code7eCQURE(
    perspectives=["Newton", "DaVinci", "Ethical", "Quantum", "Memory"],
    ethical_considerations="Codette Manifesto: kindness, inclusion, safety, hope.",
    spiderweb_dim=5,  # NOTE(review): presumably the internal lattice dimensionality — confirm in codette_core
    memory_path="quantum_cocoon.json",  # on-disk memory store read/written by the engine
    recursion_depth=4,  # cap for recursive_universal_reasoning
    quantum_fluctuation=0.07  # stochastic perturbation amount — assumed to be a small 0..1 factor; verify
)

# Perspective agents polled for proposals on the local (non-fine-tuned) path.
# Third constructor argument appears to be a trust/weight score — note the
# MisinfoAgent is deliberately given a very low one (0.1). TODO confirm
# the argument's meaning against codette_agents.
agents = [
    MedicalAgent("MedicalAI", "Newton", 1.0),
    GovernmentAgent("GovAI", "Policy", 0.9),
    SocialAgent("SocialAI", "Emotion", 0.95),
    EconomicAgent("EconAI", "Resources", 0.92),
    MisinfoAgent("MisinfoAI", "Chaos", 0.1)
]

def codette_chat_interface(message, history, consent=True, dynamic_rec=True, use_finetune=False):
    """Answer one chat turn via the fine-tuned OpenAI model or the local engine.

    Args:
        message: The user's latest message.
        history: Prior turns as (user, bot) pairs (Gradio tuple-style history).
        consent: Hard gate — without it, no processing happens at all.
        dynamic_rec: Forwarded to the local engine's dynamic-recursion option.
        use_finetune: True -> call the fine-tuned OpenAI model;
            False -> run the local multi-agent + Code7eCQURE pipeline.

    Returns:
        The reply string, or an error/consent notice.
    """
    if not consent:
        return "User consent required."

    if use_finetune:
        # Rebuild the full conversation for the API. BUG FIX: the previous
        # version emitted only one message per (user, bot) pair (alternating
        # role by pair index), silently dropping half the conversation; each
        # pair must expand into a user message AND an assistant message.
        # (It also built an unused `full_prompt` string — dead code, removed.)
        messages = [{"role": "system", "content": "You are Codette, a reflective, emotionally aware, and ethically grounded AI."}]
        for user, bot in history:
            if user:
                messages.append({"role": "user", "content": user})
            if bot:
                messages.append({"role": "assistant", "content": bot})
        messages.append({"role": "user", "content": message})
        try:
            # Legacy (<1.0) OpenAI SDK call style, consistent with the
            # module-level `openai.api_key` assignment above.
            response = openai.ChatCompletion.create(
                model="ft:gpt-4.1-2025-04-14:raiffs-bits:codettev5:BlPFHmps:ckpt-step-220",
                messages=messages,
                temperature=0.7
            )
            return response['choices'][0]['message']['content']
        except Exception as e:
            # Surface the failure to the UI instead of crashing the app.
            return f"Error from API: {str(e)}"

    # Local path: poll every agent for a proposal, then fuse them through
    # the recursive ethical-reasoning engine.
    proposals = [agent.propose(message) for agent in agents]
    outcome = codette_cqure.recursive_universal_reasoning(
        " | ".join(proposals),
        user_consent=consent,
        dynamic_recursion=dynamic_rec
    )
    return f"Ethical Outcome (Local): {outcome}"

# Gradio UI: three option checkboxes plus the chat interface.
with gr.Blocks(title="Codette Chat Hybrid") as demo:
    gr.Markdown("""
    # Codette: Hybrid AI Chat (v5 FT @ Step 220)
    A sovereign AI capable of emotional, ethical, and reflective reasoning.
    Choose your engine and engage her in ongoing dialogue.
    """)

    with gr.Row():
        consent = gr.Checkbox(label="User Consent", value=True)
        dynamic_rec = gr.Checkbox(label="Enable Dynamic Recursion", value=True)
        use_finetune = gr.Checkbox(label="Use Fine-Tuned Model (Codette v5 @ step 220)", value=False)

    # BUG FIX: the original lambda read `checkbox.value`, which is each
    # component's *initial* value captured at build time — toggling the
    # checkboxes at runtime had no effect. Passing the components as
    # additional_inputs makes Gradio supply their live values as the extra
    # positional arguments (consent, dynamic_rec, use_finetune) of
    # codette_chat_interface.
    chatbot = gr.ChatInterface(
        fn=codette_chat_interface,
        additional_inputs=[consent, dynamic_rec, use_finetune],
        title="Codette Conversation",
        textbox=gr.Textbox(placeholder="Ask Codette something...", label="Your Message"),
        chatbot=gr.Chatbot(label="Codette's Response")
    )

# Launch the Gradio server only when run as a script, not on import.
if __name__ == "__main__":
    demo.launch()