Update app.py
app.py CHANGED
Before (old app.py; lines removed by this commit are marked "-"; line numbers refer to the old file):

@@ -2,11 +2,10 @@ import gradio as gr
  2     import openai
  3     import os
  4     from codette_core import Code7eCQURE
  5  -  from codette_agents import MedicalAgent, GovernmentAgent, SocialAgent, EconomicAgent, MisinfoAgent
  6  -  from codette_trust import trust_calibration, weighted_consensus
  7
  8     openai.api_key = os.getenv("OPENAI_API_KEY")
  9
 10     codette_cqure = Code7eCQURE(
 11         perspectives=["Newton", "DaVinci", "Ethical", "Quantum", "Memory"],
 12         ethical_considerations="Codette Manifesto: kindness, inclusion, safety, hope.",

@@ -16,63 +15,55 @@ codette_cqure = Code7eCQURE(
 16         quantum_fluctuation=0.07
 17     )
 18
 20  -      MedicalAgent("MedicalAI", "Newton", 1.0),
 21  -      GovernmentAgent("GovAI", "Policy", 0.9),
 22  -      SocialAgent("SocialAI", "Emotion", 0.95),
 23  -      EconomicAgent("EconAI", "Resources", 0.92),
 24  -      MisinfoAgent("MisinfoAI", "Chaos", 0.1)
 25  -  ]
 27  -  def codette_chat_interface(message, history, consent=True, dynamic_rec=True, use_finetune=False):
 28         if not consent:
 29             return "User consent required."
 30
 31  -      full_prompt = "\n".join([f"User: {user}\nCodette: {bot}" for user, bot in history if user and bot])
 32  -      full_prompt += f"\nUser: {message}"
 34         if use_finetune:
 35             try:
 36                 response = openai.ChatCompletion.create(
 37                     model="ft:gpt-4.1-2025-04-14:raiffs-bits:codettev5:BlPFHmps:ckpt-step-220",
 38                     messages=[
 39                         {"role": "system", "content": "You are Codette, a reflective, emotionally aware, and ethically grounded AI."},
 40  -                      *[
 42                         {"role": "user", "content": message}
 43                     ],
 44                     temperature=0.7
 45                 )
 46                 return response['choices'][0]['message']['content']
 47             except Exception as e:
 49         else:
 52  -              " | ".join(proposals),
 53                 user_consent=consent,
 54                 dynamic_recursion=dynamic_rec
 55             )
 56  -      return f"Ethical Outcome (Local): {outcome}"
 57
 62  -  Choose your engine and engage her in ongoing dialogue.
 63  -  """)
 64
 75  -  )
 76
 77     if __name__ == "__main__":
After (new app.py; lines added by this commit are marked "+"; line numbers refer to the new file):

  2     import openai
  3     import os
  4     from codette_core import Code7eCQURE
  5
  6     openai.api_key = os.getenv("OPENAI_API_KEY")
  7
  8  +  # Initialize local Codette core
  9     codette_cqure = Code7eCQURE(
 10         perspectives=["Newton", "DaVinci", "Ethical", "Quantum", "Memory"],
 11         ethical_considerations="Codette Manifesto: kindness, inclusion, safety, hope.",

 15         quantum_fluctuation=0.07
 16     )
 17
 18  +  def codette_chat(message, history, consent=True, dynamic_rec=True, use_finetune=True):
 19         if not consent:
 20             return "User consent required."
 21
 22         if use_finetune:
 23             try:
 24                 response = openai.ChatCompletion.create(
 25                     model="ft:gpt-4.1-2025-04-14:raiffs-bits:codettev5:BlPFHmps:ckpt-step-220",
 26                     messages=[
 27                         {"role": "system", "content": "You are Codette, a reflective, emotionally aware, and ethically grounded AI."},
 28  +                      *[
 29  +                          {"role": "user", "content": user} if i % 2 == 0 else {"role": "assistant", "content": bot}
 30  +                          for i, (user, bot) in enumerate(history)
 31  +                      ],
 32                         {"role": "user", "content": message}
 33                     ],
 34                     temperature=0.7
 35                 )
 36                 return response['choices'][0]['message']['content']
 37             except Exception as e:
 38  +              fallback = codette_cqure.recursive_universal_reasoning(
 39  +                  message,
 40  +                  user_consent=consent,
 41  +                  dynamic_recursion=dynamic_rec
 42  +              )
 43  +              return (f"[Error calling FT model]: {str(e)}\n\n"
 44  +                      f"Fallback response:\n{fallback}")
 45         else:
 46  +          return codette_cqure.recursive_universal_reasoning(
 47  +              message,
 48                 user_consent=consent,
 49                 dynamic_recursion=dynamic_rec
 50             )
 51
 52  +  description_text = '''
 53  +  A sovereign AI capable of emotional, ethical, and reflective reasoning.
 54  +  Choose your engine and engage her in ongoing dialogue.
 55  +  '''
 56
 57  +  chat = gr.ChatInterface(
 58  +      fn=codette_chat,
 59  +      additional_inputs=[
 60  +          gr.Checkbox(label="User Consent", value=True),
 61  +          gr.Checkbox(label="Enable Dynamic Recursion", value=True),
 62  +          gr.Checkbox(label="Use Fine-Tuned Model (Codette v5 @ step 220)", value=True)
 63  +      ],
 64  +      title="Codette Conversation",
 65  +      description=description_text,
 66  +  )
 67
 68     if __name__ == "__main__":
 69  +      chat.launch()
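A few notes on the updated handler. The *[...] splice added at new lines 28-31 produces one message per (user, bot) pair, picking the user half or the assistant half by index parity, so every other turn of the stored conversation is dropped before it reaches the fine-tuned model. If the intent is to replay the full history, each pair has to expand into two messages. A minimal sketch of that flattening, assuming the tuple-style history that gr.ChatInterface passes by default; the helper name history_to_messages is illustrative and not part of this commit:

# Illustrative only; not in the committed app.py.
# Flattens Gradio tuple-style history [(user, bot), ...] into OpenAI chat
# messages, keeping both halves of every exchange.
def history_to_messages(history):
    messages = []
    for user, bot in history:
        if user:
            messages.append({"role": "user", "content": user})
        if bot:
            messages.append({"role": "assistant", "content": bot})
    return messages

Inside codette_chat this would replace the parity comprehension, e.g.
messages=[system_msg, *history_to_messages(history), {"role": "user", "content": message}].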
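Both the old and the new file call openai.ChatCompletion.create, which exists only in the pre-1.0 openai Python SDK; on openai 1.x that call raises an error, so the Space has to keep openai pinned below 1.0 in its requirements (not shown in this diff) or move to the client-based API. A rough sketch of the equivalent call on the 1.x SDK, under that assumption; the placeholder user turn stands in for the message argument of codette_chat:

# Sketch for openai>=1.0; not what this commit uses.
from openai import OpenAI

client = OpenAI()  # reads OPENAI_API_KEY from the environment
response = client.chat.completions.create(
    model="ft:gpt-4.1-2025-04-14:raiffs-bits:codettev5:BlPFHmps:ckpt-step-220",
    messages=[
        {"role": "system", "content": "You are Codette, a reflective, emotionally aware, and ethically grounded AI."},
        {"role": "user", "content": "Hello, Codette."},  # placeholder user turn
    ],
    temperature=0.7,
)
print(response.choices[0].message.content)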
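The gr.ChatInterface wiring depends on additional_inputs being passed to fn positionally after (message, history), which is why codette_chat takes consent, dynamic_rec and use_finetune in the same order as the three checkboxes. A quick way to exercise the app locally, assuming codette_core is importable and the dependencies below are what the Space expects:

# Assumed local setup; versions are illustrative.
#   pip install gradio "openai<1"
#   python app.py    # gr.ChatInterface serves on http://127.0.0.1:7860 by default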