Raiff1982 committed
Commit d0b4c52 · verified · 1 Parent(s): f3e9289

Update app.py

Files changed (1)
  1. app.py +57 -32
app.py CHANGED
@@ -1,52 +1,77 @@
 import gradio as gr
-from code7e import Code7eCQURE
+import openai
+import os
+from code7eCQURE_corrected import Code7eCQURE
+from agents import MedicalAgent, GovernmentAgent, SocialAgent, EconomicAgent, MisinfoAgent
+from trust_logic import trust_calibration, weighted_consensus

-model = Code7eCQURE(
-    perspecs=["Newton", "DaVinci", "Ethical", "Quantum", "Memory"],
-    ethics="Code7e Manifesto: kindness, inclusion, safety, hope.",
+openai.api_key = os.getenv("OPENAI_API_KEY")
+
+# Initialize Codette Local Core
+codette_cqure = Code7eCQURE(
+    perspectives=["Newton", "DaVinci", "Ethical", "Quantum", "Memory"],
+    ethical_considerations="Codette Manifesto: kindness, inclusion, safety, hope.",
     spiderweb_dim=5,
     memory_path="quantum_cocoon.json",
     recursion_depth=4,
     quantum_fluctuation=0.07
 )

-def cleanup_response(text):
-    parts = text.split(": ")
-    seen = set()
-    filtered = []
-    for part in parts:
-        if part not in seen and len(part.strip()) > 0:
-            seen.add(part)
-            filtered.append(part)
-
-    if filtered and not filtered[-1].startswith("Emotionally"):
-        response = "Through Codette's recursive lens:\n- " + "\n- ".join(filtered)
-    else:
-        response = "\n".join(filtered)
+agents = [
+    MedicalAgent("MedicalAI", "Newton", 1.0),
+    GovernmentAgent("GovAI", "Policy", 0.9),
+    SocialAgent("SocialAI", "Emotion", 0.95),
+    EconomicAgent("EconAI", "Resources", 0.92),
+    MisinfoAgent("MisinfoAI", "Chaos", 0.1)
+]

-    return response
+def ask_codette(prompt, consent, dynamic_rec, use_finetune):
+    if not consent:
+        return "User consent required."

-def ask_codette(prompt, consent, dynamic_rec):
-    raw = model.answer(prompt, user_consent=consent, dynamic_recursion=dynamic_rec)
-    return f"Codette’s reflection:\n\n{cleanup_response(raw)}"
+    if use_finetune:
+        try:
+            response = openai.ChatCompletion.create(
+                model="ft:gpt-4.1-2025-04-14:raiffs-bits:codettev5:BlPFHmps:ckpt-step-220",
+                messages=[
+                    {"role": "system", "content": "You are Codette, a reflective, emotionally aware, and ethically grounded AI."},
+                    {"role": "user", "content": prompt}
+                ],
+                temperature=0.7
+            )
+            return response['choices'][0]['message']['content']
+        except Exception as e:
+            return f"Error from API: {str(e)}"
+    else:
+        proposals = [agent.propose(prompt) for agent in agents]
+        outcome = codette_cqure.recursive_universal_reasoning(
+            " | ".join(proposals),
+            user_consent=consent,
+            dynamic_recursion=dynamic_rec
+        )
+        return f"Ethical Outcome (Local): {outcome}"

-description_text = """You are speaking with Codette, an emotionally-aware, ethically-grounded AI lens.
+description_text = """Codette is a sovereign modular AI.

-She interprets your question using multiple reasoning styles:
-- Newton (logic)
-- Da Vinci (creativity)
-- Ethics (morality)
-- Quantum (uncertainty)
-- Memory (past experience)
+This demo lets you choose:
+- 🧠 Local reasoning core (Code7eCQURE)
+- ☁️ Fine-tuned GPT-4.1 model: Codette v5 @ step 220

-Codette reflects rather than predicts. She dreams, empathizes, and always honors your consent.
+She draws from Newtonian logic, Da Vinci creativity, ethical frameworks, emotion, and memory cocooning.
 """

 demo = gr.Interface(
     fn=ask_codette,
     inputs=[
-        gr.Textbox(label="Ask a Question"),
+        gr.Textbox(label="Ask Codette a Scenario"),
         gr.Checkbox(label="User Consent", value=True),
-        gr.Checkbox(label="Enable Dynamic Recursion", value=True)
+        gr.Checkbox(label="Enable Dynamic Recursion", value=True),
+        gr.Checkbox(label="Use Fine-Tuned Model (Codette v5 @ step 220)", value=False)
     ],
-)
+    outputs=gr.Textbox(label="Codette's Response", lines=12),
+    title="Codette Hybrid AI (v5 FT @ Step 220)",
+    description=description_text
+)
+
+if __name__ == "__main__":
+    demo.launch()
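Note on dependencies: the new app.py imports MedicalAgent, GovernmentAgent, SocialAgent, EconomicAgent, and MisinfoAgent from agents, plus trust_calibration and weighted_consensus from trust_logic (the latter two are imported but not called in this version). Only the Agent(name, domain, trust_weight) constructors and agent.propose(prompt) are exercised here, and openai.ChatCompletion.create is the pre-1.0 openai SDK call, so the fine-tuned path assumes an openai<1.0 pin. The sketch below is a hypothetical minimal agents module that would satisfy these imports for local testing; the real agents.py in this repo may implement richer behaviour.

# agents.py (hypothetical stub) -- only the surface app.py actually touches:
# each agent is constructed as Agent(name, domain, trust_weight) and must
# expose propose(prompt) returning a string proposal.

class BaseAgent:
    def __init__(self, name, domain, trust_weight):
        self.name = name
        self.domain = domain
        self.trust_weight = trust_weight

    def propose(self, prompt):
        # A real agent would reason over the prompt; this stub just labels it.
        return f"{self.name} ({self.domain}, trust={self.trust_weight}): {prompt}"

class MedicalAgent(BaseAgent): pass
class GovernmentAgent(BaseAgent): pass
class SocialAgent(BaseAgent): pass
class EconomicAgent(BaseAgent): pass
class MisinfoAgent(BaseAgent): pass

With stubs like these in place (and quantum_cocoon.json writable), the local branch of ask_codette can be exercised without an OpenAI key; the fine-tuned branch still requires OPENAI_API_KEY to be set in the environment (for example, as a Space secret).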