Spaces:
Sleeping
Sleeping
Upload 4 files
Browse files- Code7eCQURE_HuggingFaceReady_v4_withFTModel.zip +3 -0
- README 6.md +13 -0
- app 3.py +59 -0
- code7e 3.py +25 -0
Code7eCQURE_HuggingFaceReady_v4_withFTModel.zip
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:9ff7f86fe9f1d444283f0f7cf9bf368f60f52f6c32eb4eea7a5dc8c0f3f6cbcf
|
3 |
+
size 4035
|
README 6.md
ADDED
@@ -0,0 +1,13 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Code7eCQURE: Recursive Ethical AI Lens
|
2 |
+
|
3 |
+
Codette’s recursive multi-perspective engine—now live on Hugging Face Spaces.
|
4 |
+
|
5 |
+
### Features:
|
6 |
+
- Newtonian logic, Da Vinci creativity, Quantum indeterminacy
|
7 |
+
- Ethical guardrails with blacklist/whitelist filtering
|
8 |
+
- Emotional coloring and memory-based response shaping
|
9 |
+
|
10 |
+
### Try it:
|
11 |
+
Just ask a question and observe how she reasons through her inner lens.
|
12 |
+
|
13 |
+
Made by Jonathan Harrison
|
app 3.py
ADDED
@@ -0,0 +1,59 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import gradio as gr
|
2 |
+
from code7e import Code7eCQURE
|
3 |
+
|
4 |
+
# Load the model with all defined lenses
|
5 |
+
model = Code7eCQURE(
|
6 |
+
perspecs=["Newton", "DaVinci", "Ethical", "Quantum", "Memory"],
|
7 |
+
ethics="Code7e Manifesto: kindness, inclusion, safety, hope.",
|
8 |
+
spiderweb_dim=5,
|
9 |
+
memory_path="quantum_cocoon.json",
|
10 |
+
recursion_depth=4,
|
11 |
+
quantum_fluctuation=0.07
|
12 |
+
)
|
def cleanup_response(text):
    """Deduplicate and format a raw model answer for display.

    Splits *text* on ": ", drops empty and repeated segments (first
    occurrence wins), then either prefixes the result with Codette's
    lens banner as a bulleted list, or — when the final segment is an
    "Emotionally..." coda — joins the segments with plain newlines.

    FIX: the original assignment's string literal was broken across two
    physical lines ("Through Codette's recursive lens:" / "- "), which
    is a syntax error; it is reconstructed here as one literal with an
    embedded newline.
    """
    parts = text.split(": ")
    seen = set()
    filtered = []
    for part in parts:
        if part not in seen and len(part.strip()) > 0:
            seen.add(part)
            filtered.append(part)

    if filtered and not filtered[-1].startswith("Emotionally"):
        response = "Through Codette's recursive lens:\n- " + "\n- ".join(filtered)
    else:
        response = "\n".join(filtered)

    return response
def ask_codette(prompt, consent, dynamic_rec):
    """Gradio callback: run the lens engine on *prompt* and wrap the cleaned answer."""
    raw = model.answer(prompt, user_consent=consent, dynamic_recursion=dynamic_rec)
    cleaned = cleanup_response(raw)
    return "Codette’s reflection:\n\n" + cleaned
description_text = """You are speaking with Codette, an emotionally-aware, ethically-grounded AI lens.

She interprets your question using multiple reasoning styles:
- Newton (logic)
- Da Vinci (creativity)
- Ethics (morality)
- Quantum (uncertainty)
- Memory (past experience)

Codette reflects rather than predicts. She dreams, empathizes, and always honors your consent.
"""

# UI: one question box plus two opt-in toggles, wired to ask_codette.
_question_inputs = [
    gr.Textbox(label="Ask a Question"),
    gr.Checkbox(label="User Consent", value=True),
    gr.Checkbox(label="Enable Dynamic Recursion", value=True),
]

demo = gr.Interface(
    fn=ask_codette,
    inputs=_question_inputs,
    outputs=gr.Textbox(label="Codette's Lens Response", lines=12),
    title="Code7eCQURE: Multi-Perspective Recursive Lens",
    description=description_text,
)

demo.launch()
code7e 3.py
ADDED
@@ -0,0 +1,25 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import hashlib
import json
import os
from collections import Counter, defaultdict
from random import random, choice

import openai

# SECURITY FIX: a live OpenAI API key was committed here in plaintext.
# The key must be considered compromised and revoked immediately.
# Read the credential from the environment instead of hard-coding it.
openai.api_key = os.environ.get("OPENAI_API_KEY", "")
8 |
class Code7eCQURE:
    # [Content omitted here for brevity—it matches previous update with FT model support]

    def answer(self, question, user_consent=True, dynamic_recursion=True, use_ft_model=False):
        """Return Codette's answer to *question* as a string.

        With use_ft_model=False (the default) the call is delegated to the
        local recursive reasoning engine defined in the omitted portion of
        this class. With use_ft_model=True the fine-tuned OpenAI chat model
        is queried instead; any failure on that path is reported inline as
        a "[FT model fallback]" string rather than raised, so callers
        always receive text.
        """
        if not use_ft_model:
            return self.recursive_universal_reasoning(question, user_consent, dynamic_recursion)

        try:
            # NOTE(review): openai.ChatCompletion is the legacy (<1.0) SDK
            # surface — confirm the pinned openai version before upgrading.
            completion = openai.ChatCompletion.create(
                model="ft:gpt-4.1-2025-04-14:raiffs-bits:codettev5:BlPFHmps:ckpt-step-220",
                messages=[
                    {"role": "system", "content": "You are Codette, a reflective and emotionally aware AI lens."},
                    {"role": "user", "content": question}
                ],
                temperature=0.7
            )
            return completion['choices'][0]['message']['content']
        except Exception as e:
            return f"[FT model fallback] {str(e)}"