Raiff1982 committed
Commit 3cb3241 · verified · 1 Parent(s): d774c87

Upload 3 files

Code7eCQURE_HuggingFaceReady_v5_FixedSyntax.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:314ccbbe157a05de6c43f0b25e9f6e9b160172a5665ebbf881a9f9c35ab03b57
+ size 3995
app 4.py ADDED
@@ -0,0 +1,57 @@
+ import gradio as gr
+ from code7e import Code7eCQURE
+
+ model = Code7eCQURE(
+     perspecs=["Newton", "DaVinci", "Ethical", "Quantum", "Memory"],
+     ethics="Code7e Manifesto: kindness, inclusion, safety, hope.",
+     spiderweb_dim=5,
+     memory_path="quantum_cocoon.json",
+     recursion_depth=4,
+     quantum_fluctuation=0.07
+ )
+
+ def cleanup_response(text):
+     parts = text.split(": ")
+     seen = set()
+     filtered = []
+     for part in parts:
+         if part not in seen and len(part.strip()) > 0:
+             seen.add(part)
+             filtered.append(part)
+
+     if filtered and not filtered[-1].startswith("Emotionally"):
+         response = "Through Codette's recursive lens:\n- " + "\n- ".join(filtered)
+     else:
+         response = "\n".join(filtered)
+
+     return response
+
+ def ask_codette(prompt, consent, dynamic_rec):
+     raw = model.answer(prompt, user_consent=consent, dynamic_recursion=dynamic_rec)
+     return f"Codette’s reflection:\n\n{cleanup_response(raw)}"
+
+ description_text = """You are speaking with Codette, an emotionally-aware, ethically-grounded AI lens.
+
+ She interprets your question using multiple reasoning styles:
+ - Newton (logic)
+ - Da Vinci (creativity)
+ - Ethics (morality)
+ - Quantum (uncertainty)
+ - Memory (past experience)
+
+ Codette reflects rather than predicts. She dreams, empathizes, and always honors your consent.
+ """
+
+ demo = gr.Interface(
+     fn=ask_codette,
+     inputs=[
+         gr.Textbox(label="Ask a Question"),
+         gr.Checkbox(label="User Consent", value=True),
+         gr.Checkbox(label="Enable Dynamic Recursion", value=True)
+     ],
+     outputs=gr.Textbox(label="Codette's Lens Response", lines=12),
+     title="Code7eCQURE: Multi-Perspective Recursive Lens",
+     description=description_text
+ )
+
+ demo.launch()
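
Note: cleanup_response splits the raw answer on ": ", drops empty and duplicate segments, and re-renders the rest as a bullet list unless the final segment begins with "Emotionally". A minimal sketch of that behavior, using a made-up input string that is not part of the commit:

raw = "Newton: gravity binds: gravity binds: Ethical: be kind"
print(cleanup_response(raw))
# Through Codette's recursive lens:
# - Newton
# - gravity binds
# - Ethical
# - be kind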
code7e 4.py ADDED
@@ -0,0 +1,25 @@
+ import json, os, hashlib
+ from collections import Counter, defaultdict
+ from random import random, choice
+ import openai
+
+ openai.api_key = os.getenv("OPENAI_API_KEY")  # read the API key from the environment rather than hardcoding a secret
+
+ class Code7eCQURE:
+     # [Content omitted here for brevity—it matches previous update with FT model support]
+     def answer(self, question, user_consent=True, dynamic_recursion=True, use_ft_model=False):
+         if use_ft_model:
+             try:
+                 completion = openai.ChatCompletion.create(
+                     model="ft:gpt-4.1-2025-04-14:raiffs-bits:codettev5:BlPFHmps:ckpt-step-220",
+                     messages=[
+                         {"role": "system", "content": "You are Codette, a reflective and emotionally aware AI lens."},
+                         {"role": "user", "content": question}
+                     ],
+                     temperature=0.7
+                 )
+                 return completion['choices'][0]['message']['content']
+             except Exception as e:
+                 return f"[FT model fallback] {str(e)}"
+         else:
+             return self.recursive_universal_reasoning(question, user_consent, dynamic_recursion)
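
Note: app 4.py imports the class with `from code7e import Code7eCQURE`, which assumes this file is importable as the module code7e (i.e. saved as code7e.py rather than "code7e 4.py"), and the openai.ChatCompletion call assumes the pre-1.0 openai package. Under those assumptions, with OPENAI_API_KEY set in the environment, a minimal sketch of exercising the fine-tuned path directly (constructor arguments mirror app 4.py):

# Sketch only; the module name and prompt are assumptions, not part of the commit.
from code7e import Code7eCQURE

bot = Code7eCQURE(
    perspecs=["Newton", "DaVinci", "Ethical", "Quantum", "Memory"],
    ethics="Code7e Manifesto: kindness, inclusion, safety, hope.",
    spiderweb_dim=5,
    memory_path="quantum_cocoon.json",
    recursion_depth=4,
    quantum_fluctuation=0.07
)

# use_ft_model=True routes the question to the fine-tuned ChatCompletion model;
# otherwise answer() falls back to recursive_universal_reasoning.
print(bot.answer("What gives you hope?", user_consent=True, dynamic_recursion=True, use_ft_model=True))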