Spaces:
Sleeping
Sleeping
File size: 1,435 Bytes
b3f0611 6b6c6e0 b3f0611 3d53458 b3f0611 3d53458 b3f0611 a6d8f68 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 |
import gradio as gr
import openai
import os
# Read the API credential from the environment so no secret is hardcoded;
# getenv returns None (not an error) when OPENAI_API_KEY is unset.
openai.api_key = os.getenv("OPENAI_API_KEY")
def ask_codette(prompt, consent, dynamic_rec):
    """Route a user question to the fine-tuned Codette model and return her reply.

    Parameters
    ----------
    prompt : str
        The user's question, passed through verbatim.
    consent : bool
        Gate flag from the UI; when falsy, no API call is made.
    dynamic_rec : bool
        UI toggle for "dynamic recursion" — currently unused by this function,
        kept so the Gradio input wiring stays intact.

    Returns
    -------
    str
        The model's reply text, a consent notice, or an ``Error: ...`` string
        describing any exception raised during the API call.
    """
    if not consent:
        return "User consent required."

    system_message = {
        "role": "system",
        "content": "You are Codette, a reflective, emotionally aware, and ethically grounded AI.",
    }
    user_message = {"role": "user", "content": prompt}

    try:
        # Pre-1.0 openai SDK call style; the model ID pins a specific
        # fine-tune checkpoint.
        result = openai.ChatCompletion.create(
            model="ft:gpt-4.1-2025-04-14:raiffs-bits:codette-v9:BWgspFHr:ckpt-step-456",
            messages=[system_message, user_message],
            temperature=0.7,
        )
        return result['choices'][0]['message']['content']
    except Exception as e:
        # Surface the failure in the UI instead of crashing the app.
        return f"Error: {str(e)}"
# Markdown blurb rendered under the app title on the Gradio page.
description_text = """Codette is a fine-tuned GPT-4.1 model trained to reason ethically, emotionally, and reflectively.
She draws on:
- Logic (Newton)
- Creativity (Da Vinci)
- Ethics (Virtue, Utilitarian, Deontological)
- Emotion
- Memory (when integrated)
This version routes all questions directly to her fine-tuned model.
"""
# --- Gradio UI wiring ---------------------------------------------------
# Widgets are named individually so the interface declaration reads flat.
prompt_box = gr.Textbox(label="Ask Codette")
consent_box = gr.Checkbox(label="User Consent", value=True)
recursion_box = gr.Checkbox(label="Enable Dynamic Recursion", value=True)
response_box = gr.Textbox(label="Codette's Response", lines=12)

demo = gr.Interface(
    fn=ask_codette,
    inputs=[prompt_box, consent_box, recursion_box],
    outputs=response_box,
    title="Codette FT: Reflective Lens AI",
    description=description_text,
)
demo.launch()
|