import gradio as gr
import os
from random import randint
import markdown
from anthropic import HUMAN_PROMPT, AI_PROMPT, Anthropic
import json
import datetime
import openai
def call_openai(query, chat_history, system_prompt):
    openai.api_key = os.environ["OPENAI_API_KEY"]
    messages = [{"role": "system", "content": f"{system_prompt}"}]
    # Rebuild the OpenAI message list from the chat history; this assumes the
    # (user, assistant) pair format that gr.ChatInterface passes to its fn
    for user_msg, ai_msg in chat_history:
        messages.append({"role": "user", "content": f"{user_msg}"})
        if ai_msg:
            messages.append({"role": "assistant", "content": f"{ai_msg}"})
    messages.append({"role": "user", "content": f"{query}"})
    completion = openai.ChatCompletion.create(
        model="gpt-4",
        temperature=0.3,
        max_tokens=2048,
        messages=messages
    )
    return completion.choices[0].message.content
def call_anthropic(query, chat_history, system_prompt):
    # Anthropic() reads the API key from the ANTHROPIC_API_KEY environment variable
    messages = f"System: {system_prompt}"
    model = Anthropic()
    # Flatten the (user, assistant) history pairs into the legacy
    # HUMAN_PROMPT/AI_PROMPT completion format used by claude-2
    for user_msg, ai_msg in chat_history:
        messages += f"{HUMAN_PROMPT} {user_msg}"
        if ai_msg:
            messages += f"{AI_PROMPT} {ai_msg}"
    messages += f"{HUMAN_PROMPT} {query}"
    messages += f"{AI_PROMPT}"
    completion = model.completions.create(
        model="claude-2",
        max_tokens_to_sample=2048,
        prompt=messages,
        temperature=0.3)
    return completion.completion
def build_first_prompt(topic, character):
    opinion = prompts["{}_{}".format(character, topic)]
    system_prompt = "You are {Character}. You have the following opinion on {Topic}: {Opinion}.\n ".format(Topic=topic,
                                                                                                            Character=character,
                                                                                                            Opinion=opinion)
    prompt = "Please write a thoughtful question to ask the user about {Topic}.\n\n".format(Topic=topic)
    question = call_openai(prompt, [], system_prompt)
    starting_prompt = prompts["Start"].format(Topic=topic, Character=character, Opinion=opinion, Question=question)
    return starting_prompt
def chatbot(input_text, history, topic, call_athena):
    global character_dict, selected_topic, character
    if len(history) > 20:
        call_athena = True
    # We could also let Anthropic/Athena decide when to intervene, e.g. with the directive
    # "if you think you need to intervene, respond simply with 'Stop', otherwise respond
    # 'Continue'" -- see the athena_should_intervene sketch below
    if len(history) == 0:
        # On the first turn, lock in the selected topic so that changing the dropdown
        # partway through the conversation has no effect
        selected_topic = topic
        character = character_dict[randint(1, 3)]
    if call_athena:
        transcript = "\n".join(f"User: {user_msg}\nAI: {ai_msg}" for user_msg, ai_msg in history)
        system_prompt = prompts["Coach"].format(Topic=selected_topic, Character=character,
                                                Transcript=transcript)
        response = call_anthropic(input_text, history, system_prompt)
    else:
        system_prompt = build_first_prompt(selected_topic, character)
        response = call_openai(input_text, history, system_prompt)
    # Record the turn so the module-level history can be dumped to JSON on shutdown
    conversation_history.append([input_text, response])
    return response
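# The comment in chatbot() above suggests letting Athena decide when to step in.
# The helper below is only a hypothetical sketch of that idea (its name, the
# classifier prompt, and the "Stop"/"Continue" convention are not part of the
# original app): it asks the Anthropic model for a one-word verdict on the
# transcript so far and returns True when the coach should intervene.
def athena_should_intervene(history):
    directive = ("You are Athena, a dialogue coach observing a conversation. "
                 "If you think you need to intervene, respond simply with 'Stop', "
                 "otherwise respond 'Continue'.")
    verdict = call_anthropic("Should you intervene now?", history, directive)
    return "stop" in verdict.lower()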
prompts = json.load(open("prompts.json", "r", encoding='utf-8'))
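# Expected prompts.json keys (inferred from the lookups in the functions above; the
# actual prompt wording lives in that file and is not reproduced here):
#   "<Character>_<Topic>" (e.g. "Vicky_Race") -> that character's opinion text
#   "Start" -> template using {Topic}, {Character}, {Opinion} and {Question}
#   "Coach" -> template using {Topic}, {Character} and {Transcript}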
welcome = """## Welcome to CivicXChange\n### Before you begin, please take a moment to get acquainted with your dialogue coach, Athena. Athena is here to facilitate and guide your conversations, providing a safe space for exploration, learning, and understanding.\n## Here are some of the ways Athena can assist you:\n- **Context and Information** : If you're uncertain about any topic or concept during your conversation, simply call Athena's name and ask for clarification or additional information.\n- **Advice** : Unsure about how to respond or want to better articulate your thoughts? Athena can provide guidance and suggestions to help you navigate through the conversation.\n- **De-escalation** : If the conversation feels heated or overwhelming, call on Athena to help de-escalate the situation. She can help steer the conversation towards a more constructive path or end it as well.\n- **Reflection Process** : Athena can help you reflect on your dialogues. At any point, you can ask her to initiate the reflection process. This can help you understand the conversation from a broader perspective and recognize your own biases, learnings, and areas for improvement.\n- **Managing Interruptions** : If you ever feel like the conversation should move in a different direction or you want to continue on a certain topic, don't hesitate to interrupt Athena or the AI avatar. Just say "Athena, I'd like to continue this discussion" or "Athena, let's change the subject." Remember, this is your conversation, and you have control over its direction.\n\n ### Remember, Athena is here to support you in your journey towards understanding and engaging in open dialogue. Don't hesitate to call on her whenever you need assistance. Enjoy your conversations, and welcome to CivicXChange! | |
""" | |
theme = gr.themes.Base(
    primary_hue="slate",
    secondary_hue="purple",
).set(
    background_fill_primary='*primary_900',
    background_fill_secondary="*secondary_400",
    body_text_color='*neutral_100',
    chatbot_code_background_color="*primary_800",
    checkbox_label_background_fill="*secondary_800",
    checkbox_background_color="*secondary_800",
    block_info_text_color="*secondary_800",
    input_background_fill='*primary_600',
    button_primary_background_fill="*secondary_200",
    color_accent_soft="*primary_800"
)
# Initialize conversation history and mode
mode = "conversation"
conversation_id = datetime.datetime.now().strftime("%Y%m%d%H%M%S")
character_dict = {1: "Vicky", 2: "Jim", 3: "Rosa"}
selected_topic = "Race"
character = ""
conversation_history = []
with gr.Blocks(title="CivicXChange", theme=theme) as iface:
    with gr.Column():
        gr.HTML(f"<h1>CivicXChange</h1><p>{markdown.markdown(welcome)}</p>")
        topic = gr.Dropdown(value="Race", choices=['Race', 'Class', 'Immigration'], label="Topic")
        # This is a toggle, it's up to the user to not use it willy nilly
        call_athena = gr.Checkbox(label="Talk To Athena")
        chat = gr.ChatInterface(fn=chatbot, additional_inputs=[topic, call_athena])
iface.launch()
# Save conversation history to a file once the Gradio server shuts down
# (iface.launch() blocks until then)
os.makedirs("/home/user/app/history", exist_ok=True)
with open(f'/home/user/app/history/conversation_history_{conversation_id}.json', 'w') as f:
    print("SAVING TO JSON")
    json.dump(conversation_history, f)