import gradio as gr
import requests
import os
import random
import pandas as pd
# Load instructions from local files
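# (Each persona's system prompt is expected at instructions/<persona>.txt, lowercase filename.)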
def load_instruction(persona):
    try:
        with open(f"instructions/{persona.lower()}.txt", "r") as file:
            return file.read()
    except FileNotFoundError:
        return ""
# Call Cohere API
def call_cohere_api(system_instruction, user_prompt):
    headers = {
        "Authorization": f"Bearer {os.getenv('COHERE_API_KEY')}",
        "Content-Type": "application/json"
    }
    # Append local-context and word-limit instructions
    user_prompt += "\n\nWhen possible, make your answer relevant to Bristol or surrounding South West England context."
    user_prompt += "\n\nAnswer in 100 words or fewer."
    payload = {
        "model": "command-r-plus",
        "message": user_prompt,
        "preamble": system_instruction,
        "max_tokens": 300
    }
    response = requests.post("https://api.cohere.ai/v1/chat", headers=headers, json=payload)
    return response.json().get("text", "No response").strip()
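# Minimal usage sketch (assumes COHERE_API_KEY is set, e.g. as a Space secret; the
# persona/prompt strings below are only illustrative):
#   reply = call_cohere_api("You are a river.", "How do you feel about dams?")
# Note: this targets Cohere's v1 /chat endpoint, which takes "message" and "preamble" fields.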
# Load questions from file
def load_questions():
    try:
        with open("questions.txt", "r") as file:
            return [line.strip() for line in file if line.strip()]
    except FileNotFoundError:
        return []
questions_list = load_questions()
# Generate random question
def get_random_question():
    return random.choice(questions_list) if questions_list else "No questions available."
# Load counter-narratives CSV
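# Expected columns (as used below): "myth", "fact", "persona"; one myth/fact pair per row,
# plus the persona asked to respond to it.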
def load_counternarratives():
    try:
        df = pd.read_csv("counternarratives.csv")
        return df
    except FileNotFoundError:
        print("counternarratives.csv not found.")
        return pd.DataFrame(columns=["myth", "fact", "persona"])
counternarratives = load_counternarratives()
# Generate Random Myth or Fact and trigger persona response
def get_random_myth_or_fact():
    if counternarratives.empty:
        # Must return one value per Gradio output wired to this handler (7 in total)
        return "No myths or facts available.", "Fact-Checker", "Fact-Checker", "", "", "", ""

    # 🔄 Randomly select a row from the dataframe
    row = counternarratives.sample(1).iloc[0]
    selected_column = random.choice(["myth", "fact"])
    myth_or_fact = row[selected_column]
    persona = row["persona"]

    # 🔄 Call the Cohere API to get the persona's response
    persona_instruction = load_instruction(persona)
    persona_response = call_cohere_api(persona_instruction, myth_or_fact)

    # Prepare the fact-checker response
    fact_check_response = f"{myth_or_fact} - Myth or Fact?\n\n"

    # ✅ Fact-checker response logic
    if selected_column == "myth":
        fact_check_response += f"❌ **MYTH**\nThe fact is: {row['fact']}"
    else:
        fact_response = call_cohere_api("You are an ecolinguistics-aware assistant.", f"Elaborate on this fact: {row['fact']}")
        fact_check_response += f"✅ **FACT**\n{fact_response}"

    # Return the myth/fact, update both persona dropdowns, and fill the responses and their titles
    return (
        myth_or_fact,
        persona,
        persona,
        fact_check_response,
        persona_response,
        "### Fact Checker",
        f"### ... and what would the {persona} say about it?",
    )
def ask_with_titles(p1, p2, q):
    # Generate responses
    response1 = call_cohere_api(load_instruction(p1), q)
    response2 = call_cohere_api(load_instruction(p2), q)
    # Generate titles
    title1 = f"### {p1} Responds"
    title2 = f"### {p2} Responds"
    # Return responses and titles
    return response1, response2, title1, title2
# Dynamically load persona names from instructions folder
personas = [os.path.splitext(f)[0].capitalize() for f in os.listdir("instructions") if f.endswith(".txt")]
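# Note: the dropdown defaults below ("Earth", "Crow") assume instructions/earth.txt and
# instructions/crow.txt exist.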
# Gradio Interface
with gr.Blocks() as demo:
    with gr.Row():
        with gr.Column(scale=0.10):
            gr.Image(value="data/WildVoices.png", label="Wild Voices", show_label=False)
        with gr.Column(scale=0.75):
            gr.Markdown("""
# 🌲 **Wild Voices** — *Listening to the More-than-Human World*

Welcome to **Wild Voices**, a unique space where you can converse with the more-than-human world.
Here, you are invited to ask questions to *rivers*, *trees*, *owls*, *foxes*, and many more.
Listen as they respond from their own perspectives—offering the wisdom of the forest, the resilience of the river, and the gentle whispers of the wind.

🦄 **Generate Myths and Facts:** Challenge common narratives with our *Myth/Fact Generator*, guided by nature's voice of truth.

🎲 **Ask Random Questions:** Get inspired by thought-provoking questions that spark connection with the natural world.

🦉 **Discover Hidden Wisdom:** Experience the reflections of *Oak*, *Dragonfly*, *Rain*, and even the humble *Dandelion* as they share their stories.

---

**Space created and powered by [The H4rmony Project](https://TheH4rmonyproject.org)** — Promoting Sustainable Narratives Through AI.

_Personae, questions and myth/fact datasets have been generated by [Theophrastus](https://chatgpt.com/g/g-XKAVRvxwc-theophrastus), a H4rmony chat assistant._

_Based on an original concept by [Crystal Campbell](https://www.linkedin.com/in/earthly/) for a more-than-human AI Council of Beings._
""")
    with gr.Row():
        with gr.Column(scale=0.15):
            persona1 = gr.Dropdown(personas, label="Choose First Persona", value="Earth")
        with gr.Column(scale=0.15):
            persona2 = gr.Dropdown(personas, label="Choose Second Persona", value="Crow")

    user_input = gr.Textbox(label="🌱 Your Question", placeholder="e.g., What do you think of humans?")

    with gr.Row():
        with gr.Column(scale=0.15):
            pass
        with gr.Column(scale=0.15):
            pass
        with gr.Column(scale=0.15):
            random_button = gr.Button("🎲 Generate Random Question")
        with gr.Column(scale=0.15):
            ask_button = gr.Button("🌎 Submit Question")
        with gr.Column(scale=0.15):
            myth_fact_button = gr.Button("🤔 Generate Random Myth/Fact")

    with gr.Row():
        with gr.Column(scale=0.50):
            output1_title = gr.Markdown("### ")
        with gr.Column(scale=0.50):
            output2_title = gr.Markdown("### ")

    with gr.Row():
        output1 = gr.Textbox(label="")
        output2 = gr.Textbox(label="")

    # Button events
    random_button.click(fn=get_random_question, inputs=[], outputs=[user_input])

    # Myth/Fact button click event
    myth_fact_button.click(
        fn=get_random_myth_or_fact,
        inputs=[],
        outputs=[user_input, persona1, persona2, output1, output2, output1_title, output2_title]
    )

    ask_button.click(
        fn=ask_with_titles,
        inputs=[persona1, persona2, user_input],
        outputs=[output1, output2, output1_title, output2_title]
    )
if __name__ == "__main__":
    demo.launch()