# Hugging Face Space app: LlamaIndex Dynamic Storyteller.
# (The original "Spaces: / Running / Running" lines were page-scrape residue, not code.)
import asyncio
import os
import re
import tempfile
import textwrap
from io import BytesIO

import gradio as gr
import httpx
from PIL import Image
from llama_index.core.workflow import (
    Workflow,
    step,
    StartEvent,
    StopEvent,
    Context,
    Event,
)
from llama_index.llms.groq import Groq
# --- Secrets (read once at import time from the Space's secret store) ---
GROQ_API_KEY = os.environ.get("Groq_Token")  # Groq LLM key; the secret is named "Groq_Token"
HF_TOKEN = os.environ.get("HF_TOKEN2")       # HF Inference API token; the secret is named "HF_TOKEN2"
# --- Event and Workflow Definitions --- | |
class StoryContext(Event):
    """One generated story segment together with the player's current inventory."""

    # story_part: narrative text plus the formatted "Choices:" section.
    story_part: str
    # inventory: item names collected so far via [ADD_ITEM: ...] tags.
    inventory: list[str]
class SceneReadyEvent(Event):
    """Marker event with no payload.

    NOTE(review): nothing in this file emits or consumes it — it appears to be
    a leftover from the removed display_scene step; confirm before deleting.
    """

    pass
class UserChoice(Event):
    """The player's selected action for the next turn."""

    # choice: raw text the player typed into the Gradio input box.
    choice: str
class StoryEnd(Event):
    """Signals that the adventure is over."""

    # final_message: closing text shown to the player (includes "--- THE END ---").
    final_message: str
# Helper function to generate an image and return its path
async def generate_image(prompt: str, hf_token: str) -> str | None:
    """Render a scene illustration for *prompt* via the HF Inference API.

    Args:
        prompt: Scene description (the current narrative text).
        hf_token: Hugging Face API token used as a Bearer credential.

    Returns:
        Path to the saved PNG file, or None when generation fails
        (timeout, network/HTTP error, or a non-image response body).
    """
    API_URL = "https://api-inference.huggingface.co/models/black-forest-labs/FLUX.1-schnell"
    headers = {"Authorization": f"Bearer {hf_token}"}
    # Fixed style prefix keeps the art direction consistent across scenes.
    full_prompt = f"epic fantasy art, digital painting, cinematic lighting, masterpiece, {prompt}"
    payload = {"inputs": full_prompt}
    try:
        async with httpx.AsyncClient() as client:
            # Long timeout: cold model starts on the free inference tier are slow.
            response = await client.post(API_URL, headers=headers, json=payload, timeout=180.0)
            response.raise_for_status()
        image = Image.open(BytesIO(response.content))
        # Save to a unique temp file that Gradio can serve. A fixed filename
        # ("scene_image.png") would be clobbered when several user sessions
        # generate scenes concurrently in the same Space.
        with tempfile.NamedTemporaryFile(suffix=".png", delete=False) as tmp:
            image.save(tmp, format="PNG")
            return tmp.name
    except (httpx.TimeoutException, httpx.RequestError, IOError) as e:
        # PIL raises UnidentifiedImageError (an OSError/IOError subclass) when
        # the API returns JSON (e.g. "model loading") instead of image bytes.
        print(f"Image generation failed: {e}")
        return None
# The full workflow class
class StorytellerWorkflow(Workflow):
    """Generates the branching story, one turn per call.

    NOTE(review): run_turn() calls generate_story_part() directly instead of
    going through Workflow.run(), so the methods are not @step-decorated —
    confirm this matches the intended llama_index usage before changing it.
    """

    # Compiled once and shared by findall/sub below (the original built the
    # same pattern twice from duplicated literals).
    _ITEM_RE = re.compile(r"\[ADD_ITEM: (.*?)\]")

    def __init__(self, **kwargs):
        # Generous timeout: one turn is a single LLM round-trip.
        super().__init__(timeout=300, **kwargs)

    async def generate_story_part(self, ev: StartEvent | UserChoice, ctx: Context) -> StoryContext | StoryEnd:
        """Run one story turn: build the prompt, call Groq, parse the reply.

        Side effects: updates "last_story_part" and "inventory" in ctx.store.
        Returns StoryEnd when the model emits "[END]", otherwise StoryContext.
        """
        inventory = await ctx.store.get("inventory", [])
        if isinstance(ev, StartEvent):
            # The prompt for the first turn
            prompt = """
You are a creative text adventure game master. Your output is for a console game.
Start a new story about a curious explorer entering a recently discovered, glowing cave.
Keep the tone mysterious and exciting. After the story part, provide two distinct choices for the player to make.
Format your response exactly like this: STORY: [The story text goes here] CHOICES: 1. [First choice] 2. [Second choice]
"""
        else:
            # Continuation turn: feed back the previous scene, the player's
            # choice, and the inventory so the model stays consistent.
            last_story_part = await ctx.store.get("last_story_part")
            prompt = f"""
You are a creative text adventure game master.
The story so far: "{last_story_part}"
The player chose: "{ev.choice}"
The player's inventory: {inventory}
Continue the story.
If a choice results in an item, use `[ADD_ITEM: item name]`. If the story should end, write "[END]".
Format your response exactly like this:
STORY:
[The story text goes here]
CHOICES:
1. [First choice on its own line]
2. [Second choice on its own line]
"""
        llm = Groq(model="llama3-8b-8192", api_key=GROQ_API_KEY)
        response = await llm.acomplete(prompt)
        response_text = str(response)
        # Harvest any [ADD_ITEM: ...] tags into the inventory (deduplicated),
        # then strip the tags from the text shown to the player.
        for item in self._ITEM_RE.findall(response_text):
            if item not in inventory:
                inventory.append(item)
        response_text = self._ITEM_RE.sub("", response_text).strip()
        if response_text.startswith("[END]"):
            final_message = response_text.replace("[END]", "")
            return StoryEnd(final_message=f"\n--- THE END ---\n{final_message}")
        try:
            # Split once on each marker instead of re-splitting the whole text.
            _, _, after_story = response_text.partition("STORY:")
            story_section, sep, choices_section = after_story.partition("CHOICES:")
            if not sep:
                raise IndexError("missing STORY:/CHOICES: markers")
            full_story_part = f"{story_section.strip()}\n\nChoices:\n{choices_section.strip()}"
        except IndexError:
            # Model ignored the format — keep the game alive with a stub scene.
            full_story_part = "The story continues... but the path is blurry."
        await ctx.store.set("last_story_part", full_story_part)
        await ctx.store.set("inventory", inventory)
        # Return the simplified event, without the is_new_scene flag
        return StoryContext(story_part=full_story_part, inventory=inventory)

    def end_story(self, ev: StoryEnd) -> StopEvent:
        """This step satisfies the workflow validator by providing a path to a StopEvent."""
        return StopEvent(result=ev.final_message)

    # These two steps are no longer needed, as Gradio's UI will handle the display logic.
    # @step async def display_scene(...)
    # @step async def get_user_choice(...)
# --- Gradio UI and Application Logic ---
async def run_turn(user_input, game_state):
    """Drive one game turn for the Gradio UI.

    Async generator yielding (image_path, story_text, inventory_md, game_state)
    tuples: first without the image so the text appears immediately, then a
    second time with the generated scene image.

    Args:
        user_input: The player's typed choice ("" / None on the first turn).
        game_state: Dict with 'inventory' and 'last_story_part', or None to
            start a brand-new game.
    """
    if game_state is None:
        # First turn: fresh state and a StartEvent.
        game_state = {'inventory': [], 'last_story_part': None}
        event = StartEvent()
    else:
        event = UserChoice(choice=user_input)
    # A fresh workflow + context per turn; persistent state lives in game_state.
    workflow = StorytellerWorkflow()
    ctx = Context(workflow=workflow)
    await ctx.store.set("inventory", game_state['inventory'])
    await ctx.store.set("last_story_part", game_state['last_story_part'])
    result_event = await workflow.generate_story_part(event, ctx)
    if isinstance(result_event, StoryEnd):
        # Clear the UI state so the next submit starts a new game.
        yield (None, result_event.final_message, "", None)
        return
    if isinstance(result_event, StoryContext):
        # BUGFIX: use partition(), not split(). The workflow's fallback text
        # ("The story continues... but the path is blurry.") contains no
        # "Choices:" marker, and split() would then raise ValueError on the
        # 2-tuple unpack; partition() safely yields an empty choices_text.
        narrative, _, choices_text = result_event.story_part.partition("Choices:")
        # Defensive reformat: put any space-separated choice number (" 2.")
        # onto its own line so the choices always render as a list.
        choices_text = re.sub(r" (\d\.)", r"\n\1", choices_text)
        story_display_text = f"{textwrap.fill(narrative, width=80)}\n\nChoices:{choices_text}"
        new_game_state = {
            'inventory': result_event.inventory,
            'last_story_part': result_event.story_part
        }
        inventory_text = f"**Inventory:** {', '.join(new_game_state['inventory']) if new_game_state['inventory'] else 'Empty'}"
        # 1. Show the text right away, without waiting for the image.
        yield (None, story_display_text, inventory_text, new_game_state)
        # 2. Generate the image.
        image_path = None
        if HF_TOKEN:
            image_path = await generate_image(narrative, HF_TOKEN)
        # 3. Yield a second, complete tuple with the new image path.
        yield (image_path, story_display_text, inventory_text, new_game_state)
def create_demo():
    """Build the Gradio Blocks UI and wire the game loop into it."""
    with gr.Blocks(theme=gr.themes.Soft()) as demo:
        # Per-session game state: None means "no game started yet".
        game_state = gr.State(None)
        gr.Markdown("# LlamaIndex Workflow: Dynamic Storyteller")
        gr.Markdown("An AI-powered text adventure game where every scene can be illustrated by AI.")
        with gr.Row():
            with gr.Column(scale=1):
                image_display = gr.Image(label="Scene", interactive=False)
                inventory_display = gr.Markdown("**Inventory:** Empty")
            with gr.Column(scale=2):
                story_display = gr.Textbox(label="Story", lines=15, interactive=False)
                user_input = gr.Textbox(label="What do you do?", placeholder="Type your choice and press Enter...")
        turn_outputs = [image_display, story_display, inventory_display, game_state]
        # Submitting a choice advances the story by one turn.
        user_input.submit(
            fn=run_turn,
            inputs=[user_input, game_state],
            outputs=turn_outputs
        )
        # On page load, kick off a brand-new story (no user input yet).
        demo.load(
            fn=run_turn,
            inputs=[gr.State(None), game_state],
            outputs=turn_outputs
        )
    return demo
if __name__ == "__main__":
    if not GROQ_API_KEY or not HF_TOKEN:
        # BUGFIX: the message previously told users to set "GROQ_API_KEY" and
        # "HF_TOKEN", but the code actually reads the Space secrets named
        # "Groq_Token" and "HF_TOKEN2" — name the real secret keys.
        print("ERROR: API keys not found. Make sure to set Groq_Token and HF_TOKEN2 in your Hugging Face Space Secrets.")
    else:
        app = create_demo()
        app.launch()