Update app.py
app.py CHANGED
@@ -25,7 +25,6 @@ HF_TOKEN = os.environ.get("HF_TOKEN")
 class StoryContext(Event):
     story_part: str
     inventory: list[str]
-    is_new_scene: bool
 
 class SceneReadyEvent(Event):
     pass
@@ -38,7 +37,7 @@ class StoryEnd(Event):
 
 # Helper function to generate an image and return its path
 async def generate_image(prompt: str, hf_token: str) -> str | None:
-    API_URL = "https://api-inference.huggingface.co/models/stabilityai/
+    API_URL = "https://api-inference.huggingface.co/models/stabilityai/sd-turbo"
     headers = {"Authorization": f"Bearer {hf_token}"}
     full_prompt = f"epic fantasy art, digital painting, cinematic lighting, masterpiece, {prompt}"
     payload = {"inputs": full_prompt}
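The hunk only shows the top of generate_image; the actual request and file handling sit below the fold and are not part of this commit. As a rough sketch of how the rest of a call to the Hugging Face Inference API could look (the httpx client, the temp-file handling, and the helper name are assumptions for illustration, not code from this Space):

# Illustrative sketch only; the remainder of generate_image is not visible in this hunk.
import tempfile
import httpx

async def generate_image_sketch(prompt: str, hf_token: str) -> str | None:
    API_URL = "https://api-inference.huggingface.co/models/stabilityai/sd-turbo"
    headers = {"Authorization": f"Bearer {hf_token}"}
    payload = {"inputs": f"epic fantasy art, digital painting, cinematic lighting, masterpiece, {prompt}"}
    try:
        async with httpx.AsyncClient(timeout=60) as client:
            resp = await client.post(API_URL, headers=headers, json=payload)
            resp.raise_for_status()
        # The Inference API returns raw image bytes; write them to a file so Gradio can show the path.
        with tempfile.NamedTemporaryFile(suffix=".png", delete=False) as f:
            f.write(resp.content)
            return f.name
    except Exception:
        # Image generation is optional; the story text can still be shown without it.
        return None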
@@ -62,13 +61,15 @@ class StorytellerWorkflow(Workflow):
         super().__init__(timeout=300, **kwargs)
 
     @step
+    # app.py
+
+    @step
     async def generate_story_part(self, ev: StartEvent | UserChoice, ctx: Context) -> StoryContext | StoryEnd:
         inventory = await ctx.store.get("inventory", [])
         prompt = ""
-        is_new_scene_flag = False
 
         if isinstance(ev, StartEvent):
-
+            # The prompt for the first turn remains the same.
            prompt = """
            You are a creative text adventure game master. Your output is for a console game.
            Start a new story about a curious explorer entering a recently discovered, glowing cave.
@@ -77,12 +78,14 @@ class StorytellerWorkflow(Workflow):
            """
         elif isinstance(ev, UserChoice):
             last_story_part = await ctx.store.get("last_story_part")
+            # --- SIMPLIFIED PROMPT ---
+            # Removed the instruction about [NEW_SCENE]
            prompt = f"""
            You are a creative text adventure game master.
            The story so far: "{last_story_part}"
            The player chose: "{ev.choice}"
            The player's inventory: {inventory}
-           Continue the story.
+           Continue the story.
            If a choice results in an item, use `[ADD_ITEM: item name]`. If the story should end, write "[END]".
            Format your response exactly like this: STORY: [The story text goes here] CHOICES: 1. [First choice] 2. [Second choice]
            """
@@ -91,9 +94,7 @@
         response = await llm.acomplete(prompt)
         response_text = str(response)
 
-
-        is_new_scene_flag = True
-        response_text = response_text.replace("[NEW_SCENE]", "").strip()
+        # --- REMOVED THE [NEW_SCENE] CHECK ---
 
         items_found = re.findall(r"\[ADD_ITEM: (.*?)\]", response_text)
         if items_found:
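The prompt above asks the model to answer in a fixed "STORY: ... CHOICES: 1. ... 2. ..." layout, and later code splits on "Choices:" and scans for [ADD_ITEM: ...] markers. A small, self-contained sketch of how such a response could be parsed (the function name and the case-insensitive handling are assumptions, not the Space's actual parser):

import re

def parse_story_response(response_text: str) -> tuple[str, list[str]]:
    # Split the narrative from the choices block, tolerating "CHOICES:" vs "Choices:".
    parts = re.split(r"choices:", response_text, maxsplit=1, flags=re.IGNORECASE)
    story = parts[0].replace("STORY:", "", 1).strip()
    choices_block = parts[1] if len(parts) > 1 else ""
    # Pull out numbered options such as "1. Enter the tunnel".
    choices = [c.strip() for c in re.findall(r"\d+\.\s*(.+)", choices_block)]
    return story, choices

With the [NEW_SCENE] handling gone, [ADD_ITEM: ...] and [END] are the only markers the step still has to recognise.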
@@ -115,9 +116,10 @@
 
         await ctx.store.set("last_story_part", full_story_part)
         await ctx.store.set("inventory", inventory)
-
-
-
+
+        # Return the simplified event, without the is_new_scene flag
+        return StoryContext(story_part=full_story_part, inventory=inventory)
+
     @step
     def end_story(self, ev: StoryEnd) -> StopEvent:
         """This step satisfies the workflow validator by providing a path to a StopEvent."""
@@ -134,56 +136,61 @@
 # This is the final, correct version of the main application logic function.
 # This is the final, correct version of the main application logic function.
 
+# app.py
+
 async def run_turn(user_input, game_state):
-    # game_state is now a dictionary: {'inventory': [], 'last_story_part': None}
-
-    # On the first turn, initialize the game state
     if game_state is None:
         game_state = {'inventory': [], 'last_story_part': None}
         event = StartEvent()
     else:
-        # For subsequent turns, create a UserChoice event
         event = UserChoice(choice=user_input)
 
-    # --- THIS IS THE CORRECTED LOGIC ---
-    # 1. Create a fresh workflow instance for this turn
     workflow = StorytellerWorkflow()
-
-    # 2. Create a context object, PASSING THE WORKFLOW to it.
-    ctx = Context(workflow=workflow) # This is the fix.
-
-    # 3. Asynchronously set the values in the context's store
+    ctx = Context(workflow=workflow)
     await ctx.store.set("inventory", game_state['inventory'])
     await ctx.store.set("last_story_part", game_state['last_story_part'])
 
-    # 4. Call the specific step method directly, passing the populated context
     result_event = await workflow.generate_story_part(event, ctx)
-    # ---
 
-    # Process the result from the step
     if isinstance(result_event, StoryEnd):
-
+        yield {
+            image_display: None,
+            story_display: result_event.final_message,
+            inventory_display: "",
+            game_state: None
+        }
+        return
 
     if isinstance(result_event, StoryContext):
         narrative, choices = result_event.story_part.split("Choices:", 1)
         story_display = f"{textwrap.fill(narrative, width=80)}\n\nChoices:{choices}"
 
-        image_path = None
-        if result_event.is_new_scene and HF_TOKEN:
-            image_path = await generate_image(narrative, HF_TOKEN)
-
-        # Update the game state for the next turn
         new_game_state = {
            'inventory': result_event.inventory,
            'last_story_part': result_event.story_part
        }
        inventory_text = f"**Inventory:** {', '.join(new_game_state['inventory']) if new_game_state['inventory'] else 'Empty'}"
 
-
-
-
-
-
+        # Instantly yield the text
+        yield {
+            image_display: None,
+            story_display: story_display,
+            inventory_display: inventory_text,
+            game_state: new_game_state
+        }
+
+        # --- KEY CHANGE: ALWAYS GENERATE THE IMAGE ---
+        # Removed the `if result_event.is_new_scene` condition
+        image_path = None
+        if HF_TOKEN:
+            image_path = await generate_image(narrative, HF_TOKEN)
+
+        # Yield the final update with the image when it's ready
+        if image_path:
+            yield {
+                image_display: image_path
+            }
+
 def create_demo():
     with gr.Blocks(theme=gr.themes.Soft()) as demo:
         # State object to hold the workflow instance between turns
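run_turn now yields dictionaries keyed by Gradio components (image_display, story_display, inventory_display, game_state), so create_demo has to define those components and register run_turn as a generator handler whose outputs include all of them. The rest of create_demo is outside this diff; a rough sketch of how the wiring might look, with every component name and the layout assumed rather than taken from the Space:

# Hypothetical wiring sketch; create_demo's full body is not shown in this commit.
import gradio as gr

def create_demo_sketch():
    with gr.Blocks(theme=gr.themes.Soft()) as demo:
        game_state = gr.State(value=None)  # holds {'inventory': [...], 'last_story_part': ...}
        image_display = gr.Image(label="Scene", interactive=False)
        story_display = gr.Markdown("Press Submit to begin the adventure.")
        inventory_display = gr.Markdown("**Inventory:** Empty")
        choice_box = gr.Textbox(label="Your choice")
        submit = gr.Button("Submit")

        # Because run_turn is an async generator, each `yield {component: value}` streams a
        # partial update: the story text appears first, then the image once generate_image finishes.
        submit.click(
            run_turn,
            inputs=[choice_box, game_state],
            outputs=[image_display, story_display, inventory_display, game_state],
        )
    return demo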