fix: update get_user_state and set_user_state calls to be asynchronous
- src/agent/llm_graph.py +1 -1
- src/agent/runner.py +1 -1
- src/agent/tools.py +10 -10
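For context, here is a minimal sketch of the async state accessors these call sites now assume. Only the `async` signatures and the UserState attributes touched in the diff (story_frame, current_scene_id, scenes, user_choices, ending) come from this commit; the pydantic stub and the in-memory dict backend are placeholders for whatever storage the project actually uses.

from typing import Dict, List, Optional

from pydantic import BaseModel


class UserState(BaseModel):
    # Stub: field names are the ones the diff touches; the value types are assumptions.
    story_frame: Optional[dict] = None
    current_scene_id: Optional[str] = None
    scenes: Dict[str, dict] = {}
    user_choices: List[dict] = []
    ending: Optional[dict] = None


_store: Dict[str, UserState] = {}  # hypothetical in-memory backend


async def get_user_state(user_hash: str) -> UserState:
    # Every call site in this commit now awaits this coroutine.
    return _store.setdefault(user_hash, UserState())


async def set_user_state(user_hash: str, state: UserState) -> None:
    # Likewise awaited wherever a tool persists state.
    _store[user_hash] = state

With an interface shaped like this, each call site only gains an `await`, which is why all three files change in lockstep without touching the surrounding logic.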
src/agent/llm_graph.py
CHANGED
@@ -75,7 +75,7 @@ async def node_init_game(state: GraphState) -> GraphState:
 
 async def node_player_step(state: GraphState) -> GraphState:
     logger.debug("[Graph] node_player_step state: %s", state)
-    user_state = get_user_state(state.user_hash)
+    user_state = await get_user_state(state.user_hash)
     scene_id = user_state.current_scene_id
     if state.choice_text:
         await update_state_with_choice.ainvoke(
src/agent/runner.py
CHANGED
@@ -38,7 +38,7 @@ async def process_step(
 
     final_state = await llm_game_graph.ainvoke(asdict(graph_state))
 
-    user_state: UserState = get_user_state(user_hash)
+    user_state: UserState = await get_user_state(user_hash)
     response: Dict = {}
 
     ending = final_state.get("ending")
src/agent/tools.py
CHANGED
@@ -53,9 +53,9 @@ async def generate_story_frame(
         character=character,
         genre=genre,
     )
-    state = get_user_state(user_hash)
+    state = await get_user_state(user_hash)
     state.story_frame = story_frame
-    set_user_state(user_hash, state)
+    await set_user_state(user_hash, state)
     return story_frame.dict()
 
 
@@ -65,7 +65,7 @@ async def generate_scene(
     last_choice: Annotated[str, "Last user choice"],
 ) -> Annotated[Dict, "Generated scene"]:
     """Generate a new scene based on the current user state."""
-    state = get_user_state(user_hash)
+    state = await get_user_state(user_hash)
     if not state.story_frame:
         return _err("Story frame not initialized")
     llm = create_llm().with_structured_output(SceneLLM)
@@ -98,7 +98,7 @@ async def generate_scene(
     )
     state.current_scene_id = scene_id
     state.scenes[scene_id] = scene
-    set_user_state(user_hash, state)
+    await set_user_state(user_hash, state)
    return scene.dict()
 
 
@@ -119,10 +119,10 @@ async def generate_scene_image(
             # for now always modify the image to avoid the generating an update in a completely wrong style
             else modify_image(current_image, change_scene.scene_description)
         )
-        state = get_user_state(user_hash)
+        state = await get_user_state(user_hash)
         if scene_id in state.scenes:
             state.scenes[scene_id].image = image_path
-        set_user_state(user_hash, state)
+        await set_user_state(user_hash, state)
         return image_path
     except Exception as exc:  # noqa: BLE001
         return _err(str(exc))
@@ -137,7 +137,7 @@ async def update_state_with_choice(
     """Record the player's choice in the state."""
     import datetime
 
-    state = get_user_state(user_hash)
+    state = await get_user_state(user_hash)
     state.user_choices.append(
         UserChoice(
             scene_id=scene_id,
@@ -145,7 +145,7 @@ async def update_state_with_choice(
             timestamp=datetime.datetime.utcnow().isoformat(),
         )
     )
-    set_user_state(user_hash, state)
+    await set_user_state(user_hash, state)
    return state.dict()
 
 
@@ -154,7 +154,7 @@ async def check_ending(
     user_hash: Annotated[str, "User session ID"],
 ) -> Annotated[Dict, "Ending check result"]:
     """Check whether an ending has been reached."""
-    state = get_user_state(user_hash)
+    state = await get_user_state(user_hash)
     if not state.story_frame:
         return _err("No story frame")
     llm = create_llm().with_structured_output(EndingCheckResult)
@@ -166,6 +166,6 @@ async def check_ending(
     resp: EndingCheckResult = await llm.ainvoke(prompt)
     if resp.ending_reached and resp.ending:
         state.ending = resp.ending
-        set_user_state(user_hash, state)
+        await set_user_state(user_hash, state)
         return {"ending_reached": True, "ending": resp.ending.dict()}
     return {"ending_reached": False}