Dylan committed on
Commit
7d14b9f
·
1 Parent(s): 9bcff58

max concurrency 1 fix

Browse files
Files changed (2) hide show
  1. agents.py +1 -1
  2. app.py +1 -1
agents.py CHANGED
@@ -108,7 +108,7 @@ def describe_with_voice(state: State) -> State:
108
  input_len = inputs["input_ids"].shape[-1]
109
 
110
  with torch.inference_mode():
111
- generation = model.generate(**inputs, max_new_tokens=1000, do_sample=False)
112
  generation = generation[0][input_len:]
113
 
114
  description = processor.decode(generation, skip_special_tokens=True)
 
108
  input_len = inputs["input_ids"].shape[-1]
109
 
110
  with torch.inference_mode():
111
+ generation = model.generate(**inputs, max_new_tokens=1000, do_sample=True, temperature=0.7)
112
  generation = generation[0][input_len:]
113
 
114
  description = processor.decode(generation, skip_special_tokens=True)
app.py CHANGED
@@ -13,7 +13,7 @@ def process_and_display(image, voice):
13
  state = {"image": image, "voice": voice, "caption": "", "description": ""}
14
 
15
  # Run the graph
16
- result = graph.invoke(state, max_concurrency=1)
17
 
18
  descriptions:list[str] = result["descriptions"]
19
  description = "\n---\n".join(descriptions)
 
13
  state = {"image": image, "voice": voice, "caption": "", "description": ""}
14
 
15
  # Run the graph
16
+ result = graph.invoke(state, {"max_concurrency" : 1})
17
 
18
  descriptions:list[str] = result["descriptions"]
19
  description = "\n---\n".join(descriptions)