da03 committed
Commit 9309dab · 1 Parent(s): b21a8cb
Files changed (1)
  1. main.py +5 -1
main.py CHANGED
@@ -24,12 +24,14 @@ device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
 
 DEBUG_MODE = False
 DEBUG_MODE_2 = True
-NUM_MAX_FRAMES = 2
+NUM_MAX_FRAMES = 8
 
 SCREEN_WIDTH = 512
 SCREEN_HEIGHT = 384
 NUM_SAMPLING_STEPS = 32
 
+print (f'setting: DEBUG_MODE: {DEBUG_MODE}, DEBUG_MODE_2: {DEBUG_MODE_2}, NUM_MAX_FRAMES: {NUM_MAX_FRAMES}, NUM_SAMPLING_STEPS: {NUM_SAMPLING_STEPS}')
+
 with open('latent_stats.json', 'r') as f:
     latent_stats = json.load(f)
 DATA_NORMALIZATION = {'mean': torch.tensor(latent_stats['mean']).to(device), 'std': torch.tensor(latent_stats['std']).to(device)}
@@ -269,6 +271,8 @@ async def websocket_endpoint(websocket: WebSocket):
     inputs = prepare_model_inputs(previous_frame, hidden_states, x, y, is_right_click, is_left_click, list(keys_down), stoi, itos, frame_num)
     print(f"[{time.perf_counter():.3f}] Starting model inference...")
     previous_frame, sample_img, hidden_states, timing_info = await process_frame(model, inputs)
+    print (f'aaa setting: DEBUG_MODE: {DEBUG_MODE}, DEBUG_MODE_2: {DEBUG_MODE_2}, NUM_MAX_FRAMES: {NUM_MAX_FRAMES}, NUM_SAMPLING_STEPS: {NUM_SAMPLING_STEPS}')
+
 
     timing_info['full_frame'] = time.perf_counter() - process_start_time
 
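Note: the diff shows NUM_MAX_FRAMES being raised from 2 to 8 and logged alongside the other settings, but not where the constant is consumed. As a rough illustration only, a cap like this is often used to bound a rolling history of past frames fed back into the model; in the sketch below the frame_history buffer and push_frame helper are hypothetical names and not part of main.py.

from collections import deque

import torch

# Values taken from the commit; everything else in this sketch is hypothetical.
NUM_MAX_FRAMES = 8
SCREEN_WIDTH = 512
SCREEN_HEIGHT = 384

# Hypothetical rolling buffer: keep at most NUM_MAX_FRAMES past frames,
# dropping the oldest automatically once the cap is reached.
frame_history = deque(maxlen=NUM_MAX_FRAMES)

def push_frame(frame):
    """Append the newest frame and return the stacked history, shape (T, C, H, W)."""
    frame_history.append(frame)
    return torch.stack(list(frame_history))

# Usage: after the 8th push, the history length stays pinned at NUM_MAX_FRAMES.
for _ in range(12):
    history = push_frame(torch.zeros(3, SCREEN_HEIGHT, SCREEN_WIDTH))
print(history.shape[0])  # -> 8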