da03 committed on
Commit eac6a5b · 1 Parent(s): 7a54290
Files changed (1)
  1. main.py +6 -5
main.py CHANGED
@@ -131,7 +131,7 @@ def load_initial_images(width, height):
     initial_images = []
     if DEBUG_TEACHER_FORCING:
         # Load the previous 7 frames for image_81
-        for i in range(114-7, 114): # Load images 74-80
+        for i in range(117-7, 117): # Load images 74-80
             img = Image.open(f"record_10003/image_{i}.png")#.resize((width, height))
             initial_images.append(np.array(img))
     else:
@@ -338,8 +338,8 @@ async def websocket_endpoint(websocket: WebSocket):
         'L + 0 4 7 3 : + 0 0 8 7', 'N + 0 1 0 9 : + 0 3 4 4',
         'N + 0 0 5 2 : + 0 1 9 4', 'N + 0 3 6 5 : + 0 2 3 2',
         'N + 0 3 8 9 : + 0 2 4 5', 'N + 0 0 2 0 : + 0 0 5 9',
-        'N + 0 4 7 3 : + 0 1 5 7', ]#'L + 0 1 9 1 : + 0 0 8 7',
-        #'L + 0 1 9 1 : + 0 0 8 7', 'N + 0 3 4 3 : + 0 2 6 3', ]
+        'N + 0 4 7 3 : + 0 1 5 7', 'L + 0 1 9 1 : + 0 0 8 7',
+        'L + 0 1 9 1 : + 0 0 8 7', 'N + 0 3 4 3 : + 0 2 6 3', ]
         #'N + 0 2 0 5 : + 0 1 3 3']
         previous_actions = []
         for action in debug_actions[-8:]:
@@ -374,6 +374,7 @@ async def websocket_endpoint(websocket: WebSocket):
         'N + 0 2 9 0 : + 0 1 4 1', 'N + 0 4 0 2 : + 0 0 0 9',
         'N + 0 3 0 7 : + 0 3 3 2', 'N + 0 2 2 0 : + 0 3 7 1',
         'N + 0 0 8 2 : + 0 1 5 1']
+        positions = positions[3:]
         #positions = positions[:4]
         position = positions[0]
         positions = positions[1:]
@@ -417,13 +418,13 @@ async def websocket_endpoint(websocket: WebSocket):
 
         # Predict the next frame based on the previous frames and actions
         if DEBUG_TEACHER_FORCING:
-            print ('predicting', f"record_10003/image_{114+len(previous_frames)}.png")
+            print ('predicting', f"record_10003/image_{117+len(previous_frames)}.png")
 
         next_frame, next_frame_append = predict_next_frame(previous_frames, previous_actions)
         # Load and append the corresponding ground truth image instead of model output
         print ('here4', len(previous_frames))
         if True and DEBUG_TEACHER_FORCING:
-            img = Image.open(f"record_10003/image_{114+len(previous_frames)}.png")
+            img = Image.open(f"record_10003/image_{117+len(previous_frames)}.png")
             previous_frames.append(img)
         elif True:
             assert False
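
Note on the change: every hard-coded frame index in the teacher-forcing path moves from 114 to 117, so both the seven seed frames and the ground-truth frames appended during prediction are read from a window starting three frames later in record_10003, and the new positions = positions[3:] line applies the same three-step shift to the scripted positions. The sketch below illustrates the teacher-forcing pattern these lines implement; it is not the repository's code. It reuses predict_next_frame, previous_frames and previous_actions as they appear in the diff, while START_FRAME, CONTEXT, num_steps and the simplified index arithmetic are illustrative assumptions.

# Minimal teacher-forcing sketch (illustrative only, not main.py itself).
# Assumes the record_10003/image_{i}.png layout and the
# predict_next_frame(previous_frames, previous_actions) signature from the diff;
# START_FRAME, CONTEXT and num_steps are hypothetical names.
import numpy as np
from PIL import Image

START_FRAME = 117   # first frame index to predict (the value the commit switches to)
CONTEXT = 7         # number of seed frames loaded before prediction starts

def run_teacher_forcing(num_steps, predict_next_frame):
    # Seed the context with the CONTEXT ground-truth frames before START_FRAME
    # (with the new value: images 110-116).
    previous_frames = [
        np.array(Image.open(f"record_10003/image_{i}.png"))
        for i in range(START_FRAME - CONTEXT, START_FRAME)
    ]
    previous_actions = []  # in main.py these come from the hard-coded debug_actions

    predictions = []
    for step in range(num_steps):
        # The model still predicts, so its output can be inspected or streamed...
        next_frame, _ = predict_next_frame(previous_frames, previous_actions)
        predictions.append(next_frame)
        # ...but the context is extended with the ground-truth frame rather than
        # the prediction, so errors never compound (teacher forcing).
        gt = Image.open(f"record_10003/image_{START_FRAME + step}.png")
        previous_frames.append(np.array(gt))
    return predictions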
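The debug_actions strings edited in the second hunk all share one whitespace-separated pattern: a leading letter ('L' or 'N'), then two signed four-digit numbers separated by ':' with every digit written as its own token. A small decoding sketch follows; the helper name parse_debug_action, and the reading of the letter as a click flag with the two numbers as coordinates, are assumptions from context rather than anything the diff states.

# Sketch of decoding one debug_actions entry (assumed format, not from main.py).
from typing import Tuple

def parse_debug_action(action: str) -> Tuple[str, int, int]:
    # Example input: 'N + 0 4 7 3 : + 0 1 5 7'
    # Keep the leading letter, drop the spaces between digit tokens, split on ':'.
    flag, rest = action[0], action[1:].replace(' ', '')
    left, right = rest.split(':')
    return flag, int(left), int(right)

print(parse_debug_action('N + 0 4 7 3 : + 0 1 5 7'))  # ('N', 473, 157)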