python_code (stringlengths 0-187k) | repo_name (stringlengths 8-46) | file_path (stringlengths 6-135)
---|---|---|
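# arm_test/arm_counter_30fps_simulate.py
# Starts a mid-level-arm Controller (targetFrameRate=30, fixedDeltaTime=0.02), teleports
# the agent into FloorPlan1_physics, and replays a recorded trace of MoveMidLevelArm /
# MoveArmBase actions with rendering disabled.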
import os
import sys
root_dir = os.path.normpath(os.path.dirname(os.path.realpath(__file__)) + "/..")
sys.path.insert(0, root_dir)
import ai2thor.controller
c = ai2thor.controller.Controller(
scene="FloorPlan1_physics",
gridSize=0.25,
width=900,
height=900,
agentMode="arm",
fieldOfView=100,
agentControllerType="mid-level",
targetFrameRate=30,
fixedDeltaTime=0.02,
)
print(c.build_url())
c.step(
action="TeleportFull",
x=-1,
y=0.9009995460510254,
z=1,
rotation=dict(x=0, y=180, z=0),
horizon=0,
)
c.step(
action="MoveMidLevelArm",
disableRendering=False,
position=dict(x=0.01, y=0, z=0.01),
speed=2,
returnToStart=False,
handCameraSpace=False,
)
c.step(
action="MoveArmBase", disableRendering=False, y=0.9, speed=2, returnToStart=False
)
pose = {"x": -1.0, "y": 0.9009995460510254, "z": 1, "rotation": 135, "horizon": 0}
c.step(
action="TeleportFull",
x=pose["x"],
y=pose["y"],
z=pose["z"],
rotation=dict(x=0.0, y=pose["rotation"], z=0.0),
horizon=pose["horizon"],
)
actions = [
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": 0.059573404, "y": 0.0, "z": 0.0281161666},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveArmBase",
"disableRendering": False,
"y": 0.8314809351552201,
"speed": 2,
"returnToStart": False,
},
{
"action": "MoveArmBase",
"disableRendering": False,
"y": 0.8018513467217335,
"speed": 2,
"returnToStart": False,
},
{
"action": "MoveArmBase",
"disableRendering": False,
"y": 0.7722217582882469,
"speed": 2,
"returnToStart": False,
},
{
"action": "MoveArmBase",
"disableRendering": False,
"y": 0.7425921698547602,
"speed": 2,
"returnToStart": False,
},
{
"action": "MoveArmBase",
"disableRendering": False,
"y": 0.7129625740138691,
"speed": 2,
"returnToStart": False,
},
{
"action": "MoveArmBase",
"disableRendering": False,
"y": 0.6833329855803827,
"speed": 2,
"returnToStart": False,
},
{
"action": "MoveArmBase",
"disableRendering": False,
"y": 0.6537033971468961,
"speed": 2,
"returnToStart": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": 0.0295712, "y": 5.96046448e-08, "z": 0.0781169713},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": 0.0295711979, "y": 0.0, "z": 0.098116833},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": 0.029570736, "y": 1.1920929e-07, "z": 0.11811674},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": 0.02957208, "y": 0.0, "z": 0.13811702},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": 0.0295723379, "y": -1.1920929e-07, "z": 0.15811688000000002},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": 0.02957233, "y": 1.1920929e-07, "z": 0.17811683099999998},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": 0.0295722485, "y": 0.0, "z": 0.198116782},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": 0.02957207, "y": -2.38418579e-07, "z": 0.2181169},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": 0.029571943, "y": -1.1920929e-07, "z": 0.23811695300000002},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": 0.0295718536, "y": -1.1920929e-07, "z": 0.258116919},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": 0.0295715183, "y": -1.1920929e-07, "z": 0.278117019},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": 0.0295714289, "y": -1.1920929e-07, "z": 0.298117208},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": 0.0295709223, "y": -1.1920929e-07, "z": 0.31811716},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": 0.0295703411, "y": 0.0, "z": 0.338116872},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": 0.0295695812, "y": -2.38418579e-07, "z": 0.358116376},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": 0.0295692533, "y": 0.0, "z": 0.378115761},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveArmBase",
"disableRendering": False,
"y": 0.6240738087134093,
"speed": 2,
"returnToStart": False,
},
{
"action": "MoveArmBase",
"disableRendering": False,
"y": 0.5944442202799227,
"speed": 2,
"returnToStart": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": -0.020431030000000003, "y": -5.96046448e-08, "z": 0.3481152},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": 0.00956957, "y": -2.38418579e-07, "z": 0.398114669},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": 0.009569496, "y": -1.1920929e-07, "z": 0.41811468},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": 0.009569377, "y": -1.1920929e-07, "z": 0.43811484},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{"action": ""},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": 0.009569332, "y": -1.1920929e-07, "z": 0.4581149},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": 0.009569213, "y": -2.38418579e-07, "z": 0.478115},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveArmBase",
"disableRendering": False,
"y": 0.5648146688834588,
"speed": 2,
"returnToStart": False,
},
{"action": ""},
{
"action": "MoveArmBase",
"disableRendering": False,
"y": 0.5351850508203542,
"speed": 2,
"returnToStart": False,
},
{
"action": "MoveArmBase",
"disableRendering": False,
"y": 0.5055554327572495,
"speed": 2,
"returnToStart": False,
},
{
"action": "MoveArmBase",
"disableRendering": False,
"y": 0.47592585913857194,
"speed": 2,
"returnToStart": False,
},
{
"action": "MoveArmBase",
"disableRendering": False,
"y": 0.47592585913857194,
"speed": 2,
"returnToStart": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": 0.009569228, "y": -2.38418579e-07, "z": 0.498115051},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": 0.009569168, "y": -2.38418579e-07, "z": 0.51811533},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": 0.009569213, "y": -1.1920929e-07, "z": 0.5381154300000001},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{"action": ""},
{"action": "PickUpMidLevelHand"},
{
"action": "MoveArmBase",
"disableRendering": False,
"y": 0.575925859138572,
"speed": 2,
"returnToStart": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": 0.059569168000000006, "y": -2.98023224e-07, "z": 0.508115649},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": 0.07956888000000001, "y": -3.57627869e-07, "z": 0.5081153},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": 0.0495688021, "y": -2.38418579e-07, "z": 0.558114934},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": 0.0495688021, "y": -3.57627869e-07, "z": 0.5781150340000001},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": 0.0495687425, "y": -2.38418579e-07, "z": 0.5981153},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": 0.0495686531, "y": -2.38418579e-07, "z": 0.6181155},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": 0.0495686829, "y": -2.38418579e-07, "z": 0.6381157000000001},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": 0.0495687127, "y": -3.57627869e-07, "z": 0.6181159},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": 0.0495687127, "y": -4.76837158e-07, "z": 0.638116169},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": 0.0495688319, "y": 0.0, "z": 0.6181162},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": 0.0495688021, "y": 0.0, "z": 0.6381164},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": 0.0495688021, "y": -2.38418579e-07, "z": 0.6181165000000001},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveArmBase",
"disableRendering": False,
"y": 0.5055554327572495,
"speed": 2,
"returnToStart": False,
},
{
"action": "MoveArmBase",
"disableRendering": False,
"y": 0.5203702417888018,
"speed": 2,
"returnToStart": False,
},
{
"action": "MoveArmBase",
"disableRendering": False,
"y": 0.5351850508203542,
"speed": 2,
"returnToStart": False,
},
{
"action": "MoveArmBase",
"disableRendering": False,
"y": 0.5499998598519065,
"speed": 2,
"returnToStart": False,
},
{
"action": "MoveArmBase",
"disableRendering": False,
"y": 0.5499998598519065,
"speed": 2,
"returnToStart": False,
},
{
"action": "MoveArmBase",
"disableRendering": False,
"y": 0.5499998598519065,
"speed": 2,
"returnToStart": False,
},
{
"action": "MoveArmBase",
"disableRendering": False,
"y": 0.5499998598519065,
"speed": 2,
"returnToStart": False,
},
{
"action": "MoveArmBase",
"disableRendering": False,
"y": 0.5499998598519065,
"speed": 2,
"returnToStart": False,
},
{
"action": "MoveArmBase",
"disableRendering": False,
"y": 0.5499998598519065,
"speed": 2,
"returnToStart": False,
},
{
"action": "MoveArmBase",
"disableRendering": False,
"y": 0.5499998598519065,
"speed": 2,
"returnToStart": False,
},
{
"action": "MoveArmBase",
"disableRendering": False,
"y": 0.5499998598519065,
"speed": 2,
"returnToStart": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": -0.00043116810000000394, "y": -1.1920929e-07, "z": 0.5881169},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": -0.020431060000000004, "y": -1.1920929e-07, "z": 0.5881164},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": -0.0004312277000000017, "y": -2.38418579e-07, "z": 0.5881171},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": -0.020431149000000003, "y": 0.0, "z": 0.588116944},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": -0.0004310200000000042, "y": -2.38418579e-07, "z": 0.5881175},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": 0.07956903, "y": -1.1920929e-07, "z": 0.588117361},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": 0.059569168000000006, "y": -3.57627869e-07, "z": 0.5881177},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": 0.039569217000000004, "y": 2.38418579e-07, "z": 0.588118434},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": 0.019569176400000002, "y": -2.38418579e-07, "z": 0.588119},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": -0.0004308938999999998, "y": -2.38418579e-07, "z": 0.5881196},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": -0.020430815199999994, "y": -1.1920929e-07, "z": 0.5881202},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {
"x": -0.040430869999999994,
"y": -2.38418579e-07,
"z": 0.588120937,
},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": -0.060430716999999995, "y": -1.1920929e-07, "z": 0.5881218},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": -0.08043068299999999, "y": 0.0, "z": 0.588122368},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": -0.10043055999999999, "y": 0.0, "z": 0.5881231},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": -0.12043055600000001, "y": 0.0, "z": 0.5881235},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": -0.140430626, "y": -2.38418579e-07, "z": 0.588124156},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": -0.16043072600000002, "y": 0.0, "z": 0.588124752},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{},
]
counter = 0
for a in actions:
    # Skip the empty placeholder entries in the recorded trace.
    if a == {} or a == {"action": ""}:
        continue
    a["disableRendering"] = True
    c.step(a)
| ai2thor-main | arm_test/arm_counter_30fps_simulate.py |
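# arm_test/arm_stuck_test_wait_frame.py
# Regression test for a stuck arm: moves to a standard pose, replays two recorded actions
# via the arm_test.base helpers with waitForFixedUpdate=False, then asserts that a final
# MoveArmBase still succeeds.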
import os
import sys
root_dir = os.path.normpath(os.path.dirname(os.path.realpath(__file__)) + "/..")
sys.path.insert(0, root_dir)
from arm_test.base import standard_pose, execute_actions
import arm_test.base
# pp
# inita
# rr
# mmlah 1 1
# telefull
# mmlah 0.5203709825292535 2 True
# pac
# mmla 0.01000303 -1.63912773e-06 0.558107364 2 armBase True
# pac
# mmlah 0.49074136446614885 2 True
actions = [
{
"action": "MoveArmBase",
"y": 0.5203709825292535,
"speed": 2,
"disableRendering": True,
"returnToStart": True,
},
{
"action": "MoveMidLevelArm",
"position": {"x": 0.01000303, "y": -1.63912773e-06, "z": 0.558107364},
"speed": 2,
"handCameraSpace": False,
"disableRendering": True,
"returnToStart": True,
},
]
standard_pose()
execute_actions(
actions, disableRendering=False, waitForFixedUpdate=False, returnToStart=True
)
event = arm_test.base.controller.step(
"MoveArmBase",
y=0.49074136446614885,
disableRendering=False,
returnToStart=True,
speed=2.0,
waitForFixedUpdate=False,
)
assert event.metadata["lastActionSuccess"], "MoveArmBase failed; arm is stuck"
| ai2thor-main | arm_test/arm_stuck_test_wait_frame.py |
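# arm_test/arm_counter_30fps_fixed_update_random_sleep.py
# Replays the same recorded trace as arm_counter_30fps_simulate.py, but with
# fixedDeltaTime=0.005 and a random 0-0.1 s sleep between steps (per the file name,
# to exercise the fixed-update timing path).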
import os
import sys
root_dir = os.path.normpath(os.path.dirname(os.path.realpath(__file__)) + "/..")
sys.path.insert(0, root_dir)
import time
import random
from ai2thor.controller import Controller
c = Controller(
scene="FloorPlan1_physics",
gridSize=0.25,
width=900,
height=900,
agentMode="arm",
fieldOfView=100,
agentControllerType="mid-level",
targetFrameRate=30,
fixedDeltaTime=0.005,
)
print(c.build_url())
c.step(
action="TeleportFull",
x=-1,
y=0.9009995460510254,
z=1,
rotation=dict(x=0, y=180, z=0),
horizon=0,
)
c.step(
action="MoveMidLevelArm",
disableRendering=False,
position=dict(x=0.01, y=0, z=0.01),
speed=2,
returnToStart=False,
handCameraSpace=False,
)
c.step(
action="MoveArmBase", disableRendering=False, y=0.9, speed=2, returnToStart=False
)
pose = {"x": -1.0, "y": 0.9009995460510254, "z": 1, "rotation": 135, "horizon": 0}
c.step(
action="TeleportFull",
x=pose["x"],
y=pose["y"],
z=pose["z"],
rotation=dict(x=0.0, y=pose["rotation"], z=0.0),
horizon=pose["horizon"],
)
actions = [
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": 0.059573404, "y": 0.0, "z": 0.0281161666},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveArmBase",
"disableRendering": False,
"y": 0.8314809351552201,
"speed": 2,
"returnToStart": False,
},
{
"action": "MoveArmBase",
"disableRendering": False,
"y": 0.8018513467217335,
"speed": 2,
"returnToStart": False,
},
{
"action": "MoveArmBase",
"disableRendering": False,
"y": 0.7722217582882469,
"speed": 2,
"returnToStart": False,
},
{
"action": "MoveArmBase",
"disableRendering": False,
"y": 0.7425921698547602,
"speed": 2,
"returnToStart": False,
},
{
"action": "MoveArmBase",
"disableRendering": False,
"y": 0.7129625740138691,
"speed": 2,
"returnToStart": False,
},
{
"action": "MoveArmBase",
"disableRendering": False,
"y": 0.6833329855803827,
"speed": 2,
"returnToStart": False,
},
{
"action": "MoveArmBase",
"disableRendering": False,
"y": 0.6537033971468961,
"speed": 2,
"returnToStart": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": 0.0295712, "y": 5.96046448e-08, "z": 0.0781169713},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": 0.0295711979, "y": 0.0, "z": 0.098116833},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": 0.029570736, "y": 1.1920929e-07, "z": 0.11811674},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": 0.02957208, "y": 0.0, "z": 0.13811702},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": 0.0295723379, "y": -1.1920929e-07, "z": 0.15811688000000002},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": 0.02957233, "y": 1.1920929e-07, "z": 0.17811683099999998},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": 0.0295722485, "y": 0.0, "z": 0.198116782},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": 0.02957207, "y": -2.38418579e-07, "z": 0.2181169},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": 0.029571943, "y": -1.1920929e-07, "z": 0.23811695300000002},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": 0.0295718536, "y": -1.1920929e-07, "z": 0.258116919},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": 0.0295715183, "y": -1.1920929e-07, "z": 0.278117019},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": 0.0295714289, "y": -1.1920929e-07, "z": 0.298117208},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": 0.0295709223, "y": -1.1920929e-07, "z": 0.31811716},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": 0.0295703411, "y": 0.0, "z": 0.338116872},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": 0.0295695812, "y": -2.38418579e-07, "z": 0.358116376},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": 0.0295692533, "y": 0.0, "z": 0.378115761},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveArmBase",
"disableRendering": False,
"y": 0.6240738087134093,
"speed": 2,
"returnToStart": False,
},
{
"action": "MoveArmBase",
"disableRendering": False,
"y": 0.5944442202799227,
"speed": 2,
"returnToStart": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": -0.020431030000000003, "y": -5.96046448e-08, "z": 0.3481152},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": 0.00956957, "y": -2.38418579e-07, "z": 0.398114669},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": 0.009569496, "y": -1.1920929e-07, "z": 0.41811468},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": 0.009569377, "y": -1.1920929e-07, "z": 0.43811484},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{"action": ""},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": 0.009569332, "y": -1.1920929e-07, "z": 0.4581149},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": 0.009569213, "y": -2.38418579e-07, "z": 0.478115},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveArmBase",
"disableRendering": False,
"y": 0.5648146688834588,
"speed": 2,
"returnToStart": False,
},
{"action": ""},
{
"action": "MoveArmBase",
"disableRendering": False,
"y": 0.5351850508203542,
"speed": 2,
"returnToStart": False,
},
{
"action": "MoveArmBase",
"disableRendering": False,
"y": 0.5055554327572495,
"speed": 2,
"returnToStart": False,
},
{
"action": "MoveArmBase",
"disableRendering": False,
"y": 0.47592585913857194,
"speed": 2,
"returnToStart": False,
},
{
"action": "MoveArmBase",
"disableRendering": False,
"y": 0.47592585913857194,
"speed": 2,
"returnToStart": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": 0.009569228, "y": -2.38418579e-07, "z": 0.498115051},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": 0.009569168, "y": -2.38418579e-07, "z": 0.51811533},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": 0.009569213, "y": -1.1920929e-07, "z": 0.5381154300000001},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{"action": ""},
{"action": "PickUpMidLevelHand"},
{
"action": "MoveArmBase",
"disableRendering": False,
"y": 0.575925859138572,
"speed": 2,
"returnToStart": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": 0.059569168000000006, "y": -2.98023224e-07, "z": 0.508115649},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": 0.07956888000000001, "y": -3.57627869e-07, "z": 0.5081153},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": 0.0495688021, "y": -2.38418579e-07, "z": 0.558114934},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": 0.0495688021, "y": -3.57627869e-07, "z": 0.5781150340000001},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": 0.0495687425, "y": -2.38418579e-07, "z": 0.5981153},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": 0.0495686531, "y": -2.38418579e-07, "z": 0.6181155},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": 0.0495686829, "y": -2.38418579e-07, "z": 0.6381157000000001},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": 0.0495687127, "y": -3.57627869e-07, "z": 0.6181159},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": 0.0495687127, "y": -4.76837158e-07, "z": 0.638116169},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": 0.0495688319, "y": 0.0, "z": 0.6181162},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": 0.0495688021, "y": 0.0, "z": 0.6381164},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": 0.0495688021, "y": -2.38418579e-07, "z": 0.6181165000000001},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveArmBase",
"disableRendering": False,
"y": 0.5055554327572495,
"speed": 2,
"returnToStart": False,
},
{
"action": "MoveArmBase",
"disableRendering": False,
"y": 0.5203702417888018,
"speed": 2,
"returnToStart": False,
},
{
"action": "MoveArmBase",
"disableRendering": False,
"y": 0.5351850508203542,
"speed": 2,
"returnToStart": False,
},
{
"action": "MoveArmBase",
"disableRendering": False,
"y": 0.5499998598519065,
"speed": 2,
"returnToStart": False,
},
{
"action": "MoveArmBase",
"disableRendering": False,
"y": 0.5499998598519065,
"speed": 2,
"returnToStart": False,
},
{
"action": "MoveArmBase",
"disableRendering": False,
"y": 0.5499998598519065,
"speed": 2,
"returnToStart": False,
},
{
"action": "MoveArmBase",
"disableRendering": False,
"y": 0.5499998598519065,
"speed": 2,
"returnToStart": False,
},
{
"action": "MoveArmBase",
"disableRendering": False,
"y": 0.5499998598519065,
"speed": 2,
"returnToStart": False,
},
{
"action": "MoveArmBase",
"disableRendering": False,
"y": 0.5499998598519065,
"speed": 2,
"returnToStart": False,
},
{
"action": "MoveArmBase",
"disableRendering": False,
"y": 0.5499998598519065,
"speed": 2,
"returnToStart": False,
},
{
"action": "MoveArmBase",
"disableRendering": False,
"y": 0.5499998598519065,
"speed": 2,
"returnToStart": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": -0.00043116810000000394, "y": -1.1920929e-07, "z": 0.5881169},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": -0.020431060000000004, "y": -1.1920929e-07, "z": 0.5881164},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": -0.0004312277000000017, "y": -2.38418579e-07, "z": 0.5881171},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": -0.020431149000000003, "y": 0.0, "z": 0.588116944},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": -0.0004310200000000042, "y": -2.38418579e-07, "z": 0.5881175},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": 0.07956903, "y": -1.1920929e-07, "z": 0.588117361},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": 0.059569168000000006, "y": -3.57627869e-07, "z": 0.5881177},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": 0.039569217000000004, "y": 2.38418579e-07, "z": 0.588118434},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": 0.019569176400000002, "y": -2.38418579e-07, "z": 0.588119},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": -0.0004308938999999998, "y": -2.38418579e-07, "z": 0.5881196},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": -0.020430815199999994, "y": -1.1920929e-07, "z": 0.5881202},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {
"x": -0.040430869999999994,
"y": -2.38418579e-07,
"z": 0.588120937,
},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": -0.060430716999999995, "y": -1.1920929e-07, "z": 0.5881218},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": -0.08043068299999999, "y": 0.0, "z": 0.588122368},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": -0.10043055999999999, "y": 0.0, "z": 0.5881231},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": -0.12043055600000001, "y": 0.0, "z": 0.5881235},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": -0.140430626, "y": -2.38418579e-07, "z": 0.588124156},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": -0.16043072600000002, "y": 0.0, "z": 0.588124752},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{},
]
counter = 0
for a in actions:
    # Skip empty placeholder entries, then pause for 0-0.1 s between steps.
    if a == {} or a == {"action": ""}:
        continue
    c.step(a)
    time.sleep(random.randint(0, 10) / float(100))
| ai2thor-main | arm_test/arm_counter_30fps_fixed_update_random_sleep.py |
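# arm_test/arm_stuck_test.py
# Same stuck-arm regression as arm_stuck_test_wait_frame.py, but run with
# disableRendering=True and returnToStart=True and without waitForFixedUpdate.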
import os
import sys
root_dir = os.path.normpath(os.path.dirname(os.path.realpath(__file__)) + "/..")
sys.path.insert(0, root_dir)
from arm_test.base import standard_pose, execute_actions
import arm_test.base
# pp
# inita
# rr
# mmlah 1 1
# telefull
# mmlah 0.5203709825292535 2 True
# pac
# mmla 0.01000303 -1.63912773e-06 0.558107364 2 armBase True
# pac
# mmlah 0.49074136446614885 2 True
actions = [
{
"action": "MoveArmBase",
"y": 0.5203709825292535,
"speed": 2,
"disableRendering": True,
"returnToStart": True,
},
{
"action": "MoveMidLevelArm",
"position": {"x": 0.01000303, "y": -1.63912773e-06, "z": 0.558107364},
"speed": 2,
"handCameraSpace": False,
"disableRendering": True,
"returnToStart": True,
},
]
standard_pose()
execute_actions(actions, disableRendering=True, returnToStart=True)
event = arm_test.base.controller.step(
"MoveArmBase",
y=0.49074136446614885,
disableRendering=True,
returnToStart=True,
speed=2.0,
)
assert event.metadata["lastActionSuccess"], "MoveArmBase failed; arm is stuck"
| ai2thor-main | arm_test/arm_stuck_test.py |
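# arm_test/base.py
# Shared harness for the arm tests: builds a Controller from the local Unity build,
# optionally records and uploads a .webm of the run to S3 (--record-video), and exposes
# standard_pose() / execute_actions() used by the individual test scripts.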
import os
import sys
import argparse
import random
import time
import boto3
import getpass
import ai2thor.controller
import ai2thor.fifo_server
import uuid
import cv2
from tasks import _local_build_path
# ai2thor.controller.COMMIT_ID ='fd7cf8d59c5a01f5aadc7f9379b0f579e9139ace'
parser = argparse.ArgumentParser(description="Thor Arm Tester")
parser.add_argument("--record-video", action="store_true")
args = parser.parse_args()
controller = ai2thor.controller.Controller(
# port=8200, start_unity=False,
local_executable_path=_local_build_path(),
server_class=ai2thor.fifo_server.FifoServer,
scene="FloorPlan1_physics",
gridSize=0.25,
width=900,
height=900,
agentMode="arm",
# fastActionEmit=True,
# fieldOfView=100,
agentControllerType="mid-level",
)
def upload_video(data):
    s3 = boto3.resource("s3")
    acl = "public-read"
    key = os.path.join(
        sys.argv[0].split("/")[-1].split(".")[0], str(uuid.uuid4()) + ".webm"
    )
    print(
        "Video is available at: https://ai2-thor-exproom-arm-test.s3-us-west-2.amazonaws.com/%s"
        % key
    )
    metadata = dict(
        test=sys.argv[0].split("/")[-1].split(".")[0],
        build_url=controller.build_url()[0],
        user=getpass.getuser(),
    )
    for k, v in controller.initialization_parameters.items():
        metadata["aithorInit-%s" % k] = str(v)
    s3.Object("ai2-thor-exproom-arm-test", key).put(
        Body=data, ACL=acl, ContentType="video/mp4", Metadata=metadata
    )
def write_video(frames):
    if not args.record_video:
        return
    temp_file = (
        str(time.time())
        + "-"
        + str(random.randint(0, 2 ** 32))
        + "-"
        + str(os.getpid())
        + ".webm"
    )
    video = cv2.VideoWriter(
        temp_file,
        cv2.VideoWriter_fourcc(*"VP80"),
        30,
        (frames[0].shape[1], frames[0].shape[0]),
    )
    for frame in frames:
        # assumes that the frames are RGB images. CV2 uses BGR.
        for i in range(10):
            video.write(frame)
    cv2.destroyAllWindows()
    with open(temp_file, "rb") as f:
        data = f.read()
    os.unlink(temp_file)
    upload_video(data)
def standard_pose():
    controller.step(
        action="TeleportFull",
        x=-1,
        y=0.9009995460510254,
        z=1,
        rotation=dict(x=0, y=180, z=0),
        horizon=10,
    )
    controller.step("PausePhysicsAutoSim")
    pose = {"x": -1.0, "y": 0.9009995460510254, "z": 1.0, "rotation": 0, "horizon": 10}
    controller.step(
        action="TeleportFull",
        x=pose["x"],
        y=pose["y"],
        z=pose["z"],
        rotation=dict(x=0.0, y=pose["rotation"], z=0.0),
        horizon=pose["horizon"],
    )
    controller.step(
        action="MoveMidLevelArm",
        disableRendering=False,
        position=dict(x=0.00, y=0, z=0.35),
        speed=2,
        returnToStart=False,
        handCameraSpace=False,
    )
    controller.step(
        action="MoveArmBase",
        disableRendering=False,
        y=0.8,
        speed=2,
        returnToStart=False,
    )
def execute_actions(actions, **kwargs):
    for a in actions:
        if a == {} or a == {"action": ""}:
            continue
        for k, v in kwargs.items():
            a[k] = v
        controller.step(a)
        print("success: %s" % controller.last_event.metadata["lastActionSuccess"])
        print("return: %s" % controller.last_event.metadata["actionReturn"])
        print(
            "position: %s" % (controller.last_event.metadata["arm"]["handSphereCenter"])
        )
        for j in controller.last_event.metadata["arm"]["joints"]:
            rot = " ".join(map(lambda x: str(j["rotation"][x]), ["x", "y", "z", "w"]))
            print("%s %s" % (j["name"], rot))
            # print("%s %s" % (j['name'], j['position']))
        print(controller.last_event.metadata["arm"]["pickupableObjects"])
        # frames.append(controller.last_event.cv2img)
    # write_video(frames)
| ai2thor-main | arm_test/base.py |
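# arm_test/arm_counter_30fps_simulate_pause_return_start.py
# Replays the recorded trace through the base helpers (which pause physics auto-simulation
# in standard_pose) with disableRendering=True and returnToStart=True.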
import os
import sys
root_dir = os.path.normpath(os.path.dirname(os.path.realpath(__file__)) + "/..")
sys.path.insert(0, root_dir)
from arm_test.base import standard_pose, execute_actions
actions = [
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": 0.059573404, "y": 0.0, "z": 0.0281161666},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveArmBase",
"disableRendering": False,
"y": 0.8314809351552201,
"speed": 2,
"returnToStart": False,
},
{
"action": "MoveArmBase",
"disableRendering": False,
"y": 0.8018513467217335,
"speed": 2,
"returnToStart": False,
},
{
"action": "MoveArmBase",
"disableRendering": False,
"y": 0.7722217582882469,
"speed": 2,
"returnToStart": False,
},
{
"action": "MoveArmBase",
"disableRendering": False,
"y": 0.7425921698547602,
"speed": 2,
"returnToStart": False,
},
{
"action": "MoveArmBase",
"disableRendering": False,
"y": 0.7129625740138691,
"speed": 2,
"returnToStart": False,
},
{
"action": "MoveArmBase",
"disableRendering": False,
"y": 0.6833329855803827,
"speed": 2,
"returnToStart": False,
},
{
"action": "MoveArmBase",
"disableRendering": False,
"y": 0.6537033971468961,
"speed": 2,
"returnToStart": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": 0.0295712, "y": 5.96046448e-08, "z": 0.0781169713},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": 0.0295711979, "y": 0.0, "z": 0.098116833},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": 0.029570736, "y": 1.1920929e-07, "z": 0.11811674},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": 0.02957208, "y": 0.0, "z": 0.13811702},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": 0.0295723379, "y": -1.1920929e-07, "z": 0.15811688000000002},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": 0.02957233, "y": 1.1920929e-07, "z": 0.17811683099999998},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": 0.0295722485, "y": 0.0, "z": 0.198116782},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": 0.02957207, "y": -2.38418579e-07, "z": 0.2181169},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": 0.029571943, "y": -1.1920929e-07, "z": 0.23811695300000002},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": 0.0295718536, "y": -1.1920929e-07, "z": 0.258116919},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": 0.0295715183, "y": -1.1920929e-07, "z": 0.278117019},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": 0.0295714289, "y": -1.1920929e-07, "z": 0.298117208},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": 0.0295709223, "y": -1.1920929e-07, "z": 0.31811716},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": 0.0295703411, "y": 0.0, "z": 0.338116872},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": 0.0295695812, "y": -2.38418579e-07, "z": 0.358116376},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": 0.0295692533, "y": 0.0, "z": 0.378115761},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveArmBase",
"disableRendering": False,
"y": 0.6240738087134093,
"speed": 2,
"returnToStart": False,
},
{
"action": "MoveArmBase",
"disableRendering": False,
"y": 0.5944442202799227,
"speed": 2,
"returnToStart": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": -0.020431030000000003, "y": -5.96046448e-08, "z": 0.3481152},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": 0.00956957, "y": -2.38418579e-07, "z": 0.398114669},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": 0.009569496, "y": -1.1920929e-07, "z": 0.41811468},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": 0.009569377, "y": -1.1920929e-07, "z": 0.43811484},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{"action": ""},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": 0.009569332, "y": -1.1920929e-07, "z": 0.4581149},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": 0.009569213, "y": -2.38418579e-07, "z": 0.478115},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveArmBase",
"disableRendering": False,
"y": 0.5648146688834588,
"speed": 2,
"returnToStart": False,
},
{"action": ""},
{
"action": "MoveArmBase",
"disableRendering": False,
"y": 0.5351850508203542,
"speed": 2,
"returnToStart": False,
},
{
"action": "MoveArmBase",
"disableRendering": False,
"y": 0.5055554327572495,
"speed": 2,
"returnToStart": False,
},
{
"action": "MoveArmBase",
"disableRendering": False,
"y": 0.47592585913857194,
"speed": 2,
"returnToStart": False,
},
{
"action": "MoveArmBase",
"disableRendering": False,
"y": 0.47592585913857194,
"speed": 2,
"returnToStart": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": 0.009569228, "y": -2.38418579e-07, "z": 0.498115051},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": 0.009569168, "y": -2.38418579e-07, "z": 0.51811533},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": 0.009569213, "y": -1.1920929e-07, "z": 0.5381154300000001},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{"action": ""},
{"action": "PickUpMidLevelHand"},
{
"action": "MoveArmBase",
"disableRendering": False,
"y": 0.575925859138572,
"speed": 2,
"returnToStart": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": 0.059569168000000006, "y": -2.98023224e-07, "z": 0.508115649},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": 0.07956888000000001, "y": -3.57627869e-07, "z": 0.5081153},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": 0.0495688021, "y": -2.38418579e-07, "z": 0.558114934},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": 0.0495688021, "y": -3.57627869e-07, "z": 0.5781150340000001},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": 0.0495687425, "y": -2.38418579e-07, "z": 0.5981153},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": 0.0495686531, "y": -2.38418579e-07, "z": 0.6181155},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": 0.0495686829, "y": -2.38418579e-07, "z": 0.6381157000000001},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": 0.0495687127, "y": -3.57627869e-07, "z": 0.6181159},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": 0.0495687127, "y": -4.76837158e-07, "z": 0.638116169},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": 0.0495688319, "y": 0.0, "z": 0.6181162},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": 0.0495688021, "y": 0.0, "z": 0.6381164},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": 0.0495688021, "y": -2.38418579e-07, "z": 0.6181165000000001},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveArmBase",
"disableRendering": False,
"y": 0.5055554327572495,
"speed": 2,
"returnToStart": False,
},
{
"action": "MoveArmBase",
"disableRendering": False,
"y": 0.5203702417888018,
"speed": 2,
"returnToStart": False,
},
{
"action": "MoveArmBase",
"disableRendering": False,
"y": 0.5351850508203542,
"speed": 2,
"returnToStart": False,
},
{
"action": "MoveArmBase",
"disableRendering": False,
"y": 0.5499998598519065,
"speed": 2,
"returnToStart": False,
},
{
"action": "MoveArmBase",
"disableRendering": False,
"y": 0.5499998598519065,
"speed": 2,
"returnToStart": False,
},
{
"action": "MoveArmBase",
"disableRendering": False,
"y": 0.5499998598519065,
"speed": 2,
"returnToStart": False,
},
{
"action": "MoveArmBase",
"disableRendering": False,
"y": 0.5499998598519065,
"speed": 2,
"returnToStart": False,
},
{
"action": "MoveArmBase",
"disableRendering": False,
"y": 0.5499998598519065,
"speed": 2,
"returnToStart": False,
},
{
"action": "MoveArmBase",
"disableRendering": False,
"y": 0.5499998598519065,
"speed": 2,
"returnToStart": False,
},
{
"action": "MoveArmBase",
"disableRendering": False,
"y": 0.5499998598519065,
"speed": 2,
"returnToStart": False,
},
{
"action": "MoveArmBase",
"disableRendering": False,
"y": 0.5499998598519065,
"speed": 2,
"returnToStart": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": -0.00043116810000000394, "y": -1.1920929e-07, "z": 0.5881169},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": -0.020431060000000004, "y": -1.1920929e-07, "z": 0.5881164},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": -0.0004312277000000017, "y": -2.38418579e-07, "z": 0.5881171},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": -0.020431149000000003, "y": 0.0, "z": 0.588116944},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": -0.0004310200000000042, "y": -2.38418579e-07, "z": 0.5881175},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": 0.07956903, "y": -1.1920929e-07, "z": 0.588117361},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": 0.059569168000000006, "y": -3.57627869e-07, "z": 0.5881177},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": 0.039569217000000004, "y": 2.38418579e-07, "z": 0.588118434},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": 0.019569176400000002, "y": -2.38418579e-07, "z": 0.588119},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": -0.0004308938999999998, "y": -2.38418579e-07, "z": 0.5881196},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": -0.020430815199999994, "y": -1.1920929e-07, "z": 0.5881202},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {
"x": -0.040430869999999994,
"y": -2.38418579e-07,
"z": 0.588120937,
},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": -0.060430716999999995, "y": -1.1920929e-07, "z": 0.5881218},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": -0.08043068299999999, "y": 0.0, "z": 0.588122368},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": -0.10043055999999999, "y": 0.0, "z": 0.5881231},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": -0.12043055600000001, "y": 0.0, "z": 0.5881235},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": -0.140430626, "y": -2.38418579e-07, "z": 0.588124156},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": -0.16043072600000002, "y": 0.0, "z": 0.588124752},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{},
]
standard_pose()
execute_actions(actions, disableRendering=True, returnToStart=True)
| ai2thor-main | arm_test/arm_counter_30fps_simulate_pause_return_start.py |
#!/usr/bin/env python
"""
Script that maintains the Private directory checkout - intended to be run
immediately after switching branches in the parent ai2thor project
"""
import os
import subprocess
private_repo_url = "https://github.com/allenai/ai2thor-private"
base_dir = os.path.normpath(
os.path.join(os.path.dirname(os.path.realpath(__file__)), "..")
)
private_dir = os.path.join(base_dir, "unity", "Assets", "Private")
def current_branch():
    git_dir = os.path.join(base_dir, ".git")
    return (
        subprocess.check_output(
            f"git --git-dir={git_dir} rev-parse --abbrev-ref HEAD", shell=True
        )
        .decode("ascii")
        .strip()
    )
def checkout_branch(remote="origin"):
if not os.path.isdir(private_dir):
subprocess.check_call(f"git clone {private_repo_url} {private_dir}", shell=True)
cwd = os.getcwd()
os.chdir(private_dir)
branch = current_branch()
try:
print(f"Trying to checkout {private_dir} -> {branch}")
subprocess.check_call(f"git fetch {remote} {branch}", shell=True)
subprocess.check_call(f"git checkout {branch}", shell=True)
subprocess.check_call(f"git pull {remote} {branch}", shell=True)
except subprocess.CalledProcessError as e:
print(f"No branch exists for private: {branch} - remaining on main")
subprocess.check_call(f"git fetch {remote} main", shell=True)
subprocess.check_call(f"git checkout main", shell=True)
subprocess.check_call(f"git pull {remote} main", shell=True)
os.chdir(cwd)
if __name__ == "__main__":
if not os.path.isdir(private_dir) and os.path.exists(private_dir):
raise Exception(
f"Private directory {private_dir} is not a directory - please remove"
)
else:
checkout_branch()
| ai2thor-main | scripts/update_private.py |
import os
from pathlib import Path
ABS_PATH_OF_TOP_LEVEL_DIR = os.path.abspath(os.path.dirname(Path(__file__)))
ABS_PATH_OF_DOCS_DIR = os.path.join(ABS_PATH_OF_TOP_LEVEL_DIR, "docs")
| ask4help-main | constants.py |
#!/usr/bin/env python3
"""Entry point to training/validating/testing for a user given experiment
name."""
import allenact.main
if __name__ == "__main__":
    allenact.main.main()
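# Example invocation (hypothetical experiment name and flags; consult `python main.py --help`
# for the options your allenact version actually supports):
#   python main.py <experiment_module_name> -b projects/gym_baselines/experiments -o <output_dir>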
| ask4help-main | main.py |
| ask4help-main | projects/__init__.py |
| ask4help-main | projects/gym_baselines/__init__.py |
from abc import ABC
from typing import Dict, Sequence, Optional, List, Any
from allenact.base_abstractions.experiment_config import ExperimentConfig
from allenact.base_abstractions.sensor import Sensor
class GymBaseConfig(ExperimentConfig, ABC):
    SENSORS: Optional[Sequence[Sensor]] = None

    def _get_sampler_args(
        self, process_ind: int, mode: str, seeds: List[int]
    ) -> Dict[str, Any]:
        raise NotImplementedError

    def train_task_sampler_args(
        self,
        process_ind: int,
        total_processes: int,
        devices: Optional[List[int]] = None,
        seeds: Optional[List[int]] = None,
        deterministic_cudnn: bool = False,
    ) -> Dict[str, Any]:
        return self._get_sampler_args(
            process_ind=process_ind, mode="train", seeds=seeds
        )

    def valid_task_sampler_args(
        self,
        process_ind: int,
        total_processes: int,
        devices: Optional[List[int]] = None,
        seeds: Optional[List[int]] = None,
        deterministic_cudnn: bool = False,
    ) -> Dict[str, Any]:
        return self._get_sampler_args(
            process_ind=process_ind, mode="valid", seeds=seeds
        )

    def test_task_sampler_args(
        self,
        process_ind: int,
        total_processes: int,
        devices: Optional[List[int]] = None,
        seeds: Optional[List[int]] = None,
        deterministic_cudnn: bool = False,
    ) -> Dict[str, Any]:
        return self._get_sampler_args(process_ind=process_ind, mode="test", seeds=seeds)
| ask4help-main | projects/gym_baselines/experiments/gym_base.py |
from abc import ABC
from typing import Dict, Any
from allenact.utils.viz_utils import VizSuite, AgentViewViz
from projects.gym_baselines.experiments.gym_base import GymBaseConfig
class GymHumanoidBaseConfig(GymBaseConfig, ABC):
    @classmethod
    def machine_params(cls, mode="train", **kwargs) -> Dict[str, Any]:
        visualizer = None
        if mode == "test":
            visualizer = VizSuite(
                mode=mode,
                video_viz=AgentViewViz(
                    label="episode_vid",
                    max_clip_length=400,
                    vector_task_source=("render", {"mode": "rgb_array"}),
                    fps=30,
                ),
            )
        return {
            "nprocesses": 8 if mode == "train" else 1,  # rollout
            "devices": [],
            "visualizer": visualizer,
        }
| ask4help-main | projects/gym_baselines/experiments/gym_humanoid_base.py |
from abc import ABC
from typing import Dict, Any
from allenact.utils.viz_utils import VizSuite, AgentViewViz
from projects.gym_baselines.experiments.gym_base import GymBaseConfig
class GymMoJoCoBaseConfig(GymBaseConfig, ABC):
    @classmethod
    def machine_params(cls, mode="train", **kwargs) -> Dict[str, Any]:
        visualizer = None
        if mode == "test":
            visualizer = VizSuite(
                mode=mode,
                video_viz=AgentViewViz(
                    label="episode_vid",
                    max_clip_length=400,
                    vector_task_source=("render", {"mode": "rgb_array"}),
                    fps=30,
                ),
            )
        return {
            "nprocesses": 8 if mode == "train" else 1,  # rollout
            "devices": [],
            "visualizer": visualizer,
        }
| ask4help-main | projects/gym_baselines/experiments/gym_mujoco_base.py |
from abc import ABC
from typing import cast
import torch.optim as optim
from torch.optim.lr_scheduler import LambdaLR
from allenact.algorithms.onpolicy_sync.losses.ppo import PPO
from allenact.utils.experiment_utils import (
TrainingPipeline,
Builder,
PipelineStage,
LinearDecay,
)
from projects.gym_baselines.experiments.gym_humanoid_base import GymHumanoidBaseConfig
class GymHumanoidPPOConfig(GymHumanoidBaseConfig, ABC):
    @classmethod
    def training_pipeline(cls, **kwargs) -> TrainingPipeline:
        lr = 1e-4
        ppo_steps = int(8e7)  # convergence may be after 1e8
        clip_param = 0.1
        value_loss_coef = 0.5
        entropy_coef = 0.0
        num_mini_batch = 4  # optimal 64
        update_repeats = 10
        max_grad_norm = 0.5
        num_steps = 2048
        gamma = 0.99
        use_gae = True
        gae_lambda = 0.95
        advance_scene_rollout_period = None
        save_interval = 200000
        metric_accumulate_interval = 50000
        return TrainingPipeline(
            named_losses=dict(
                ppo_loss=PPO(
                    clip_param=clip_param,
                    value_loss_coef=value_loss_coef,
                    entropy_coef=entropy_coef,
                ),
            ),  # type:ignore
            pipeline_stages=[
                PipelineStage(loss_names=["ppo_loss"], max_stage_steps=ppo_steps),
            ],
            optimizer_builder=Builder(cast(optim.Optimizer, optim.Adam), dict(lr=lr)),
            num_mini_batch=num_mini_batch,
            update_repeats=update_repeats,
            max_grad_norm=max_grad_norm,
            num_steps=num_steps,
            gamma=gamma,
            use_gae=use_gae,
            gae_lambda=gae_lambda,
            advance_scene_rollout_period=advance_scene_rollout_period,
            save_interval=save_interval,
            metric_accumulate_interval=metric_accumulate_interval,
            lr_scheduler_builder=Builder(
                LambdaLR,
                {
                    "lr_lambda": LinearDecay(steps=ppo_steps, startp=1, endp=1)
                },  # constant learning rate
            ),
        )
| ask4help-main | projects/gym_baselines/experiments/gym_humanoid_ddppo.py |
ask4help-main | projects/gym_baselines/experiments/__init__.py |
|
from abc import ABC
from typing import cast
import torch.optim as optim
from torch.optim.lr_scheduler import LambdaLR
from allenact.algorithms.onpolicy_sync.losses.ppo import PPO
from allenact.utils.experiment_utils import (
TrainingPipeline,
Builder,
PipelineStage,
LinearDecay,
)
from projects.gym_baselines.experiments.gym_mujoco_base import GymMoJoCoBaseConfig
class GymMuJoCoPPOConfig(GymMoJoCoBaseConfig, ABC):
@classmethod
def training_pipeline(cls, **kwargs) -> TrainingPipeline:
lr = 3e-4
ppo_steps = int(3e7)
clip_param = 0.2
value_loss_coef = 0.5
entropy_coef = 0.0
num_mini_batch = 4 # optimal 64
update_repeats = 10
max_grad_norm = 0.5
num_steps = 2048
gamma = 0.99
use_gae = True
gae_lambda = 0.95
advance_scene_rollout_period = None
save_interval = 200000
metric_accumulate_interval = 50000
return TrainingPipeline(
named_losses=dict(
ppo_loss=PPO(
clip_param=clip_param,
value_loss_coef=value_loss_coef,
entropy_coef=entropy_coef,
),
), # type:ignore
pipeline_stages=[
PipelineStage(loss_names=["ppo_loss"], max_stage_steps=ppo_steps),
],
optimizer_builder=Builder(cast(optim.Optimizer, optim.Adam), dict(lr=lr)),
num_mini_batch=num_mini_batch,
update_repeats=update_repeats,
max_grad_norm=max_grad_norm,
num_steps=num_steps,
gamma=gamma,
use_gae=use_gae,
gae_lambda=gae_lambda,
advance_scene_rollout_period=advance_scene_rollout_period,
save_interval=save_interval,
metric_accumulate_interval=metric_accumulate_interval,
lr_scheduler_builder=Builder(
LambdaLR, {"lr_lambda": LinearDecay(steps=ppo_steps, startp=1, endp=0)},
),
)
| ask4help-main | projects/gym_baselines/experiments/gym_mujoco_ddppo.py |
from typing import Dict, List, Any
import gym
import torch.nn as nn
from allenact.base_abstractions.experiment_config import TaskSampler
from allenact.base_abstractions.sensor import SensorSuite
from allenact_plugins.gym_plugin.gym_models import MemorylessActorCritic
from allenact_plugins.gym_plugin.gym_sensors import GymMuJoCoSensor
from allenact_plugins.gym_plugin.gym_tasks import GymTaskSampler
from projects.gym_baselines.experiments.gym_mujoco_ddppo import GymMuJoCoPPOConfig
class GymMuJoCoSwimmerConfig(GymMuJoCoPPOConfig):
SENSORS = [
GymMuJoCoSensor(gym_env_name="Swimmer-v2", uuid="gym_mujoco_data"),
]
@classmethod
def create_model(cls, **kwargs) -> nn.Module:
"""We define our `ActorCriticModel` agent using a lightweight
implementation with separate MLPs for actors and critic,
MemorylessActorCritic.
Since this is a model for continuous control, note that the
superclass of our model is `ActorCriticModel[GaussianDistr]`
instead of `ActorCriticModel[CategoricalDistr]`, since we'll use
a Gaussian distribution to sample actions.
"""
action_space = gym.spaces.Box(-1.0, 1.0, (2,), "float32")
return MemorylessActorCritic(
input_uuid="gym_mujoco_data",
action_space=action_space, # specific action_space
observation_space=SensorSuite(cls.SENSORS).observation_spaces,
action_std=0.5,
)
@classmethod
def make_sampler_fn(cls, **kwargs) -> TaskSampler:
return GymTaskSampler(gym_env_type="Swimmer-v2", **kwargs)
def _get_sampler_args(
self, process_ind: int, mode: str, seeds: List[int]
) -> Dict[str, Any]:
"""Generate initialization arguments for train, valid, and test
TaskSamplers.
# Parameters
process_ind : index of the current task sampler
mode: one of `train`, `valid`, or `test`
"""
if mode == "train":
max_tasks = None # infinite training tasks
task_seeds_list = None # no predefined random seeds for training
deterministic_sampling = False # randomly sample tasks in training
else:
max_tasks = 4
# one seed for each task to sample:
# - ensures different seeds for each sampler, and
# - ensures a deterministic set of sampled tasks.
task_seeds_list = list(
range(process_ind * max_tasks, (process_ind + 1) * max_tasks)
)
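            # e.g. process_ind=1 with max_tasks=4 gives task_seeds_list == [4, 5, 6, 7],
            # so each sampler evaluates a disjoint, reproducible block of seeds.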
deterministic_sampling = (
True # deterministically sample task in validation/testing
)
return dict(
gym_env_types=["Swimmer-v2"],
sensors=self.SENSORS, # sensors used to return observations to the agent
max_tasks=max_tasks, # see above
task_seeds_list=task_seeds_list, # see above
deterministic_sampling=deterministic_sampling, # see above
seed=seeds[process_ind],
)
@classmethod
def tag(cls) -> str:
return "Gym-MuJoCo-Swimmer-v2-PPO"
| ask4help-main | projects/gym_baselines/experiments/mujoco/gym_mujoco_swimmer_ddppo.py |
from typing import Dict, List, Any
import gym
import torch.nn as nn
from allenact.base_abstractions.experiment_config import TaskSampler
from allenact.base_abstractions.sensor import SensorSuite
from allenact_plugins.gym_plugin.gym_models import MemorylessActorCritic
from allenact_plugins.gym_plugin.gym_sensors import GymMuJoCoSensor
from allenact_plugins.gym_plugin.gym_tasks import GymTaskSampler
from projects.gym_baselines.experiments.gym_mujoco_ddppo import GymMuJoCoPPOConfig
class GymMuJoCoReacherConfig(GymMuJoCoPPOConfig):
SENSORS = [
GymMuJoCoSensor(gym_env_name="Reacher-v2", uuid="gym_mujoco_data"),
]
@classmethod
def create_model(cls, **kwargs) -> nn.Module:
"""We define our `ActorCriticModel` agent using a lightweight
implementation with separate MLPs for actors and critic,
MemorylessActorCritic.
Since this is a model for continuous control, note that the
superclass of our model is `ActorCriticModel[GaussianDistr]`
instead of `ActorCriticModel[CategoricalDistr]`, since we'll use
a Gaussian distribution to sample actions.
"""
action_space = gym.spaces.Box(-1.0, 1.0, (2,), "float32")
return MemorylessActorCritic(
input_uuid="gym_mujoco_data",
action_space=action_space, # specific action_space
observation_space=SensorSuite(cls.SENSORS).observation_spaces,
action_std=0.5,
)
@classmethod
def make_sampler_fn(cls, **kwargs) -> TaskSampler:
return GymTaskSampler(gym_env_type="Reacher-v2", **kwargs)
def _get_sampler_args(
self, process_ind: int, mode: str, seeds: List[int]
) -> Dict[str, Any]:
"""Generate initialization arguments for train, valid, and test
TaskSamplers.
# Parameters
process_ind : index of the current task sampler
mode: one of `train`, `valid`, or `test`
"""
if mode == "train":
max_tasks = None # infinite training tasks
task_seeds_list = None # no predefined random seeds for training
deterministic_sampling = False # randomly sample tasks in training
else:
max_tasks = 4
# one seed for each task to sample:
# - ensures different seeds for each sampler, and
# - ensures a deterministic set of sampled tasks.
task_seeds_list = list(
range(process_ind * max_tasks, (process_ind + 1) * max_tasks)
)
deterministic_sampling = (
True # deterministically sample task in validation/testing
)
return dict(
gym_env_types=["Reacher-v2"],
sensors=self.SENSORS, # sensors used to return observations to the agent
max_tasks=max_tasks, # see above
task_seeds_list=task_seeds_list, # see above
deterministic_sampling=deterministic_sampling, # see above
seed=seeds[process_ind],
)
@classmethod
def tag(cls) -> str:
return "Gym-MuJoCo-Reacher-v2-PPO"
| ask4help-main | projects/gym_baselines/experiments/mujoco/gym_mujoco_reacher_ddppo.py |
from typing import Dict, List, Any
import gym
import torch.nn as nn
from allenact.base_abstractions.experiment_config import TaskSampler
from allenact.base_abstractions.sensor import SensorSuite
from allenact_plugins.gym_plugin.gym_models import MemorylessActorCritic
from allenact_plugins.gym_plugin.gym_sensors import GymMuJoCoSensor
from allenact_plugins.gym_plugin.gym_tasks import GymTaskSampler
from projects.gym_baselines.experiments.gym_mujoco_ddppo import GymMuJoCoPPOConfig
class GymMuJoCoWalkerConfig(GymMuJoCoPPOConfig):
SENSORS = [
GymMuJoCoSensor(gym_env_name="Walker2d-v2", uuid="gym_mujoco_data"),
]
@classmethod
def create_model(cls, **kwargs) -> nn.Module:
"""We define our `ActorCriticModel` agent using a lightweight
implementation with separate MLPs for actors and critic,
MemorylessActorCritic.
Since this is a model for continuous control, note that the
superclass of our model is `ActorCriticModel[GaussianDistr]`
instead of `ActorCriticModel[CategoricalDistr]`, since we'll use
a Gaussian distribution to sample actions.
"""
action_space = gym.spaces.Box(-1.0, 1.0, (6,), "float32")
return MemorylessActorCritic(
input_uuid="gym_mujoco_data",
action_space=action_space, # specific action_space
observation_space=SensorSuite(cls.SENSORS).observation_spaces,
action_std=0.5,
)
@classmethod
def make_sampler_fn(cls, **kwargs) -> TaskSampler:
return GymTaskSampler(gym_env_type="Walker2d-v2", **kwargs)
def _get_sampler_args(
self, process_ind: int, mode: str, seeds: List[int]
) -> Dict[str, Any]:
"""Generate initialization arguments for train, valid, and test
TaskSamplers.
# Parameters
process_ind : index of the current task sampler
mode: one of `train`, `valid`, or `test`
"""
if mode == "train":
max_tasks = None # infinite training tasks
task_seeds_list = None # no predefined random seeds for training
deterministic_sampling = False # randomly sample tasks in training
else:
max_tasks = 4
# one seed for each task to sample:
# - ensures different seeds for each sampler, and
# - ensures a deterministic set of sampled tasks.
task_seeds_list = list(
range(process_ind * max_tasks, (process_ind + 1) * max_tasks)
)
deterministic_sampling = (
True # deterministically sample task in validation/testing
)
return dict(
gym_env_types=["Walker2d-v2"],
sensors=self.SENSORS, # sensors used to return observations to the agent
max_tasks=max_tasks, # see above
task_seeds_list=task_seeds_list, # see above
deterministic_sampling=deterministic_sampling, # see above
seed=seeds[process_ind],
)
@classmethod
def tag(cls) -> str:
return "Gym-MuJoCo-Walker2d-v2-PPO"
| ask4help-main | projects/gym_baselines/experiments/mujoco/gym_mujoco_walker2d_ddppo.py |
ask4help-main | projects/gym_baselines/experiments/mujoco/__init__.py |
|
from typing import Dict, List, Any
import gym
import torch.nn as nn
from allenact.base_abstractions.experiment_config import TaskSampler
from allenact.base_abstractions.sensor import SensorSuite
from allenact_plugins.gym_plugin.gym_models import MemorylessActorCritic
from allenact_plugins.gym_plugin.gym_sensors import GymMuJoCoSensor
from allenact_plugins.gym_plugin.gym_tasks import GymTaskSampler
from projects.gym_baselines.experiments.gym_mujoco_ddppo import GymMuJoCoPPOConfig
class GymMuJoCoHalfCheetahConfig(GymMuJoCoPPOConfig):
SENSORS = [
GymMuJoCoSensor(gym_env_name="HalfCheetah-v2", uuid="gym_mujoco_data"),
]
@classmethod
def create_model(cls, **kwargs) -> nn.Module:
"""We define our `ActorCriticModel` agent using a lightweight
implementation with separate MLPs for actors and critic,
MemorylessActorCritic.
Since this is a model for continuous control, note that the
superclass of our model is `ActorCriticModel[GaussianDistr]`
instead of `ActorCriticModel[CategoricalDistr]`, since we'll use
a Gaussian distribution to sample actions.
"""
action_space = gym.spaces.Box(-1.0, 1.0, (6,), "float32")
return MemorylessActorCritic(
input_uuid="gym_mujoco_data",
action_space=action_space, # specific action_space
observation_space=SensorSuite(cls.SENSORS).observation_spaces,
action_std=0.5,
)
@classmethod
def make_sampler_fn(cls, **kwargs) -> TaskSampler:
return GymTaskSampler(gym_env_type="HalfCheetah-v2", **kwargs)
def _get_sampler_args(
self, process_ind: int, mode: str, seeds: List[int]
) -> Dict[str, Any]:
"""Generate initialization arguments for train, valid, and test
TaskSamplers.
# Parameters
process_ind : index of the current task sampler
mode: one of `train`, `valid`, or `test`
"""
if mode == "train":
max_tasks = None # infinite training tasks
task_seeds_list = None # no predefined random seeds for training
deterministic_sampling = False # randomly sample tasks in training
else:
max_tasks = 4
# one seed for each task to sample:
# - ensures different seeds for each sampler, and
# - ensures a deterministic set of sampled tasks.
task_seeds_list = list(
range(process_ind * max_tasks, (process_ind + 1) * max_tasks)
)
deterministic_sampling = (
True # deterministically sample task in validation/testing
)
return dict(
gym_env_types=["HalfCheetah-v2"],
sensors=self.SENSORS, # sensors used to return observations to the agent
max_tasks=max_tasks, # see above
task_seeds_list=task_seeds_list, # see above
deterministic_sampling=deterministic_sampling, # see above
seed=seeds[process_ind],
)
@classmethod
def tag(cls) -> str:
return "Gym-MuJoCo-HalfCheetah-v2-PPO"
| ask4help-main | projects/gym_baselines/experiments/mujoco/gym_mujoco_halfcheetah_ddppo.py |
from typing import Dict, List, Any
import gym
import torch.nn as nn
from allenact.base_abstractions.experiment_config import TaskSampler
from allenact.base_abstractions.sensor import SensorSuite
from allenact_plugins.gym_plugin.gym_models import MemorylessActorCritic
from allenact_plugins.gym_plugin.gym_sensors import GymMuJoCoSensor
from allenact_plugins.gym_plugin.gym_tasks import GymTaskSampler
from projects.gym_baselines.experiments.gym_humanoid_ddppo import GymHumanoidPPOConfig
class GymMuJoCoHumanoidConfig(GymHumanoidPPOConfig):
SENSORS = [
GymMuJoCoSensor(gym_env_name="Humanoid-v2", uuid="gym_mujoco_data"),
]
@classmethod
def create_model(cls, **kwargs) -> nn.Module:
"""We define our `ActorCriticModel` agent using a lightweight
implementation with separate MLPs for actors and critic,
MemorylessActorCritic.
Since this is a model for continuous control, note that the
superclass of our model is `ActorCriticModel[GaussianDistr]`
instead of `ActorCriticModel[CategoricalDistr]`, since we'll use
a Gaussian distribution to sample actions.
"""
action_space = gym.spaces.Box(
-0.4000000059604645, 0.4000000059604645, (17,), "float32"
)
return MemorylessActorCritic(
input_uuid="gym_mujoco_data",
action_space=action_space, # specific action_space
observation_space=SensorSuite(cls.SENSORS).observation_spaces,
action_std=0.5,
)
@classmethod
def make_sampler_fn(cls, **kwargs) -> TaskSampler:
return GymTaskSampler(gym_env_type="Humanoid-v2", **kwargs)
def _get_sampler_args(
self, process_ind: int, mode: str, seeds: List[int]
) -> Dict[str, Any]:
"""Generate initialization arguments for train, valid, and test
TaskSamplers.
# Parameters
process_ind : index of the current task sampler
mode: one of `train`, `valid`, or `test`
"""
if mode == "train":
max_tasks = None # infinite training tasks
task_seeds_list = None # no predefined random seeds for training
deterministic_sampling = False # randomly sample tasks in training
else:
max_tasks = 4
# one seed for each task to sample:
# - ensures different seeds for each sampler, and
# - ensures a deterministic set of sampled tasks.
task_seeds_list = list(
range(process_ind * max_tasks, (process_ind + 1) * max_tasks)
)
deterministic_sampling = (
True # deterministically sample task in validation/testing
)
return dict(
gym_env_types=["Humanoid-v2"],
sensors=self.SENSORS, # sensors used to return observations to the agent
max_tasks=max_tasks, # see above
task_seeds_list=task_seeds_list, # see above
deterministic_sampling=deterministic_sampling, # see above
seed=seeds[process_ind],
)
@classmethod
def tag(cls) -> str:
return "Gym-MuJoCo-Humanoid-v2-PPO"
| ask4help-main | projects/gym_baselines/experiments/mujoco/gym_mujoco_humanoid_ddppo.py |
from typing import Dict, List, Any
import gym
import torch.nn as nn
from allenact.base_abstractions.experiment_config import TaskSampler
from allenact.base_abstractions.sensor import SensorSuite
from allenact_plugins.gym_plugin.gym_models import MemorylessActorCritic
from allenact_plugins.gym_plugin.gym_sensors import GymMuJoCoSensor
from allenact_plugins.gym_plugin.gym_tasks import GymTaskSampler
from projects.gym_baselines.experiments.gym_mujoco_ddppo import GymMuJoCoPPOConfig
class GymMuJoInvertedDoublePendulumConfig(GymMuJoCoPPOConfig):
SENSORS = [
GymMuJoCoSensor(
gym_env_name="InvertedDoublePendulum-v2", uuid="gym_mujoco_data"
),
]
@classmethod
def create_model(cls, **kwargs) -> nn.Module:
"""We define our `ActorCriticModel` agent using a lightweight
implementation with separate MLPs for actors and critic,
MemorylessActorCritic.
Since this is a model for continuous control, note that the
superclass of our model is `ActorCriticModel[GaussianDistr]`
instead of `ActorCriticModel[CategoricalDistr]`, since we'll use
a Gaussian distribution to sample actions.
"""
action_space = gym.spaces.Box(-1.0, 1.0, (1,), "float32")
return MemorylessActorCritic(
input_uuid="gym_mujoco_data",
action_space=action_space, # specific action_space
observation_space=SensorSuite(cls.SENSORS).observation_spaces,
action_std=0.5,
)
@classmethod
def make_sampler_fn(cls, **kwargs) -> TaskSampler:
return GymTaskSampler(gym_env_type="InvertedDoublePendulum-v2", **kwargs)
def _get_sampler_args(
self, process_ind: int, mode: str, seeds: List[int]
) -> Dict[str, Any]:
"""Generate initialization arguments for train, valid, and test
TaskSamplers.
# Parameters
process_ind : index of the current task sampler
mode: one of `train`, `valid`, or `test`
"""
if mode == "train":
max_tasks = None # infinite training tasks
task_seeds_list = None # no predefined random seeds for training
deterministic_sampling = False # randomly sample tasks in training
else:
max_tasks = 4
# one seed for each task to sample:
# - ensures different seeds for each sampler, and
# - ensures a deterministic set of sampled tasks.
task_seeds_list = list(
range(process_ind * max_tasks, (process_ind + 1) * max_tasks)
)
deterministic_sampling = (
True # deterministically sample task in validation/testing
)
return dict(
gym_env_types=["InvertedDoublePendulum-v2"],
sensors=self.SENSORS, # sensors used to return observations to the agent
max_tasks=max_tasks, # see above
task_seeds_list=task_seeds_list, # see above
deterministic_sampling=deterministic_sampling, # see above
seed=seeds[process_ind],
)
@classmethod
def tag(cls) -> str:
return "Gym-MuJoCo-InvertedDoublePendulum-v2-PPO"
| ask4help-main | projects/gym_baselines/experiments/mujoco/gym_mujoco_inverteddoublependulum_ddppo.py |
from typing import Dict, List, Any
import gym
import torch.nn as nn
from allenact.base_abstractions.experiment_config import TaskSampler
from allenact.base_abstractions.sensor import SensorSuite
from allenact_plugins.gym_plugin.gym_models import MemorylessActorCritic
from allenact_plugins.gym_plugin.gym_sensors import GymMuJoCoSensor
from allenact_plugins.gym_plugin.gym_tasks import GymTaskSampler
from projects.gym_baselines.experiments.gym_mujoco_ddppo import GymMuJoCoPPOConfig
class GymMuJoCoAntConfig(GymMuJoCoPPOConfig):
SENSORS = [
GymMuJoCoSensor(gym_env_name="Ant-v2", uuid="gym_mujoco_data"),
]
@classmethod
def create_model(cls, **kwargs) -> nn.Module:
"""We define our `ActorCriticModel` agent using a lightweight
implementation with separate MLPs for actors and critic,
MemorylessActorCritic.
Since this is a model for continuous control, note that the
superclass of our model is `ActorCriticModel[GaussianDistr]`
instead of `ActorCriticModel[CategoricalDistr]`, since we'll use
a Gaussian distribution to sample actions.
"""
action_space = gym.spaces.Box(-3.0, 3.0, (8,), "float32")
return MemorylessActorCritic(
input_uuid="gym_mujoco_data",
action_space=action_space, # specific action_space
observation_space=SensorSuite(cls.SENSORS).observation_spaces,
action_std=0.5,
)
@classmethod
def make_sampler_fn(cls, **kwargs) -> TaskSampler:
return GymTaskSampler(gym_env_type="Ant-v2", **kwargs)
def _get_sampler_args(
self, process_ind: int, mode: str, seeds: List[int]
) -> Dict[str, Any]:
"""Generate initialization arguments for train, valid, and test
TaskSamplers.
# Parameters
process_ind : index of the current task sampler
mode: one of `train`, `valid`, or `test`
"""
if mode == "train":
max_tasks = None # infinite training tasks
task_seeds_list = None # no predefined random seeds for training
deterministic_sampling = False # randomly sample tasks in training
else:
max_tasks = 4
# one seed for each task to sample:
# - ensures different seeds for each sampler, and
# - ensures a deterministic set of sampled tasks.
task_seeds_list = list(
range(process_ind * max_tasks, (process_ind + 1) * max_tasks)
)
deterministic_sampling = (
True # deterministically sample task in validation/testing
)
return dict(
gym_env_types=["Ant-v2"],
sensors=self.SENSORS, # sensors used to return observations to the agent
max_tasks=max_tasks, # see above
task_seeds_list=task_seeds_list, # see above
deterministic_sampling=deterministic_sampling, # see above
seed=seeds[process_ind],
)
@classmethod
def tag(cls) -> str:
return "Gym-MuJoCo-Ant-v2-PPO"
| ask4help-main | projects/gym_baselines/experiments/mujoco/gym_mujoco_ant_ddppo.py |
from typing import Dict, List, Any
import gym
import torch.nn as nn
from allenact.base_abstractions.experiment_config import TaskSampler
from allenact.base_abstractions.sensor import SensorSuite
from allenact_plugins.gym_plugin.gym_models import MemorylessActorCritic
from allenact_plugins.gym_plugin.gym_sensors import GymMuJoCoSensor
from allenact_plugins.gym_plugin.gym_tasks import GymTaskSampler
from projects.gym_baselines.experiments.gym_mujoco_ddppo import GymMuJoCoPPOConfig
class GymMuJoCoHopperConfig(GymMuJoCoPPOConfig):
SENSORS = [
GymMuJoCoSensor(gym_env_name="Hopper-v2", uuid="gym_mujoco_data"),
]
@classmethod
def create_model(cls, **kwargs) -> nn.Module:
"""We define our `ActorCriticModel` agent using a lightweight
implementation with separate MLPs for actors and critic,
MemorylessActorCritic.
Since this is a model for continuous control, note that the
superclass of our model is `ActorCriticModel[GaussianDistr]`
instead of `ActorCriticModel[CategoricalDistr]`, since we'll use
a Gaussian distribution to sample actions.
"""
action_space = gym.spaces.Box(-1.0, 1.0, (3,), "float32")
return MemorylessActorCritic(
input_uuid="gym_mujoco_data",
action_space=action_space, # specific action_space
observation_space=SensorSuite(cls.SENSORS).observation_spaces,
action_std=0.5,
)
@classmethod
def make_sampler_fn(cls, **kwargs) -> TaskSampler:
return GymTaskSampler(gym_env_type="Hopper-v2", **kwargs)
def _get_sampler_args(
self, process_ind: int, mode: str, seeds: List[int]
) -> Dict[str, Any]:
"""Generate initialization arguments for train, valid, and test
TaskSamplers.
# Parameters
process_ind : index of the current task sampler
mode: one of `train`, `valid`, or `test`
"""
if mode == "train":
max_tasks = None # infinite training tasks
task_seeds_list = None # no predefined random seeds for training
deterministic_sampling = False # randomly sample tasks in training
else:
max_tasks = 4
# one seed for each task to sample:
# - ensures different seeds for each sampler, and
# - ensures a deterministic set of sampled tasks.
task_seeds_list = list(
range(process_ind * max_tasks, (process_ind + 1) * max_tasks)
)
deterministic_sampling = (
True # deterministically sample task in validation/testing
)
return dict(
gym_env_types=["Hopper-v2"],
sensors=self.SENSORS, # sensors used to return observations to the agent
max_tasks=max_tasks, # see above
task_seeds_list=task_seeds_list, # see above
deterministic_sampling=deterministic_sampling, # see above
seed=seeds[process_ind],
)
@classmethod
def tag(cls) -> str:
return "Gym-MuJoCo-Hopper-v2-PPO"
| ask4help-main | projects/gym_baselines/experiments/mujoco/gym_mujoco_hopper_ddppo.py |
from typing import Dict, List, Any
import gym
import torch.nn as nn
from allenact.base_abstractions.experiment_config import TaskSampler
from allenact.base_abstractions.sensor import SensorSuite
from allenact_plugins.gym_plugin.gym_models import MemorylessActorCritic
from allenact_plugins.gym_plugin.gym_sensors import GymMuJoCoSensor
from allenact_plugins.gym_plugin.gym_tasks import GymTaskSampler
from projects.gym_baselines.experiments.gym_mujoco_ddppo import GymMuJoCoPPOConfig
class GymMuJoCoInvertedPendulumConfig(GymMuJoCoPPOConfig):
SENSORS = [
GymMuJoCoSensor(gym_env_name="InvertedPendulum-v2", uuid="gym_mujoco_data"),
]
@classmethod
def create_model(cls, **kwargs) -> nn.Module:
"""We define our `ActorCriticModel` agent using a lightweight
implementation with separate MLPs for actors and critic,
MemorylessActorCritic.
Since this is a model for continuous control, note that the
superclass of our model is `ActorCriticModel[GaussianDistr]`
instead of `ActorCriticModel[CategoricalDistr]`, since we'll use
a Gaussian distribution to sample actions.
"""
action_space = gym.spaces.Box(-3.0, 3.0, (1,), "float32")
return MemorylessActorCritic(
input_uuid="gym_mujoco_data",
action_space=action_space, # specific action_space
observation_space=SensorSuite(cls.SENSORS).observation_spaces,
action_std=0.5,
)
@classmethod
def make_sampler_fn(cls, **kwargs) -> TaskSampler:
return GymTaskSampler(gym_env_type="InvertedPendulum-v2", **kwargs)
def _get_sampler_args(
self, process_ind: int, mode: str, seeds: List[int]
) -> Dict[str, Any]:
"""Generate initialization arguments for train, valid, and test
TaskSamplers.
# Parameters
process_ind : index of the current task sampler
mode: one of `train`, `valid`, or `test`
"""
if mode == "train":
max_tasks = None # infinite training tasks
task_seeds_list = None # no predefined random seeds for training
deterministic_sampling = False # randomly sample tasks in training
else:
max_tasks = 4
# one seed for each task to sample:
# - ensures different seeds for each sampler, and
# - ensures a deterministic set of sampled tasks.
task_seeds_list = list(
range(process_ind * max_tasks, (process_ind + 1) * max_tasks)
)
deterministic_sampling = (
True # deterministically sample task in validation/testing
)
return dict(
gym_env_types=["InvertedPendulum-v2"],
sensors=self.SENSORS, # sensors used to return observations to the agent
max_tasks=max_tasks, # see above
task_seeds_list=task_seeds_list, # see above
deterministic_sampling=deterministic_sampling, # see above
seed=seeds[process_ind],
)
@classmethod
def tag(cls) -> str:
return "Gym-MuJoCo-InvertedPendulum-v2-PPO"
| ask4help-main | projects/gym_baselines/experiments/mujoco/gym_mujoco_invertedpendulum_ddppo.py |
ask4help-main | projects/gym_baselines/models/__init__.py |
|
"""
Note: this file exists only for format consistency with the other baselines in this project, so for now it is
identical to `allenact_plugins.gym_models.py`. If it were used with Gym Robotics environments, however, some
modification would be needed.
For example, for `state_dim`:
if input_uuid == 'gym_robotics_data':
# consider that the observation space is Dict for robotics env
state_dim = observation_space[self.input_uuid]['observation'].shape[0]
else:
assert len(observation_space[self.input_uuid].shape) == 1
state_dim = observation_space[self.input_uuid].shape[0]
"""
| ask4help-main | projects/gym_baselines/models/gym_models.py |
ask4help-main | projects/objectnav_baselines/__init__.py |
|
from typing import Sequence, Union
import gym
import torch.nn as nn
from allenact.base_abstractions.preprocessor import Preprocessor
from allenact.embodiedai.sensors.vision_sensors import RGBSensor, DepthSensor
from allenact.utils.experiment_utils import Builder
from allenact_plugins.ithor_plugin.ithor_sensors import GoalObjectTypeThorSensor
from allenact_plugins.robothor_plugin.robothor_tasks import ObjectNavTask
from projects.objectnav_baselines.experiments.objectnav_base import ObjectNavBaseConfig
from projects.objectnav_baselines.models.object_nav_models import ObjectNavActorCritic
class ObjectNavMixInUnfrozenResNetGRUConfig(ObjectNavBaseConfig):
"""No ResNet preprocessor, using Raw Image as input, and learn a ResNet as
encoder."""
@classmethod
def preprocessors(cls) -> Sequence[Union[Preprocessor, Builder[Preprocessor]]]:
return []
BACKBONE = (
"gnresnet18"
# "simple_cnn"
)
@classmethod
def create_model(cls, **kwargs) -> nn.Module:
rgb_uuid = next((s.uuid for s in cls.SENSORS if isinstance(s, RGBSensor)), None)
depth_uuid = next(
(s.uuid for s in cls.SENSORS if isinstance(s, DepthSensor)), None
)
goal_sensor_uuid = next(
(s.uuid for s in cls.SENSORS if isinstance(s, GoalObjectTypeThorSensor)),
None,
)
return ObjectNavActorCritic(
action_space=gym.spaces.Discrete(len(ObjectNavTask.class_action_names())),
observation_space=kwargs["sensor_preprocessor_graph"].observation_spaces,
rgb_uuid=rgb_uuid,
depth_uuid=depth_uuid,
goal_sensor_uuid=goal_sensor_uuid,
hidden_size=192
if cls.MULTIPLE_BELIEFS and len(cls.AUXILIARY_UUIDS) > 1
else 512,
backbone=cls.BACKBONE,
resnet_baseplanes=32,
object_type_embedding_dim=32,
num_rnn_layers=1,
rnn_type="GRU",
add_prev_actions=cls.ADD_PREV_ACTIONS,
action_embed_size=6,
auxiliary_uuids=cls.AUXILIARY_UUIDS,
multiple_beliefs=cls.MULTIPLE_BELIEFS,
beliefs_fusion=cls.BELIEF_FUSION,
)
| ask4help-main | projects/objectnav_baselines/experiments/objectnav_mixin_unfrozenresnet_gru.py |
ask4help-main | projects/objectnav_baselines/experiments/__init__.py |
|
import torch.optim as optim
from torch.optim.lr_scheduler import LambdaLR
from allenact.algorithms.onpolicy_sync.losses import PPO
from allenact.algorithms.onpolicy_sync.losses.grouped_action_imitation import (
GroupedActionImitation,
)
from allenact.algorithms.onpolicy_sync.losses.ppo import PPOConfig
from allenact.utils.experiment_utils import (
Builder,
PipelineStage,
TrainingPipeline,
LinearDecay,
)
from allenact_plugins.ithor_plugin.ithor_sensors import TakeEndActionThorNavSensor
from allenact_plugins.robothor_plugin import robothor_constants
from allenact_plugins.robothor_plugin.robothor_tasks import ObjectNavTask
from projects.objectnav_baselines.experiments.objectnav_base import ObjectNavBaseConfig
class ObjectNavThorMixInPPOAndGBCConfig(ObjectNavBaseConfig):
SENSORS = (
TakeEndActionThorNavSensor(
nactions=len(ObjectNavTask.class_action_names()), uuid="expert_group_action"
),
)
def training_pipeline(self, **kwargs):
ppo_steps = int(300000000)
lr = 3e-4
num_mini_batch = 1
update_repeats = 4
num_steps = 128
save_interval = 5000000
log_interval = 10000
gamma = 0.99
use_gae = True
gae_lambda = 0.95
max_grad_norm = 0.5
action_strs = ObjectNavTask.class_action_names()
non_end_action_inds_set = {
i for i, a in enumerate(action_strs) if a != robothor_constants.END
}
end_action_ind_set = {action_strs.index(robothor_constants.END)}
return TrainingPipeline(
save_interval=save_interval,
metric_accumulate_interval=log_interval,
optimizer_builder=Builder(optim.Adam, dict(lr=lr)),
num_mini_batch=num_mini_batch,
update_repeats=update_repeats,
max_grad_norm=max_grad_norm,
num_steps=num_steps,
named_losses={
"ppo_loss": PPO(**PPOConfig),
"grouped_action_imitation": GroupedActionImitation(
nactions=len(ObjectNavTask.class_action_names()),
action_groups=[non_end_action_inds_set, end_action_ind_set],
),
},
gamma=gamma,
use_gae=use_gae,
gae_lambda=gae_lambda,
advance_scene_rollout_period=self.ADVANCE_SCENE_ROLLOUT_PERIOD,
pipeline_stages=[
PipelineStage(
loss_names=["ppo_loss", "grouped_action_imitation"],
max_stage_steps=ppo_steps,
)
],
lr_scheduler_builder=Builder(
LambdaLR, {"lr_lambda": LinearDecay(steps=ppo_steps)}
),
)
| ask4help-main | projects/objectnav_baselines/experiments/objectnav_thor_mixin_ddppo_and_gbc.py |
from projects.objectnav_baselines.experiments.objectnav_mixin_resnetgru import (
ObjectNavMixInResNetGRUConfig,
)
class ObjectNavMixInResNet18GRUConfig(ObjectNavMixInResNetGRUConfig):
RESNET_TYPE: str = "RN18"
| ask4help-main | projects/objectnav_baselines/experiments/objectnav_mixin_resnet18gru.py |
import glob
import os
import platform
from abc import ABC
from math import ceil
from typing import Dict, Any, List, Optional, Sequence, Tuple, cast
import gym
import numpy as np
import torch
from allenact.base_abstractions.experiment_config import MachineParams
from allenact.base_abstractions.preprocessor import SensorPreprocessorGraph
from allenact.base_abstractions.sensor import SensorSuite, ExpertActionSensor
from allenact.base_abstractions.task import TaskSampler
from allenact.utils.experiment_utils import evenly_distribute_count_into_bins
from allenact.utils.system import get_logger
from allenact_plugins.ithor_plugin.ithor_util import (
horizontal_to_vertical_fov,
get_open_x_displays,
)
from allenact_plugins.robothor_plugin.robothor_sensors import DepthSensorThor
from allenact_plugins.robothor_plugin.robothor_task_samplers import (
ObjectNavDatasetTaskSampler,
)
from allenact_plugins.robothor_plugin.robothor_tasks import ObjectNavTask
from projects.objectnav_baselines.experiments.objectnav_base import ObjectNavBaseConfig
import ai2thor
from packaging import version
if ai2thor.__version__ not in ["0.0.1", None] and version.parse(
ai2thor.__version__
) < version.parse("3.2.0"):
raise ImportError(
"To run the AI2-THOR ObjectNav baseline experiments you must use"
" ai2thor version 3.2.0 or higher."
)
class ObjectNavThorBaseConfig(ObjectNavBaseConfig, ABC):
"""The base config for all AI2-THOR ObjectNav experiments."""
DEFAULT_NUM_TRAIN_PROCESSES: Optional[int] = None
DEFAULT_TRAIN_GPU_IDS = tuple(range(torch.cuda.device_count()))
DEFAULT_VALID_GPU_IDS = (torch.cuda.device_count() - 1,)
    DEFAULT_TEST_GPU_IDS = tuple(range(torch.cuda.device_count()))  # (torch.cuda.device_count() - 1,)
TRAIN_DATASET_DIR: Optional[str] = None
VAL_DATASET_DIR: Optional[str] = None
TEST_DATASET_DIR: Optional[str] = None
AGENT_MODE = "default"
TARGET_TYPES: Optional[Sequence[str]] = None
THOR_COMMIT_ID: Optional[str] = None
THOR_IS_HEADLESS: bool = False
def __init__(
self,
num_train_processes: Optional[int] = None,
num_test_processes: Optional[int] = None,
test_on_validation: bool = False,
train_gpu_ids: Optional[Sequence[int]] = None,
val_gpu_ids: Optional[Sequence[int]] = None,
test_gpu_ids: Optional[Sequence[int]] = None,
randomize_train_materials: bool = False,
):
super().__init__()
def v_or_default(v, default):
return v if v is not None else default
self.num_train_processes = v_or_default(
num_train_processes, self.DEFAULT_NUM_TRAIN_PROCESSES
)
self.num_test_processes = v_or_default(
num_test_processes, (10 if torch.cuda.is_available() else 1)
)
self.test_on_validation = test_on_validation
self.train_gpu_ids = v_or_default(train_gpu_ids, self.DEFAULT_TRAIN_GPU_IDS)
self.val_gpu_ids = v_or_default(val_gpu_ids, self.DEFAULT_VALID_GPU_IDS)
self.test_gpu_ids = v_or_default(test_gpu_ids, self.DEFAULT_TEST_GPU_IDS)
self.sampler_devices = self.train_gpu_ids
self.randomize_train_materials = randomize_train_materials
@classmethod
def env_args(cls):
assert cls.THOR_COMMIT_ID is not None
return dict(
width=cls.CAMERA_WIDTH,
height=cls.CAMERA_HEIGHT,
commit_id=cls.THOR_COMMIT_ID,
continuousMode=True,
applyActionNoise=cls.STOCHASTIC,
rotateStepDegrees=cls.ROTATION_DEGREES,
visibilityDistance=cls.VISIBILITY_DISTANCE,
gridSize=cls.STEP_SIZE,
snapToGrid=False,
agentMode=cls.AGENT_MODE,
fieldOfView=horizontal_to_vertical_fov(
horizontal_fov_in_degrees=cls.HORIZONTAL_FIELD_OF_VIEW,
width=cls.CAMERA_WIDTH,
height=cls.CAMERA_HEIGHT,
),
include_private_scenes=False,
renderDepthImage=any(isinstance(s, DepthSensorThor) for s in cls.SENSORS),
)
def machine_params(self, mode="train", **kwargs):
sampler_devices: Sequence[torch.device] = []
devices: Sequence[torch.device]
if mode == "train":
workers_per_device = 1
devices = (
[torch.device("cpu")]
if not torch.cuda.is_available()
else cast(Tuple, self.train_gpu_ids) * workers_per_device
)
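            # evenly_distribute_count_into_bins splits num_train_processes across the
            # available devices as evenly as possible (e.g. 10 processes over 3 GPUs
            # yields bins of roughly 4/3/3).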
nprocesses = evenly_distribute_count_into_bins(
self.num_train_processes, max(len(devices), 1)
)
sampler_devices = self.sampler_devices
elif mode == "valid":
nprocesses = 1
devices = (
[torch.device("cpu")]
if not torch.cuda.is_available()
else self.val_gpu_ids
)
elif mode == "test":
devices = (
[torch.device("cpu")]
if not torch.cuda.is_available()
else self.test_gpu_ids
)
nprocesses = evenly_distribute_count_into_bins(
self.num_test_processes, max(len(devices), 1)
)
# nprocesses=1
else:
raise NotImplementedError("mode must be 'train', 'valid', or 'test'.")
sensors = [*self.SENSORS]
        # NOTE: unlike the standard ObjectNav baselines, the expert action sensor is
        # intentionally kept in all modes here (it is needed by Ask4Help at evaluation);
        # the filtering that would otherwise apply is left for reference:
        # if mode != "train":
        #     sensors = [s for s in sensors if not isinstance(s, ExpertActionSensor)]
sensor_preprocessor_graph = (
SensorPreprocessorGraph(
source_observation_spaces=SensorSuite(sensors).observation_spaces,
preprocessors=self.preprocessors(),
)
if mode == "train"
or (
(isinstance(nprocesses, int) and nprocesses > 0)
or (isinstance(nprocesses, Sequence) and sum(nprocesses) > 0)
)
else None
)
return MachineParams(
nprocesses=nprocesses,
devices=devices,
sampler_devices=sampler_devices
if mode == "train"
else devices, # ignored with > 1 gpu_ids
sensor_preprocessor_graph=sensor_preprocessor_graph,
)
@classmethod
def make_sampler_fn(cls, **kwargs) -> TaskSampler:
return ObjectNavDatasetTaskSampler(**kwargs)
@staticmethod
def _partition_inds(n: int, num_parts: int):
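        # e.g. _partition_inds(10, 3) -> array([0, 3, 7, 10]); process i is then assigned
        # scenes[inds[i] : inds[i + 1]].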
return np.round(np.linspace(0, n, num_parts + 1, endpoint=True)).astype(
np.int32
)
def _get_sampler_args_for_scene_split(
self,
scenes_dir: str,
process_ind: int,
total_processes: int,
devices: Optional[List[int]],
seeds: Optional[List[int]],
deterministic_cudnn: bool,
include_expert_sensor: bool = True,
allow_oversample: bool = False,
) -> Dict[str, Any]:
path = os.path.join(scenes_dir, "*.json.gz")
scenes = [scene.split("/")[-1].split(".")[0] for scene in glob.glob(path)]
if len(scenes) == 0:
raise RuntimeError(
(
"Could find no scene dataset information in directory {}."
" Are you sure you've downloaded them? "
" If not, see https://allenact.org/installation/download-datasets/ information"
" on how this can be done."
).format(scenes_dir)
)
oversample_warning = (
f"Warning: oversampling some of the scenes ({scenes}) to feed all processes ({total_processes})."
" You can avoid this by setting a number of workers divisible by the number of scenes"
)
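        # Example: with 3 scenes and 4 processes the scene list is duplicated
        # (ceil(4 / 3) = 2 copies -> 6 scenes) and then trimmed to 4 * (6 // 4) = 4 scenes,
        # so each process receives exactly one (possibly repeated) scene.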
if total_processes > len(scenes): # oversample some scenes -> bias
if not allow_oversample:
raise RuntimeError(
f"Cannot have `total_processes > len(scenes)`"
f" ({total_processes} > {len(scenes)}) when `allow_oversample` is `False`."
)
if total_processes % len(scenes) != 0:
get_logger().warning(oversample_warning)
scenes = scenes * int(ceil(total_processes / len(scenes)))
scenes = scenes[: total_processes * (len(scenes) // total_processes)]
elif len(scenes) % total_processes != 0:
get_logger().warning(oversample_warning)
inds = self._partition_inds(len(scenes), total_processes)
if not self.THOR_IS_HEADLESS:
x_display: Optional[str] = None
if platform.system() == "Linux":
x_displays = get_open_x_displays(throw_error_if_empty=True)
if len([d for d in devices if d != torch.device("cpu")]) > len(
x_displays
):
get_logger().warning(
f"More GPU devices found than X-displays (devices: `{x_displays}`, x_displays: `{x_displays}`)."
f" This is not necessarily a bad thing but may mean that you're not using GPU memory as"
f" efficiently as possible. Consider following the instructions here:"
f" https://allenact.org/installation/installation-framework/#installation-of-ithor-ithor-plugin"
f" describing how to start an X-display on every GPU."
)
x_display = x_displays[process_ind % len(x_displays)]
device_dict = dict(x_display=x_display)
else:
device_dict = dict(gpu_device=devices[process_ind % len(devices)])
return {
"scenes": scenes[inds[process_ind] : inds[process_ind + 1]],
"object_types": self.TARGET_TYPES,
"max_steps": self.MAX_STEPS,
"sensors": [
s
for s in self.SENSORS
if (include_expert_sensor or not isinstance(s, ExpertActionSensor))
],
"action_space": gym.spaces.Discrete(
len(ObjectNavTask.class_action_names())
),
"seed": seeds[process_ind] if seeds is not None else None,
"deterministic_cudnn": deterministic_cudnn,
"rewards_config": self.REWARD_CONFIG,
"adaptive_reward": self.ADAPTIVE_REWARD,
"env_args": {**self.env_args(), **device_dict},
}
def train_task_sampler_args(
self,
process_ind: int,
total_processes: int,
devices: Optional[List[int]] = None,
seeds: Optional[List[int]] = None,
deterministic_cudnn: bool = False,
) -> Dict[str, Any]:
res = self._get_sampler_args_for_scene_split(
scenes_dir=os.path.join(self.TRAIN_DATASET_DIR, "episodes"),
process_ind=process_ind,
total_processes=total_processes,
devices=devices,
seeds=seeds,
deterministic_cudnn=deterministic_cudnn,
allow_oversample=True,
)
res["scene_directory"] = self.TRAIN_DATASET_DIR
res["loop_dataset"] = True
res["allow_flipping"] = True
res["randomize_materials_in_training"] = self.randomize_train_materials
res['task_mode'] = 'Train'
return res
def valid_task_sampler_args(
self,
process_ind: int,
total_processes: int,
devices: Optional[List[int]] = None,
seeds: Optional[List[int]] = None,
deterministic_cudnn: bool = False,
) -> Dict[str, Any]:
res = self._get_sampler_args_for_scene_split(
scenes_dir=os.path.join(self.VAL_DATASET_DIR, "episodes"),
process_ind=process_ind,
total_processes=total_processes,
devices=devices,
seeds=seeds,
deterministic_cudnn=deterministic_cudnn,
            include_expert_sensor=True,  # only needed for Ask4Help (keep the expert sensor at evaluation)
allow_oversample=False,
)
res["scene_directory"] = self.VAL_DATASET_DIR
res["loop_dataset"] = False
res['task_mode'] = 'Valid'
return res
def test_task_sampler_args(
self,
process_ind: int,
total_processes: int,
devices: Optional[List[int]] = None,
seeds: Optional[List[int]] = None,
deterministic_cudnn: bool = False,
) -> Dict[str, Any]:
if self.test_on_validation or self.TEST_DATASET_DIR is None:
            if not self.test_on_validation:
                get_logger().warning(
                    "No test dataset dir detected, running test on validation set instead."
                    " Be careful as the saved metrics json and tensorboard files **will still be labeled as"
                    " 'test' rather than 'valid'**."
                )
            else:
                get_logger().warning(
                    "`test_on_validation` is set to `True` and thus we will run evaluation on the validation set instead."
                    " Be careful as the saved metrics json and tensorboard files **will still be labeled as"
                    " 'test' rather than 'valid'**."
                )
return self.valid_task_sampler_args(
process_ind=process_ind,
total_processes=total_processes,
devices=devices,
seeds=seeds,
deterministic_cudnn=deterministic_cudnn,
)
else:
res = self._get_sampler_args_for_scene_split(
scenes_dir=os.path.join(self.TEST_DATASET_DIR, "episodes"),
process_ind=process_ind,
total_processes=total_processes,
devices=devices,
seeds=seeds,
deterministic_cudnn=deterministic_cudnn,
                include_expert_sensor=True,  # only needed for Ask4Help (keep the expert sensor at evaluation)
allow_oversample=False,
)
res["env_args"]["all_metadata_available"] = False
res["rewards_config"] = {**res["rewards_config"], "shaping_weight": 0}
res["scene_directory"] = self.TEST_DATASET_DIR
res["loop_dataset"] = False
res['task_mode'] = 'Test'
return res
| ask4help-main | projects/objectnav_baselines/experiments/objectnav_thor_base.py |
from typing import Sequence, Union
import gym
import torch.nn as nn
from torchvision import models
from allenact.base_abstractions.preprocessor import Preprocessor
from allenact.embodiedai.preprocessors.resnet import ResNetPreprocessor
from allenact.embodiedai.sensors.vision_sensors import RGBSensor, DepthSensor
from allenact.utils.experiment_utils import Builder
from allenact_plugins.ithor_plugin.ithor_sensors import GoalObjectTypeThorSensor
from allenact_plugins.robothor_plugin.robothor_tasks import ObjectNavTask
from projects.objectnav_baselines.experiments.objectnav_base import ObjectNavBaseConfig
from projects.objectnav_baselines.experiments.robothor.objectnav_robothor_base import (
ObjectNavRoboThorBaseConfig,
)
from projects.objectnav_baselines.models.object_nav_models import (
ResnetTensorObjectNavActorCritic,
)
class ObjectNavMixInResNetGRUConfig(ObjectNavBaseConfig):
RESNET_TYPE: str
@classmethod
def preprocessors(cls) -> Sequence[Union[Preprocessor, Builder[Preprocessor]]]:
if not hasattr(cls, "RESNET_TYPE"):
raise RuntimeError(
"When subclassing `ObjectNavMixInResNetGRUConfig` we now expect that you have specified `RESNET_TYPE`"
" as a class variable of your subclass (e.g. `RESNET_TYPE = 'RN18'` to use a ResNet18 model)."
" Alternatively you can instead subclass `ObjectNavMixInResNet18GRUConfig` which does this"
" specification for you."
)
preprocessors = []
if cls.RESNET_TYPE in ["RN18", "RN34"]:
output_shape = (512, 7, 7)
elif cls.RESNET_TYPE in ["RN50", "RN101", "RN152"]:
output_shape = (2048, 7, 7)
else:
raise NotImplementedError(
f"`RESNET_TYPE` must be one 'RNx' with x equaling one of"
f" 18, 34, 50, 101, or 152."
)
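        # The (channels, 7, 7) shapes above are the final convolutional feature maps of
        # the torchvision ResNets at a 224x224 input: 512 channels for ResNet18/34 and
        # 2048 for ResNet50/101/152 (with `pool=False` the spatial grid is kept rather
        # than average-pooled).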
rgb_sensor = next((s for s in cls.SENSORS if isinstance(s, RGBSensor)), None)
if rgb_sensor is not None:
preprocessors.append(
ResNetPreprocessor(
input_height=cls.SCREEN_SIZE,
input_width=cls.SCREEN_SIZE,
output_width=output_shape[2],
output_height=output_shape[1],
output_dims=output_shape[0],
pool=False,
torchvision_resnet_model=getattr(
models, f"resnet{cls.RESNET_TYPE.replace('RN', '')}"
),
input_uuids=[rgb_sensor.uuid],
output_uuid="rgb_resnet_imagenet",
)
)
depth_sensor = next(
(s for s in cls.SENSORS if isinstance(s, DepthSensor)), None
)
if depth_sensor is not None:
preprocessors.append(
ResNetPreprocessor(
input_height=cls.SCREEN_SIZE,
input_width=cls.SCREEN_SIZE,
output_width=output_shape[2],
output_height=output_shape[1],
output_dims=output_shape[0],
pool=False,
torchvision_resnet_model=getattr(
models, f"resnet{cls.RESNET_TYPE.replace('RN', '')}"
),
input_uuids=[depth_sensor.uuid],
output_uuid="depth_resnet_imagenet",
)
)
return preprocessors
@classmethod
def create_model(cls, **kwargs) -> nn.Module:
has_rgb = any(isinstance(s, RGBSensor) for s in cls.SENSORS)
has_depth = any(isinstance(s, DepthSensor) for s in cls.SENSORS)
goal_sensor_uuid = next(
(s.uuid for s in cls.SENSORS if isinstance(s, GoalObjectTypeThorSensor)),
None,
)
return ResnetTensorObjectNavActorCritic(
action_space=gym.spaces.Discrete(len(ObjectNavTask.class_action_names())),
observation_space=kwargs["sensor_preprocessor_graph"].observation_spaces,
goal_sensor_uuid=goal_sensor_uuid,
rgb_resnet_preprocessor_uuid="rgb_resnet_imagenet" if has_rgb else None,
depth_resnet_preprocessor_uuid="depth_resnet_imagenet"
if has_depth
else None,
hidden_size=512,
goal_dims=32,
)
| ask4help-main | projects/objectnav_baselines/experiments/objectnav_mixin_resnetgru.py |
from abc import ABC
from typing import Optional, Sequence, Union
from allenact.base_abstractions.experiment_config import ExperimentConfig
from allenact.base_abstractions.preprocessor import Preprocessor
from allenact.base_abstractions.sensor import Sensor
from allenact.utils.experiment_utils import Builder
class ObjectNavBaseConfig(ExperimentConfig, ABC):
"""The base object navigation configuration file."""
STEP_SIZE = 0.25
ROTATION_DEGREES = 30.0
VISIBILITY_DISTANCE = 1.0
STOCHASTIC = True
HORIZONTAL_FIELD_OF_VIEW = 79
CAMERA_WIDTH = 400
CAMERA_HEIGHT = 300
SCREEN_SIZE = 224
MAX_STEPS = 500
ADVANCE_SCENE_ROLLOUT_PERIOD: Optional[int] = None
SENSORS: Sequence[Sensor] = []
def __init__(self):
self.REWARD_CONFIG = {
"step_penalty": -0.01,
"goal_success_reward": 10.0,
"failed_stop_reward": 0.0,
"shaping_weight": 1.0,
}
@classmethod
def preprocessors(cls) -> Sequence[Union[Preprocessor, Builder[Preprocessor]]]:
return tuple()
| ask4help-main | projects/objectnav_baselines/experiments/objectnav_base.py |
from typing import Dict, Tuple
import torch
import torch.optim as optim
from torch.optim.lr_scheduler import LambdaLR
from allenact.algorithms.onpolicy_sync.losses.abstract_loss import (
AbstractActorCriticLoss,
)
from allenact.algorithms.onpolicy_sync.losses import PPO
from allenact.algorithms.onpolicy_sync.losses.ppo import PPOConfig
from allenact.embodiedai.aux_losses.losses import (
MultiAuxTaskNegEntropyLoss,
InverseDynamicsLoss,
TemporalDistanceLoss,
CPCA1Loss,
CPCA2Loss,
CPCA4Loss,
CPCA8Loss,
CPCA16Loss,
FrequencyLoss,
)
# noinspection PyUnresolvedReferences
from allenact.embodiedai.models.fusion_models import (
AverageFusion,
SoftmaxFusion,
AttentiveFusion,
)
from allenact.utils.experiment_utils import (
Builder,
PipelineStage,
TrainingPipeline,
LinearDecay,
)
from projects.objectnav_baselines.experiments.objectnav_base import ObjectNavBaseConfig
class ObjectNavMixInPPOConfig(ObjectNavBaseConfig):
    # Selected auxiliary task uuids.
    # If all entries below are commented out, the config reduces to vanilla DD-PPO.
AUXILIARY_UUIDS = [
# InverseDynamicsLoss.UUID,
# TemporalDistanceLoss.UUID,
# CPCA1Loss.UUID,
# CPCA4Loss.UUID,
# CPCA8Loss.UUID,
# CPCA16Loss.UUID,
# FrequencyLoss.UUID
]
ADD_PREV_ACTIONS = False
MULTIPLE_BELIEFS = False
BELIEF_FUSION = ( # choose one
None
# AttentiveFusion.UUID
# AverageFusion.UUID
# SoftmaxFusion.UUID
)
def training_pipeline(self, **kwargs):
# PPO
ppo_steps = int(300000000)
lr = 3e-4
num_mini_batch = 1
update_repeats = 4
num_steps = 128
save_interval = 5000000
log_interval = 10000 if torch.cuda.is_available() else 1
gamma = 0.99
use_gae = True
gae_lambda = 0.95
max_grad_norm = 0.5
named_losses = {"ppo_loss": (PPO(**PPOConfig), 1.0)}
named_losses = self._update_with_auxiliary_losses(named_losses)
return TrainingPipeline(
save_interval=save_interval,
metric_accumulate_interval=log_interval,
optimizer_builder=Builder(optim.Adam, dict(lr=lr)),
num_mini_batch=num_mini_batch,
update_repeats=update_repeats,
max_grad_norm=max_grad_norm,
num_steps=num_steps,
named_losses={key: val[0] for key, val in named_losses.items()},
gamma=gamma,
use_gae=use_gae,
gae_lambda=gae_lambda,
advance_scene_rollout_period=self.ADVANCE_SCENE_ROLLOUT_PERIOD,
pipeline_stages=[
PipelineStage(
loss_names=list(named_losses.keys()),
max_stage_steps=ppo_steps,
loss_weights=[val[1] for val in named_losses.values()],
)
],
lr_scheduler_builder=Builder(
LambdaLR, {"lr_lambda": LinearDecay(steps=ppo_steps)}
),
)
@classmethod
def _update_with_auxiliary_losses(cls, named_losses):
        # auxiliary losses
aux_loss_total_weight = 2.0
# Total losses
        total_aux_losses: Dict[str, Tuple[AbstractActorCriticLoss, float]] = {
            InverseDynamicsLoss.UUID: (
                InverseDynamicsLoss(
                    subsample_rate=0.2, subsample_min_num=10,  # TODO: test its effects
                ),
                0.05 * aux_loss_total_weight,  # i.e. scaled by aux_loss_total_weight (= 2)
            ),
            TemporalDistanceLoss.UUID: (
                TemporalDistanceLoss(
                    num_pairs=8, epsiode_len_min=5,  # TODO: test its effects
                ),
                0.2 * aux_loss_total_weight,  # i.e. scaled by aux_loss_total_weight (= 2)
            ),
            CPCA1Loss.UUID: (
                CPCA1Loss(subsample_rate=0.2,),  # TODO: test its effects
                0.05 * aux_loss_total_weight,  # i.e. scaled by aux_loss_total_weight (= 2)
            ),
            CPCA2Loss.UUID: (
                CPCA2Loss(subsample_rate=0.2,),  # TODO: test its effects
                0.05 * aux_loss_total_weight,  # i.e. scaled by aux_loss_total_weight (= 2)
            ),
            CPCA4Loss.UUID: (
                CPCA4Loss(subsample_rate=0.2,),  # TODO: test its effects
                0.05 * aux_loss_total_weight,  # i.e. scaled by aux_loss_total_weight (= 2)
            ),
            CPCA8Loss.UUID: (
                CPCA8Loss(subsample_rate=0.2,),  # TODO: test its effects
                0.05 * aux_loss_total_weight,  # i.e. scaled by aux_loss_total_weight (= 2)
            ),
            CPCA16Loss.UUID: (
                CPCA16Loss(subsample_rate=0.2,),  # TODO: test its effects
                0.05 * aux_loss_total_weight,  # i.e. scaled by aux_loss_total_weight (= 2)
            ),
            FrequencyLoss.UUID: (
                FrequencyLoss(),
                0.05 * aux_loss_total_weight,
            ),
        }
named_losses.update(
{uuid: total_aux_losses[uuid] for uuid in cls.AUXILIARY_UUIDS}
)
if cls.MULTIPLE_BELIEFS: # add weight entropy loss automatically
named_losses[MultiAuxTaskNegEntropyLoss.UUID] = (
MultiAuxTaskNegEntropyLoss(cls.AUXILIARY_UUIDS),
0.01,
)
return named_losses
| ask4help-main | projects/objectnav_baselines/experiments/objectnav_mixin_ddppo.py |
import torch
import torch.optim as optim
from torch.optim.lr_scheduler import LambdaLR
from allenact.algorithms.onpolicy_sync.losses.imitation import Imitation
from allenact.utils.experiment_utils import (
Builder,
PipelineStage,
TrainingPipeline,
LinearDecay,
)
from projects.objectnav_baselines.experiments.objectnav_base import ObjectNavBaseConfig
class ObjectNavMixInDAggerConfig(ObjectNavBaseConfig):
"""An Object Navigation experiment configuration in RoboThor with RGB
input."""
def __init__(self):
super().__init__()
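        # Training below uses only the imitation loss, so reward shaping is disabled
        # via the shaping_weight override that follows.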
self.REWARD_CONFIG["shaping_weight"] = 0
def training_pipeline(self, **kwargs):
training_steps = int(300000000)
tf_steps = int(5e6)
anneal_steps = int(5e6)
il_no_tf_steps = training_steps - tf_steps - anneal_steps
assert il_no_tf_steps > 0
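        # Resulting schedule: 5M steps of full teacher forcing, 5M steps over which the
        # teacher-forcing probability anneals linearly from 1.0 to 0.0, and ~290M further
        # steps of imitation without teacher forcing.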
lr = 3e-4
num_mini_batch = 1
update_repeats = 4
num_steps = 128
save_interval = 5000000
log_interval = 10000 if torch.cuda.is_available() else 1
gamma = 0.99
use_gae = True
gae_lambda = 0.95
max_grad_norm = 0.5
return TrainingPipeline(
save_interval=save_interval,
metric_accumulate_interval=log_interval,
optimizer_builder=Builder(optim.Adam, dict(lr=lr)),
num_mini_batch=num_mini_batch,
update_repeats=update_repeats,
max_grad_norm=max_grad_norm,
num_steps=num_steps,
named_losses={"imitation_loss": Imitation(),},
gamma=gamma,
use_gae=use_gae,
gae_lambda=gae_lambda,
advance_scene_rollout_period=self.ADVANCE_SCENE_ROLLOUT_PERIOD,
pipeline_stages=[
PipelineStage(
loss_names=["imitation_loss"],
max_stage_steps=tf_steps,
teacher_forcing=LinearDecay(startp=1.0, endp=1.0, steps=tf_steps,),
),
PipelineStage(
loss_names=["imitation_loss"],
max_stage_steps=anneal_steps + il_no_tf_steps,
teacher_forcing=LinearDecay(
startp=1.0, endp=0.0, steps=anneal_steps,
),
),
],
lr_scheduler_builder=Builder(
LambdaLR, {"lr_lambda": LinearDecay(steps=training_steps)},
),
)
| ask4help-main | projects/objectnav_baselines/experiments/objectnav_mixin_dagger.py |
from allenact_plugins.ithor_plugin.ithor_sensors import (
RGBSensorThor,
GoalObjectTypeThorSensor,
)
from projects.objectnav_baselines.experiments.objectnav_mixin_ddppo import (
ObjectNavMixInPPOConfig,
)
from projects.objectnav_baselines.experiments.objectnav_mixin_resnetgru import (
ObjectNavMixInResNetGRUConfig,
)
from projects.objectnav_baselines.experiments.robothor.objectnav_robothor_base import (
ObjectNavRoboThorBaseConfig,
)
class ObjectNavRoboThorRGBPPOExperimentConfig(
ObjectNavRoboThorBaseConfig, ObjectNavMixInPPOConfig, ObjectNavMixInResNetGRUConfig,
):
"""An Object Navigation experiment configuration in RoboThor with RGB
input."""
RESNET_TYPE = "RN50"
SENSORS = [
RGBSensorThor(
height=ObjectNavRoboThorBaseConfig.SCREEN_SIZE,
width=ObjectNavRoboThorBaseConfig.SCREEN_SIZE,
use_resnet_normalization=True,
uuid="rgb_lowres",
),
GoalObjectTypeThorSensor(
object_types=ObjectNavRoboThorBaseConfig.TARGET_TYPES,
),
]
@classmethod
def tag(cls):
return "Objectnav-RoboTHOR-RGB-ResNet50GRU-DDPPO"
| ask4help-main | projects/objectnav_baselines/experiments/robothor/objectnav_robothor_rgb_resnet50gru_ddppo.py |
import os
from abc import ABC
from typing import Optional, List, Any, Dict
import torch
from allenact.utils.misc_utils import prepare_locals_for_super
from projects.objectnav_baselines.experiments.objectnav_thor_base import (
ObjectNavThorBaseConfig,
)
class ObjectNavRoboThorBaseConfig(ObjectNavThorBaseConfig, ABC):
"""The base config for all RoboTHOR ObjectNav experiments."""
THOR_COMMIT_ID = "bad5bc2b250615cb766ffb45d455c211329af17e"
THOR_COMMIT_ID_FOR_RAND_MATERIALS = "9549791ce2e7f472063a10abb1fb7664159fec23"
AGENT_MODE = "locobot"
DEFAULT_NUM_TRAIN_PROCESSES = 60 if torch.cuda.is_available() else 1
TRAIN_DATASET_DIR = os.path.join(os.getcwd(), "datasets/robothor-objectnav/train_0.25")
VAL_DATASET_DIR = os.path.join(os.getcwd(), "datasets/robothor-objectnav/val")
# TEST_DATASET_DIR = os.path.join(os.getcwd(), "datasets/robothor-objectnav/test")
TARGET_TYPES = tuple(
sorted(
[
"AlarmClock",
"Apple",
"BaseballBat",
"BasketBall",
"Bowl",
"GarbageCan",
"HousePlant",
"Laptop",
"Mug",
"SprayBottle",
"Television",
"Vase",
]
)
)
def train_task_sampler_args(
self,
process_ind: int,
total_processes: int,
devices: Optional[List[int]] = None,
seeds: Optional[List[int]] = None,
deterministic_cudnn: bool = False,
) -> Dict[str, Any]:
kwargs = super(ObjectNavRoboThorBaseConfig, self).train_task_sampler_args(
**prepare_locals_for_super(locals())
)
if self.randomize_train_materials:
kwargs["env_args"]["commit_id"] = self.THOR_COMMIT_ID_FOR_RAND_MATERIALS
return kwargs
| ask4help-main | projects/objectnav_baselines/experiments/robothor/objectnav_robothor_base.py |
from allenact_plugins.ithor_plugin.ithor_sensors import (
RGBSensorThor,
GoalObjectTypeThorSensor,
)
from projects.objectnav_baselines.experiments.objectnav_mixin_ddppo import (
ObjectNavMixInPPOConfig,
)
from projects.objectnav_baselines.experiments.objectnav_mixin_resnet18gru import (
ObjectNavMixInResNet18GRUConfig,
)
from projects.objectnav_baselines.experiments.robothor.objectnav_robothor_base import (
ObjectNavRoboThorBaseConfig,
)
class ObjectNavRoboThorRGBPPOExperimentConfig(
ObjectNavRoboThorBaseConfig,
ObjectNavMixInPPOConfig,
ObjectNavMixInResNet18GRUConfig,
):
"""An Object Navigation experiment configuration in RoboThor with RGB
input."""
SENSORS = [
RGBSensorThor(
height=ObjectNavRoboThorBaseConfig.SCREEN_SIZE,
width=ObjectNavRoboThorBaseConfig.SCREEN_SIZE,
use_resnet_normalization=True,
uuid="rgb_lowres",
),
GoalObjectTypeThorSensor(
object_types=ObjectNavRoboThorBaseConfig.TARGET_TYPES,
),
]
@classmethod
def tag(cls):
return "Objectnav-RoboTHOR-RGB-ResNetGRU-DDPPO"
| ask4help-main | projects/objectnav_baselines/experiments/robothor/objectnav_robothor_rgb_resnetgru_ddppo.py |
from allenact.base_abstractions.sensor import ExpertActionSensor
from allenact_plugins.ithor_plugin.ithor_sensors import (
RGBSensorThor,
GoalObjectTypeThorSensor,
)
from allenact_plugins.robothor_plugin.robothor_tasks import ObjectNavTask
from projects.objectnav_baselines.experiments.objectnav_mixin_dagger import (
ObjectNavMixInDAggerConfig,
)
from projects.objectnav_baselines.experiments.objectnav_mixin_resnet18gru import (
ObjectNavMixInResNet18GRUConfig,
)
from projects.objectnav_baselines.experiments.robothor.objectnav_robothor_base import (
ObjectNavRoboThorBaseConfig,
)
class ObjectNaviThorRGBDAggerExperimentConfig(
ObjectNavRoboThorBaseConfig,
ObjectNavMixInDAggerConfig,
ObjectNavMixInResNet18GRUConfig,
):
"""An Object Navigation experiment configuration in RoboThor with RGB
input."""
SENSORS = [
RGBSensorThor(
height=ObjectNavRoboThorBaseConfig.SCREEN_SIZE,
width=ObjectNavRoboThorBaseConfig.SCREEN_SIZE,
use_resnet_normalization=True,
uuid="rgb_lowres",
),
GoalObjectTypeThorSensor(
object_types=ObjectNavRoboThorBaseConfig.TARGET_TYPES,
),
ExpertActionSensor(nactions=len(ObjectNavTask.class_action_names()),),
]
@classmethod
def tag(cls):
return "Objectnav-RoboTHOR-RGB-ResNetGRU-DAgger"
| ask4help-main | projects/objectnav_baselines/experiments/robothor/objectnav_robothor_rgb_resnetgru_dagger.py |
| ask4help-main | projects/objectnav_baselines/experiments/robothor/__init__.py |
from allenact_plugins.ithor_plugin.ithor_sensors import (
RGBSensorThor,
GoalObjectTypeThorSensor,
)
from allenact_plugins.robothor_plugin.robothor_sensors import DepthSensorThor
from projects.objectnav_baselines.experiments.objectnav_mixin_ddppo import (
ObjectNavMixInPPOConfig,
)
from projects.objectnav_baselines.experiments.objectnav_mixin_resnet18gru import (
ObjectNavMixInResNet18GRUConfig,
)
from projects.objectnav_baselines.experiments.robothor.objectnav_robothor_base import (
ObjectNavRoboThorBaseConfig,
)
class ObjectNavRoboThorRGBPPOExperimentConfig(
ObjectNavRoboThorBaseConfig,
ObjectNavMixInPPOConfig,
ObjectNavMixInResNet18GRUConfig,
):
"""An Object Navigation experiment configuration in RoboThor with RGBD
input."""
SENSORS = [
RGBSensorThor(
height=ObjectNavRoboThorBaseConfig.SCREEN_SIZE,
width=ObjectNavRoboThorBaseConfig.SCREEN_SIZE,
use_resnet_normalization=True,
uuid="rgb_lowres",
),
DepthSensorThor(
height=ObjectNavRoboThorBaseConfig.SCREEN_SIZE,
width=ObjectNavRoboThorBaseConfig.SCREEN_SIZE,
use_normalization=True,
uuid="depth_lowres",
),
GoalObjectTypeThorSensor(
object_types=ObjectNavRoboThorBaseConfig.TARGET_TYPES,
),
]
@classmethod
def tag(cls):
return "Objectnav-RoboTHOR-RGBD-ResNetGRU-DDPPO"
| ask4help-main | projects/objectnav_baselines/experiments/robothor/objectnav_robothor_rgbd_resnetgru_ddppo.py |
from allenact_plugins.ithor_plugin.ithor_sensors import (
RGBSensorThor,
GoalObjectTypeThorSensor,
)
from projects.objectnav_baselines.experiments.objectnav_mixin_resnet18gru import (
ObjectNavMixInResNet18GRUConfig,
)
from projects.objectnav_baselines.experiments.objectnav_thor_mixin_ddppo_and_gbc import (
ObjectNavThorMixInPPOAndGBCConfig,
)
from projects.objectnav_baselines.experiments.robothor.objectnav_robothor_base import (
ObjectNavRoboThorBaseConfig,
)
class ObjectNaviThorRGBPPOExperimentConfig(
ObjectNavRoboThorBaseConfig,
ObjectNavThorMixInPPOAndGBCConfig,
ObjectNavMixInResNet18GRUConfig,
):
"""An Object Navigation experiment configuration in RoboThor with RGB
input."""
SENSORS = ObjectNavThorMixInPPOAndGBCConfig.SENSORS + ( # type:ignore
RGBSensorThor(
height=ObjectNavRoboThorBaseConfig.SCREEN_SIZE,
width=ObjectNavRoboThorBaseConfig.SCREEN_SIZE,
use_resnet_normalization=True,
uuid="rgb_lowres",
),
GoalObjectTypeThorSensor(
object_types=ObjectNavRoboThorBaseConfig.TARGET_TYPES,
),
)
@classmethod
def tag(cls):
return "Objectnav-RoboTHOR-RGB-ResNetGRU-DDPPOAndGBC"
| ask4help-main | projects/objectnav_baselines/experiments/robothor/objectnav_robothor_rgb_resnetgru_ddppo_and_gbc.py |
from allenact_plugins.ithor_plugin.ithor_sensors import (
RGBSensorThor,
GoalObjectTypeThorSensor,
)
from projects.objectnav_baselines.experiments.objectnav_mixin_ddppo import (
ObjectNavMixInPPOConfig,
)
from projects.objectnav_baselines.experiments.objectnav_mixin_unfrozenresnet_gru import (
ObjectNavMixInUnfrozenResNetGRUConfig,
)
from projects.objectnav_baselines.experiments.robothor.objectnav_robothor_base import (
ObjectNavRoboThorBaseConfig,
)
class ObjectNavRoboThorRGBPPOExperimentConfig(
ObjectNavRoboThorBaseConfig,
ObjectNavMixInPPOConfig,
ObjectNavMixInUnfrozenResNetGRUConfig,
):
"""An Object Navigation experiment configuration in RoboThor with RGB input
without preprocessing by frozen ResNet (instead, a trainable ResNet)."""
SENSORS = [
RGBSensorThor(
height=ObjectNavRoboThorBaseConfig.SCREEN_SIZE,
width=ObjectNavRoboThorBaseConfig.SCREEN_SIZE,
use_resnet_normalization=True,
uuid="rgb_lowres",
),
GoalObjectTypeThorSensor(
object_types=ObjectNavRoboThorBaseConfig.TARGET_TYPES,
),
]
@classmethod
def tag(cls):
return "Objectnav-RoboTHOR-RGB-UnfrozenResNet-GRU-DDPPO"
| ask4help-main | projects/objectnav_baselines/experiments/robothor/objectnav_robothor_rgb_unfrozenresnet_gru_ddppo.py |
from allenact_plugins.ithor_plugin.ithor_sensors import GoalObjectTypeThorSensor
from allenact_plugins.robothor_plugin.robothor_sensors import DepthSensorThor
from projects.objectnav_baselines.experiments.objectnav_mixin_ddppo import (
ObjectNavMixInPPOConfig,
)
from projects.objectnav_baselines.experiments.objectnav_mixin_resnet18gru import (
ObjectNavMixInResNet18GRUConfig,
)
from projects.objectnav_baselines.experiments.robothor.objectnav_robothor_base import (
ObjectNavRoboThorBaseConfig,
)
class ObjectNavRoboThorRGBPPOExperimentConfig(
ObjectNavRoboThorBaseConfig,
ObjectNavMixInPPOConfig,
ObjectNavMixInResNet18GRUConfig,
):
"""An Object Navigation experiment configuration in RoboThor with Depth
input."""
SENSORS = (
DepthSensorThor(
height=ObjectNavRoboThorBaseConfig.SCREEN_SIZE,
width=ObjectNavRoboThorBaseConfig.SCREEN_SIZE,
use_normalization=True,
uuid="depth_lowres",
),
GoalObjectTypeThorSensor(
object_types=ObjectNavRoboThorBaseConfig.TARGET_TYPES,
),
)
@classmethod
def tag(cls):
return "Objectnav-RoboTHOR-Depth-ResNetGRU-DDPPO"
| ask4help-main | projects/objectnav_baselines/experiments/robothor/objectnav_robothor_depth_resnetgru_ddppo.py |
import math
from typing import Optional, Sequence
import torch
import torch.optim as optim
from torch.optim.lr_scheduler import LambdaLR
from allenact.algorithms.onpolicy_sync.losses import PPO
from allenact.algorithms.onpolicy_sync.losses.ppo import PPOConfig
from projects.objectnav_baselines.experiments.robothor.clip.objectnav_robothor_zeroshot_rgb_clipgru_ddppo import (
ObjectNavRoboThorClipRGBPPOExperimentConfig as BaseConfig,
)
from allenact.utils.experiment_utils import (
Builder,
LinearDecay,
MultiLinearDecay,
TrainingPipeline,
PipelineStage,
)
class DistributedObjectNavRoboThorRGBPPOExperimentConfig(BaseConfig):
def tag(self) -> str:
return "ObjectNavRoboThorRGBPPO_CLIP_zeroshot"
THOR_COMMIT_ID = "91139c909576f3bf95a187c5b02c6fd455d06b48"
THOR_IS_HEADLESS = True
def env_args(self):
res = super().env_args()
res.pop("commit_id", None)
return res
# %%
def __init__(
self,
distributed_nodes: int = 1,
num_train_processes: Optional[int] = None,
train_gpu_ids: Optional[Sequence[int]] = None,
val_gpu_ids: Optional[Sequence[int]] = None,
test_gpu_ids: Optional[Sequence[int]] = None,
):
super().__init__(num_train_processes, train_gpu_ids, val_gpu_ids, test_gpu_ids)
self.distributed_nodes = distributed_nodes
# %%
def machine_params(self, mode="train", **kwargs):
params = super().machine_params(mode, **kwargs)
if mode == "train":
params.devices = params.devices * self.distributed_nodes
params.nprocesses = params.nprocesses * self.distributed_nodes
params.sampler_devices = params.sampler_devices * self.distributed_nodes
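            # Illustrative note (numbers assumed, not taken from this config): with 8
            # local GPUs and 4 nodes, these lists are simply replicated to length 32;
            # each node then selects its own slice via its machine_id below.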
if "machine_id" in kwargs:
machine_id = kwargs["machine_id"]
assert (
0 <= machine_id < self.distributed_nodes
), f"machine_id {machine_id} out of range [0, {self.distributed_nodes - 1}]"
local_worker_ids = list(
range(
len(self.train_gpu_ids) * machine_id,
len(self.train_gpu_ids) * (machine_id + 1),
)
)
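                # E.g. (illustrative): with 8 train GPUs per node, machine_id=0 owns
                # worker ids 0..7 and machine_id=1 owns worker ids 8..15.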
params.set_local_worker_ids(local_worker_ids)
# Confirm we're setting up train params nicely:
print(
f"devices {params.devices}"
f"\nnprocesses {params.nprocesses}"
f"\nsampler_devices {params.sampler_devices}"
f"\nlocal_worker_ids {params.local_worker_ids}"
)
elif mode == "valid":
# Use all GPUs at their maximum capacity for training
            # (you may run validation on a separate machine)
params.nprocesses = (0,)
return params
# %%
@staticmethod
def lr_scheduler(small_batch_steps, transition_steps, ppo_steps, lr_scaling):
safe_small_batch_steps = int(small_batch_steps * 1.02)
large_batch_and_lr_steps = ppo_steps - safe_small_batch_steps - transition_steps
# Learning rate after small batch steps (assuming decay to 0)
break1 = 1.0 - safe_small_batch_steps / ppo_steps
# Initial learning rate for large batch (after transition from initial to large learning rate)
break2 = lr_scaling * (
1.0 - (safe_small_batch_steps + transition_steps) / ppo_steps
)
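        # Worked example (illustrative values): with ppo_steps=300e6,
        # small_batch_steps=30e6 (so safe_small_batch_steps=30.6e6), 3 nodes
        # (transition_steps=2e6) and lr_scaling=sqrt(3)~1.73, we get
        # break1 = 1 - 30.6/300 ~ 0.898 and break2 ~ 1.73 * (1 - 32.6/300) ~ 1.54,
        # i.e. the lr briefly rises above its base value before decaying to 0.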
return MultiLinearDecay(
[
# Base learning rate phase for small batch (with linear decay towards 0)
LinearDecay(steps=safe_small_batch_steps, startp=1.0, endp=break1,),
# Allow the optimizer to adapt its statistics to the changes with a larger learning rate
LinearDecay(steps=transition_steps, startp=break1, endp=break2,),
# Scaled learning rate phase for large batch (with linear decay towards 0)
LinearDecay(steps=large_batch_and_lr_steps, startp=break2, endp=0,),
]
)
# %%
def training_pipeline(self, **kwargs):
# These params are identical to the baseline configuration for 60 samplers (1 machine)
ppo_steps = int(300e6)
lr = 3e-4
num_mini_batch = 1
update_repeats = 4
num_steps = 128
save_interval = 5000000
log_interval = 10000 if torch.cuda.is_available() else 1
gamma = 0.99
use_gae = True
gae_lambda = 0.95
max_grad_norm = 0.5
# We add 30 million steps for small batch learning
small_batch_steps = int(30e6)
        # And a short transition phase towards the larger learning rate
        # (see the comment in the `lr_scheduler` helper method)
transition_steps = int(2 / 3 * self.distributed_nodes * 1e6)
# Find exact number of samplers per GPU
assert (
self.num_train_processes % len(self.train_gpu_ids) == 0
), "Expected uniform number of samplers per GPU"
samplers_per_gpu = self.num_train_processes // len(self.train_gpu_ids)
# Multiply num_mini_batch by the largest divisor of
        # samplers_per_gpu to keep all batches of the same size:
num_mini_batch_multiplier = [
i
for i in reversed(
range(1, min(samplers_per_gpu // 2, self.distributed_nodes) + 1)
)
if samplers_per_gpu % i == 0
][0]
# Multiply update_repeats so that the product of this factor and
# num_mini_batch_multiplier is >= self.distributed_nodes:
update_repeats_multiplier = int(
math.ceil(self.distributed_nodes / num_mini_batch_multiplier)
)
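        # Worked example (illustrative, assuming 20 samplers over 4 GPUs and 3 nodes):
        # samplers_per_gpu = 5; the largest divisor of 5 within [1, min(2, 3)] is 1,
        # so num_mini_batch_multiplier = 1 and update_repeats_multiplier = ceil(3 / 1) = 3,
        # giving 1 mini-batch and 4 * 3 = 12 update repeats during the first stage.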
return TrainingPipeline(
save_interval=save_interval,
metric_accumulate_interval=log_interval,
optimizer_builder=Builder(optim.Adam, dict(lr=lr)),
num_mini_batch=num_mini_batch,
update_repeats=update_repeats,
max_grad_norm=max_grad_norm,
num_steps=num_steps,
named_losses={"ppo_loss": PPO(**PPOConfig, show_ratios=False)},
gamma=gamma,
use_gae=use_gae,
gae_lambda=gae_lambda,
advance_scene_rollout_period=self.ADVANCE_SCENE_ROLLOUT_PERIOD,
pipeline_stages=[
# We increase the number of batches for the first stage to reach an
# equivalent number of updates per collected rollout data as in the
# 1 node/60 samplers setting
PipelineStage(
loss_names=["ppo_loss"],
max_stage_steps=small_batch_steps,
num_mini_batch=num_mini_batch * num_mini_batch_multiplier,
update_repeats=update_repeats * update_repeats_multiplier,
),
                # Then we proceed with the base configuration (leading to larger
# batches due to the increased number of samplers)
PipelineStage(
loss_names=["ppo_loss"],
max_stage_steps=ppo_steps - small_batch_steps,
),
],
# We use the MultiLinearDecay curve defined by the helper function,
# setting the learning rate scaling as the square root of the number
            # of nodes. Linear scaling might also work, but we leave that
# check to the reader.
lr_scheduler_builder=Builder(
LambdaLR,
{
"lr_lambda": self.lr_scheduler(
small_batch_steps=small_batch_steps,
transition_steps=transition_steps,
ppo_steps=ppo_steps,
lr_scaling=math.sqrt(self.distributed_nodes),
)
},
),
)
| ask4help-main | projects/objectnav_baselines/experiments/robothor/clip/objectnav_robothor_distr.py |
from allenact_plugins.clip_plugin.clip_preprocessors import ClipResNetPreprocessor
from allenact_plugins.ithor_plugin.ithor_sensors import (
RGBSensorThor,
GoalObjectTypeThorSensor,
)
from allenact.base_abstractions.sensor import ExpertActionSensor
from projects.objectnav_baselines.experiments.clip.objectnav_mixin_clipresnetgru import (
ObjectNavMixInClipResNetGRUConfig,
)
from projects.objectnav_baselines.experiments.objectnav_mixin_ddppo import (
ObjectNavMixInPPOConfig,
)
from allenact_plugins.robothor_plugin.robothor_tasks import ObjectNavTask
from projects.objectnav_baselines.experiments.robothor.objectnav_robothor_base import (
ObjectNavRoboThorBaseConfig,
)
from allenact.algorithms.onpolicy_sync.losses import PPO
from allenact.algorithms.onpolicy_sync.losses.ppo import PPOConfig
from allenact.utils.experiment_utils import (
Builder,
PipelineStage,
TrainingPipeline,
LinearDecay,
)
import torch
import torch.optim as optim
from torch.optim.lr_scheduler import LambdaLR
import gym
import torch.nn as nn
from allenact.embodiedai.sensors.vision_sensors import RGBSensor, DepthSensor
from projects.objectnav_baselines.models.object_nav_models import (
ResnetTensorObjectNavActorCritic,
)
class ObjectNavRoboThorClipRGBPPOExperimentConfig(
ObjectNavRoboThorBaseConfig,
ObjectNavMixInPPOConfig,
ObjectNavMixInClipResNetGRUConfig,
):
"""An Object Navigation experiment configuration in RoboThor with RGB
input."""
CLIP_MODEL_TYPE = "RN50"
SENSORS = [
RGBSensorThor(
height=ObjectNavRoboThorBaseConfig.SCREEN_SIZE,
width=ObjectNavRoboThorBaseConfig.SCREEN_SIZE,
use_resnet_normalization=True,
uuid="rgb_lowres",
mean=ClipResNetPreprocessor.CLIP_RGB_MEANS,
stdev=ClipResNetPreprocessor.CLIP_RGB_STDS,
),
GoalObjectTypeThorSensor(
object_types=ObjectNavRoboThorBaseConfig.TARGET_TYPES,
),
ExpertActionSensor(nactions=len(ObjectNavTask.class_action_names()), ),
]
def __init__(self):
super().__init__()
# self.REWARD_CONFIG = {
# "step_penalty": -0.01,
# "goal_success_reward": 10.0,
# "failed_stop_reward": -8.0, ##double this maybe? ##change this
# "shaping_weight": 1.0,
# "penalty_for_ask": -0.3,
# }
self.REWARD_CONFIG = {
"step_penalty": -0.01,
"goal_success_reward": 0.00,
"failed_stop_reward": -10.00,
"shaping_weight": 0.00,
"penalty_for_init_ask": -1.00, ##decreasing this as well
"penalty_for_ask_recurring": -0.0,##removing recurring cost
"penalty_for_step_ask": -0.0,
}
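        # Rough illustration (assuming step_penalty accrues on every step): a 100-step
        # episode that asks for help once collects 100 * (-0.01) + (-1.0) = -2.0,
        # plus an extra -10.0 if it terminates without reaching the goal.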
@classmethod
def tag(cls):
return "Objectnav-RoboTHOR-RGB-ClipResNet50GRU-FINETUNE-DDPPO"
def training_pipeline(self, **kwargs):
# PPO
ppo_steps = int(15000000)
lr = 3e-4
num_mini_batch = 1
update_repeats = 4
num_steps = 128//2
save_interval = 2500000
log_interval = 10000 if torch.cuda.is_available() else 1
gamma = 0.99
use_gae = True
gae_lambda = 0.95
max_grad_norm = 0.5
named_losses = {"ppo_loss": (PPO(**PPOConfig), 1.0)}
named_losses = self._update_with_auxiliary_losses(named_losses)
return TrainingPipeline(
save_interval=save_interval,
metric_accumulate_interval=log_interval,
optimizer_builder=Builder(optim.Adam, dict(lr=lr)),
num_mini_batch=num_mini_batch,
update_repeats=update_repeats,
max_grad_norm=max_grad_norm,
num_steps=num_steps,
named_losses={key: val[0] for key, val in named_losses.items()},
gamma=gamma,
use_gae=use_gae,
gae_lambda=gae_lambda,
advance_scene_rollout_period=self.ADVANCE_SCENE_ROLLOUT_PERIOD,
pipeline_stages=[
PipelineStage(
loss_names=list(named_losses.keys()),
max_stage_steps=ppo_steps,
loss_weights=[val[1] for val in named_losses.values()],
)
],
lr_scheduler_builder=Builder(
LambdaLR, {"lr_lambda": LinearDecay(steps=ppo_steps)}
),
)
@classmethod
def create_model(cls, **kwargs) -> nn.Module:
has_rgb = any(isinstance(s, RGBSensor) for s in cls.SENSORS)
has_depth = any(isinstance(s, DepthSensor) for s in cls.SENSORS)
goal_sensor_uuid = next(
(s.uuid for s in cls.SENSORS if isinstance(s, GoalObjectTypeThorSensor)),
None,
)
end_action_in_ask = True
if end_action_in_ask:
action_space = gym.spaces.Dict({"nav_action": gym.spaces.Discrete(len(ObjectNavTask.class_action_names())),
"ask_action": gym.spaces.Discrete(4)}) ## 3 means take END action, 2 means stop asking, 1 means start asking, 0 means do nothing
else:
action_space = gym.spaces.Dict({"nav_action": gym.spaces.Discrete(len(ObjectNavTask.class_action_names())),
"ask_action": gym.spaces.Discrete(3)})
# 2 means stop asking, 1 means start asking, 0 means do nothing
return ResnetTensorObjectNavActorCritic(
action_space=action_space, # gym.spaces.Discrete(len(ObjectNavTask.class_action_names())),
observation_space=kwargs["sensor_preprocessor_graph"].observation_spaces,
goal_sensor_uuid=goal_sensor_uuid,
rgb_resnet_preprocessor_uuid="rgb_clip_resnet" if has_rgb else None,
depth_resnet_preprocessor_uuid="depth_clip_resnet" if has_depth else None,
hidden_size=512,
goal_dims=32,
is_finetuned=True,
end_action_in_ask=end_action_in_ask,
)
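# Minimal usage sketch (illustrative only, not part of the original file). These
# methods are normally driven by the AllenAct runner, but can be exercised directly:
#   cfg = ObjectNavRoboThorClipRGBPPOExperimentConfig()
#   pipeline = cfg.training_pipeline()  # single PPO stage over 15M steps (see above)
#   # create_model additionally requires a built sensor preprocessor graph:
#   # model = cfg.create_model(sensor_preprocessor_graph=graph)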
| ask4help-main | projects/objectnav_baselines/experiments/robothor/clip/objectnav_robothor_rgb_clipresnet50gru_finetune_ask_end_ddppo.py |
from allenact_plugins.clip_plugin.clip_preprocessors import ClipResNetPreprocessor
from allenact_plugins.ithor_plugin.ithor_sensors import (
RGBSensorThor,
GoalObjectTypeThorSensor,
)
from allenact_plugins.robothor_plugin.robothor_sensors import (RewardConfigSensor)
from allenact.base_abstractions.sensor import ExpertActionSensor
from projects.objectnav_baselines.experiments.clip.objectnav_mixin_clipresnetgru import (
ObjectNavMixInClipResNetGRUConfig,
)
from projects.objectnav_baselines.experiments.objectnav_mixin_ddppo import (
ObjectNavMixInPPOConfig,
)
from allenact_plugins.robothor_plugin.robothor_tasks import ObjectNavTask
from projects.objectnav_baselines.experiments.robothor.objectnav_robothor_base import (
ObjectNavRoboThorBaseConfig,
)
from allenact.algorithms.onpolicy_sync.losses import PPO
from allenact.algorithms.onpolicy_sync.losses.ppo import PPOConfig
from allenact.utils.experiment_utils import (
Builder,
PipelineStage,
TrainingPipeline,
LinearDecay,
)
import torch
import torch.optim as optim
from torch.optim.lr_scheduler import LambdaLR
import gym
import torch.nn as nn
from allenact.embodiedai.sensors.vision_sensors import RGBSensor, DepthSensor
from projects.objectnav_baselines.models.object_nav_models import (
ResnetTensorObjectNavActorCritic,
)
class ObjectNavRoboThorClipRGBPPOExperimentConfig(
ObjectNavRoboThorBaseConfig,
ObjectNavMixInPPOConfig,
ObjectNavMixInClipResNetGRUConfig,
):
"""An Object Navigation experiment configuration in RoboThor with RGB
input."""
CLIP_MODEL_TYPE = "RN50"
SENSORS = [
RGBSensorThor(
height=ObjectNavRoboThorBaseConfig.SCREEN_SIZE,
width=ObjectNavRoboThorBaseConfig.SCREEN_SIZE,
use_resnet_normalization=True,
uuid="rgb_lowres",
mean=ClipResNetPreprocessor.CLIP_RGB_MEANS,
stdev=ClipResNetPreprocessor.CLIP_RGB_STDS,
),
GoalObjectTypeThorSensor(
object_types=ObjectNavRoboThorBaseConfig.TARGET_TYPES,
),
ExpertActionSensor(nactions=len(ObjectNavTask.class_action_names()), ),
RewardConfigSensor(),
]
def __init__(self):
super().__init__()
# self.REWARD_CONFIG = {
# "step_penalty": -0.01,
# "goal_success_reward": 10.0,
# "failed_stop_reward": -8.0, ##double this maybe? ##change this
# "shaping_weight": 1.0,
# "penalty_for_ask": -0.3,
# }
self.REWARD_CONFIG = {
"step_penalty": -0.00,
"goal_success_reward": 0.00,
"failed_stop_reward": -10.00,
"shaping_weight": 0.00,
"penalty_for_init_ask": -1.00,
"penalty_for_ask_recurring": -0.00,#-0.1/4,##decreasing recurring cost
"penalty_for_step_ask": -0.01,
}
self.ADAPTIVE_REWARD = True
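        # With ADAPTIVE_REWARD set, the RewardConfigSensor in SENSORS (presumably)
        # exposes the active reward configuration as an observation; create_model
        # below passes adaptive_reward=True to the actor-critic accordingly.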
@classmethod
def tag(cls):
return "Objectnav-RoboTHOR-RGB-ClipResNet50GRU-FINETUNE-DDPPO"
def training_pipeline(self, **kwargs):
# PPO
ppo_steps = int(15000000)
lr = 3e-4
num_mini_batch = 1
update_repeats = 4
num_steps = 128//2
save_interval = 5000000
log_interval = 10000 if torch.cuda.is_available() else 1
gamma = 0.99
use_gae = True
gae_lambda = 0.95
max_grad_norm = 0.5
named_losses = {"ppo_loss": (PPO(**PPOConfig), 1.0)}
named_losses = self._update_with_auxiliary_losses(named_losses)
return TrainingPipeline(
save_interval=save_interval,
metric_accumulate_interval=log_interval,
optimizer_builder=Builder(optim.Adam, dict(lr=lr)),
num_mini_batch=num_mini_batch,
update_repeats=update_repeats,
max_grad_norm=max_grad_norm,
num_steps=num_steps,
named_losses={key: val[0] for key, val in named_losses.items()},
gamma=gamma,
use_gae=use_gae,
gae_lambda=gae_lambda,
advance_scene_rollout_period=self.ADVANCE_SCENE_ROLLOUT_PERIOD,
pipeline_stages=[
PipelineStage(
loss_names=list(named_losses.keys()),
max_stage_steps=ppo_steps,
loss_weights=[val[1] for val in named_losses.values()],
)
],
lr_scheduler_builder=Builder(
LambdaLR, {"lr_lambda": LinearDecay(steps=ppo_steps)}
),
)
@classmethod
def create_model(cls, **kwargs) -> nn.Module:
has_rgb = any(isinstance(s, RGBSensor) for s in cls.SENSORS)
has_depth = any(isinstance(s, DepthSensor) for s in cls.SENSORS)
adaptive_reward_flag = any(isinstance(s,RewardConfigSensor) for s in cls.SENSORS)
goal_sensor_uuid = next(
(s.uuid for s in cls.SENSORS if isinstance(s, GoalObjectTypeThorSensor)),
None,
)
action_space = gym.spaces.Dict({"nav_action": gym.spaces.Discrete(len(ObjectNavTask.class_action_names())),
"ask_action": gym.spaces.Discrete(2)})
##NEW ACTIONS : 0 means expert step, 1 means agent step
        ##OLD ACTIONS : 2 means stop asking, 1 means start asking, 0 means do nothing
return ResnetTensorObjectNavActorCritic(
action_space=action_space, # gym.spaces.Discrete(len(ObjectNavTask.class_action_names())),
observation_space=kwargs["sensor_preprocessor_graph"].observation_spaces,
goal_sensor_uuid=goal_sensor_uuid,
rgb_resnet_preprocessor_uuid="rgb_clip_resnet" if has_rgb else None,
depth_resnet_preprocessor_uuid="depth_clip_resnet" if has_depth else None,
hidden_size=512,
goal_dims=32,
is_finetuned=True,
adapt_belief=False,
adaptive_reward=adaptive_reward_flag,
)
| ask4help-main | projects/objectnav_baselines/experiments/robothor/clip/objectnav_robothor_rgb_clipresnet50gru_finetune_ask_adaptive_reward_ddppo.py |
from allenact_plugins.clip_plugin.clip_preprocessors import ClipResNetPreprocessor
from allenact_plugins.ithor_plugin.ithor_sensors import (
RGBSensorThor,
GoalObjectTypeThorSensor,
)
from allenact.base_abstractions.sensor import ExpertActionSensor
from projects.objectnav_baselines.experiments.clip.objectnav_mixin_clipresnetgru import (
ObjectNavMixInClipResNetGRUConfig,
)
from projects.objectnav_baselines.experiments.objectnav_mixin_ddppo import (
ObjectNavMixInPPOConfig,
)
from allenact_plugins.robothor_plugin.robothor_tasks import ObjectNavTask
from projects.objectnav_baselines.experiments.robothor.objectnav_robothor_base import (
ObjectNavRoboThorBaseConfig,
)
class ObjectNavRoboThorClipRGBPPOExperimentConfig(
ObjectNavRoboThorBaseConfig,
ObjectNavMixInPPOConfig,
ObjectNavMixInClipResNetGRUConfig,
):
"""An Object Navigation experiment configuration in RoboThor with RGB
input."""
CLIP_MODEL_TYPE = "RN50"
SENSORS = [
RGBSensorThor(
height=ObjectNavRoboThorBaseConfig.SCREEN_SIZE,
width=ObjectNavRoboThorBaseConfig.SCREEN_SIZE,
use_resnet_normalization=True,
uuid="rgb_lowres",
mean=ClipResNetPreprocessor.CLIP_RGB_MEANS,
stdev=ClipResNetPreprocessor.CLIP_RGB_STDS,
),
GoalObjectTypeThorSensor(
object_types=ObjectNavRoboThorBaseConfig.TARGET_TYPES,
),
ExpertActionSensor(nactions=len(ObjectNavTask.class_action_names()), ),
]
@classmethod
def tag(cls):
return "Objectnav-RoboTHOR-RGB-ClipResNet50GRU-DDPPO"
| ask4help-main | projects/objectnav_baselines/experiments/robothor/clip/objectnav_robothor_rgb_clipresnet50gru_ddppo.py |
from typing import Dict, Tuple
from allenact_plugins.clip_plugin.clip_preprocessors import ClipResNetPreprocessor
from allenact_plugins.ithor_plugin.ithor_sensors import (
RGBSensorThor,
GoalObjectTypeThorSensor,
)
from allenact.algorithms.onpolicy_sync.losses.abstract_loss import (
AbstractActorCriticLoss,
)
from allenact.base_abstractions.sensor import ExpertActionSensor
from projects.objectnav_baselines.experiments.clip.objectnav_mixin_clipresnetgru import (
ObjectNavMixInClipResNetGRUConfig,
)
from projects.objectnav_baselines.experiments.objectnav_mixin_ddppo import (
ObjectNavMixInPPOConfig,
)
from allenact_plugins.robothor_plugin.robothor_tasks import ObjectNavTask
from projects.objectnav_baselines.experiments.robothor.objectnav_robothor_base import (
ObjectNavRoboThorBaseConfig,
)
from allenact.embodiedai.aux_losses.losses import (
MultiAuxTaskNegEntropyLoss,
InverseDynamicsLoss,
TemporalDistanceLoss,
CPCA1Loss,
CPCA2Loss,
CPCA4Loss,
CPCA8Loss,
CPCA16Loss,
FrequencyLoss,
SupImitationLoss,
)
from allenact.algorithms.onpolicy_sync.losses import PPO
from allenact.algorithms.onpolicy_sync.losses.ppo import PPOConfig
from allenact.utils.experiment_utils import (
Builder,
PipelineStage,
TrainingPipeline,
LinearDecay,
)
import torch
import torch.optim as optim
from torch.optim.lr_scheduler import LambdaLR
import gym
import torch.nn as nn
from allenact.embodiedai.sensors.vision_sensors import RGBSensor, DepthSensor
from projects.objectnav_baselines.models.object_nav_models import (
ResnetTensorObjectNavActorCritic,
)
class ObjectNavRoboThorClipRGBPPOExperimentConfig(
ObjectNavRoboThorBaseConfig,
ObjectNavMixInPPOConfig,
ObjectNavMixInClipResNetGRUConfig,
):
"""An Object Navigation experiment configuration in RoboThor with RGB
input."""
CLIP_MODEL_TYPE = "RN50"
SENSORS = [
RGBSensorThor(
height=ObjectNavRoboThorBaseConfig.SCREEN_SIZE,
width=ObjectNavRoboThorBaseConfig.SCREEN_SIZE,
use_resnet_normalization=True,
uuid="rgb_lowres",
mean=ClipResNetPreprocessor.CLIP_RGB_MEANS,
stdev=ClipResNetPreprocessor.CLIP_RGB_STDS,
),
GoalObjectTypeThorSensor(
object_types=ObjectNavRoboThorBaseConfig.TARGET_TYPES,
),
ExpertActionSensor(nactions=len(ObjectNavTask.class_action_names()), ),
]
AUXILIARY_UUIDS = [
# InverseDynamicsLoss.UUID,
# TemporalDistanceLoss.UUID,
# CPCA1Loss.UUID,
# CPCA4Loss.UUID,
# CPCA8Loss.UUID,
# CPCA16Loss.UUID,
# FrequencyLoss.UUID,
# SupImitationLoss.UUID,
]
def __init__(self):
super().__init__()
# self.REWARD_CONFIG = {
# "step_penalty": -0.01,
# "goal_success_reward": 10.0,
# "failed_stop_reward": -8.0, ##double this maybe? ##change this
# "shaping_weight": 1.0,
# "penalty_for_ask": -0.3,
# }
self.REWARD_CONFIG = {
"step_penalty": -0.00,
"goal_success_reward": 0.00,
"failed_stop_reward": -10.00,
"shaping_weight": 0.00,
"penalty_for_init_ask": -1.00,
"penalty_for_ask_recurring": -0.00,#-0.1/4,##decreasing recurring cost
"penalty_for_step_ask": -0.01,
}
self.ADAPTIVE_REWARD = False
@classmethod
def tag(cls):
return "Objectnav-RoboTHOR-RGB-ClipResNet50GRU-FINETUNE-DDPPO"
def training_pipeline(self, **kwargs):
# PPO
ppo_steps = int(15000000)
lr = 3e-4
num_mini_batch = 1
update_repeats = 4
num_steps = 128//2
save_interval = 5000000
log_interval = 10000 if torch.cuda.is_available() else 1
gamma = 0.99
use_gae = True
gae_lambda = 0.95
max_grad_norm = 0.5
named_losses = {"ppo_loss": (PPO(**PPOConfig), 1.0)}
named_losses = self._update_with_auxiliary_losses(named_losses)
return TrainingPipeline(
save_interval=save_interval,
metric_accumulate_interval=log_interval,
optimizer_builder=Builder(optim.Adam, dict(lr=lr)),
num_mini_batch=num_mini_batch,
update_repeats=update_repeats,
max_grad_norm=max_grad_norm,
num_steps=num_steps,
named_losses={key: val[0] for key, val in named_losses.items()},
gamma=gamma,
use_gae=use_gae,
gae_lambda=gae_lambda,
advance_scene_rollout_period=self.ADVANCE_SCENE_ROLLOUT_PERIOD,
pipeline_stages=[
PipelineStage(
loss_names=list(named_losses.keys()),
max_stage_steps=ppo_steps,
loss_weights=[val[1] for val in named_losses.values()],
)
],
lr_scheduler_builder=Builder(
LambdaLR, {"lr_lambda": LinearDecay(steps=ppo_steps)}
),
)
@classmethod
def create_model(cls, **kwargs) -> nn.Module:
has_rgb = any(isinstance(s, RGBSensor) for s in cls.SENSORS)
has_depth = any(isinstance(s, DepthSensor) for s in cls.SENSORS)
goal_sensor_uuid = next(
(s.uuid for s in cls.SENSORS if isinstance(s, GoalObjectTypeThorSensor)),
None,
)
action_space = gym.spaces.Dict({"nav_action": gym.spaces.Discrete(len(ObjectNavTask.class_action_names())),
"ask_action": gym.spaces.Discrete(2)})
##NEW ACTIONS : 0 means expert step, 1 means agent step
#OLD ACTIONS : 2 means stop asking, 1 means start asking, 0 means do nothing
ADAPT_BELIEF = False
return ResnetTensorObjectNavActorCritic(
action_space=action_space, # gym.spaces.Discrete(len(ObjectNavTask.class_action_names())),
observation_space=kwargs["sensor_preprocessor_graph"].observation_spaces,
goal_sensor_uuid=goal_sensor_uuid,
rgb_resnet_preprocessor_uuid="rgb_clip_resnet" if has_rgb else None,
depth_resnet_preprocessor_uuid="depth_clip_resnet" if has_depth else None,
hidden_size=512,
goal_dims=32,
auxiliary_uuids=cls.AUXILIARY_UUIDS,
is_finetuned=True,
adapt_belief=ADAPT_BELIEF,
adaptive_reward=False,
)
@classmethod
def _update_with_auxiliary_losses(cls, named_losses):
        # auxiliary losses
aux_loss_total_weight = 2.0
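        # With aux_loss_total_weight = 2.0 the effective weights below are e.g.
        # 0.05 * 2.0 = 0.1 for InverseDynamics/CPCA/Frequency/SupImitation and
        # 0.2 * 2.0 = 0.4 for TemporalDistance.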
# Total losses
total_aux_losses: Dict[str, Tuple[AbstractActorCriticLoss, float]] = {
InverseDynamicsLoss.UUID: (
InverseDynamicsLoss(
subsample_rate=0.2, subsample_min_num=10, # TODO: test its effects
),
0.05 * aux_loss_total_weight, # should times 2
),
TemporalDistanceLoss.UUID: (
TemporalDistanceLoss(
num_pairs=8, epsiode_len_min=5, # TODO: test its effects
),
0.2 * aux_loss_total_weight, # should times 2
),
CPCA1Loss.UUID: (
CPCA1Loss(subsample_rate=0.2,), # TODO: test its effects
0.05 * aux_loss_total_weight, # should times 2
),
CPCA2Loss.UUID: (
CPCA2Loss(subsample_rate=0.2,), # TODO: test its effects
0.05 * aux_loss_total_weight, # should times 2
),
CPCA4Loss.UUID: (
CPCA4Loss(subsample_rate=0.2,), # TODO: test its effects
0.05 * aux_loss_total_weight, # should times 2
),
CPCA8Loss.UUID: (
CPCA8Loss(subsample_rate=0.2,), # TODO: test its effects
0.05 * aux_loss_total_weight, # should times 2
),
CPCA16Loss.UUID: (
CPCA16Loss(subsample_rate=0.2,), # TODO: test its effects
0.05 * aux_loss_total_weight, # should times 2
),
FrequencyLoss.UUID: (
FrequencyLoss(),
0.05*aux_loss_total_weight,
),
SupImitationLoss.UUID: (
SupImitationLoss(),
0.05*aux_loss_total_weight,
)
}
named_losses.update(
{uuid: total_aux_losses[uuid] for uuid in cls.AUXILIARY_UUIDS}
)
if cls.MULTIPLE_BELIEFS: # add weight entropy loss automatically
named_losses[MultiAuxTaskNegEntropyLoss.UUID] = (
MultiAuxTaskNegEntropyLoss(cls.AUXILIARY_UUIDS),
0.01,
)
return named_losses
| ask4help-main | projects/objectnav_baselines/experiments/robothor/clip/objectnav_robothor_rgb_clipresnet50gru_finetune_ask_ddppo.py |
from allenact_plugins.clip_plugin.clip_preprocessors import ClipResNetPreprocessor
from allenact_plugins.ithor_plugin.ithor_sensors import (
RGBSensorThor,
GoalObjectTypeThorSensor,
)
from projects.objectnav_baselines.experiments.clip.objectnav_mixin_clipresnetgru import (
ObjectNavMixInClipResNetGRUConfig,
)
from projects.objectnav_baselines.experiments.objectnav_mixin_ddppo import (
ObjectNavMixInPPOConfig,
)
from projects.objectnav_baselines.experiments.robothor.objectnav_robothor_base import (
ObjectNavRoboThorBaseConfig,
)
class ObjectNavRoboThorRGBPPOExperimentConfig(
ObjectNavRoboThorBaseConfig,
ObjectNavMixInPPOConfig,
ObjectNavMixInClipResNetGRUConfig,
):
"""An Object Navigation experiment configuration in RoboThor with RGB
input."""
CLIP_MODEL_TYPE = "RN50x16"
SENSORS = [
RGBSensorThor(
height=ObjectNavRoboThorBaseConfig.SCREEN_SIZE,
width=ObjectNavRoboThorBaseConfig.SCREEN_SIZE,
use_resnet_normalization=True,
uuid="rgb_lowres",
mean=ClipResNetPreprocessor.CLIP_RGB_MEANS,
stdev=ClipResNetPreprocessor.CLIP_RGB_STDS,
),
GoalObjectTypeThorSensor(
object_types=ObjectNavRoboThorBaseConfig.TARGET_TYPES,
),
]
@classmethod
def tag(cls):
return "Objectnav-RoboTHOR-RGB-ClipResNet50x16GRU-DDPPO"
| ask4help-main | projects/objectnav_baselines/experiments/robothor/clip/objectnav_robothor_rgb_clipresnet50x16gru_ddppo.py |
from allenact_plugins.clip_plugin.clip_preprocessors import ClipResNetPreprocessor
from allenact_plugins.ithor_plugin.ithor_sensors import (
RGBSensorThor,
GoalObjectTypeThorSensor,
)
from projects.objectnav_baselines.experiments.clip.objectnav_zeroshot_mixin_clip_gru import (
ObjectNavZeroShotMixInClipGRUConfig,
)
from projects.objectnav_baselines.experiments.objectnav_mixin_ddppo import (
ObjectNavMixInPPOConfig,
)
from projects.objectnav_baselines.experiments.robothor.objectnav_robothor_base import (
ObjectNavRoboThorBaseConfig,
)
class ObjectNavRoboThorClipRGBPPOExperimentConfig(
ObjectNavRoboThorBaseConfig,
ObjectNavMixInPPOConfig,
ObjectNavZeroShotMixInClipGRUConfig,
):
"""An Object Navigation experiment configuration in RoboThor with RGB
input."""
DEFAULT_NUM_TRAIN_PROCESSES = 20
CLIP_MODEL_TYPE = "RN50"
# SEEN_TARGET_TYPES = tuple(
# sorted(
# [
# "AlarmClock",
# "BaseballBat",
# "Bowl",
# "GarbageCan",
# "Laptop",
# "Mug",
# "SprayBottle",
# "Vase",
# ]
# )
# )
# UNSEEN_TARGET_TYPES = tuple(
# sorted(
# [
# "Apple",
# "BasketBall",
# "HousePlant",
# "Television"
# ]
# )
# )
TARGET_TYPES = tuple(
sorted(
[
"Apple",
"BasketBall",
"HousePlant",
"Television",
]
)
)
SENSORS = [
RGBSensorThor(
height=ObjectNavRoboThorBaseConfig.SCREEN_SIZE,
width=ObjectNavRoboThorBaseConfig.SCREEN_SIZE,
use_resnet_normalization=True,
uuid="rgb_lowres",
mean=ClipResNetPreprocessor.CLIP_RGB_MEANS,
stdev=ClipResNetPreprocessor.CLIP_RGB_STDS,
),
GoalObjectTypeThorSensor(
object_types=TARGET_TYPES,
),
]
@classmethod
def tag(cls):
return "ObjectNavRoboThorRGBPPO_CLIP_zeroshot"
| ask4help-main | projects/objectnav_baselines/experiments/robothor/clip/objectnav_robothor_zeroshot_rgb_clipgru_ddppo.py |
from typing import Sequence, Union
import gym
import numpy as np
import torch.nn as nn
from allenact.base_abstractions.preprocessor import Preprocessor
from allenact_plugins.clip_plugin.clip_preprocessors import ClipResNetPreprocessor
from allenact.embodiedai.sensors.vision_sensors import RGBSensor, DepthSensor
from allenact.utils.experiment_utils import Builder
from allenact_plugins.ithor_plugin.ithor_sensors import GoalObjectTypeThorSensor
from allenact_plugins.robothor_plugin.robothor_tasks import ObjectNavTask
from projects.objectnav_baselines.experiments.objectnav_base import ObjectNavBaseConfig
from projects.objectnav_baselines.models.object_nav_models import (
ResnetTensorObjectNavActorCritic,
)
class ObjectNavMixInClipResNetGRUConfig(ObjectNavBaseConfig):
CLIP_MODEL_TYPE: str
@classmethod
def preprocessors(cls) -> Sequence[Union[Preprocessor, Builder[Preprocessor]]]:
preprocessors = []
rgb_sensor = next((s for s in cls.SENSORS if isinstance(s, RGBSensor)), None)
assert (
np.linalg.norm(
np.array(rgb_sensor._norm_means)
- np.array(ClipResNetPreprocessor.CLIP_RGB_MEANS)
)
< 1e-5
)
assert (
np.linalg.norm(
np.array(rgb_sensor._norm_sds)
- np.array(ClipResNetPreprocessor.CLIP_RGB_STDS)
)
< 1e-5
)
if rgb_sensor is not None:
preprocessors.append(
ClipResNetPreprocessor(
rgb_input_uuid=rgb_sensor.uuid,
clip_model_type=cls.CLIP_MODEL_TYPE,
pool=False,
output_uuid="rgb_clip_resnet",
)
)
depth_sensor = next(
(s for s in cls.SENSORS if isinstance(s, DepthSensor)), None
)
if depth_sensor is not None:
preprocessors.append(
ClipResNetPreprocessor(
rgb_input_uuid=depth_sensor.uuid,
clip_model_type=cls.CLIP_MODEL_TYPE,
pool=False,
output_uuid="depth_clip_resnet",
)
)
return preprocessors
@classmethod
def create_model(cls, **kwargs) -> nn.Module:
has_rgb = any(isinstance(s, RGBSensor) for s in cls.SENSORS)
has_depth = any(isinstance(s, DepthSensor) for s in cls.SENSORS)
goal_sensor_uuid = next(
(s.uuid for s in cls.SENSORS if isinstance(s, GoalObjectTypeThorSensor)),
None,
)
act_space = gym.spaces.Dict({"nav_action":gym.spaces.Discrete(len(ObjectNavTask.class_action_names())),
"ask_action":gym.spaces.Discrete(3)}) ##2 agent takes over, 1 means expert takes over, 0 means pass
return ResnetTensorObjectNavActorCritic(
action_space=act_space,#gym.spaces.Discrete(len(ObjectNavTask.class_action_names())),
observation_space=kwargs["sensor_preprocessor_graph"].observation_spaces,
goal_sensor_uuid=goal_sensor_uuid,
rgb_resnet_preprocessor_uuid="rgb_clip_resnet" if has_rgb else None,
depth_resnet_preprocessor_uuid="depth_clip_resnet" if has_depth else None,
hidden_size=512,
goal_dims=32,
)
| ask4help-main | projects/objectnav_baselines/experiments/clip/objectnav_mixin_clipresnetgru.py |
from typing import Sequence, Union
import gym
import numpy as np
import torch.nn as nn
from allenact.base_abstractions.preprocessor import Preprocessor
from allenact_plugins.clip_plugin.clip_preprocessors import (
ClipResNetPreprocessor,
ClipTextPreprocessor
)
from allenact.embodiedai.sensors.vision_sensors import RGBSensor
from allenact.utils.experiment_utils import Builder
from allenact_plugins.ithor_plugin.ithor_sensors import GoalObjectTypeThorSensor
from allenact_plugins.robothor_plugin.robothor_tasks import ObjectNavTask
from projects.objectnav_baselines.experiments.objectnav_base import ObjectNavBaseConfig
from allenact_plugins.clip_plugin.objectnav_models import CLIPObjectNavActorCritic
class ObjectNavZeroShotMixInClipGRUConfig(ObjectNavBaseConfig):
CLIP_MODEL_TYPE: str
@classmethod
def preprocessors(cls) -> Sequence[Union[Preprocessor, Builder[Preprocessor]]]:
preprocessors = []
rgb_sensor = next((s for s in cls.SENSORS if isinstance(s, RGBSensor)), None)
assert (
np.linalg.norm(
np.array(rgb_sensor._norm_means)
- np.array(ClipResNetPreprocessor.CLIP_RGB_MEANS)
)
< 1e-5
)
assert (
np.linalg.norm(
np.array(rgb_sensor._norm_sds)
- np.array(ClipResNetPreprocessor.CLIP_RGB_STDS)
)
< 1e-5
)
if rgb_sensor is not None:
preprocessors.append(
ClipResNetPreprocessor(
rgb_input_uuid=rgb_sensor.uuid,
clip_model_type=cls.CLIP_MODEL_TYPE,
pool=False,
output_uuid="rgb_clip_resnet",
)
)
goal_sensor_uuid = next(
(s.uuid for s in cls.SENSORS if isinstance(s, GoalObjectTypeThorSensor)),
None,
)
preprocessors.append(
ClipTextPreprocessor(
goal_sensor_uuid=goal_sensor_uuid,
object_types=cls.TARGET_TYPES,
output_uuid="goal_object_encoded",
)
)
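        # The resulting 'goal_object_encoded' output (a CLIP text embedding of the
        # target category name) is what create_model below prefers over the raw
        # goal-type id whenever it is present in the preprocessor graph.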
return preprocessors
@classmethod
def create_model(cls, **kwargs) -> nn.Module:
has_rgb = any(isinstance(s, RGBSensor) for s in cls.SENSORS)
goal_sensor_uuid = next(
(s.uuid for s in cls.SENSORS if isinstance(s, GoalObjectTypeThorSensor)),
None,
)
goal_uuid = (
'goal_object_encoded' if 'goal_object_encoded' in kwargs["sensor_preprocessor_graph"].preprocessors
else goal_sensor_uuid
)
return CLIPObjectNavActorCritic(
action_space=gym.spaces.Discrete(len(ObjectNavTask.class_action_names())),
observation_space=kwargs["sensor_preprocessor_graph"].observation_spaces,
goal_sensor_uuid=goal_uuid,
rgb_resnet_preprocessor_uuid="rgb_clip_resnet" if has_rgb else None,
hidden_size=512,
)
| ask4help-main | projects/objectnav_baselines/experiments/clip/objectnav_zeroshot_mixin_clip_gru.py |
from allenact_plugins.ithor_plugin.ithor_sensors import (
RGBSensorThor,
GoalObjectTypeThorSensor,
)
from allenact_plugins.robothor_plugin.robothor_sensors import DepthSensorThor
from projects.objectnav_baselines.experiments.ithor.objectnav_ithor_base import (
ObjectNaviThorBaseConfig,
)
from projects.objectnav_baselines.experiments.objectnav_mixin_ddppo import (
ObjectNavMixInPPOConfig,
)
from projects.objectnav_baselines.experiments.objectnav_mixin_resnet18gru import (
ObjectNavMixInResNet18GRUConfig,
)
class ObjectNaviThorRGBDPPOExperimentConfig(
ObjectNaviThorBaseConfig, ObjectNavMixInPPOConfig, ObjectNavMixInResNet18GRUConfig
):
"""An Object Navigation experiment configuration in iTHOR with RGBD
input."""
SENSORS = [
RGBSensorThor(
height=ObjectNaviThorBaseConfig.SCREEN_SIZE,
width=ObjectNaviThorBaseConfig.SCREEN_SIZE,
use_resnet_normalization=True,
uuid="rgb_lowres",
),
DepthSensorThor(
height=ObjectNaviThorBaseConfig.SCREEN_SIZE,
width=ObjectNaviThorBaseConfig.SCREEN_SIZE,
use_normalization=True,
uuid="depth_lowres",
),
GoalObjectTypeThorSensor(object_types=ObjectNaviThorBaseConfig.TARGET_TYPES,),
]
@classmethod
def tag(cls):
return "Objectnav-iTHOR-RGBD-ResNetGRU-DDPPO"
| ask4help-main | projects/objectnav_baselines/experiments/ithor/objectnav_ithor_rgbd_resnetgru_ddppo.py |
from allenact_plugins.ithor_plugin.ithor_sensors import GoalObjectTypeThorSensor
from allenact_plugins.robothor_plugin.robothor_sensors import DepthSensorThor
from projects.objectnav_baselines.experiments.ithor.objectnav_ithor_base import (
ObjectNaviThorBaseConfig,
)
from projects.objectnav_baselines.experiments.objectnav_mixin_ddppo import (
ObjectNavMixInPPOConfig,
)
from projects.objectnav_baselines.experiments.objectnav_mixin_resnet18gru import (
ObjectNavMixInResNet18GRUConfig,
)
class ObjectNaviThorDepthPPOExperimentConfig(
ObjectNaviThorBaseConfig, ObjectNavMixInPPOConfig, ObjectNavMixInResNet18GRUConfig,
):
"""An Object Navigation experiment configuration in iThor with Depth
input."""
SENSORS = (
DepthSensorThor(
height=ObjectNaviThorBaseConfig.SCREEN_SIZE,
width=ObjectNaviThorBaseConfig.SCREEN_SIZE,
use_normalization=True,
uuid="depth_lowres",
),
GoalObjectTypeThorSensor(object_types=ObjectNaviThorBaseConfig.TARGET_TYPES,),
)
@classmethod
def tag(cls):
return "Objectnav-iTHOR-Depth-ResNetGRU-DDPPO"
| ask4help-main | projects/objectnav_baselines/experiments/ithor/objectnav_ithor_depth_resnetgru_ddppo.py |
from allenact_plugins.ithor_plugin.ithor_sensors import (
RGBSensorThor,
GoalObjectTypeThorSensor,
)
from projects.objectnav_baselines.experiments.ithor.objectnav_ithor_base import (
ObjectNaviThorBaseConfig,
)
from projects.objectnav_baselines.experiments.objectnav_mixin_ddppo import (
ObjectNavMixInPPOConfig,
)
from projects.objectnav_baselines.experiments.objectnav_mixin_resnet18gru import (
ObjectNavMixInResNet18GRUConfig,
)
class ObjectNaviThorRGBPPOExperimentConfig(
ObjectNaviThorBaseConfig, ObjectNavMixInPPOConfig, ObjectNavMixInResNet18GRUConfig,
):
"""An Object Navigation experiment configuration in iThor with RGB
input."""
SENSORS = [
RGBSensorThor(
height=ObjectNaviThorBaseConfig.SCREEN_SIZE,
width=ObjectNaviThorBaseConfig.SCREEN_SIZE,
use_resnet_normalization=True,
uuid="rgb_lowres",
),
GoalObjectTypeThorSensor(object_types=ObjectNaviThorBaseConfig.TARGET_TYPES,),
]
@classmethod
def tag(cls):
return "Objectnav-iTHOR-RGB-ResNetGRU-DDPPO"
| ask4help-main | projects/objectnav_baselines/experiments/ithor/objectnav_ithor_rgb_resnetgru_ddppo.py |
| ask4help-main | projects/objectnav_baselines/experiments/ithor/__init__.py |
import os
from abc import ABC
import torch
from projects.objectnav_baselines.experiments.objectnav_thor_base import (
ObjectNavThorBaseConfig,
)
class ObjectNaviThorBaseConfig(ObjectNavThorBaseConfig, ABC):
"""The base config for all iTHOR ObjectNav experiments."""
THOR_COMMIT_ID = "9549791ce2e7f472063a10abb1fb7664159fec23"
AGENT_MODE = "default"
DEFAULT_NUM_TRAIN_PROCESSES = 40 if torch.cuda.is_available() else 1
TRAIN_DATASET_DIR = os.path.join(os.getcwd(), "datasets/ithor-objectnav/train")
VAL_DATASET_DIR = os.path.join(os.getcwd(), "datasets/ithor-objectnav/val")
TARGET_TYPES = tuple(
sorted(
[
"AlarmClock",
"Apple",
"Book",
"Bowl",
"Box",
"Candle",
"GarbageCan",
"HousePlant",
"Laptop",
"SoapBottle",
"Television",
"Toaster",
],
)
)
| ask4help-main | projects/objectnav_baselines/experiments/ithor/objectnav_ithor_base.py |
"""Baseline models for use in the object navigation task.
Object navigation is currently available as a Task in AI2-THOR and
Facebook's Habitat.
"""
from typing import Tuple, Dict, Optional, cast, List
import gym
import torch
import torch.nn as nn
from gym.spaces.dict import Dict as SpaceDict
from allenact.algorithms.onpolicy_sync.policy import ObservationType
from allenact.embodiedai.models.basic_models import SimpleCNN
import allenact.embodiedai.models.resnet as resnet
from allenact.embodiedai.models.visual_nav_models import (
VisualNavActorCritic,
FusionType,
)
class ObjectNavActorCritic(VisualNavActorCritic):
"""Baseline recurrent actor critic model for object-navigation.
# Attributes
action_space : The space of actions available to the agent. Currently only discrete
actions are allowed (so this space will always be of type `gym.spaces.Discrete`).
observation_space : The observation space expected by the agent. This observation space
should include (optionally) 'rgb' images and 'depth' images and is required to
have a component corresponding to the goal `goal_sensor_uuid`.
goal_sensor_uuid : The uuid of the sensor of the goal object. See `GoalObjectTypeThorSensor`
as an example of such a sensor.
hidden_size : The hidden size of the GRU RNN.
object_type_embedding_dim: The dimensionality of the embedding corresponding to the goal
object type.
"""
def __init__(
self,
action_space: gym.spaces.Discrete,
observation_space: SpaceDict,
goal_sensor_uuid: str,
# RNN
hidden_size=512,
num_rnn_layers=1,
rnn_type="GRU",
add_prev_actions=False,
action_embed_size=6,
# Aux loss
multiple_beliefs=False,
beliefs_fusion: Optional[FusionType] = None,
auxiliary_uuids: Optional[List[str]] = None,
# below are custom params
rgb_uuid: Optional[str] = None,
depth_uuid: Optional[str] = None,
object_type_embedding_dim=8,
trainable_masked_hidden_state: bool = False,
# perception backbone params,
backbone="gnresnet18",
resnet_baseplanes=32,
):
"""Initializer.
See class documentation for parameter definitions.
"""
super().__init__(
action_space=action_space,
observation_space=observation_space,
hidden_size=hidden_size,
multiple_beliefs=multiple_beliefs,
beliefs_fusion=beliefs_fusion,
auxiliary_uuids=auxiliary_uuids,
)
self.goal_sensor_uuid = goal_sensor_uuid
self._n_object_types = self.observation_space.spaces[self.goal_sensor_uuid].n
self.object_type_embedding_size = object_type_embedding_dim
self.backbone = backbone
if backbone == "simple_cnn":
self.visual_encoder = SimpleCNN(
observation_space=observation_space,
output_size=hidden_size,
rgb_uuid=rgb_uuid,
depth_uuid=depth_uuid,
)
else: # resnet family
self.visual_encoder = resnet.GroupNormResNetEncoder(
observation_space=observation_space,
output_size=hidden_size,
rgb_uuid=rgb_uuid,
depth_uuid=depth_uuid,
baseplanes=resnet_baseplanes,
ngroups=resnet_baseplanes // 2,
make_backbone=getattr(resnet, backbone),
)
self.create_state_encoders(
obs_embed_size=self.goal_visual_encoder_output_dims,
num_rnn_layers=num_rnn_layers,
rnn_type=rnn_type,
add_prev_actions=add_prev_actions,
prev_action_embed_size=action_embed_size,
trainable_masked_hidden_state=trainable_masked_hidden_state,
)
self.create_actorcritic_head()
self.create_aux_models(
obs_embed_size=self.goal_visual_encoder_output_dims,
action_embed_size=action_embed_size,
)
self.object_type_embedding = nn.Embedding(
num_embeddings=self._n_object_types,
embedding_dim=object_type_embedding_dim,
)
self.train()
@property
def is_blind(self) -> bool:
"""True if the model is blind (e.g. neither 'depth' or 'rgb' is an
input observation type)."""
return self.visual_encoder.is_blind
@property
def goal_visual_encoder_output_dims(self):
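        # Size of the concatenated [visual, goal-type] embedding fed to the RNN; e.g.
        # with the default sizes (512 hidden units, 8-dim object embedding) this is
        # 512 + 8 = 520 when the model is not blind.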
dims = self.object_type_embedding_size
if self.is_blind:
return dims
return dims + self.recurrent_hidden_state_size
def get_object_type_encoding(
self, observations: Dict[str, torch.FloatTensor]
) -> torch.FloatTensor:
"""Get the object type encoding from input batched observations."""
# noinspection PyTypeChecker
return self.object_type_embedding( # type:ignore
observations[self.goal_sensor_uuid].to(torch.int64)
)
def forward_encoder(self, observations: ObservationType) -> torch.FloatTensor:
target_encoding = self.get_object_type_encoding(
cast(Dict[str, torch.FloatTensor], observations)
)
obs_embeds = [target_encoding]
if not self.is_blind:
perception_embed = self.visual_encoder(observations)
obs_embeds = [perception_embed] + obs_embeds
obs_embeds = torch.cat(obs_embeds, dim=-1)
return obs_embeds
class ResnetTensorObjectNavActorCritic(VisualNavActorCritic):
def __init__(
# base params
self,
action_space: gym.spaces.Discrete,
observation_space: SpaceDict,
goal_sensor_uuid: str,
hidden_size=512,
num_rnn_layers=1,
rnn_type="GRU",
add_prev_actions=False,
action_embed_size=6,
multiple_beliefs=False,
beliefs_fusion: Optional[FusionType] = None,
auxiliary_uuids: Optional[List[str]] = None,
is_finetuned = False,
end_action_in_ask=False,
adapt_belief = False,
adaptive_reward=False,
# custom params
rgb_resnet_preprocessor_uuid: Optional[str] = None,
depth_resnet_preprocessor_uuid: Optional[str] = None,
goal_dims: int = 32,
resnet_compressor_hidden_out_dims: Tuple[int, int] = (128, 32),
combiner_hidden_out_dims: Tuple[int, int] = (128, 32),
):
super().__init__(
action_space=action_space,
observation_space=observation_space,
hidden_size=hidden_size,
multiple_beliefs=multiple_beliefs,
beliefs_fusion=beliefs_fusion,
auxiliary_uuids=auxiliary_uuids,
)
if (
rgb_resnet_preprocessor_uuid is None
or depth_resnet_preprocessor_uuid is None
):
resnet_preprocessor_uuid = (
rgb_resnet_preprocessor_uuid
if rgb_resnet_preprocessor_uuid is not None
else depth_resnet_preprocessor_uuid
)
self.goal_visual_encoder = ResnetTensorGoalEncoder(
self.observation_space,
goal_sensor_uuid,
resnet_preprocessor_uuid,
goal_dims,
resnet_compressor_hidden_out_dims,
combiner_hidden_out_dims,
)
else:
self.goal_visual_encoder = ResnetDualTensorGoalEncoder( # type:ignore
self.observation_space,
goal_sensor_uuid,
rgb_resnet_preprocessor_uuid,
depth_resnet_preprocessor_uuid,
goal_dims,
resnet_compressor_hidden_out_dims,
combiner_hidden_out_dims,
)
self.create_state_encoders(
obs_embed_size=self.goal_visual_encoder.output_dims,
num_rnn_layers=num_rnn_layers,
rnn_type=rnn_type,
add_prev_actions=add_prev_actions,
prev_action_embed_size=action_embed_size,
)
self.create_actorcritic_head()
self.create_aux_models(
obs_embed_size=self.goal_visual_encoder.output_dims,
action_embed_size=action_embed_size,
)
self.is_finetuned = is_finetuned
self.end_action_in_ask = end_action_in_ask
self.adapt_belief = adapt_belief
self.adaptive_reward = adaptive_reward
if self.is_finetuned:
self.create_ask4_help_module(prev_action_embed_size=action_embed_size,
num_rnn_layers=num_rnn_layers,
rnn_type=rnn_type,
adaptive_reward=self.adaptive_reward,
)
if self.adapt_belief:
self.create_expert_encoder(input_size=self._hidden_size,
prev_action_embed_size=action_embed_size,
num_rnn_layers=num_rnn_layers,
rnn_type=rnn_type,
)
self.train()
@property
def is_blind(self) -> bool:
"""True if the model is blind (e.g. neither 'depth' or 'rgb' is an
input observation type)."""
return self.goal_visual_encoder.is_blind
def forward_encoder(self, observations: ObservationType) -> torch.FloatTensor:
return self.goal_visual_encoder(observations)
class ResnetTensorGoalEncoder(nn.Module):
def __init__(
self,
observation_spaces: SpaceDict,
goal_sensor_uuid: str,
resnet_preprocessor_uuid: str,
class_dims: int = 32,
resnet_compressor_hidden_out_dims: Tuple[int, int] = (128, 32),
combiner_hidden_out_dims: Tuple[int, int] = (128, 32),
) -> None:
super().__init__()
self.goal_uuid = goal_sensor_uuid
self.resnet_uuid = resnet_preprocessor_uuid
self.class_dims = class_dims
self.resnet_hid_out_dims = resnet_compressor_hidden_out_dims
self.combine_hid_out_dims = combiner_hidden_out_dims
self.embed_class = nn.Embedding(
num_embeddings=observation_spaces.spaces[self.goal_uuid].n,
embedding_dim=self.class_dims,
)
self.blind = self.resnet_uuid not in observation_spaces.spaces
if not self.blind:
self.resnet_tensor_shape = observation_spaces.spaces[self.resnet_uuid].shape
self.resnet_compressor = nn.Sequential(
nn.Conv2d(self.resnet_tensor_shape[0], self.resnet_hid_out_dims[0], 1),
nn.ReLU(),
nn.Conv2d(*self.resnet_hid_out_dims[0:2], 1),
nn.ReLU(),
)
self.target_obs_combiner = nn.Sequential(
nn.Conv2d(
self.resnet_hid_out_dims[1] + self.class_dims,
self.combine_hid_out_dims[0],
1,
),
nn.ReLU(),
nn.Conv2d(*self.combine_hid_out_dims[0:2], 1),
)
@property
def is_blind(self):
return self.blind
@property
def output_dims(self):
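        # Flattened size of the combined goal+visual embedding; e.g. (illustrative)
        # a 32-channel combiner output over a 7x7 resnet feature grid gives
        # 32 * 7 * 7 = 1568.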
if self.blind:
return self.class_dims
else:
return (
self.combine_hid_out_dims[-1]
* self.resnet_tensor_shape[1]
* self.resnet_tensor_shape[2]
)
def get_object_type_encoding(
self, observations: Dict[str, torch.FloatTensor]
) -> torch.FloatTensor:
"""Get the object type encoding from input batched observations."""
return cast(
torch.FloatTensor,
self.embed_class(observations[self.goal_uuid].to(torch.int64)),
)
def compress_resnet(self, observations):
return self.resnet_compressor(observations[self.resnet_uuid])
def distribute_target(self, observations):
target_emb = self.embed_class(observations[self.goal_uuid])
return target_emb.view(-1, self.class_dims, 1, 1).expand(
-1, -1, self.resnet_tensor_shape[-2], self.resnet_tensor_shape[-1]
)
def adapt_input(self, observations):
resnet = observations[self.resnet_uuid]
goal = observations[self.goal_uuid]
use_agent = False
nagent = 1
if len(resnet.shape) == 6:
use_agent = True
nstep, nsampler, nagent = resnet.shape[:3]
else:
nstep, nsampler = resnet.shape[:2]
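        # Flatten step/sampler (and agent) dims into one batch dim, e.g. an
        # illustrative [nstep, nsampler, 2048, 7, 7] resnet tensor becomes
        # [nstep * nsampler, 2048, 7, 7] before the 1x1 conv compressors.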
observations[self.resnet_uuid] = resnet.view(-1, *resnet.shape[-3:])
observations[self.goal_uuid] = goal.view(-1, goal.shape[-1])
return observations, use_agent, nstep, nsampler, nagent
@staticmethod
def adapt_output(x, use_agent, nstep, nsampler, nagent):
if use_agent:
return x.view(nstep, nsampler, nagent, -1)
return x.view(nstep, nsampler * nagent, -1)
def forward(self, observations):
observations, use_agent, nstep, nsampler, nagent = self.adapt_input(
observations
)
if self.blind:
return self.embed_class(observations[self.goal_uuid])
embs = [
self.compress_resnet(observations),
self.distribute_target(observations),
]
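        # With the default dims both entries are [batch, 32, H, W] (compressed resnet
        # features and the spatially broadcast goal embedding), so the combiner
        # receives 32 + 32 = 64 input channels.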
x = self.target_obs_combiner(torch.cat(embs, dim=1,))
x = x.reshape(x.size(0), -1) # flatten
return self.adapt_output(x, use_agent, nstep, nsampler, nagent)
class ResnetDualTensorGoalEncoder(nn.Module):
def __init__(
self,
observation_spaces: SpaceDict,
goal_sensor_uuid: str,
rgb_resnet_preprocessor_uuid: str,
depth_resnet_preprocessor_uuid: str,
class_dims: int = 32,
resnet_compressor_hidden_out_dims: Tuple[int, int] = (128, 32),
combiner_hidden_out_dims: Tuple[int, int] = (128, 32),
) -> None:
super().__init__()
self.goal_uuid = goal_sensor_uuid
self.rgb_resnet_uuid = rgb_resnet_preprocessor_uuid
self.depth_resnet_uuid = depth_resnet_preprocessor_uuid
self.class_dims = class_dims
self.resnet_hid_out_dims = resnet_compressor_hidden_out_dims
self.combine_hid_out_dims = combiner_hidden_out_dims
self.embed_class = nn.Embedding(
num_embeddings=observation_spaces.spaces[self.goal_uuid].n,
embedding_dim=self.class_dims,
)
self.blind = (
self.rgb_resnet_uuid not in observation_spaces.spaces
or self.depth_resnet_uuid not in observation_spaces.spaces
)
if not self.blind:
self.resnet_tensor_shape = observation_spaces.spaces[
self.rgb_resnet_uuid
].shape
self.rgb_resnet_compressor = nn.Sequential(
nn.Conv2d(self.resnet_tensor_shape[0], self.resnet_hid_out_dims[0], 1),
nn.ReLU(),
nn.Conv2d(*self.resnet_hid_out_dims[0:2], 1),
nn.ReLU(),
)
self.depth_resnet_compressor = nn.Sequential(
nn.Conv2d(self.resnet_tensor_shape[0], self.resnet_hid_out_dims[0], 1),
nn.ReLU(),
nn.Conv2d(*self.resnet_hid_out_dims[0:2], 1),
nn.ReLU(),
)
self.rgb_target_obs_combiner = nn.Sequential(
nn.Conv2d(
self.resnet_hid_out_dims[1] + self.class_dims,
self.combine_hid_out_dims[0],
1,
),
nn.ReLU(),
nn.Conv2d(*self.combine_hid_out_dims[0:2], 1),
)
self.depth_target_obs_combiner = nn.Sequential(
nn.Conv2d(
self.resnet_hid_out_dims[1] + self.class_dims,
self.combine_hid_out_dims[0],
1,
),
nn.ReLU(),
nn.Conv2d(*self.combine_hid_out_dims[0:2], 1),
)
@property
def is_blind(self):
return self.blind
@property
def output_dims(self):
if self.blind:
return self.class_dims
else:
return (
2
* self.combine_hid_out_dims[-1]
* self.resnet_tensor_shape[1]
* self.resnet_tensor_shape[2]
)
def get_object_type_encoding(
self, observations: Dict[str, torch.FloatTensor]
) -> torch.FloatTensor:
"""Get the object type encoding from input batched observations."""
return cast(
torch.FloatTensor,
self.embed_class(observations[self.goal_uuid].to(torch.int64)),
)
def compress_rgb_resnet(self, observations):
return self.rgb_resnet_compressor(observations[self.rgb_resnet_uuid])
def compress_depth_resnet(self, observations):
return self.depth_resnet_compressor(observations[self.depth_resnet_uuid])
def distribute_target(self, observations):
target_emb = self.embed_class(observations[self.goal_uuid])
return target_emb.view(-1, self.class_dims, 1, 1).expand(
-1, -1, self.resnet_tensor_shape[-2], self.resnet_tensor_shape[-1]
)
def adapt_input(self, observations):
rgb = observations[self.rgb_resnet_uuid]
depth = observations[self.depth_resnet_uuid]
use_agent = False
nagent = 1
if len(rgb.shape) == 6:
use_agent = True
nstep, nsampler, nagent = rgb.shape[:3]
else:
nstep, nsampler = rgb.shape[:2]
observations[self.rgb_resnet_uuid] = rgb.view(-1, *rgb.shape[-3:])
observations[self.depth_resnet_uuid] = depth.view(-1, *depth.shape[-3:])
observations[self.goal_uuid] = observations[self.goal_uuid].view(-1, 1)
return observations, use_agent, nstep, nsampler, nagent
@staticmethod
def adapt_output(x, use_agent, nstep, nsampler, nagent):
if use_agent:
return x.view(nstep, nsampler, nagent, -1)
return x.view(nstep, nsampler * nagent, -1)
def forward(self, observations):
observations, use_agent, nstep, nsampler, nagent = self.adapt_input(
observations
)
if self.blind:
return self.embed_class(observations[self.goal_uuid])
rgb_embs = [
self.compress_rgb_resnet(observations),
self.distribute_target(observations),
]
rgb_x = self.rgb_target_obs_combiner(torch.cat(rgb_embs, dim=1,))
depth_embs = [
self.compress_depth_resnet(observations),
self.distribute_target(observations),
]
depth_x = self.depth_target_obs_combiner(torch.cat(depth_embs, dim=1,))
x = torch.cat([rgb_x, depth_x], dim=1)
x = x.reshape(x.size(0), -1) # flatten
return self.adapt_output(x, use_agent, nstep, nsampler, nagent)
| ask4help-main | projects/objectnav_baselines/models/object_nav_models.py |
ask4help-main | projects/objectnav_baselines/models/__init__.py |
|
ask4help-main | projects/babyai_baselines/__init__.py |
|
ask4help-main | projects/babyai_baselines/experiments/__init__.py |
|
from abc import ABC
from typing import Dict, Any, List, Optional, Union, Sequence, cast
import gym
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim.lr_scheduler import LambdaLR
from allenact.algorithms.onpolicy_sync.losses import PPO, A2C
from allenact.algorithms.onpolicy_sync.losses.a2cacktr import A2CConfig
from allenact.algorithms.onpolicy_sync.losses.imitation import Imitation
from allenact.algorithms.onpolicy_sync.losses.ppo import PPOConfig
from allenact.base_abstractions.experiment_config import ExperimentConfig, MachineParams
from allenact.base_abstractions.misc import Loss
from allenact.base_abstractions.sensor import SensorSuite, Sensor, ExpertActionSensor
from allenact.base_abstractions.task import TaskSampler
from allenact.utils.experiment_utils import (
Builder,
LinearDecay,
PipelineStage,
TrainingPipeline,
)
from allenact_plugins.babyai_plugin.babyai_models import BabyAIRecurrentACModel
from allenact_plugins.babyai_plugin.babyai_tasks import BabyAITask, BabyAITaskSampler
from allenact_plugins.minigrid_plugin.minigrid_sensors import (
EgocentricMiniGridSensor,
MiniGridMissionSensor,
)
class BaseBabyAIExperimentConfig(ExperimentConfig, ABC):
"""Base experimental config."""
LEVEL: Optional[str] = None
TOTAL_RL_TRAIN_STEPS: Optional[int] = None
AGENT_VIEW_SIZE: int = 7
ROLLOUT_STEPS: Optional[int] = None
NUM_TRAIN_SAMPLERS: Optional[int] = None
NUM_TEST_TASKS: Optional[int] = None
INSTR_LEN: Optional[int] = None
USE_INSTR: Optional[bool] = None
GPU_ID: Optional[int] = None
USE_EXPERT = False
SHOULD_LOG = True
PPO_NUM_MINI_BATCH = 2
ARCH: Optional[str] = None
NUM_CKPTS_TO_SAVE = 50
TEST_SEED_OFFSET = 0
DEFAULT_LR = 1e-3
@classmethod
def METRIC_ACCUMULATE_INTERVAL(cls):
return cls.NUM_TRAIN_SAMPLERS * 1000
@classmethod
def get_sensors(cls) -> Sequence[Sensor]:
assert cls.USE_INSTR is not None
return (
[
EgocentricMiniGridSensor(
agent_view_size=cls.AGENT_VIEW_SIZE, view_channels=3
),
]
+ (
[MiniGridMissionSensor(instr_len=cls.INSTR_LEN)] # type:ignore
if cls.USE_INSTR
else []
)
+ (
[
ExpertActionSensor( # type: ignore
nactions=len(BabyAITask.class_action_names())
)
]
if cls.USE_EXPERT
else []
)
)
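    # For each algorithm, rl_loss_default returns the loss (builder) together with the
    # num_mini_batch / update_repeats settings consumed by _training_pipeline.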
@classmethod
def rl_loss_default(cls, alg: str, steps: Optional[int] = None):
if alg == "ppo":
assert steps is not None
return {
"loss": Builder(
PPO, kwargs={"clip_decay": LinearDecay(steps)}, default=PPOConfig,
),
"num_mini_batch": cls.PPO_NUM_MINI_BATCH,
"update_repeats": 4,
}
elif alg == "a2c":
return {
"loss": A2C(**A2CConfig),
"num_mini_batch": 1,
"update_repeats": 1,
}
elif alg == "imitation":
return {
"loss": Imitation(),
"num_mini_batch": cls.PPO_NUM_MINI_BATCH,
"update_repeats": 4,
}
else:
raise NotImplementedError
@classmethod
def _training_pipeline(
cls,
named_losses: Dict[str, Union[Loss, Builder]],
pipeline_stages: List[PipelineStage],
num_mini_batch: int,
update_repeats: int,
total_train_steps: int,
lr: Optional[float] = None,
):
lr = cls.DEFAULT_LR if lr is None else lr
num_steps = cls.ROLLOUT_STEPS
metric_accumulate_interval = (
cls.METRIC_ACCUMULATE_INTERVAL()
) # Log every 10 max length tasks
save_interval = int(cls.TOTAL_RL_TRAIN_STEPS / cls.NUM_CKPTS_TO_SAVE)
gamma = 0.99
use_gae = "reinforce_loss" not in named_losses
gae_lambda = 0.99
max_grad_norm = 0.5
return TrainingPipeline(
save_interval=save_interval,
metric_accumulate_interval=metric_accumulate_interval,
optimizer_builder=Builder(cast(optim.Optimizer, optim.Adam), dict(lr=lr)),
num_mini_batch=num_mini_batch,
update_repeats=update_repeats,
max_grad_norm=max_grad_norm,
num_steps=num_steps,
named_losses=named_losses,
gamma=gamma,
use_gae=use_gae,
gae_lambda=gae_lambda,
advance_scene_rollout_period=None,
should_log=cls.SHOULD_LOG,
pipeline_stages=pipeline_stages,
lr_scheduler_builder=Builder(
LambdaLR, {"lr_lambda": LinearDecay(steps=cls.TOTAL_RL_TRAIN_STEPS)} # type: ignore
),
)
@classmethod
def machine_params(
cls, mode="train", gpu_id="default", n_train_processes="default", **kwargs
):
if mode == "train":
if n_train_processes == "default":
nprocesses = cls.NUM_TRAIN_SAMPLERS
else:
nprocesses = n_train_processes
elif mode == "valid":
nprocesses = 0
elif mode == "test":
nprocesses = min(
100 if torch.cuda.is_available() else 8, cls.NUM_TEST_TASKS
)
else:
raise NotImplementedError("mode must be 'train', 'valid', or 'test'.")
if gpu_id == "default":
devices = [] if cls.GPU_ID is None else [cls.GPU_ID]
else:
devices = [gpu_id]
return MachineParams(nprocesses=nprocesses, devices=devices)
@classmethod
def create_model(cls, **kwargs) -> nn.Module:
sensors = cls.get_sensors()
return BabyAIRecurrentACModel(
action_space=gym.spaces.Discrete(len(BabyAITask.class_action_names())),
observation_space=SensorSuite(sensors).observation_spaces,
use_instr=cls.USE_INSTR,
use_memory=True,
arch=cls.ARCH,
)
@classmethod
def make_sampler_fn(cls, **kwargs) -> TaskSampler:
return BabyAITaskSampler(**kwargs)
def train_task_sampler_args(
self,
process_ind: int,
total_processes: int,
devices: Optional[List[int]] = None,
seeds: Optional[List[int]] = None,
deterministic_cudnn: bool = False,
) -> Dict[str, Any]:
return {
"env_builder": self.LEVEL,
"sensors": self.get_sensors(),
"seed": seeds[process_ind] if seeds is not None else None,
}
def valid_task_sampler_args(
self,
process_ind: int,
total_processes: int,
devices: Optional[List[int]] = None,
seeds: Optional[List[int]] = None,
deterministic_cudnn: bool = False,
) -> Dict[str, Any]:
raise RuntimeError
def test_task_sampler_args(
self,
process_ind: int,
total_processes: int,
devices: Optional[List[int]] = None,
seeds: Optional[List[int]] = None,
deterministic_cudnn: bool = False,
) -> Dict[str, Any]:
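        # Distribute NUM_TEST_TASKS as evenly as possible: the first
        # (NUM_TEST_TASKS % total_processes) processes each receive one extra task.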
max_tasks = self.NUM_TEST_TASKS // total_processes + (
process_ind < (self.NUM_TEST_TASKS % total_processes)
)
task_seeds_list = [
2 ** 31 - 1 + self.TEST_SEED_OFFSET + process_ind + total_processes * i
for i in range(max_tasks)
]
# print(max_tasks, process_ind, total_processes, task_seeds_list)
assert len(task_seeds_list) == 0 or (
min(task_seeds_list) >= 0 and max(task_seeds_list) <= 2 ** 32 - 1
)
train_sampler_args = self.train_task_sampler_args(
process_ind=process_ind,
total_processes=total_processes,
devices=devices,
seeds=seeds,
deterministic_cudnn=deterministic_cudnn,
)
return {
**train_sampler_args,
"task_seeds_list": task_seeds_list,
"max_tasks": max_tasks,
"deterministic_sampling": True,
"sensors": [
s for s in train_sampler_args["sensors"] if "Expert" not in str(type(s))
],
}
| ask4help-main | projects/babyai_baselines/experiments/base.py |
from allenact.utils.experiment_utils import PipelineStage
from projects.babyai_baselines.experiments.go_to_local.base import (
BaseBabyAIGoToLocalExperimentConfig,
)
class PPOBabyAIGoToLocalExperimentConfig(BaseBabyAIGoToLocalExperimentConfig):
"""Behavior clone then PPO."""
USE_EXPERT = True
@classmethod
def tag(cls):
return "BabyAIGoToLocalBC"
@classmethod
def training_pipeline(cls, **kwargs):
total_train_steps = cls.TOTAL_IL_TRAIN_STEPS
ppo_info = cls.rl_loss_default("ppo", steps=-1)
imitation_info = cls.rl_loss_default("imitation")
return cls._training_pipeline(
named_losses={"imitation_loss": imitation_info["loss"],},
pipeline_stages=[
PipelineStage(
loss_names=["imitation_loss"], max_stage_steps=total_train_steps,
),
],
num_mini_batch=min(
info["num_mini_batch"] for info in [ppo_info, imitation_info]
),
update_repeats=min(
info["update_repeats"] for info in [ppo_info, imitation_info]
),
total_train_steps=total_train_steps,
)
| ask4help-main | projects/babyai_baselines/experiments/go_to_local/bc.py |
import os
from typing import Sequence, Optional
import torch
from allenact.utils.experiment_utils import PipelineStage, OffPolicyPipelineComponent
from allenact_plugins.babyai_plugin.babyai_constants import (
BABYAI_EXPERT_TRAJECTORIES_DIR,
)
from allenact_plugins.minigrid_plugin.minigrid_offpolicy import (
MiniGridOffPolicyExpertCELoss,
create_minigrid_offpolicy_data_iterator,
)
from projects.tutorials.minigrid_offpolicy_tutorial import (
BCOffPolicyBabyAIGoToLocalExperimentConfig,
)
class DistributedBCOffPolicyBabyAIGoToLocalExperimentConfig(
BCOffPolicyBabyAIGoToLocalExperimentConfig
):
"""Distributed Off policy imitation."""
@classmethod
def tag(cls):
return "DistributedBabyAIGoToLocalBCOffPolicy"
@classmethod
def machine_params(
cls, mode="train", gpu_id="default", n_train_processes="default", **kwargs
):
res = super().machine_params(mode, gpu_id, n_train_processes, **kwargs)
if res["nprocesses"] > 0 and torch.cuda.is_available():
ngpu_to_use = min(torch.cuda.device_count(), 2)
res["nprocesses"] = [res["nprocesses"] // ngpu_to_use] * ngpu_to_use
res["gpu_ids"] = list(range(ngpu_to_use))
return res
@classmethod
def expert_ce_loss_kwargs_generator(
cls, worker_id: int, rollouts_per_worker: Sequence[int], seed: Optional[int]
):
return dict(num_workers=len(rollouts_per_worker), current_worker=worker_id)
@classmethod
def training_pipeline(cls, **kwargs):
total_train_steps = cls.TOTAL_IL_TRAIN_STEPS
ppo_info = cls.rl_loss_default("ppo", steps=-1)
num_mini_batch = ppo_info["num_mini_batch"]
update_repeats = ppo_info["update_repeats"]
return cls._training_pipeline(
named_losses={
"offpolicy_expert_ce_loss": MiniGridOffPolicyExpertCELoss(
total_episodes_in_epoch=int(1e6)
// len(cls.machine_params("train")["gpu_ids"])
),
},
pipeline_stages=[
PipelineStage(
loss_names=[],
max_stage_steps=total_train_steps,
offpolicy_component=OffPolicyPipelineComponent(
data_iterator_builder=lambda **extra_kwargs: create_minigrid_offpolicy_data_iterator(
path=os.path.join(
BABYAI_EXPERT_TRAJECTORIES_DIR,
"BabyAI-GoToLocal-v0{}.pkl".format(
"" if torch.cuda.is_available() else "-small"
),
),
nrollouts=cls.NUM_TRAIN_SAMPLERS // num_mini_batch,
rollout_len=cls.ROLLOUT_STEPS,
instr_len=cls.INSTR_LEN,
**extra_kwargs,
),
data_iterator_kwargs_generator=cls.expert_ce_loss_kwargs_generator,
loss_names=["offpolicy_expert_ce_loss"],
updates=num_mini_batch * update_repeats,
),
),
],
num_mini_batch=0,
update_repeats=0,
total_train_steps=total_train_steps,
)
| ask4help-main | projects/babyai_baselines/experiments/go_to_local/distributed_bc_offpolicy.py |
from allenact.utils.experiment_utils import PipelineStage, LinearDecay
from projects.babyai_baselines.experiments.go_to_local.base import (
BaseBabyAIGoToLocalExperimentConfig,
)
class DaggerBabyAIGoToLocalExperimentConfig(BaseBabyAIGoToLocalExperimentConfig):
"""Find goal in lighthouse env using imitation learning.
Training with Dagger.
"""
USE_EXPERT = True
@classmethod
def tag(cls):
return "BabyAIGoToLocalDagger"
@classmethod
def training_pipeline(cls, **kwargs):
total_train_steps = cls.TOTAL_IL_TRAIN_STEPS
loss_info = cls.rl_loss_default("imitation")
return cls._training_pipeline(
named_losses={"imitation_loss": loss_info["loss"]},
pipeline_stages=[
PipelineStage(
loss_names=["imitation_loss"],
teacher_forcing=LinearDecay(
startp=1.0, endp=0.0, steps=total_train_steps // 2,
),
max_stage_steps=total_train_steps,
)
],
num_mini_batch=loss_info["num_mini_batch"],
update_repeats=loss_info["update_repeats"],
total_train_steps=total_train_steps,
)
| ask4help-main | projects/babyai_baselines/experiments/go_to_local/dagger.py |
ask4help-main | projects/babyai_baselines/experiments/go_to_local/__init__.py |
|
import torch
from allenact.utils.experiment_utils import PipelineStage, LinearDecay
from projects.babyai_baselines.experiments.go_to_local.base import (
BaseBabyAIGoToLocalExperimentConfig,
)
class BCTeacherForcingBabyAIGoToLocalExperimentConfig(
BaseBabyAIGoToLocalExperimentConfig
):
"""Behavior clone with teacher forcing."""
USE_EXPERT = True
GPU_ID = 0 if torch.cuda.is_available() else None
@classmethod
def METRIC_ACCUMULATE_INTERVAL(cls):
return 1
@classmethod
def tag(cls):
return "BabyAIGoToLocalBCTeacherForcing"
@classmethod
def training_pipeline(cls, **kwargs):
total_train_steps = cls.TOTAL_IL_TRAIN_STEPS
ppo_info = cls.rl_loss_default("ppo", steps=-1)
imitation_info = cls.rl_loss_default("imitation")
return cls._training_pipeline(
named_losses={"imitation_loss": imitation_info["loss"],},
pipeline_stages=[
PipelineStage(
loss_names=["imitation_loss"],
teacher_forcing=LinearDecay(
startp=1.0, endp=1.0, steps=total_train_steps,
),
max_stage_steps=total_train_steps,
),
],
num_mini_batch=min(
info["num_mini_batch"] for info in [ppo_info, imitation_info]
),
update_repeats=min(
info["update_repeats"] for info in [ppo_info, imitation_info]
),
total_train_steps=total_train_steps,
)
| ask4help-main | projects/babyai_baselines/experiments/go_to_local/bc_teacher_forcing.py |
import torch
from .bc_teacher_forcing import BCTeacherForcingBabyAIGoToLocalExperimentConfig
class DistributedBCTeacherForcingBabyAIGoToLocalExperimentConfig(
BCTeacherForcingBabyAIGoToLocalExperimentConfig
):
"""Distributed behavior clone with teacher forcing."""
USE_EXPERT = True
GPU_ID = 0 if torch.cuda.is_available() else None
@classmethod
def METRIC_ACCUMULATE_INTERVAL(cls):
return 1
@classmethod
def tag(cls):
return "BabyAIGoToLocalBCTeacherForcingDistributed"
@classmethod
def machine_params(
cls, mode="train", gpu_id="default", n_train_processes="default", **kwargs
):
res = super().machine_params(mode, gpu_id, n_train_processes, **kwargs)
if res["nprocesses"] > 0 and torch.cuda.is_available():
ngpu_to_use = min(torch.cuda.device_count(), 2)
res["nprocesses"] = [res["nprocesses"] // ngpu_to_use] * ngpu_to_use
res["gpu_ids"] = list(range(ngpu_to_use))
return res
| ask4help-main | projects/babyai_baselines/experiments/go_to_local/distributed_bc_teacher_forcing.py |
import torch
from allenact.utils.experiment_utils import PipelineStage
from projects.babyai_baselines.experiments.go_to_local.base import (
BaseBabyAIGoToLocalExperimentConfig,
)
class PPOBabyAIGoToLocalExperimentConfig(BaseBabyAIGoToLocalExperimentConfig):
"""PPO only."""
NUM_TRAIN_SAMPLERS: int = (
128 * 12
if torch.cuda.is_available()
else BaseBabyAIGoToLocalExperimentConfig.NUM_TRAIN_SAMPLERS
)
ROLLOUT_STEPS: int = 32
USE_LR_DECAY = False
DEFAULT_LR = 1e-4
@classmethod
def tag(cls):
return "BabyAIGoToLocalPPO"
@classmethod
def training_pipeline(cls, **kwargs):
total_train_steps = cls.TOTAL_RL_TRAIN_STEPS
ppo_info = cls.rl_loss_default("ppo", steps=total_train_steps)
return cls._training_pipeline(
named_losses={"ppo_loss": ppo_info["loss"],},
pipeline_stages=[
PipelineStage(
loss_names=["ppo_loss"], max_stage_steps=total_train_steps,
),
],
num_mini_batch=ppo_info["num_mini_batch"],
update_repeats=ppo_info["update_repeats"],
total_train_steps=total_train_steps,
)
| ask4help-main | projects/babyai_baselines/experiments/go_to_local/ppo.py |
import torch
from allenact.utils.experiment_utils import PipelineStage
from projects.babyai_baselines.experiments.go_to_local.base import (
BaseBabyAIGoToLocalExperimentConfig,
)
class A2CBabyAIGoToLocalExperimentConfig(BaseBabyAIGoToLocalExperimentConfig):
"""A2C only."""
NUM_TRAIN_SAMPLERS: int = (
128 * 6
if torch.cuda.is_available()
else BaseBabyAIGoToLocalExperimentConfig.NUM_TRAIN_SAMPLERS
)
ROLLOUT_STEPS: int = 16
USE_LR_DECAY = False
DEFAULT_LR = 1e-4
@classmethod
def tag(cls):
return "BabyAIGoToLocalA2C"
@classmethod
def training_pipeline(cls, **kwargs):
total_training_steps = cls.TOTAL_RL_TRAIN_STEPS
a2c_info = cls.rl_loss_default("a2c", steps=total_training_steps)
return cls._training_pipeline(
named_losses={"a2c_loss": a2c_info["loss"],},
pipeline_stages=[
PipelineStage(
loss_names=["a2c_loss"], max_stage_steps=total_training_steps,
),
],
num_mini_batch=a2c_info["num_mini_batch"],
update_repeats=a2c_info["update_repeats"],
total_train_steps=total_training_steps,
)
| ask4help-main | projects/babyai_baselines/experiments/go_to_local/a2c.py |
from abc import ABC
from typing import Dict, List, Optional, Union, Any, cast
import gym
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim.lr_scheduler import LambdaLR
from allenact.base_abstractions.misc import Loss
from allenact.base_abstractions.sensor import SensorSuite
from allenact.utils.experiment_utils import (
Builder,
LinearDecay,
PipelineStage,
TrainingPipeline,
)
from allenact_plugins.babyai_plugin.babyai_models import BabyAIRecurrentACModel
from allenact_plugins.babyai_plugin.babyai_tasks import BabyAITask
from projects.babyai_baselines.experiments.base import BaseBabyAIExperimentConfig
class BaseBabyAIGoToLocalExperimentConfig(BaseBabyAIExperimentConfig, ABC):
"""Base experimental config."""
LEVEL: Optional[str] = "BabyAI-GoToLocal-v0"
TOTAL_RL_TRAIN_STEPS = int(15e6)
TOTAL_IL_TRAIN_STEPS = int(7.5e6)
ROLLOUT_STEPS: int = 128
NUM_TRAIN_SAMPLERS: int = 128 if torch.cuda.is_available() else 4
PPO_NUM_MINI_BATCH = 4
NUM_CKPTS_TO_SAVE = 20
NUM_TEST_TASKS: int = 1000
USE_LR_DECAY: bool = True
# ARCH = "cnn1"
# ARCH = "cnn2"
ARCH = "expert_filmcnn"
USE_INSTR = True
INSTR_LEN: int = 5
INCLUDE_AUXILIARY_HEAD = False
@classmethod
def METRIC_ACCUMULATE_INTERVAL(cls):
return cls.NUM_TRAIN_SAMPLERS * 64
@classmethod
def _training_pipeline( # type:ignore
cls,
named_losses: Dict[str, Union[Loss, Builder]],
pipeline_stages: List[PipelineStage],
num_mini_batch: int,
update_repeats: int,
total_train_steps: int,
lr: Optional[float] = None,
):
lr = cls.DEFAULT_LR
num_steps = cls.ROLLOUT_STEPS
metric_accumulate_interval = (
cls.METRIC_ACCUMULATE_INTERVAL()
) # Log every 10 max length tasks
save_interval = int(total_train_steps / cls.NUM_CKPTS_TO_SAVE)
gamma = 0.99
use_gae = "reinforce_loss" not in named_losses
gae_lambda = 0.99
max_grad_norm = 0.5
return TrainingPipeline(
save_interval=save_interval,
metric_accumulate_interval=metric_accumulate_interval,
optimizer_builder=Builder(cast(optim.Optimizer, optim.Adam), dict(lr=lr)),
num_mini_batch=num_mini_batch,
update_repeats=update_repeats,
max_grad_norm=max_grad_norm,
num_steps=num_steps,
named_losses=named_losses,
gamma=gamma,
use_gae=use_gae,
gae_lambda=gae_lambda,
advance_scene_rollout_period=None,
should_log=cls.SHOULD_LOG,
pipeline_stages=pipeline_stages,
lr_scheduler_builder=Builder(
LambdaLR, {"lr_lambda": LinearDecay(steps=total_train_steps)} # type: ignore
)
if cls.USE_LR_DECAY
else None,
)
@classmethod
def create_model(cls, **kwargs) -> nn.Module:
sensors = cls.get_sensors()
return BabyAIRecurrentACModel(
action_space=gym.spaces.Discrete(len(BabyAITask.class_action_names())),
observation_space=SensorSuite(sensors).observation_spaces,
use_instr=cls.USE_INSTR,
use_memory=True,
arch=cls.ARCH,
instr_dim=256,
lang_model="attgru",
memory_dim=2048,
include_auxiliary_head=cls.INCLUDE_AUXILIARY_HEAD,
)
def valid_task_sampler_args(
self,
process_ind: int,
total_processes: int,
devices: Optional[List[int]] = None,
seeds: Optional[List[int]] = None,
deterministic_cudnn: bool = False,
) -> Dict[str, Any]:
raise RuntimeError("No validation processes for these tasks")
| ask4help-main | projects/babyai_baselines/experiments/go_to_local/base.py |
from allenact.utils.experiment_utils import PipelineStage
from projects.babyai_baselines.experiments.go_to_obj.base import (
BaseBabyAIGoToObjExperimentConfig,
)
class PPOBabyAIGoToObjExperimentConfig(BaseBabyAIGoToObjExperimentConfig):
"""Behavior clone then PPO."""
USE_EXPERT = True
@classmethod
def tag(cls):
return "BabyAIGoToObjBC"
@classmethod
def training_pipeline(cls, **kwargs):
total_train_steps = cls.TOTAL_IL_TRAIN_STEPS
ppo_info = cls.rl_loss_default("ppo", steps=-1)
imitation_info = cls.rl_loss_default("imitation")
return cls._training_pipeline(
named_losses={"imitation_loss": imitation_info["loss"],},
pipeline_stages=[
PipelineStage(
loss_names=["imitation_loss"], max_stage_steps=total_train_steps,
),
],
num_mini_batch=min(
info["num_mini_batch"] for info in [ppo_info, imitation_info]
),
update_repeats=min(
info["update_repeats"] for info in [ppo_info, imitation_info]
),
total_train_steps=total_train_steps,
)
| ask4help-main | projects/babyai_baselines/experiments/go_to_obj/bc.py |
from allenact.utils.experiment_utils import PipelineStage, LinearDecay
from projects.babyai_baselines.experiments.go_to_obj.base import (
BaseBabyAIGoToObjExperimentConfig,
)
class DaggerBabyAIGoToObjExperimentConfig(BaseBabyAIGoToObjExperimentConfig):
"""Find goal in lighthouse env using imitation learning.
Training with Dagger.
"""
USE_EXPERT = True
@classmethod
def tag(cls):
return "BabyAIGoToObjDagger"
@classmethod
def training_pipeline(cls, **kwargs):
total_train_steps = cls.TOTAL_IL_TRAIN_STEPS
loss_info = cls.rl_loss_default("imitation")
return cls._training_pipeline(
named_losses={"imitation_loss": loss_info["loss"]},
pipeline_stages=[
PipelineStage(
loss_names=["imitation_loss"],
teacher_forcing=LinearDecay(
startp=1.0, endp=0.0, steps=total_train_steps // 2,
),
max_stage_steps=total_train_steps,
)
],
num_mini_batch=loss_info["num_mini_batch"],
update_repeats=loss_info["update_repeats"],
total_train_steps=total_train_steps,
)
| ask4help-main | projects/babyai_baselines/experiments/go_to_obj/dagger.py |
ask4help-main | projects/babyai_baselines/experiments/go_to_obj/__init__.py |
|
from allenact.utils.experiment_utils import PipelineStage, LinearDecay
from projects.babyai_baselines.experiments.go_to_obj.base import (
BaseBabyAIGoToObjExperimentConfig,
)
class PPOBabyAIGoToObjExperimentConfig(BaseBabyAIGoToObjExperimentConfig):
"""Behavior clone (with teacher forcing) then PPO."""
USE_EXPERT = True
@classmethod
def tag(cls):
return "BabyAIGoToObjBCTeacherForcing"
@classmethod
def training_pipeline(cls, **kwargs):
total_train_steps = cls.TOTAL_IL_TRAIN_STEPS
ppo_info = cls.rl_loss_default("ppo", steps=-1)
imitation_info = cls.rl_loss_default("imitation")
return cls._training_pipeline(
named_losses={"imitation_loss": imitation_info["loss"],},
pipeline_stages=[
PipelineStage(
loss_names=["imitation_loss"],
teacher_forcing=LinearDecay(
startp=1.0, endp=1.0, steps=total_train_steps,
),
max_stage_steps=total_train_steps,
),
],
num_mini_batch=min(
info["num_mini_batch"] for info in [ppo_info, imitation_info]
),
update_repeats=min(
info["update_repeats"] for info in [ppo_info, imitation_info]
),
total_train_steps=total_train_steps,
)
| ask4help-main | projects/babyai_baselines/experiments/go_to_obj/bc_teacher_forcing.py |
from allenact.utils.experiment_utils import PipelineStage
from projects.babyai_baselines.experiments.go_to_obj.base import (
BaseBabyAIGoToObjExperimentConfig,
)
class PPOBabyAIGoToObjExperimentConfig(BaseBabyAIGoToObjExperimentConfig):
"""PPO only."""
@classmethod
def tag(cls):
return "BabyAIGoToObjPPO"
@classmethod
def training_pipeline(cls, **kwargs):
total_train_steps = cls.TOTAL_RL_TRAIN_STEPS
ppo_info = cls.rl_loss_default("ppo", steps=total_train_steps)
return cls._training_pipeline(
named_losses={"ppo_loss": ppo_info["loss"],},
pipeline_stages=[
PipelineStage(
loss_names=["ppo_loss"], max_stage_steps=total_train_steps,
),
],
num_mini_batch=ppo_info["num_mini_batch"],
update_repeats=ppo_info["update_repeats"],
total_train_steps=total_train_steps,
)
| ask4help-main | projects/babyai_baselines/experiments/go_to_obj/ppo.py |
from allenact.utils.experiment_utils import PipelineStage
from projects.babyai_baselines.experiments.go_to_obj.base import (
BaseBabyAIGoToObjExperimentConfig,
)
class A2CBabyAIGoToObjExperimentConfig(BaseBabyAIGoToObjExperimentConfig):
"""A2C only."""
TOTAL_RL_TRAIN_STEPS = int(1e5)
@classmethod
def tag(cls):
return "BabyAIGoToObjA2C"
@classmethod
def training_pipeline(cls, **kwargs):
total_training_steps = cls.TOTAL_RL_TRAIN_STEPS
a2c_info = cls.rl_loss_default("a2c", steps=total_training_steps)
return cls._training_pipeline(
named_losses={"a2c_loss": a2c_info["loss"],},
pipeline_stages=[
PipelineStage(
loss_names=["a2c_loss"], max_stage_steps=total_training_steps,
),
],
num_mini_batch=a2c_info["num_mini_batch"],
update_repeats=a2c_info["update_repeats"],
total_train_steps=total_training_steps,
)
| ask4help-main | projects/babyai_baselines/experiments/go_to_obj/a2c.py |
from abc import ABC
from typing import Dict, List, Optional, Union, cast
import gym
import torch.nn as nn
import torch.optim as optim
from torch.optim.lr_scheduler import LambdaLR
from allenact.base_abstractions.misc import Loss
from allenact.base_abstractions.sensor import SensorSuite
from allenact.utils.experiment_utils import (
Builder,
LinearDecay,
PipelineStage,
TrainingPipeline,
)
from allenact_plugins.babyai_plugin.babyai_models import BabyAIRecurrentACModel
from allenact_plugins.babyai_plugin.babyai_tasks import BabyAITask
from projects.babyai_baselines.experiments.base import BaseBabyAIExperimentConfig
class BaseBabyAIGoToObjExperimentConfig(BaseBabyAIExperimentConfig, ABC):
"""Base experimental config."""
LEVEL: Optional[str] = "BabyAI-GoToObj-v0"
TOTAL_RL_TRAIN_STEPS = int(5e4)
TOTAL_IL_TRAIN_STEPS = int(2e4)
ROLLOUT_STEPS: int = 32
NUM_TRAIN_SAMPLERS: int = 16
PPO_NUM_MINI_BATCH = 2
NUM_TEST_TASKS: int = 50
USE_LR_DECAY: bool = False
DEFAULT_LR = 1e-3
ARCH = "cnn1"
# ARCH = "cnn2"
# ARCH = "expert_filmcnn"
USE_INSTR = False
INSTR_LEN: int = -1
@classmethod
def METRIC_ACCUMULATE_INTERVAL(cls):
return cls.NUM_TRAIN_SAMPLERS * 128
@classmethod
def _training_pipeline( # type:ignore
cls,
named_losses: Dict[str, Union[Loss, Builder]],
pipeline_stages: List[PipelineStage],
num_mini_batch: int,
update_repeats: int,
total_train_steps: int,
lr: Optional[float] = None,
):
lr = cls.DEFAULT_LR
num_steps = cls.ROLLOUT_STEPS
metric_accumulate_interval = (
cls.METRIC_ACCUMULATE_INTERVAL()
) # Log every 10 max length tasks
save_interval = 2 ** 31
gamma = 0.99
use_gae = "reinforce_loss" not in named_losses
gae_lambda = 0.99
max_grad_norm = 0.5
return TrainingPipeline(
save_interval=save_interval,
metric_accumulate_interval=metric_accumulate_interval,
optimizer_builder=Builder(cast(optim.Optimizer, optim.Adam), dict(lr=lr)),
num_mini_batch=num_mini_batch,
update_repeats=update_repeats,
max_grad_norm=max_grad_norm,
num_steps=num_steps,
named_losses=named_losses,
gamma=gamma,
use_gae=use_gae,
gae_lambda=gae_lambda,
advance_scene_rollout_period=None,
should_log=cls.SHOULD_LOG,
pipeline_stages=pipeline_stages,
lr_scheduler_builder=Builder(
LambdaLR, {"lr_lambda": LinearDecay(steps=total_train_steps)} # type: ignore
)
if cls.USE_LR_DECAY
else None,
)
@classmethod
def create_model(cls, **kwargs) -> nn.Module:
sensors = cls.get_sensors()
return BabyAIRecurrentACModel(
action_space=gym.spaces.Discrete(len(BabyAITask.class_action_names())),
observation_space=SensorSuite(sensors).observation_spaces,
use_instr=cls.USE_INSTR,
use_memory=True,
arch=cls.ARCH,
instr_dim=8,
lang_model="gru",
memory_dim=128,
)
| ask4help-main | projects/babyai_baselines/experiments/go_to_obj/base.py |
ask4help-main | projects/pointnav_baselines/__init__.py |
|
from abc import ABC
from typing import Optional, Sequence, Union
from allenact.base_abstractions.experiment_config import ExperimentConfig
from allenact.base_abstractions.preprocessor import Preprocessor
from allenact.base_abstractions.sensor import Sensor
from allenact.utils.experiment_utils import Builder
class PointNavBaseConfig(ExperimentConfig, ABC):
"""An Object Navigation experiment configuration in iThor."""
ADVANCE_SCENE_ROLLOUT_PERIOD: Optional[int] = None
PREPROCESSORS: Sequence[Union[Preprocessor, Builder[Preprocessor]]] = tuple()
SENSORS: Optional[Sequence[Sensor]] = None
STEP_SIZE = 0.25
ROTATION_DEGREES = 30.0
DISTANCE_TO_GOAL = 0.2
STOCHASTIC = True
CAMERA_WIDTH = 400
CAMERA_HEIGHT = 300
SCREEN_SIZE = 224
MAX_STEPS = 500
def __init__(self):
self.REWARD_CONFIG = {
"step_penalty": -0.01,
"goal_success_reward": 10.0,
"failed_stop_reward": 0.0,
"reached_max_steps_reward": 0.0,
"shaping_weight": 1.0,
}
| ask4help-main | projects/pointnav_baselines/experiments/pointnav_base.py |
ask4help-main | projects/pointnav_baselines/experiments/__init__.py |
|
from abc import ABC
import torch
import torch.optim as optim
from torch.optim.lr_scheduler import LambdaLR
from allenact.algorithms.onpolicy_sync.losses import PPO
from allenact.algorithms.onpolicy_sync.losses.ppo import PPOConfig
from allenact.utils.experiment_utils import (
Builder,
PipelineStage,
TrainingPipeline,
LinearDecay,
)
from projects.pointnav_baselines.experiments.pointnav_base import PointNavBaseConfig
class PointNavHabitatMixInPPOConfig(PointNavBaseConfig, ABC):
"""The base config for all iTHOR PPO PointNav experiments."""
NUM_STEPS = 128
LR = 2.5e-4
@classmethod
def training_pipeline(cls, **kwargs):
ppo_steps = int(75000000)
lr = cls.LR
num_mini_batch = 1
update_repeats = 3
num_steps = cls.NUM_STEPS
save_interval = 5000000
log_interval = 10000 if torch.cuda.is_available() else 1
gamma = 0.99
use_gae = True
gae_lambda = 0.95
max_grad_norm = 0.5
return TrainingPipeline(
save_interval=save_interval,
metric_accumulate_interval=log_interval,
optimizer_builder=Builder(optim.Adam, dict(lr=lr)),
num_mini_batch=num_mini_batch,
update_repeats=update_repeats,
max_grad_norm=max_grad_norm,
num_steps=num_steps,
named_losses={"ppo_loss": PPO(**PPOConfig)},
gamma=gamma,
use_gae=use_gae,
gae_lambda=gae_lambda,
advance_scene_rollout_period=cls.ADVANCE_SCENE_ROLLOUT_PERIOD,
pipeline_stages=[
PipelineStage(loss_names=["ppo_loss"], max_stage_steps=ppo_steps)
],
lr_scheduler_builder=Builder(
LambdaLR, {"lr_lambda": LinearDecay(steps=ppo_steps)}
),
)
| ask4help-main | projects/pointnav_baselines/experiments/pointnav_habitat_mixin_ddppo.py |
from abc import ABC
import gym
import torch.nn as nn
from allenact.embodiedai.sensors.vision_sensors import RGBSensor, DepthSensor
# fmt: off
try:
# Habitat may not be installed, just create a fake class here in that case
from allenact_plugins.habitat_plugin.habitat_sensors import TargetCoordinatesSensorHabitat
except ImportError:
class TargetCoordinatesSensorHabitat: #type:ignore
pass
# fmt: on
from allenact_plugins.robothor_plugin.robothor_sensors import GPSCompassSensorRoboThor
from allenact_plugins.robothor_plugin.robothor_tasks import PointNavTask
from projects.pointnav_baselines.experiments.pointnav_base import PointNavBaseConfig
from projects.pointnav_baselines.models.point_nav_models import PointNavActorCritic
class PointNavMixInSimpleConvGRUConfig(PointNavBaseConfig, ABC):
"""The base config for all iTHOR PPO PointNav experiments."""
# TODO only tested in roboTHOR Depth
BACKBONE = ( # choose one
"gnresnet18"
# "simple_cnn"
)
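    # create_model looks up the RGB / depth / goal-coordinate sensor uuids from cls.SENSORS,
    # so the same mixin serves both RoboTHOR (GPSCompass) and Habitat (TargetCoordinates) configs.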
@classmethod
def create_model(cls, **kwargs) -> nn.Module:
rgb_uuid = next((s.uuid for s in cls.SENSORS if isinstance(s, RGBSensor)), None)
depth_uuid = next(
(s.uuid for s in cls.SENSORS if isinstance(s, DepthSensor)), None
)
goal_sensor_uuid = next(
(
s.uuid
for s in cls.SENSORS
if isinstance(
s, (GPSCompassSensorRoboThor, TargetCoordinatesSensorHabitat)
)
),
None,
)
return PointNavActorCritic(
            # Env and Task
action_space=gym.spaces.Discrete(len(PointNavTask.class_action_names())),
observation_space=kwargs["sensor_preprocessor_graph"].observation_spaces,
rgb_uuid=rgb_uuid,
depth_uuid=depth_uuid,
goal_sensor_uuid=goal_sensor_uuid,
# RNN
hidden_size=228
if cls.MULTIPLE_BELIEFS and len(cls.AUXILIARY_UUIDS) > 1
else 512,
num_rnn_layers=1,
rnn_type="GRU",
add_prev_actions=cls.ADD_PREV_ACTIONS,
action_embed_size=4,
# CNN
backbone=cls.BACKBONE,
resnet_baseplanes=32,
embed_coordinates=False,
coordinate_dims=2,
# Aux
auxiliary_uuids=cls.AUXILIARY_UUIDS,
multiple_beliefs=cls.MULTIPLE_BELIEFS,
beliefs_fusion=cls.BELIEF_FUSION,
)
| ask4help-main | projects/pointnav_baselines/experiments/pointnav_mixin_simpleconvgru.py |
from abc import ABC
from typing import Dict, Tuple
import torch
import torch.optim as optim
from torch.optim.lr_scheduler import LambdaLR
from allenact.algorithms.onpolicy_sync.losses.abstract_loss import (
AbstractActorCriticLoss,
)
from allenact.algorithms.onpolicy_sync.losses import PPO
from allenact.algorithms.onpolicy_sync.losses.ppo import PPOConfig
from allenact.embodiedai.aux_losses.losses import (
MultiAuxTaskNegEntropyLoss,
InverseDynamicsLoss,
TemporalDistanceLoss,
CPCA1Loss,
CPCA2Loss,
CPCA4Loss,
CPCA8Loss,
CPCA16Loss,
)
# noinspection PyUnresolvedReferences
from allenact.embodiedai.models.fusion_models import (
AverageFusion,
SoftmaxFusion,
AttentiveFusion,
)
from allenact.utils.experiment_utils import (
Builder,
PipelineStage,
TrainingPipeline,
LinearDecay,
)
from projects.pointnav_baselines.experiments.pointnav_base import PointNavBaseConfig
class PointNavThorMixInPPOConfig(PointNavBaseConfig, ABC):
"""The base config for all iTHOR PPO PointNav experiments."""
# selected auxiliary uuids
## if comment all the keys, then it's vanilla DD-PPO
AUXILIARY_UUIDS = [
# InverseDynamicsLoss.UUID,
# TemporalDistanceLoss.UUID,
# CPCA1Loss.UUID,
# CPCA4Loss.UUID,
# CPCA8Loss.UUID,
# CPCA16Loss.UUID,
]
ADD_PREV_ACTIONS = True
MULTIPLE_BELIEFS = False
BELIEF_FUSION = ( # choose one
None
# AttentiveFusion.UUID
# AverageFusion.UUID
# SoftmaxFusion.UUID
)
NORMALIZE_ADVANTAGE = False
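    # training_pipeline combines the PPO loss with any auxiliary losses selected in
    # AUXILIARY_UUIDS (weighted in _update_with_auxiliary_losses) into a single stage.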
def training_pipeline(self, **kwargs):
# PPO
ppo_steps = int(75000000)
lr = 3e-4
num_mini_batch = 1
update_repeats = 4
num_steps = 128
save_interval = 5000000
log_interval = 10000 if torch.cuda.is_available() else 1
gamma = 0.99
use_gae = True
gae_lambda = 0.95
max_grad_norm = 0.5
PPOConfig["normalize_advantage"] = self.NORMALIZE_ADVANTAGE
named_losses = {"ppo_loss": (PPO(**PPOConfig), 1.0)}
named_losses = self._update_with_auxiliary_losses(named_losses)
return TrainingPipeline(
save_interval=save_interval,
metric_accumulate_interval=log_interval,
optimizer_builder=Builder(optim.Adam, dict(lr=lr)),
num_mini_batch=num_mini_batch,
update_repeats=update_repeats,
max_grad_norm=max_grad_norm,
num_steps=num_steps,
named_losses={key: val[0] for key, val in named_losses.items()},
gamma=gamma,
use_gae=use_gae,
gae_lambda=gae_lambda,
advance_scene_rollout_period=self.ADVANCE_SCENE_ROLLOUT_PERIOD,
pipeline_stages=[
PipelineStage(
loss_names=list(named_losses.keys()),
max_stage_steps=ppo_steps,
loss_weights=[val[1] for val in named_losses.values()],
)
],
lr_scheduler_builder=Builder(
LambdaLR, {"lr_lambda": LinearDecay(steps=ppo_steps)}
),
)
@classmethod
def _update_with_auxiliary_losses(cls, named_losses):
# auxliary losses
aux_loss_total_weight = 2.0
# Total losses
total_aux_losses: Dict[str, Tuple[AbstractActorCriticLoss, float]] = {
InverseDynamicsLoss.UUID: (
InverseDynamicsLoss(
subsample_rate=0.2, subsample_min_num=10, # TODO: test its effects
),
0.05 * aux_loss_total_weight, # should times 2
),
TemporalDistanceLoss.UUID: (
TemporalDistanceLoss(
num_pairs=8, epsiode_len_min=5, # TODO: test its effects
),
0.2 * aux_loss_total_weight, # should times 2
),
CPCA1Loss.UUID: (
CPCA1Loss(subsample_rate=0.2,), # TODO: test its effects
0.05 * aux_loss_total_weight, # should times 2
),
CPCA2Loss.UUID: (
CPCA2Loss(subsample_rate=0.2,), # TODO: test its effects
0.05 * aux_loss_total_weight, # should times 2
),
CPCA4Loss.UUID: (
CPCA4Loss(subsample_rate=0.2,), # TODO: test its effects
0.05 * aux_loss_total_weight, # should times 2
),
CPCA8Loss.UUID: (
CPCA8Loss(subsample_rate=0.2,), # TODO: test its effects
0.05 * aux_loss_total_weight, # should times 2
),
CPCA16Loss.UUID: (
CPCA16Loss(subsample_rate=0.2,), # TODO: test its effects
0.05 * aux_loss_total_weight, # should times 2
),
}
named_losses.update(
{uuid: total_aux_losses[uuid] for uuid in cls.AUXILIARY_UUIDS}
)
if cls.MULTIPLE_BELIEFS: # add weight entropy loss automatically
named_losses[MultiAuxTaskNegEntropyLoss.UUID] = (
MultiAuxTaskNegEntropyLoss(cls.AUXILIARY_UUIDS),
0.01,
)
return named_losses
| ask4help-main | projects/pointnav_baselines/experiments/pointnav_thor_mixin_ddppo.py |
from abc import ABC
import torch
import torch.optim as optim
from torch.optim.lr_scheduler import LambdaLR
from allenact.algorithms.onpolicy_sync.losses import PPO
from allenact.algorithms.onpolicy_sync.losses.grouped_action_imitation import (
GroupedActionImitation,
)
from allenact.algorithms.onpolicy_sync.losses.ppo import PPOConfig
from allenact.utils.experiment_utils import (
Builder,
PipelineStage,
TrainingPipeline,
LinearDecay,
)
from allenact_plugins.ithor_plugin.ithor_sensors import TakeEndActionThorNavSensor
from allenact_plugins.robothor_plugin import robothor_constants
from allenact_plugins.robothor_plugin.robothor_tasks import PointNavTask
from projects.pointnav_baselines.experiments.pointnav_thor_base import (
PointNavThorBaseConfig,
)
class PointNavThorMixInPPOAndGBCConfig(PointNavThorBaseConfig, ABC):
"""The base config for all iTHOR PPO PointNav experiments."""
SENSORS = (
TakeEndActionThorNavSensor(
nactions=len(PointNavTask.class_action_names()), uuid="expert_group_action"
),
)
@classmethod
def training_pipeline(cls, **kwargs):
ppo_steps = int(75000000)
lr = 3e-4
num_mini_batch = 1
update_repeats = 4
num_steps = 128
save_interval = 5000000
log_interval = 10000 if torch.cuda.is_available() else 1
gamma = 0.99
use_gae = True
gae_lambda = 0.95
max_grad_norm = 0.5
action_strs = PointNavTask.class_action_names()
non_end_action_inds_set = {
i for i, a in enumerate(action_strs) if a != robothor_constants.END
}
end_action_ind_set = {action_strs.index(robothor_constants.END)}
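        # Actions are partitioned into two groups ({all non-END actions}, {END}); the
        # GroupedActionImitation loss supervises the expert's choice of group (END vs.
        # anything else) rather than a particular action within a group.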
return TrainingPipeline(
save_interval=save_interval,
metric_accumulate_interval=log_interval,
optimizer_builder=Builder(optim.Adam, dict(lr=lr)),
num_mini_batch=num_mini_batch,
update_repeats=update_repeats,
max_grad_norm=max_grad_norm,
num_steps=num_steps,
named_losses={
"ppo_loss": PPO(**PPOConfig),
"grouped_action_imitation": GroupedActionImitation(
nactions=len(PointNavTask.class_action_names()),
action_groups=[non_end_action_inds_set, end_action_ind_set],
),
},
gamma=gamma,
use_gae=use_gae,
gae_lambda=gae_lambda,
advance_scene_rollout_period=cls.ADVANCE_SCENE_ROLLOUT_PERIOD,
pipeline_stages=[
PipelineStage(
loss_names=["ppo_loss", "grouped_action_imitation"],
max_stage_steps=ppo_steps,
)
],
lr_scheduler_builder=Builder(
LambdaLR, {"lr_lambda": LinearDecay(steps=ppo_steps)}
),
)
| ask4help-main | projects/pointnav_baselines/experiments/pointnav_thor_mixin_ddppo_and_gbc.py |
import glob
import os
import platform
from abc import ABC
from math import ceil
from typing import Dict, Any, List, Optional, Sequence
import ai2thor
import gym
import numpy as np
import torch
from packaging import version
from allenact.base_abstractions.experiment_config import MachineParams
from allenact.base_abstractions.preprocessor import SensorPreprocessorGraph
from allenact.base_abstractions.sensor import SensorSuite, ExpertActionSensor
from allenact.base_abstractions.task import TaskSampler
from allenact.utils.experiment_utils import evenly_distribute_count_into_bins
from allenact.utils.system import get_logger
from allenact_plugins.ithor_plugin.ithor_util import get_open_x_displays
from allenact_plugins.robothor_plugin.robothor_sensors import DepthSensorThor
from allenact_plugins.robothor_plugin.robothor_task_samplers import (
PointNavDatasetTaskSampler,
)
from allenact_plugins.robothor_plugin.robothor_tasks import ObjectNavTask
from projects.pointnav_baselines.experiments.pointnav_base import PointNavBaseConfig
if ai2thor.__version__ not in ["0.0.1", None] and version.parse(
ai2thor.__version__
) < version.parse("2.7.2"):
raise ImportError(
"To run the PointNav baseline experiments you must use"
" ai2thor version 2.7.1 or higher."
)
class PointNavThorBaseConfig(PointNavBaseConfig, ABC):
"""The base config for all iTHOR PointNav experiments."""
NUM_PROCESSES: Optional[int] = None
TRAIN_GPU_IDS = list(range(torch.cuda.device_count()))
VALID_GPU_IDS = [torch.cuda.device_count() - 1]
TEST_GPU_IDS = [torch.cuda.device_count() - 1]
TRAIN_DATASET_DIR: Optional[str] = None
VAL_DATASET_DIR: Optional[str] = None
TARGET_TYPES: Optional[Sequence[str]] = None
def __init__(self):
super().__init__()
self.ENV_ARGS = dict(
width=self.CAMERA_WIDTH,
height=self.CAMERA_HEIGHT,
continuousMode=True,
applyActionNoise=self.STOCHASTIC,
rotateStepDegrees=self.ROTATION_DEGREES,
gridSize=self.STEP_SIZE,
snapToGrid=False,
agentMode="locobot",
include_private_scenes=False,
renderDepthImage=any(isinstance(s, DepthSensorThor) for s in self.SENSORS),
)
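    # machine_params spreads NUM_PROCESSES training samplers over TRAIN_GPU_IDS and only
    # builds the SensorPreprocessorGraph for modes that will actually run processes.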
def machine_params(self, mode="train", **kwargs):
sampler_devices: Sequence[int] = []
if mode == "train":
workers_per_device = 1
gpu_ids = (
[]
if not torch.cuda.is_available()
else self.TRAIN_GPU_IDS * workers_per_device
)
nprocesses = (
1
if not torch.cuda.is_available()
else evenly_distribute_count_into_bins(self.NUM_PROCESSES, len(gpu_ids))
)
sampler_devices = self.TRAIN_GPU_IDS
elif mode == "valid":
nprocesses = 1 if torch.cuda.is_available() else 0
gpu_ids = [] if not torch.cuda.is_available() else self.VALID_GPU_IDS
elif mode == "test":
nprocesses = 10
gpu_ids = [] if not torch.cuda.is_available() else self.TEST_GPU_IDS
else:
raise NotImplementedError("mode must be 'train', 'valid', or 'test'.")
sensor_preprocessor_graph = (
SensorPreprocessorGraph(
source_observation_spaces=SensorSuite(self.SENSORS).observation_spaces,
preprocessors=self.PREPROCESSORS,
)
if mode == "train"
or (
(isinstance(nprocesses, int) and nprocesses > 0)
or (isinstance(nprocesses, Sequence) and sum(nprocesses) > 0)
)
else None
)
return MachineParams(
nprocesses=nprocesses,
devices=gpu_ids,
sampler_devices=sampler_devices
if mode == "train"
else gpu_ids, # ignored with > 1 gpu_ids
sensor_preprocessor_graph=sensor_preprocessor_graph,
)
@classmethod
def make_sampler_fn(cls, **kwargs) -> TaskSampler:
return PointNavDatasetTaskSampler(**kwargs)
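    # _partition_inds splits n scene indices into num_parts contiguous, near-equal chunks;
    # _get_sampler_args_for_scene_split assigns one chunk of scenes to each process.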
@staticmethod
def _partition_inds(n: int, num_parts: int):
return np.round(np.linspace(0, n, num_parts + 1, endpoint=True)).astype(
np.int32
)
def _get_sampler_args_for_scene_split(
self,
scenes_dir: str,
process_ind: int,
total_processes: int,
devices: Optional[List[int]],
seeds: Optional[List[int]],
deterministic_cudnn: bool,
include_expert_sensor: bool = True,
) -> Dict[str, Any]:
path = os.path.join(scenes_dir, "*.json.gz")
scenes = [scene.split("/")[-1].split(".")[0] for scene in glob.glob(path)]
if len(scenes) == 0:
raise RuntimeError(
(
"Could find no scene dataset information in directory {}."
" Are you sure you've downloaded them? "
" If not, see https://allenact.org/installation/download-datasets/ information"
" on how this can be done."
).format(scenes_dir)
)
oversample_warning = (
f"Warning: oversampling some of the scenes ({scenes}) to feed all processes ({total_processes})."
" You can avoid this by setting a number of workers divisible by the number of scenes"
)
if total_processes > len(scenes): # oversample some scenes -> bias
if total_processes % len(scenes) != 0:
get_logger().warning(oversample_warning)
scenes = scenes * int(ceil(total_processes / len(scenes)))
scenes = scenes[: total_processes * (len(scenes) // total_processes)]
elif len(scenes) % total_processes != 0:
get_logger().warning(oversample_warning)
inds = self._partition_inds(len(scenes), total_processes)
x_display: Optional[str] = None
if platform.system() == "Linux":
x_displays = get_open_x_displays(throw_error_if_empty=True)
if len([d for d in devices if d != torch.device("cpu")]) > len(x_displays):
get_logger().warning(
f"More GPU devices found than X-displays (devices: `{x_displays}`, x_displays: `{x_displays}`)."
f" This is not necessarily a bad thing but may mean that you're not using GPU memory as"
f" efficiently as possible. Consider following the instructions here:"
f" https://allenact.org/installation/installation-framework/#installation-of-ithor-ithor-plugin"
f" describing how to start an X-display on every GPU."
)
x_display = x_displays[process_ind % len(x_displays)]
return {
"scenes": scenes[inds[process_ind] : inds[process_ind + 1]],
"object_types": self.TARGET_TYPES,
"max_steps": self.MAX_STEPS,
"sensors": [
s
for s in self.SENSORS
if (include_expert_sensor or not isinstance(s, ExpertActionSensor))
],
"action_space": gym.spaces.Discrete(
len(ObjectNavTask.class_action_names())
),
"seed": seeds[process_ind] if seeds is not None else None,
"deterministic_cudnn": deterministic_cudnn,
"rewards_config": self.REWARD_CONFIG,
"env_args": {**self.ENV_ARGS, "x_display": x_display,},
}
def train_task_sampler_args(
self,
process_ind: int,
total_processes: int,
devices: Optional[List[int]] = None,
seeds: Optional[List[int]] = None,
deterministic_cudnn: bool = False,
) -> Dict[str, Any]:
res = self._get_sampler_args_for_scene_split(
os.path.join(self.TRAIN_DATASET_DIR, "episodes"),
process_ind,
total_processes,
devices=devices,
seeds=seeds,
deterministic_cudnn=deterministic_cudnn,
)
res["scene_directory"] = self.TRAIN_DATASET_DIR
res["loop_dataset"] = True
res["allow_flipping"] = True
return res
def valid_task_sampler_args(
self,
process_ind: int,
total_processes: int,
devices: Optional[List[int]] = None,
seeds: Optional[List[int]] = None,
deterministic_cudnn: bool = False,
) -> Dict[str, Any]:
res = self._get_sampler_args_for_scene_split(
os.path.join(self.VAL_DATASET_DIR, "episodes"),
process_ind,
total_processes,
devices=devices,
seeds=seeds,
deterministic_cudnn=deterministic_cudnn,
include_expert_sensor=False,
)
res["scene_directory"] = self.VAL_DATASET_DIR
res["loop_dataset"] = False
return res
def test_task_sampler_args(
self,
process_ind: int,
total_processes: int,
devices: Optional[List[int]] = None,
seeds: Optional[List[int]] = None,
deterministic_cudnn: bool = False,
) -> Dict[str, Any]:
return self.valid_task_sampler_args(
process_ind=process_ind,
total_processes=total_processes,
devices=devices,
seeds=seeds,
deterministic_cudnn=deterministic_cudnn,
)
| ask4help-main | projects/pointnav_baselines/experiments/pointnav_thor_base.py |
from allenact_plugins.ithor_plugin.ithor_sensors import RGBSensorThor
from allenact_plugins.robothor_plugin.robothor_sensors import (
DepthSensorThor,
GPSCompassSensorRoboThor,
)
from projects.pointnav_baselines.experiments.pointnav_mixin_simpleconvgru import (
PointNavMixInSimpleConvGRUConfig,
)
from projects.pointnav_baselines.experiments.pointnav_thor_mixin_ddppo import (
PointNavThorMixInPPOConfig,
)
from projects.pointnav_baselines.experiments.robothor.pointnav_robothor_base import (
PointNavRoboThorBaseConfig,
)
class PointNavRoboThorRGBPPOExperimentConfig(
PointNavRoboThorBaseConfig,
PointNavThorMixInPPOConfig,
PointNavMixInSimpleConvGRUConfig,
):
"""An Point Navigation experiment configuration in RoboThor with RGBD
input."""
SENSORS = [
RGBSensorThor(
height=PointNavRoboThorBaseConfig.SCREEN_SIZE,
width=PointNavRoboThorBaseConfig.SCREEN_SIZE,
use_resnet_normalization=True,
uuid="rgb_lowres",
),
DepthSensorThor(
height=PointNavRoboThorBaseConfig.SCREEN_SIZE,
width=PointNavRoboThorBaseConfig.SCREEN_SIZE,
use_normalization=True,
uuid="depth_lowres",
),
GPSCompassSensorRoboThor(),
]
@classmethod
def tag(cls):
return "Pointnav-RoboTHOR-RGBD-SimpleConv-DDPPO"
| ask4help-main | projects/pointnav_baselines/experiments/robothor/pointnav_robothor_rgbd_simpleconvgru_ddppo.py |
from allenact_plugins.robothor_plugin.robothor_sensors import (
DepthSensorThor,
GPSCompassSensorRoboThor,
)
from projects.pointnav_baselines.experiments.pointnav_mixin_simpleconvgru import (
PointNavMixInSimpleConvGRUConfig,
)
from projects.pointnav_baselines.experiments.pointnav_thor_mixin_ddppo import (
PointNavThorMixInPPOConfig,
)
from projects.pointnav_baselines.experiments.robothor.pointnav_robothor_base import (
PointNavRoboThorBaseConfig,
)
class PointNavRoboThorRGBPPOExperimentConfig(
PointNavRoboThorBaseConfig,
PointNavThorMixInPPOConfig,
PointNavMixInSimpleConvGRUConfig,
):
"""An Point Navigation experiment configuration in RoboTHOR with Depth
input."""
SENSORS = [
DepthSensorThor(
height=PointNavRoboThorBaseConfig.SCREEN_SIZE,
width=PointNavRoboThorBaseConfig.SCREEN_SIZE,
use_normalization=True,
uuid="depth_lowres",
),
GPSCompassSensorRoboThor(),
]
@classmethod
def tag(cls):
return "Pointnav-RoboTHOR-Depth-SimpleConv-DDPPO"
| ask4help-main | projects/pointnav_baselines/experiments/robothor/pointnav_robothor_depth_simpleconvgru_ddppo.py |
ask4help-main | projects/pointnav_baselines/experiments/robothor/__init__.py |
|
from allenact_plugins.ithor_plugin.ithor_sensors import RGBSensorThor
from allenact_plugins.robothor_plugin.robothor_sensors import GPSCompassSensorRoboThor
from projects.pointnav_baselines.experiments.pointnav_mixin_simpleconvgru import (
PointNavMixInSimpleConvGRUConfig,
)
from projects.pointnav_baselines.experiments.pointnav_thor_mixin_ddppo import (
PointNavThorMixInPPOConfig,
)
from projects.pointnav_baselines.experiments.robothor.pointnav_robothor_base import (
PointNavRoboThorBaseConfig,
)
class PointNavRoboThorRGBPPOExperimentConfig(
PointNavRoboThorBaseConfig,
PointNavThorMixInPPOConfig,
PointNavMixInSimpleConvGRUConfig,
):
"""An Point Navigation experiment configuration in RoboThor with RGB
input."""
SENSORS = [
RGBSensorThor(
height=PointNavRoboThorBaseConfig.SCREEN_SIZE,
width=PointNavRoboThorBaseConfig.SCREEN_SIZE,
use_resnet_normalization=True,
uuid="rgb_lowres",
),
GPSCompassSensorRoboThor(),
]
@classmethod
def tag(cls):
return "Pointnav-RoboTHOR-RGB-SimpleConv-DDPPO"
| ask4help-main | projects/pointnav_baselines/experiments/robothor/pointnav_robothor_rgb_simpleconvgru_ddppo.py |
from allenact_plugins.ithor_plugin.ithor_sensors import RGBSensorThor
from allenact_plugins.robothor_plugin.robothor_sensors import GPSCompassSensorRoboThor
from projects.pointnav_baselines.experiments.pointnav_mixin_simpleconvgru import (
PointNavMixInSimpleConvGRUConfig,
)
from projects.pointnav_baselines.experiments.pointnav_thor_mixin_ddppo_and_gbc import (
PointNavThorMixInPPOAndGBCConfig,
)
from projects.pointnav_baselines.experiments.robothor.pointnav_robothor_base import (
PointNavRoboThorBaseConfig,
)
class PointNavRoboThorRGBPPOExperimentConfig(
PointNavRoboThorBaseConfig,
PointNavThorMixInPPOAndGBCConfig,
PointNavMixInSimpleConvGRUConfig,
):
"""An PointNavigation experiment configuration in RoboThor with RGB
input."""
SENSORS = PointNavThorMixInPPOAndGBCConfig.SENSORS + ( # type:ignore
RGBSensorThor(
height=PointNavRoboThorBaseConfig.SCREEN_SIZE,
width=PointNavRoboThorBaseConfig.SCREEN_SIZE,
use_resnet_normalization=True,
uuid="rgb_lowres",
),
GPSCompassSensorRoboThor(),
)
@classmethod
def tag(cls):
return "Pointnav-RoboTHOR-RGB-SimpleConv-DDPPOAndGBC"
| ask4help-main | projects/pointnav_baselines/experiments/robothor/pointnav_robothor_rgb_simpleconvgru_ddppo_and_gbc.py |
import os
from abc import ABC
from projects.pointnav_baselines.experiments.pointnav_thor_base import (
PointNavThorBaseConfig,
)
class PointNavRoboThorBaseConfig(PointNavThorBaseConfig, ABC):
"""The base config for all iTHOR PointNav experiments."""
NUM_PROCESSES = 60
TRAIN_DATASET_DIR = os.path.join(os.getcwd(), "datasets/robothor-pointnav/train")
VAL_DATASET_DIR = os.path.join(os.getcwd(), "datasets/robothor-pointnav/val")
| ask4help-main | projects/pointnav_baselines/experiments/robothor/pointnav_robothor_base.py |
from allenact_plugins.habitat_plugin.habitat_sensors import (
RGBSensorHabitat,
TargetCoordinatesSensorHabitat,
)
from projects.pointnav_baselines.experiments.habitat.pointnav_habitat_base import (
PointNavHabitatBaseConfig,
)
from projects.pointnav_baselines.experiments.pointnav_habitat_mixin_ddppo import (
PointNavHabitatMixInPPOConfig,
)
from projects.pointnav_baselines.experiments.pointnav_mixin_simpleconvgru import (
PointNavMixInSimpleConvGRUConfig,
)
class PointNavHabitatDepthDeterministiSimpleConvGRUDDPPOExperimentConfig(
PointNavHabitatBaseConfig,
PointNavHabitatMixInPPOConfig,
PointNavMixInSimpleConvGRUConfig,
):
"""An Point Navigation experiment configuration in Habitat with Depth
input."""
SENSORS = [
RGBSensorHabitat(
height=PointNavHabitatBaseConfig.SCREEN_SIZE,
width=PointNavHabitatBaseConfig.SCREEN_SIZE,
use_resnet_normalization=True,
),
TargetCoordinatesSensorHabitat(coordinate_dims=2),
]
@classmethod
def tag(cls):
return "Pointnav-Habitat-RGB-SimpleConv-DDPPO"
| ask4help-main | projects/pointnav_baselines/experiments/habitat/pointnav_habitat_rgb_simpleconvgru_ddppo.py |
from allenact_plugins.habitat_plugin.habitat_sensors import (
RGBSensorHabitat,
DepthSensorHabitat,
TargetCoordinatesSensorHabitat,
)
from projects.pointnav_baselines.experiments.habitat.pointnav_habitat_base import (
PointNavHabitatBaseConfig,
)
from projects.pointnav_baselines.experiments.pointnav_habitat_mixin_ddppo import (
PointNavHabitatMixInPPOConfig,
)
from projects.pointnav_baselines.experiments.pointnav_mixin_simpleconvgru import (
PointNavMixInSimpleConvGRUConfig,
)
class PointNavHabitatDepthDeterministiSimpleConvGRUDDPPOExperimentConfig(
PointNavHabitatBaseConfig,
PointNavHabitatMixInPPOConfig,
PointNavMixInSimpleConvGRUConfig,
):
"""An Point Navigation experiment configuration in Habitat with RGBD
input."""
SENSORS = [
RGBSensorHabitat(
height=PointNavHabitatBaseConfig.SCREEN_SIZE,
width=PointNavHabitatBaseConfig.SCREEN_SIZE,
use_resnet_normalization=True,
),
DepthSensorHabitat(
height=PointNavHabitatBaseConfig.SCREEN_SIZE,
width=PointNavHabitatBaseConfig.SCREEN_SIZE,
use_normalization=True,
),
TargetCoordinatesSensorHabitat(coordinate_dims=2),
]
@classmethod
def tag(cls):
return "Pointnav-Habitat-RGBD-SimpleConv-DDPPO"
| ask4help-main | projects/pointnav_baselines/experiments/habitat/pointnav_habitat_rgbd_simpleconvgru_ddppo.py |
ask4help-main | projects/pointnav_baselines/experiments/habitat/__init__.py |