python_code | repo_name | file_path |
---|---|---|
from .interpreter import Interpreter
from .get_memory_handler import GetMemoryHandler
from .put_memory_handler import PutMemoryHandler
__all__ = ["GetMemoryHandler", "Interpreter", "PutMemoryHandler"]
| craftassist-master | python/craftassist/dialogue_objects/__init__.py |
from base_agent.dialogue_objects import ConditionInterpreter
from mc_stop_condition import AgentAdjacentStopCondition
from .block_helpers import get_block_type
# this will become unnecessary with distance between
class MCConditionInterpreter(ConditionInterpreter):
def __init__(self):
super().__init__()
self.condition_types["ADJACENT_TO_BLOCK_TYPE"] = self.interpret_adjacent_block
def interpret_adjacent_block(self, interpreter, speaker, d):
block_type = d["block_type"]
bid, meta = get_block_type(block_type)
return AgentAdjacentStopCondition(interpreter.agent, bid)
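# A hedged illustration (not part of the original file): based on the keys read above
# and on the commented-out interpret_stop_condition in interpreter_helper.py, a stop
# condition dict handled by this interpreter plausibly looks like
#     {"condition_type": "ADJACENT_TO_BLOCK_TYPE", "block_type": "cobblestone"}
# where "cobblestone" is a made-up block-type span resolved by get_block_type.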
| craftassist-master | python/craftassist/dialogue_objects/condition_helper.py |
"""
Copyright (c) Facebook, Inc. and its affiliates.
"""
from typing import Dict, Tuple, Any, Optional, Sequence
from base_agent.dialogue_objects import DialogueObject
from .interpreter_helper import interpret_reference_object, ErrorWithResponse
from base_agent.memory_nodes import MemoryNode, ReferenceObjectNode
from string_lists import ACTION_ING_MAPPING
from tasks import Build
from ttad.generation_dialogues.generate_utils import prepend_a_an
######FIXME TEMPORARY:
from base_agent import post_process_logical_form
class GetMemoryHandler(DialogueObject):
def __init__(self, speaker_name: str, action_dict: Dict, **kwargs):
super().__init__(**kwargs)
self.provisional: Dict = {}
self.speaker_name = speaker_name
self.action_dict = action_dict
def step(self) -> Tuple[Optional[str], Any]:
r = self._step()
self.finished = True
return r
def _step(self) -> Tuple[Optional[str], Any]:
assert self.action_dict["dialogue_type"] == "GET_MEMORY"
filter_type = self.action_dict["filters"]["type"]
if filter_type == "ACTION":
return self.handle_action()
elif filter_type == "AGENT":
return self.handle_agent()
elif filter_type == "REFERENCE_OBJECT":
return self.handle_reference_object()
else:
raise ValueError("Unknown filter_type={}".format(filter_type))
def handle_reference_object(self, voxels_only=False) -> Tuple[Optional[str], Any]:
####FIXME TEMPORARY!!!!
r = post_process_logical_form.fix_reference_object_with_filters(
self.action_dict["filters"]
)
objs = interpret_reference_object(
self, self.speaker_name, r["reference_object"], not_location=True
)
return self.do_answer(objs)
def handle_action(self) -> Tuple[Optional[str], Any]:
# get current action
target_action_type = self.action_dict["filters"].get("target_action_type")
if target_action_type:
task = self.memory.task_stack_find_lowest_instance(target_action_type)
else:
task = self.memory.task_stack_peek()
if task is not None:
task = task.get_root_task()
if task is None:
return "I am not doing anything right now", None
# get answer
return self.do_answer([task])
def handle_agent(self) -> Tuple[Optional[str], Any]:
# location is currently the only expected answer_type
location = tuple(self.agent.pos)
return "I am at {}".format(location), None
def do_answer(self, mems: Sequence[MemoryNode]) -> Tuple[Optional[str], Any]:
answer_type = self.action_dict["answer_type"]
if answer_type == "TAG":
return self.handle_answer_type_tag(mems)
elif answer_type == "EXISTS":
return self.handle_answer_type_exists(mems)
else:
raise ValueError("Bad answer_type={}".format(answer_type))
def handle_answer_type_tag(self, mems: Sequence[MemoryNode]) -> Tuple[Optional[str], Any]:
if len(mems) == 0:
raise ErrorWithResponse("I don't know what you're referring to")
mem = mems[0]
tag_name = self.action_dict["tag_name"]
if tag_name.startswith("has_"):
triples = self.memory.get_triples(subj=mem.memid, pred_text=tag_name)
# TODO backoff to other memories, search etc.
if len(triples) == 0:
# first backoff to tags
triples = self.memory.get_triples(subj=mem.memid, pred_text="has_tag")
if len(triples) == 0:
return "I don't know", None
else:
tag_name = "has_tag"
all_tags = [t[2] for t in triples if t[2][0] != "_"]
_, _, val = triples[0]
if tag_name == "has_name":
if "_in_progress" in self.memory.get_tags_by_memid(mem.memid):
return "It will be a %r" % (val), None
else:
return "It is a %r" % (val), None
elif tag_name == "has_tag":
return "That has tags " + " ".join(all_tags), None
else:
return "It is %r" % (val), None
elif tag_name == "action_name":
assert hasattr(mem, "task")
return "I am {}".format(ACTION_ING_MAPPING[mem.action_name.lower()]), None
elif tag_name == "action_reference_object_name":
assert hasattr(mems[0], "task"), mems[0]
assert isinstance(mems[0].task, Build), mems[0].task
for pred, val in mems[0].task.schematic_tags:
if pred == "has_name":
return "I am building " + prepend_a_an(val), None
return "I am building something that is {}".format(val), None
elif tag_name == "move_target":
assert mem.action_name == "Move", mem
target = tuple(mem.task.target)
return "I am going to {}".format(target), None
elif tag_name == "location":
if isinstance(mems[0], ReferenceObjectNode):
return str(mems[0].get_pos()), None
else:
raise TypeError("Can't get location of {} {}".format(mems[0], mems[0].memid))
else:
raise ErrorWithResponse("I don't understand what you're asking")
def handle_answer_type_exists(self, mems: Sequence[MemoryNode]) -> Tuple[Optional[str], Any]:
# we check progeny data because, if it exists, there was a confirmation
# and interpret_reference_object failed to find the object,
# so it does not have the proper tag. this is an unused opportunity to learn...
# also note that if the answer is going to be "no", the bot will always ask. maybe we should fix this.
if len(mems) > 0 and len(self.progeny_data) == 0:
return "Yes", None
else:
return "No", None
| craftassist-master | python/craftassist/dialogue_objects/get_memory_handler.py |
"""
Copyright (c) Facebook, Inc. and its affiliates.
"""
import logging
import numpy as np
import random
import heuristic_perception
from typing import Tuple, Dict, Any, Optional, List
from word2number.w2n import word_to_num
import sys
import os
BASE_AGENT_ROOT = os.path.join(os.path.dirname(__file__), "../..")
sys.path.append(BASE_AGENT_ROOT)
from base_agent.dialogue_objects import (
DialogueObject,
ConfirmTask,
Say,
SPEAKERLOOK,
ReferenceObjectInterpreter,
)
from .interpreter_helper import (
ErrorWithResponse,
NextDialogueStep,
get_repeat_num,
get_repeat_dir,
interpret_reference_location,
interpret_reference_object,
interpret_relative_direction,
interpret_schematic,
interpret_size,
interpret_facing,
interpret_point_target,
filter_by_sublocation,
)
from .modify_helpers import (
handle_fill,
handle_rigidmotion,
handle_scale,
handle_replace,
handle_thicken,
)
from .block_helpers import get_block_type
from .condition_helper import MCConditionInterpreter
from .reference_object_helpers import compute_locations
from base_agent.memory_nodes import PlayerNode
from mc_memory_nodes import MobNode, ItemStackNode
import dance
import tasks
from mc_util import to_block_pos, Hole, XYZ
class Interpreter(DialogueObject):
"""This class handles processes incoming chats and modifies the task stack
Handlers should add/remove/reorder tasks on the stack, but not execute them.
"""
def __init__(self, speaker: str, action_dict: Dict, **kwargs):
super().__init__(**kwargs)
self.speaker = speaker
self.action_dict = action_dict
self.provisional: Dict = {}
self.action_dict_frozen = False
self.loop_data = None
self.archived_loop_data = None
self.default_debug_path = "debug_interpreter.txt"
self.subinterpret = {
"reference_objects": ReferenceObjectInterpreter(interpret_reference_object),
"condition": MCConditionInterpreter(),
}
self.action_handlers = {
"MOVE": self.handle_move,
"BUILD": self.handle_build,
"DESTROY": self.handle_destroy,
"DIG": self.handle_dig,
"STOP": self.handle_stop,
"RESUME": self.handle_resume,
"FREEBUILD": self.handle_freebuild,
"UNDO": self.handle_undo,
"SPAWN": self.handle_spawn,
"FILL": self.handle_fill,
"DANCE": self.handle_dance,
"MODIFY": self.handle_modify,
"DROP": self.handle_drop,
"GET": self.handle_get,
"OTHERACTION": self.handle_otheraction,
}
def step(self) -> Tuple[Optional[str], Any]:
assert self.action_dict["dialogue_type"] == "HUMAN_GIVE_COMMAND"
try:
actions = []
if "action" in self.action_dict:
actions.append(self.action_dict["action"])
elif "action_sequence" in self.action_dict:
actions = self.action_dict["action_sequence"]
actions.reverse()
if len(actions) == 0:
# The action dict is in an unexpected state
raise ErrorWithResponse(
"I thought you wanted me to do something, but now I don't know what"
)
for action_def in actions:
action_type = action_def["action_type"]
response = self.action_handlers[action_type](self.speaker, action_def)
return response
except NextDialogueStep:
return None, None
except ErrorWithResponse as err:
self.finished = True
return err.chat, None
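# Hedged sketch (not in the original source) of a minimal action_dict that step()
# accepts, built only from keys referenced above ("dialogue_type", "action_sequence",
# "action_type"); the MOVE payload is a made-up illustration:
#     {"dialogue_type": "HUMAN_GIVE_COMMAND",
#      "action_sequence": [{"action_type": "MOVE", "location": {...}}]}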
def handle_modify(self, speaker, d) -> Tuple[Optional[str], Any]:
default_ref_d = {"filters": {"location": SPEAKERLOOK}}
ref_d = d.get("reference_object", default_ref_d)
# only modify blockobjects...
objs = self.subinterpret["reference_objects"](
self, speaker, ref_d, only_physical=True, only_voxels=True
)
if len(objs) == 0:
raise ErrorWithResponse("I don't understand what you want me to modify.")
m_d = d.get("modify_dict")
if not m_d:
raise ErrorWithResponse(
"I think you want me to modify an object but am not sure what to do"
)
for obj in objs:
if m_d["modify_type"] == "THINNER" or m_d["modify_type"] == "THICKER":
destroy_task_data, build_task_data = handle_thicken(self, speaker, m_d, obj)
elif m_d["modify_type"] == "REPLACE":
destroy_task_data, build_task_data = handle_replace(self, speaker, m_d, obj)
elif m_d["modify_type"] == "SCALE":
destroy_task_data, build_task_data = handle_scale(self, speaker, m_d, obj)
elif m_d["modify_type"] == "RIGIDMOTION":
destroy_task_data, build_task_data = handle_rigidmotion(self, speaker, m_d, obj)
elif m_d["modify_type"] == "FILL" or m_d["modify_type"] == "HOLLOW":
destroy_task_data, build_task_data = handle_fill(self, speaker, m_d, obj)
else:
raise ErrorWithResponse(
"I think you want me to modify an object but am not sure what to do (parse error)"
)
if build_task_data:
self.append_new_task(tasks.Build, build_task_data)
if destroy_task_data:
self.append_new_task(tasks.Destroy, destroy_task_data)
self.finished = True
return None, None
def handle_undo(self, speaker, d) -> Tuple[Optional[str], Any]:
task_name = d.get("undo_action")
if task_name:
task_name = task_name.split("_")[0].strip()
old_task = self.memory.get_last_finished_root_task(task_name)
if old_task is None:
raise ErrorWithResponse("Nothing to be undone ...")
undo_tasks = [tasks.Undo(self.agent, {"memid": old_task.memid})]
# undo_tasks = [
# tasks.Undo(self.agent, {"memid": task.memid})
# for task in old_task.all_descendent_tasks(include_root=True)
# ]
undo_command = old_task.get_chat().chat_text
logging.info("Pushing ConfirmTask tasks={}".format(undo_tasks))
self.dialogue_stack.append_new(
ConfirmTask,
'Do you want me to undo the command: "{}" ?'.format(undo_command),
undo_tasks,
)
self.finished = True
return None, None
def handle_spawn(self, speaker, d) -> Tuple[Optional[str], Any]:
spawn_filters = d.get("reference_object", {}).get("filters", {})
if not spawn_filters:
raise ErrorWithResponse("I don't understand what you want me to spawn.")
object_name = spawn_filters["has_name"]
schematic = self.memory.get_mob_schematic_by_name(object_name)
if not schematic:
raise ErrorWithResponse("I don't know how to spawn: %r." % (object_name))
object_idm = list(schematic.blocks.values())[0]
location_d = d.get("location", SPEAKERLOOK)
mems = interpret_reference_location(self, speaker, location_d)
steps, reldir = interpret_relative_direction(self, location_d)
pos, _ = compute_locations(self, speaker, mems, steps, reldir)
repeat_times = get_repeat_num(d)
for i in range(repeat_times):
task_data = {"object_idm": object_idm, "pos": pos, "action_dict": d}
self.append_new_task(tasks.Spawn, task_data)
self.finished = True
return None, None
def handle_move(self, speaker, d) -> Tuple[Optional[str], Any]:
def new_tasks():
# TODO if we do this better we will be able to handle "stay between the x"
location_d = d.get("location", SPEAKERLOOK)
if self.loop_data and hasattr(self.loop_data, "get_pos"):
mems = [self.loop_data]
else:
mems = interpret_reference_location(self, speaker, location_d)
steps, reldir = interpret_relative_direction(self, location_d)
pos, _ = compute_locations(self, speaker, mems, steps, reldir)
# TODO: can this actually happen?
if pos is None:
raise ErrorWithResponse("I don't understand where you want me to move.")
task_data = {"target": pos, "action_dict": d}
task = tasks.Move(self.agent, task_data)
return [task]
if "stop_condition" in d:
condition = self.subinterpret["condition"](self, speaker, d["stop_condition"])
location_d = d.get("location", SPEAKERLOOK)
mems = interpret_reference_location(self, speaker, location_d)
if mems:
self.loop_data = mems[0]
steps, reldir = interpret_relative_direction(self, location_d)
loop_task_data = {
"new_tasks_fn": new_tasks,
"stop_condition": condition,
"action_dict": d,
}
self.append_new_task(tasks.Loop, loop_task_data)
else:
for t in new_tasks():
self.append_new_task(t)
self.finished = True
return None, None
def handle_build(self, speaker, d) -> Tuple[Optional[str], Any]:
# Get the segment to build
if "reference_object" in d:
# handle copy
repeat = get_repeat_num(d)
objs = self.subinterpret["reference_objects"](
self,
speaker,
d["reference_object"],
limit=repeat,
only_voxels=True,
loose_speakerlook=True,
)
if len(objs) == 0:
raise ErrorWithResponse("I don't understand what you want me to build")
tagss = [
[(p, v) for (_, p, v) in self.memory.get_triples(subj=obj.memid)] for obj in objs
]
interprets = [
[list(obj.blocks.items()), obj.memid, tags] for (obj, tags) in zip(objs, tagss)
]
else: # a schematic
if d.get("repeat") is not None:
repeat_dict = d
else:
repeat_dict = None
interprets = interpret_schematic(
self, speaker, d.get("schematic", {}), repeat_dict=repeat_dict
)
# Get the locations to build
location_d = d.get("location", SPEAKERLOOK)
mems = interpret_reference_location(self, speaker, location_d)
steps, reldir = interpret_relative_direction(self, location_d)
origin, offsets = compute_locations(
self,
speaker,
mems,
steps,
reldir,
repeat_dir=get_repeat_dir(location_d),
objects=interprets,
enable_geoscorer=True,
)
interprets_with_offsets = [
(blocks, mem, tags, off) for (blocks, mem, tags), off in zip(interprets, offsets)
]
tasks_todo = []
for schematic, schematic_memid, tags, offset in interprets_with_offsets:
og = np.array(origin) + offset
task_data = {
"blocks_list": schematic,
"origin": og,
"schematic_memid": schematic_memid,
"schematic_tags": tags,
"action_dict": d,
}
tasks_todo.append(task_data)
for task_data in reversed(tasks_todo):
self.append_new_task(tasks.Build, task_data)
logging.info("Added {} Build tasks to stack".format(len(tasks_todo)))
self.finished = True
return None, None
def handle_freebuild(self, speaker, d) -> Tuple[Optional[str], Any]:
# This handler handles the FREEBUILD action, where the agent completes
# a human's half-built structure using a generative model
self.dialogue_stack.append_new(Say, "Sorry, I don't know how to do that yet.")
self.finished = True
return None, None
def handle_fill(self, speaker, d) -> Tuple[Optional[str], Any]:
r = d.get("reference_object")
self.finished = True
if not r.get("filters"):
r["filters"] = {"location", SPEAKERLOOK}
# Get the reference location
location_d = r["filters"].get("location", SPEAKERLOOK)
mems = interpret_reference_location(self, speaker, location_d)
steps, reldir = interpret_relative_direction(self, location_d)
location, _ = compute_locations(self, speaker, mems, steps, reldir)
# Get nearby holes
holes: List[Hole] = heuristic_perception.get_all_nearby_holes(self.agent, location)
candidates: List[Tuple[XYZ, Hole]] = [
(to_block_pos(np.mean(hole[0], axis=0)), hole) for hole in holes
]
# Choose the best ones to fill
repeat = get_repeat_num(d)
holes = filter_by_sublocation(self, speaker, candidates, r, limit=repeat, loose=True)
if holes is None:
self.dialogue_stack.append_new(
Say, "I don't understand what holes you want me to fill."
)
return None, None
for hole in holes:
_, hole_info = hole
poss, hole_idm = hole_info
fill_idm = get_block_type(d["has_block_type"]) if "has_block_type" in d else hole_idm
task_data = {"action_dict": d, "schematic": poss, "block_idm": fill_idm}
self.append_new_task(tasks.Fill, task_data)
if len(holes) > 1:
self.dialogue_stack.append_new(Say, "Ok. I'll fill up the holes.")
else:
self.dialogue_stack.append_new(Say, "Ok. I'll fill that hole up.")
self.finished = True
return None, None
def handle_destroy(self, speaker, d) -> Tuple[Optional[str], Any]:
default_ref_d = {"filters": {"location": SPEAKERLOOK}}
ref_d = d.get("reference_object", default_ref_d)
objs = self.subinterpret["reference_objects"](self, speaker, ref_d, only_destructible=True)
if len(objs) == 0:
raise ErrorWithResponse("I don't understand what you want me to destroy.")
# don't kill mobs
if all(isinstance(obj, MobNode) for obj in objs):
raise ErrorWithResponse("I don't kill animals, sorry!")
if all(isinstance(obj, PlayerNode) for obj in objs):
raise ErrorWithResponse("I don't kill players, sorry!")
objs = [obj for obj in objs if not isinstance(obj, MobNode)]
num_destroy_tasks = 0
for obj in objs:
if hasattr(obj, "blocks"):
schematic = list(obj.blocks.items())
task_data = {"schematic": schematic, "action_dict": d}
self.append_new_task(tasks.Destroy, task_data)
num_destroy_tasks += 1
logging.info("Added {} Destroy tasks to stack".format(num_destroy_tasks))
self.finished = True
return None, None
# TODO mark in memory it was stopped by command
def handle_stop(self, speaker, d) -> Tuple[Optional[str], Any]:
self.finished = True
if self.loop_data is not None:
# TODO if we want to be able to stop and resume old tasks, will need to store
self.archived_loop_data = self.loop_data
self.loop_data = None
if self.memory.task_stack_pause():
return "Stopping. What should I do next?", None
else:
return "I am not doing anything", None
# TODO mark in memory it was resumed by command
def handle_resume(self, speaker, d) -> Tuple[Optional[str], Any]:
self.finished = True
if self.memory.task_stack_resume():
if self.archived_loop_data is not None:
# TODO if we want to be able to stop and resume old tasks, will need to store
self.loop_data = self.archived_loop_data
self.archived_loop_data = None
return "resuming", None
else:
return "nothing to resume", None
def handle_dig(self, speaker, d) -> Tuple[Optional[str], Any]:
def new_tasks():
attrs = {}
schematic_d = d["schematic"]
# set the attributes of the hole to be dug.
for dim, default in [("depth", 1), ("length", 1), ("width", 1)]:
key = "has_{}".format(dim)
if key in schematic_d:
attrs[dim] = word_to_num(schematic_d[key])
elif "has_size" in schematic_d:
attrs[dim] = interpret_size(self, schematic_d["has_size"])
else:
attrs[dim] = default
# minecraft world is [z, x, y]
padding = (attrs["depth"] + 4, attrs["length"] + 4, attrs["width"] + 4)
print("attrs", attrs)
print("padding", padding)
location_d = d.get("location", SPEAKERLOOK)
repeat_num = get_repeat_num(d)
repeat_dir = get_repeat_dir(d)
print("loc d in dig", location_d, "repeat", repeat_num, repeat_dir)
mems = interpret_reference_location(self, speaker, location_d)
steps, reldir = interpret_relative_direction(self, location_d)
origin, offsets = compute_locations(
self,
speaker,
mems,
steps,
reldir,
repeat_num=repeat_num,
repeat_dir=repeat_dir,
padding=padding,
)
print("origin from dig", origin, "offsets", offsets)
# add dig tasks in a loop
tasks_todo = []
for offset in offsets:
og = np.array(origin) + offset
t = tasks.Dig(self.agent, {"origin": og, "action_dict": d, **attrs})
print("append task:", t, og, d, attrs)
tasks_todo.append(t)
return list(reversed(tasks_todo))
print("inside dig, dict", d)
if "stop_condition" in d:
print("stop condition", d["stop_condition"])
condition = self.subinterpret["condition"](self, speaker, d["stop_condition"])
self.append_new_task(
tasks.Loop,
{"new_tasks_fn": new_tasks, "stop_condition": condition, "action_dict": d},
)
else:
for t in new_tasks():
self.append_new_task(t)
self.finished = True
return None, None
def handle_otheraction(self, speaker, d) -> Tuple[Optional[str], Any]:
self.finished = True
return "I don't know how to do that yet", None
def handle_dance(self, speaker, d) -> Tuple[Optional[str], Any]:
def new_tasks():
repeat = get_repeat_num(d)
tasks_to_do = []
# only go around the x if the location has "around"; FIXME allow other kinds of dances
location_d = d.get("location")
if location_d is not None:
rd = location_d.get("relative_direction")
if rd is not None and (
rd == "AROUND" or rd == "CLOCKWISE" or rd == "ANTICLOCKWISE"
):
ref_obj = None
location_reference_object = location_d.get("reference_object")
if location_reference_object:
objmems = self.subinterpret["reference_objects"](
self, speaker, location_reference_object
)
if len(objmems) == 0:
raise ErrorWithResponse("I don't understand where you want me to go.")
ref_obj = objmems[0]
for i in range(repeat):
refmove = dance.RefObjMovement(
self.agent,
ref_object=ref_obj,
relative_direction=location_d["relative_direction"],
)
t = tasks.Dance(self.agent, {"movement": refmove})
tasks_to_do.append(t)
return list(reversed(tasks_to_do))
dance_type = d.get("dance_type", {"dance_type_name": "dance"})
# FIXME holdover from old dict format
if type(dance_type) is str:
dance_type = {"dance_type_name": "dance"}
if dance_type.get("point"):
target = interpret_point_target(self, speaker, dance_type["point"])
for i in range(repeat):
t = tasks.Point(self.agent, {"target": target})
tasks_to_do.append(t)
# MC bot does not control body turn separate from head
elif dance_type.get("look_turn") or dance_type.get("body_turn"):
lt = dance_type.get("look_turn") or dance_type.get("body_turn")
f = interpret_facing(self, speaker, lt)
for i in range(repeat):
t = tasks.DanceMove(self.agent, f)
tasks_to_do.append(t)
else:
if location_d is None:
dance_location = None
else:
mems = interpret_reference_location(self, speaker, location_d)
steps, reldir = interpret_relative_direction(self, location_d)
dance_location, _ = compute_locations(self, speaker, mems, steps, reldir)
# TODO use name!
if dance_type.get("dance_type_span") is not None:
dance_name = dance_type["dance_type_span"]
if dance_name == "dance":
dance_name = "ornamental_dance"
dance_memids = self.memory._db_read(
"SELECT DISTINCT(Dances.uuid) FROM Dances INNER JOIN Triples on Dances.uuid=Triples.subj WHERE Triples.obj_text=?",
dance_name,
)
else:
dance_memids = self.memory._db_read(
"SELECT DISTINCT(Dances.uuid) FROM Dances INNER JOIN Triples on Dances.uuid=Triples.subj WHERE Triples.obj_text=?",
"ornamental_dance",
)
dance_memid = random.choice(dance_memids)[0]
dance_fn = self.memory.dances[dance_memid]
for i in range(repeat):
dance_obj = dance.Movement(
agent=self.agent, move_fn=dance_fn, dance_location=dance_location
)
t = tasks.Dance(self.agent, {"movement": dance_obj})
tasks_to_do.append(t)
return list(reversed(tasks_to_do))
if "stop_condition" in d:
condition = self.subinterpret["condition"](self, speaker, d["stop_condition"])
self.append_new_task(
tasks.Loop,
{"new_tasks_fn": new_tasks, "stop_condition": condition, "action_dict": d},
)
else:
for t in new_tasks():
self.append_new_task(t)
self.finished = True
return None, None
def handle_get(self, speaker, d) -> Tuple[Optional[str], Any]:
ref_d = d.get("reference_object", None)
if not ref_d:
raise ErrorWithResponse("I don't understand what you want me to get.")
objs = self.subinterpret["reference_objects"](self, speaker, ref_d, only_on_ground=True)
if len(objs) == 0:
raise ErrorWithResponse("I don't understand what you want me to get.")
obj = [obj for obj in objs if isinstance(obj, ItemStackNode)][0]
item_stack = self.agent.get_item_stack(obj.eid)
idm = (item_stack.item.id, item_stack.item.meta)
task_data = {"idm": idm, "pos": obj.pos, "eid": obj.eid, "memid": obj.memid}
self.append_new_task(tasks.Get, task_data)
self.finished = True
return None, None
def handle_drop(self, speaker, d) -> Tuple[Optional[str], Any]:
ref_d = d.get("reference_object", None)
if not ref_d:
raise ErrorWithResponse("I don't understand what you want me to drop.")
objs = self.subinterpret["reference_objects"](self, speaker, ref_d, only_in_inventory=True)
if len(objs) == 0:
raise ErrorWithResponse("I don't understand what you want me to drop.")
obj = [obj for obj in objs if isinstance(obj, ItemStackNode)][0]
item_stack = self.agent.get_item_stack(obj.eid)
idm = (item_stack.item.id, item_stack.item.meta)
task_data = {"eid": obj.eid, "idm": idm, "memid": obj.memid}
self.append_new_task(tasks.Drop, task_data)
self.finished = True
return None, None
def append_new_task(self, cls, data=None):
# this is badly named, FIXME
# add a tick to avoid two tasks having same timestamp
self.memory.add_tick()
if data is None:
self.memory.task_stack_push(cls, chat_effect=True)
else:
task = cls(self.agent, data)
self.memory.task_stack_push(task, chat_effect=True)
| craftassist-master | python/craftassist/dialogue_objects/interpreter.py |
"""
Copyright (c) Facebook, Inc. and its affiliates.
"""
import logging
import numpy as np
import math
import random
import re
from typing import cast, List, Tuple, Union, Optional, Dict
from base_agent.dialogue_objects import ConfirmReferenceObject, SPEAKERLOOK, tags_from_dict
import block_data
import heuristic_perception
import rotation
import size_words
from base_agent.memory_nodes import ReferenceObjectNode
from .reference_object_helpers import compute_locations
from .block_helpers import get_block_type
from mc_util import (
Block,
T,
XYZ,
most_common_idm,
object_looked_at,
euclid_dist,
ErrorWithResponse,
NextDialogueStep,
)
from word2number.w2n import word_to_num
from word_maps import SPECIAL_SHAPE_FNS, SPECIAL_SHAPES_CANONICALIZE
def get_special_reference_object(interpreter, speaker, S):
# TODO/FIXME! add things to workspace memory
# TODO/FIXME! remove all direct references to perception modules here, go through memory
# in particular get_player_struct_by_name.... get the eid from memory
if S == "SPEAKER_LOOK" or S == "SPEAKER":
p = interpreter.agent.perception_modules["low_level"].get_player_struct_by_name(speaker)
f = {"special": {S: p.entityId}}
elif S == "AGENT":
p = interpreter.agent.get_player()
f = {"special": {S: p.entityId}}
elif type(S) is dict:
coord_span = S["coordinates_span"]
loc = cast(XYZ, tuple(int(float(w)) for w in re.findall("[-0-9.]+", coord_span)))
if len(loc) != 3:
logging.error("Bad coordinates: {}".format(coord_span))
raise ErrorWithResponse("I don't understand what location you're referring to")
memid = interpreter.memory.add_location((int(loc[0]), int(loc[1]), int(loc[2])))
mem = interpreter.memory.get_location_by_id(memid)
f = {"special": {"DUMMY": mem}}
mems = interpreter.memory.get_reference_objects(f)
if not mems:
# need a better interface for this, don't need to run full perception
# just to force speakerlook in memory
# TODO force if look is stale, not just if it doesn't exist
# this branch shouldn't occur
# interpreter.agent.perceive(force=True)
raise ErrorWithResponse(
"I think you are pointing at something but I don't know what it is"
)
return mems[0]
def interpret_relative_direction(interpreter, location_d):
steps = location_d.get("steps", None)
if steps is not None:
try:
steps = math.ceil(float(steps))
except:
steps = None
reldir = location_d.get("relative_direction")
return steps, reldir
def interpret_reference_object(
interpreter,
speaker,
d,
only_voxels=False,
only_physical=False,
only_in_inventory=False,
only_on_ground=False,
only_destructible=False,
not_location=False,
limit=1,
loose_speakerlook=False,
allow_clarification=True,
) -> List[ReferenceObjectNode]:
"""this tries to find a ref obj memory matching the criteria from the
ref_obj_dict
"""
F = d.get("filters")
special = d.get("special_reference")
# F can be empty...
assert (F is not None) or special, "no filters or special_reference sub-dicts {}".format(d)
if special:
mem = get_special_reference_object(interpreter, speaker, special)
return [mem]
if F.get("contains_coreference", "NULL") != "NULL":
mem = F["contains_coreference"]
if isinstance(mem, ReferenceObjectNode):
return [mem]
elif mem == "resolved":
pass
else:
logging.error("bad coref_resolve -> {}".format(mem))
if len(interpreter.progeny_data) == 0:
tags = tags_from_dict(F)
if only_voxels:
tags.append("_voxel_object")
if only_physical:
tags.append("_physical_object")
if only_in_inventory:
tags.append("_in_inventory")
if only_on_ground:
tags.append("_on_ground")
if only_destructible:
tags.append("_destructible")
# FIXME hack until memory_filters supports "not"
if not_location:
tags.append("_not_location")
# TODO Add ignore_player maybe?
candidates = get_reference_objects(interpreter, *tags)
if len(candidates) > 0:
r = filter_by_sublocation(
interpreter, speaker, candidates, d, limit=limit, loose=loose_speakerlook
)
return [mem for _, mem in r]
elif allow_clarification:
# no candidates found; ask Clarification
# TODO: move ttad call to dialogue manager and remove this logic
interpreter.action_dict_frozen = True
player_struct = interpreter.agent.perception_modules[
"low_level"
].get_player_struct_by_name(speaker)
tags = []
if only_voxels:
tags.append("_voxel_object")
if only_physical:
tags.append("_physical_object")
if only_destructible:
tags.append("_destructible")
confirm_candidates = get_reference_objects(interpreter, *tags)
objects = object_looked_at(
interpreter.agent, confirm_candidates, player_struct, limit=1
)
if len(objects) == 0:
raise ErrorWithResponse("I don't know what you're referring to")
_, mem = objects[0]
interpreter.provisional["object_mem"] = mem
interpreter.provisional["F"] = F
interpreter.dialogue_stack.append_new(ConfirmReferenceObject, mem)
raise NextDialogueStep()
else:
raise ErrorWithResponse("I don't know what you're referring to")
else:
# clarification answered
r = interpreter.progeny_data[-1].get("response")
if r == "yes":
# TODO: learn from the tag! put it in memory!
return [interpreter.provisional.get("object_mem")] * limit
else:
raise ErrorWithResponse("I don't know what you're referring to")
def interpret_shape_schematic(
interpreter, speaker, d, shapename=None
) -> Tuple[List[Block], List[Tuple[str, str]]]:
"""Return a tuple of 2 values:
- the schematic blocks, list[(xyz, idm)]
- a list of (pred, val) tags
"""
if shapename is not None:
shape = shapename
else:
# For sentences like "Stack" and "Place" that have the shapename in dict
shape = d["has_shape"]
numeric_keys = [
"has_thickness",
"has_radius",
"has_depth",
"has_width",
"has_height",
"has_length",
"has_slope",
# "has_orientation", #is this supposed to be numeric key?
"has_distance",
"has_base",
]
attrs = {key[4:]: word_to_num(d[key]) for key in numeric_keys if key in d}
if "has_orientation" in d:
attrs["orient"] = d["has_orientation"]
if "has_size" in d:
attrs["size"] = interpret_size(interpreter, d["has_size"])
if "has_block_type" in d:
block_type = get_block_type(d["has_block_type"])
attrs["bid"] = block_type
elif "has_colour" in d:
c = block_data.COLOR_BID_MAP.get(d["has_colour"])
if c is not None:
attrs["bid"] = random.choice(c)
tags = []
for key, val in d.items():
if key.startswith("has_"):
stemmed_val = val
tags.append((key, stemmed_val))
return SPECIAL_SHAPE_FNS[shape](**attrs), tags
def interpret_size(interpreter, text) -> Union[int, List[int]]:
"""Processes the has_size_ span value and returns int or list[int]"""
nums = re.findall("[-0-9]+", text)
if len(nums) == 1:
# handle "3", "three", etc.
return word_to_num(nums[0])
elif len(nums) > 1:
# handle "3 x 3", "four by five", etc.
return [word_to_num(n) for n in nums]
else:
# handle "big", "really huge", etc.
if hasattr(interpreter.agent, "size_str_to_int"):
return interpreter.agent.size_str_to_int(text)
else:
return size_words.size_str_to_int(text)
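# Hedged examples (not in the original source) of spans interpret_size handles,
# following the branches above; the claim that word_to_num accepts plain digit
# strings is an assumption about the word2number library:
#     "3"     -> 3
#     "3 x 3" -> [3, 3]
#     "big"   -> whatever size_str_to_int maps "big" to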
def interpret_named_schematic(
interpreter, speaker, d
) -> Tuple[List[Block], Optional[str], List[Tuple[str, str]]]:
"""Return a tuple of 3 values:
- the schematic blocks, list[(xyz, idm)]
- a SchematicNode memid, or None
- a list of (pred, val) tags
"""
if "has_name" not in d:
raise ErrorWithResponse("I don't know what you want me to build.")
name = d["has_name"]
stemmed_name = name
shapename = SPECIAL_SHAPES_CANONICALIZE.get(name) or SPECIAL_SHAPES_CANONICALIZE.get(
stemmed_name
)
if shapename:
shape_blocks, tags = interpret_shape_schematic(
interpreter, speaker, d, shapename=shapename
)
return shape_blocks, None, tags
schematic = interpreter.memory.get_schematic_by_name(name)
if schematic is None:
schematic = interpreter.memory.get_schematic_by_name(stemmed_name)
if schematic is None:
raise ErrorWithResponse("I don't know what you want me to build.")
tags = [(p, v) for (_, p, v) in interpreter.memory.get_triples(subj=schematic.memid)]
blocks = schematic.blocks
# TODO generalize to more general block properties
# Longer term: remove and put a call to the modify model here
if d.get("has_colour"):
old_idm = most_common_idm(blocks.values())
c = block_data.COLOR_BID_MAP.get(d["has_colour"])
if c is not None:
new_idm = random.choice(c)
for l in blocks:
if blocks[l] == old_idm:
blocks[l] = new_idm
return list(blocks.items()), schematic.memid, tags
def interpret_schematic(
interpreter, speaker, d, repeat_dict=None
) -> List[Tuple[List[Block], Optional[str], List[Tuple[str, str]]]]:
"""Return a list of 3-tuples, each with values:
- the schematic blocks, list[(xyz, idm)]
- a SchematicNode memid, or None
- a list of (pred, val) tags
"""
# hack, fixme in grammar/standardize. sometimes the repeat is a sibling of action
if repeat_dict is not None:
repeat = cast(int, get_repeat_num(repeat_dict))
else:
repeat = cast(int, get_repeat_num(d))
assert type(repeat) == int, "bad repeat={}".format(repeat)
if "has_shape" in d:
blocks, tags = interpret_shape_schematic(interpreter, speaker, d)
return [(blocks, None, tags)] * repeat
else:
return [interpret_named_schematic(interpreter, speaker, d)] * repeat
def interpret_reference_location(interpreter, speaker, d):
"""
Location dict -> coordinates of reference objc and maybe a list of ref obj
memories.
Side effect: adds mems to agent_memory.recent_entities
"""
loose_speakerlook = False
expected_num = 1
if d.get("relative_direction") == "BETWEEN":
loose_speakerlook = True
expected_num = 2
ref_obj_1 = d.get("reference_object_1")
ref_obj_2 = d.get("reference_object_2")
if ref_obj_1 and ref_obj_2:
mem1 = interpret_reference_object(
interpreter,
speaker,
ref_obj_1,
loose_speakerlook=loose_speakerlook,
allow_clarification=False,
)[0]
mem2 = interpret_reference_object(
interpreter,
speaker,
ref_obj_2,
loose_speakerlook=loose_speakerlook,
allow_clarification=False,
)[0]
if mem1 is None or mem2 is None:
raise ErrorWithResponse("I don't know what you're referring to")
mems = [mem1, mem2]
interpreter.memory.update_recent_entities(mems)
return mems
ref_obj = d.get("reference_object", SPEAKERLOOK["reference_object"])
mems = interpret_reference_object(
interpreter, speaker, ref_obj, limit=expected_num, loose_speakerlook=loose_speakerlook
)
if len(mems) < expected_num:
tags = set(tags_from_dict(ref_obj))
cands = interpreter.memory.get_recent_entities("Mob")
mems = [c for c in cands if any(set.intersection(set(c.get_tags()), tags))]
if len(mems) < expected_num:
cands = interpreter.memory.get_recent_entities("BlockObject")
mems = [c for c in cands if any(set.intersection(set(c.get_tags()), tags))]
if len(mems) < expected_num:
raise ErrorWithResponse("I don't know what you're referring to")
mems = mems[:expected_num]
interpreter.memory.update_recent_entities(mems)
# TODO: are there any memories where get_pos() doesn't return something?
return mems
def interpret_point_target(interpreter, speaker, d):
if d.get("location") is None:
# TODO other facings
raise ErrorWithResponse("I am not sure where you want me to point")
# TODO: We might want to specifically check for BETWEEN/INSIDE, I'm not sure
# what the +1s are in the return value
mems = interpret_reference_location(interpreter, speaker, d["location"])
steps, reldir = interpret_relative_direction(interpreter.agent, d)
loc, _ = compute_locations(interpreter, speaker, mems, steps, reldir)
return (loc[0], loc[1] + 1, loc[2], loc[0], loc[1] + 1, loc[2])
def number_from_span(span):
# this will fail in many cases....
words = span.split()
degrees = None
for w in words:
try:
degrees = int(w)
except:
pass
if not degrees:
try:
degrees = word_to_num(span)
except:
pass
return degrees
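# Hedged examples (not in the original source) of how number_from_span behaves,
# per the two attempts above (int() per word, then word_to_num on the whole span);
# the spans are made up: "45 degrees" -> 45, "ninety" -> 90, "a bit" -> None.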
def interpret_facing(interpreter, speaker, d):
current_pitch = interpreter.agent.get_player().look.pitch
current_yaw = interpreter.agent.get_player().look.yaw
if d.get("yaw_pitch"):
span = d["yaw_pitch"]
# for now assumed in (yaw, pitch) or yaw, pitch or yaw pitch formats
yp = span.replace("(", "").replace(")", "").split()
return {"head_yaw_pitch": (int(yp[0]), int(yp[1]))}
elif d.get("yaw"):
# for now assumed span is yaw as word or number
w = d["yaw"].strip(" degrees").strip(" degree")
return {"head_yaw_pitch": (word_to_num(w), current_pitch)}
elif d.get("pitch"):
# for now assumed span is pitch as word or number
w = d["pitch"].strip(" degrees").strip(" degree")
return {"head_yaw_pitch": (current_yaw, word_to_num(w))}
elif d.get("relative_yaw"):
# TODO in the task use turn angle
if d["relative_yaw"].get("angle"):
return {"relative_yaw": int(d["relative_yaw"]["angle"])}
elif d["relative_yaw"].get("yaw_span"):
span = d["relative_yaw"].get("yaw_span")
left = "left" in span or "leave" in span # lemmatizer :)
degrees = number_from_span(span) or 90
if degrees > 0 and left:
return {"relative_yaw": -degrees}
else:
return {"relative_yaw": degrees}
else:
pass
elif d.get("relative_pitch"):
if d["relative_pitch"].get("angle"):
# TODO in the task make this relative!
return {"relative_pitch": int(d["relative_pitch"]["angle"])}
elif d["relative_pitch"].get("pitch_span"):
span = d["relative_pitch"].get("pitch_span")
down = "down" in span
degrees = number_from_span(span) or 90
if degrees > 0 and down:
return {"relative_pitch": -degrees}
else:
return {"relative_pitch": degrees}
else:
pass
elif d.get("location"):
mems = interpret_reference_location(interpreter, speaker, d["location"])
steps, reldir = interpret_relative_direction(interpreter, d["location"])
loc, _ = compute_locations(interpreter, speaker, mems, steps, reldir)
return {"head_xyz": loc}
else:
raise ErrorWithResponse("I am not sure where you want me to turn")
# def interpret_stop_condition(interpreter, speaker, d) -> Optional[StopCondition]:
# if d.get("condition_type") == "NEVER":
# return NeverStopCondition(interpreter.agent)
# elif d.get("condition_type") == "ADJACENT_TO_BLOCK_TYPE":
# block_type = d["block_type"]
# bid, meta = get_block_type(block_type)
# return AgentAdjacentStopCondition(interpreter.agent, bid)
# else:
# return None
def get_reference_objects(interpreter, *tags) -> List[Tuple[XYZ, ReferenceObjectNode]]:
"""Return a list of (xyz, memory) tuples encompassing all possible reference objects"""
f = {"triples": [{"pred_text": "has_tag", "obj_text": tag} for tag in tags]}
mems = interpreter.memory.get_reference_objects(f)
return [(m.get_pos(), m) for m in mems]
# TODO filter by INSIDE/AWAY/NEAR
def filter_by_sublocation(
interpreter,
speaker,
candidates: List[Tuple[XYZ, T]],
d: Dict,
limit=1,
all_proximity=10,
loose=False,
) -> List[Tuple[XYZ, T]]:
"""Select from a list of candidate (xyz, object) tuples given a sublocation
If limit == 'ALL', return all matching candidates
Returns a list of (xyz, mem) tuples
"""
F = d.get("filters")
assert F is not None, "no filters in {}".format(d)
location = F.get("location", SPEAKERLOOK)
if limit == 1:
limit = get_repeat_num(d)
# handle SPEAKER_LOOK separately due to slightly different semantics
# (proximity to ray instead of point)
if location.get("location_type") == "SPEAKER_LOOK":
player_struct = interpreter.agent.perception_modules[
"low_level"
].get_player_struct_by_name(speaker)
return object_looked_at(
interpreter.agent, candidates, player_struct, limit=limit, loose=loose
)
reldir = location.get("relative_direction")
if reldir:
if reldir == "INSIDE":
if location.get("reference_object"):
# this is ugly, should probably return from interpret_reference_location...
ref_mems = interpret_reference_object(
interpreter, speaker, location["reference_object"]
)
for l, candidate_mem in candidates:
if heuristic_perception.check_inside([candidate_mem, ref_mems[0]]):
return [(l, candidate_mem)]
raise ErrorWithResponse("I can't find something inside that")
elif reldir == "AWAY":
raise ErrorWithResponse("I don't know which object you mean")
elif reldir == "NEAR":
pass # fall back to no reference direction
elif reldir == "BETWEEN":
mems = interpret_reference_location(interpreter, speaker, location)
steps, reldir = interpret_relative_direction(interpreter, d)
ref_loc, _ = compute_locations(interpreter, speaker, mems, steps, reldir)
candidates.sort(key=lambda c: euclid_dist(c[0], ref_loc))
return candidates[:limit]
else:
# reference object location, i.e. the "X" in "left of X"
mems = interpret_reference_location(interpreter, speaker, location)
ref_loc = mems[0].get_pos()
# relative direction, i.e. the "LEFT" in "left of X"
reldir_vec = rotation.DIRECTIONS[reldir]
# transform each object into the speaker look coordinate system,
# and project onto the reldir vector
look = (
interpreter.agent.perception_modules["low_level"]
.get_player_struct_by_name(speaker)
.look
)
proj = [
rotation.transform(np.array(l) - ref_loc, look.yaw, 0) @ reldir_vec
for (l, _) in candidates
]
# filter by relative dir, e.g. "left of Y"
proj_cands = [(p, c) for (p, c) in zip(proj, candidates) if p > 0]
# "the X left of Y" = the right-most X that is left of Y
if limit == "ALL":
limit = len(proj_cands)
return [c for (_, c) in sorted(proj_cands, key=lambda p: p[0])][:limit]
else: # is it even possible to end up in this branch? FIXME?
# no reference direction: choose the closest
mems = interpret_reference_location(interpreter, speaker, location)
steps, reldir = interpret_relative_direction(interpreter, d)
ref_loc, _ = compute_locations(interpreter, speaker, mems, steps, reldir)
if limit == "ALL":
return list(filter(lambda c: euclid_dist(c[0], ref_loc) <= all_proximity, candidates))
else:
candidates.sort(key=lambda c: euclid_dist(c[0], ref_loc))
return candidates[:limit]
return [] # this fixes flake but seems awful?
def get_repeat_num(d) -> Union[int, str]:
if "repeat" in d:
repeat_dict = d["repeat"]
if repeat_dict["repeat_key"] == "FOR":
try:
return word_to_num(repeat_dict["repeat_count"])
except:
return 2 # TODO: dialogue instead of default?
if repeat_dict["repeat_key"] == "ALL":
return "ALL"
return 1
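# Hedged examples (not in the original source) of the repeat sub-dicts handled above;
# the count spans are made up:
#     {"repeat": {"repeat_key": "FOR", "repeat_count": "three"}} -> 3
#     {"repeat": {"repeat_key": "ALL"}}                          -> "ALL"
#     a dict with no "repeat" key                                -> 1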
def get_repeat_dir(d):
if "repeat" in d:
direction_name = d.get("repeat", {}).get("repeat_dir", "FRONT")
elif "schematic" in d:
direction_name = d["schematic"].get("repeat", {}).get("repeat_dir", "FRONT")
else:
direction_name = None
return direction_name
| craftassist-master | python/craftassist/dialogue_objects/interpreter_helper.py |
import numpy as np
import rotation
from shape_transforms import (
scale,
thicker,
shrink_sample,
replace_by_blocktype,
replace_by_halfspace,
fill_flat,
hollow,
rotate,
maybe_convert_to_list,
maybe_convert_to_npy,
)
from .interpreter_helper import (
ErrorWithResponse,
interpret_reference_location,
interpret_relative_direction,
)
from .reference_object_helpers import compute_locations
from .block_helpers import get_block_type
# TODO lots of reuse with build here....
# TODO don't destroy then build if it's unnecessary...
def handle_rigidmotion(interpreter, speaker, modify_dict, obj):
old_blocks = list(obj.blocks.items())
mx, my, mz = np.min([l for l, idm in old_blocks], axis=0)
angle = modify_dict.get("categorical_angle")
mirror = modify_dict.get("mirror")
no_change = False
if angle or mirror:
angle = angle or 0
angle = {0: 0, "LEFT": -90, "RIGHT": 90, "AROUND": 180}[angle]
if mirror:
mirror = 0
else:
mirror = -1
new_schematic = maybe_convert_to_list(rotate(old_blocks, angle, mirror))
else:
no_change = True
new_schematic = old_blocks
location_d = modify_dict.get("location")
if location_d:
mems = interpret_reference_location(interpreter, speaker, location_d)
steps, reldir = interpret_relative_direction(interpreter, location_d)
origin, _ = compute_locations(interpreter, speaker, mems, steps, reldir)
else:
origin = (mx, my, mz)
if no_change and origin == (mx, my, mz):
return None, None
destroy_task_data = {"schematic": old_blocks}
# FIXME deal with tags!!!
build_task_data = {
"blocks_list": new_schematic,
"origin": origin,
# "schematic_tags": tags,
}
return destroy_task_data, build_task_data
# TODO don't destroy the whole thing, just the extra blocks
def handle_scale(interpreter, speaker, modify_dict, obj):
old_blocks = list(obj.blocks.items())
bounds = obj.get_bounds()
mx, my, mz = (bounds[0], bounds[2], bounds[4])
csf = modify_dict.get("categorical_scale_factor")
origin = [mx, my, mz]
if not csf:
if modify_dict.get("numerical_scale_factor"):
raise ErrorWithResponse("I don't know how to handle numerical_scale_factor yet")
else:
raise ErrorWithResponse(
"I think I am supposed to scale something but I don't know which dimensions to scale"
)
destroy_task_data = {"schematic": old_blocks}
if csf == "WIDER":
if bounds[1] - bounds[0] > bounds[5] - bounds[4]:
lam = (2.0, 1.0, 1.0)
else:
lam = (1.0, 1.0, 2.0)
new_blocks = maybe_convert_to_list(scale(old_blocks, lam))
destroy_task_data = None
elif csf == "NARROWER":
if bounds[1] - bounds[0] > bounds[5] - bounds[4]:
lam = (0.5, 1.0, 1.0)
else:
lam = (1.0, 1.0, 0.5)
new_blocks = maybe_convert_to_list(shrink_sample(old_blocks, lam))
elif csf == "TALLER":
lam = (1.0, 2.0, 1.0)
new_blocks = maybe_convert_to_list(scale(old_blocks, lam))
destroy_task_data = None
elif csf == "SHORTER":
lam = (1.0, 0.5, 1.0)
new_blocks = maybe_convert_to_list(shrink_sample(old_blocks, lam))
elif csf == "SKINNIER":
lam = (0.5, 1.0, 0.5)
new_blocks = maybe_convert_to_list(shrink_sample(old_blocks, lam))
elif csf == "FATTER":
lam = (2.0, 1.0, 2.0)
new_blocks = maybe_convert_to_list(scale(old_blocks, lam))
destroy_task_data = None
elif csf == "BIGGER":
lam = (2.0, 2.0, 2.0)
destroy_task_data = None
new_blocks = maybe_convert_to_list(scale(old_blocks, lam))
elif csf == "SMALLER":
lam = (0.5, 0.5, 0.5)
new_blocks = maybe_convert_to_list(shrink_sample(old_blocks, lam))
M_new = np.max([l for l, idm in new_blocks], axis=0)
m_new = np.min([l for l, idm in new_blocks], axis=0)
new_extent = (M_new[0] - m_new[0], M_new[1] - m_new[1], M_new[2] - m_new[2])
old_extent = (bounds[1] - bounds[0], bounds[3] - bounds[2], bounds[5] - bounds[4])
origin = (
mx - (new_extent[0] - old_extent[0]) // 2,
my,
mz - (new_extent[2] - old_extent[2]) // 2,
)
# FIXME deal with tags!!!
build_task_data = {
"blocks_list": new_blocks,
"origin": origin,
# "schematic_tags": tags,
}
return destroy_task_data, build_task_data
def handle_fill(interpreter, speaker, modify_dict, obj):
old_blocks = list(obj.blocks.items())
bounds = obj.get_bounds()
mx, my, mz = (bounds[0], bounds[2], bounds[4])
origin = [mx, my, mz]
destroy_task_data = None
if modify_dict.get("modify_type") == "FILL":
if modify_dict.get("new_block"):
# TODO FILTERS, also in build
block_type = get_block_type(modify_dict["new_block"])
new_blocks = fill_flat(old_blocks, fill_material=block_type)
else:
new_blocks = fill_flat(old_blocks)
else:
# modify_dict.get("modify_type") == "hollow"
new_blocks = hollow(old_blocks)
# destroy_task_data = {"schematic": old_blocks}
# FIXME deal with tags!!!
build_task_data = {
"blocks_list": maybe_convert_to_list(new_blocks),
"origin": origin,
# "schematic_tags": tags,
}
return destroy_task_data, build_task_data
def handle_replace(interpreter, speaker, modify_dict, obj):
old_blocks = list(obj.blocks.items())
bounds = obj.get_bounds()
mx, my, mz = (bounds[0], bounds[2], bounds[4])
origin = (mx, my, mz)
new_block_type = get_block_type(modify_dict["new_block"])
destroy_task_data = None
if modify_dict.get("old_block"):
# TODO FILTERS, also in build
# TODO "make the red blocks green" etc- currently get_block type does not return a list of possibilities
old_block_type = get_block_type(modify_dict["old_block"])
new_blocks = replace_by_blocktype(
old_blocks, new_idm=new_block_type, current_idm=old_block_type
)
else:
geom_d = modify_dict.get("replace_geometry")
geometry = {}
schematic = maybe_convert_to_npy(old_blocks)
geometry["offset"] = np.array(schematic.shape[:3]) / 2
reldir = geom_d.get("relative_direction", "TOP")
if reldir == "TOP":
reldir = "UP"
elif reldir == "BOTTOM":
reldir = "DOWN"
reldir_vec = rotation.DIRECTIONS[reldir]
look = (
interpreter.agent.perception_modules["low_level"]
.get_player_struct_by_name(speaker)
.look
)
dir_vec = rotation.transform(reldir_vec, look.yaw, 0, inverted=True)
geometry["v"] = dir_vec
projections = []
for l, idm in old_blocks:
projections.append((np.array(l) - geometry["offset"]) @ reldir_vec)
a = geom_d.get("amount", "HALF")
if a == "QUARTER":
geometry["threshold"] = (np.max(projections) - np.min(projections)) / 4
else:
geometry["threshold"] = 0.0
new_blocks = replace_by_halfspace(old_blocks, new_idm=new_block_type, geometry=geometry)
# FIXME deal with tags!!!
build_task_data = {
"blocks_list": maybe_convert_to_list(new_blocks),
"origin": origin,
# "schematic_tags": tags,
}
return destroy_task_data, build_task_data
# TODO don't destroy first
def handle_thicken(interpreter, speaker, modify_dict, obj):
old_blocks = list(obj.blocks.items())
bounds = obj.get_bounds()
mx, my, mz = (bounds[0], bounds[2], bounds[4])
origin = [mx, my, mz]
if modify_dict.get("modify_type") == "THICKER":
num_blocks = modify_dict.get("num_blocks", 1)
new_blocks = thicker(old_blocks, delta=num_blocks)
else:
raise ErrorWithResponse("I don't know how thin out blocks yet")
destroy_task_data = {"schematic": old_blocks}
# FIXME deal with tags!!!
build_task_data = {
"blocks_list": new_blocks,
"origin": origin,
# "schematic_tags": tags,
}
return destroy_task_data, build_task_data
| craftassist-master | python/craftassist/dialogue_objects/modify_helpers.py |
"""
Copyright (c) Facebook, Inc. and its affiliates.
"""
import logging
from multiprocessing import Queue, Process
import sys
import os
from mc_memory_nodes import InstSegNode
from heuristic_perception import all_nearby_objects
from shapes import get_bounds
VISION_DIR = os.path.dirname(os.path.realpath(__file__))
CRAFTASSIST_DIR = os.path.join(VISION_DIR, "../")
SEMSEG_DIR = os.path.join(VISION_DIR, "semantic_segmentation/")
sys.path.append(CRAFTASSIST_DIR)
sys.path.append(SEMSEG_DIR)
import build_utils as bu
from semseg_models import SemSegWrapper
# TODO all "subcomponent" operations are replaced with InstSeg
class SubcomponentClassifierWrapper:
def __init__(self, agent, model_path, vocab_path, perceive_freq=0):
self.agent = agent
self.memory = self.agent.memory
self.perceive_freq = perceive_freq
if model_path is not None:
self.subcomponent_classifier = SubComponentClassifier(voxel_model_path=model_path, vocab_path=vocab_path)
self.subcomponent_classifier.start()
else:
self.subcomponent_classifier = None
def perceive(self, force=False):
if self.perceive_freq == 0 and not force:
return
if self.perceive_freq > 0 and self.agent.count % self.perceive_freq != 0 and not force:
return
if self.subcomponent_classifier is None:
return
# TODO don't call all_nearby_objects again, search in memory instead
to_label = []
# add all blocks in marked areas
for pos, radius in self.agent.areas_to_perceive:
for obj in all_nearby_objects(self.agent.get_blocks, pos, radius):
to_label.append(obj)
# add all blocks near the agent
for obj in all_nearby_objects(self.agent.get_blocks, self.agent.pos):
to_label.append(obj)
for obj in to_label:
self.subcomponent_classifier.block_objs_q.put(obj)
# every time, we try to retrieve as many recognition results as possible
while not self.subcomponent_classifier.loc2labels_q.empty():
loc2labels, obj = self.subcomponent_classifier.loc2labels_q.get()
loc2ids = dict(obj)
label2blocks = {}
def contaminated(blocks):
"""
Check if blocks are still consistent with the current world
"""
mx, Mx, my, My, mz, Mz = get_bounds(blocks)
yzxb = self.agent.get_blocks(mx, Mx, my, My, mz, Mz)
for b, _ in blocks:
x, y, z = b
if loc2ids[b][0] != yzxb[y - my, z - mz, x - mx, 0]:
return True
return False
for loc, labels in loc2labels.items():
b = (loc, loc2ids[loc])
for l in labels:
if l in label2blocks:
label2blocks[l].append(b)
else:
label2blocks[l] = [b]
for l, blocks in label2blocks.items():
## if the blocks are contaminated we just ignore them
if not contaminated(blocks):
locs = [loc for loc, idm in blocks]
InstSegNode.create(self.memory, locs, [l])
class SubComponentClassifier(Process):
"""
A classifier class that calls a voxel model to output object tags.
"""
def __init__(self, voxel_model_path=None, vocab_path=None):
super().__init__()
if voxel_model_path is not None:
logging.info(
"SubComponentClassifier using voxel_model_path={}".format(voxel_model_path)
)
self.model = SemSegWrapper(voxel_model_path, vocab_path)
else:
raise Exception("specify a segmentation model")
self.block_objs_q = Queue() # store block objects to be recognized
self.loc2labels_q = Queue() # store loc2labels dicts to be retrieved by the agent
self.daemon = True
def run(self):
"""
The main recognition loop of the classifier
"""
while True: # run forever
tb = self.block_objs_q.get(block=True, timeout=None)
loc2labels = self._watch_single_object(tb)
self.loc2labels_q.put((loc2labels, tb))
def _watch_single_object(self, tuple_blocks):
"""
Input: a list of tuples, where each tuple is ((x, y, z), [bid, mid]). This list
represents a block object.
Output: a dict of (loc, [tag1, tag2, ..]) pairs for all non-air blocks.
"""
def get_tags(p):
"""
convert a list of tag indices to a list of tags
"""
return [self.model.tags[i][0] for i in p]
def apply_offsets(cube_loc, offsets):
"""
Convert the cube location back to world location
"""
return (cube_loc[0] + offsets[0], cube_loc[1] + offsets[1], cube_loc[2] + offsets[2])
np_blocks, offsets = bu.blocks_list_to_npy(blocks=tuple_blocks, xyz=True)
pred = self.model.segment_object(np_blocks)
# convert prediction results to string tags
return dict([(apply_offsets(loc, offsets), get_tags([p])) for loc, p in pred.items()])
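# Hedged illustration (not in the original source) of the shapes described in the
# docstring above; the coordinates, block ids and tag strings are made up:
#     input:  [((10, 64, -3), (1, 0)), ((10, 65, -3), (1, 0))]
#     output: {(10, 64, -3): ["wall"], (10, 65, -3): ["wall"]}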
def recognize(self, list_of_tuple_blocks):
"""
Multiple calls to _watch_single_object
"""
tags = dict()
for tb in list_of_tuple_blocks:
tags.update(self._watch_single_object(tb))
return tags
| craftassist-master | python/craftassist/voxel_models/subcomponent_classifier.py |
"""
Copyright (c) Facebook, Inc. and its affiliates.
"""
import logging
import sys
import os
import torch
GEOSCORER_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)), "geoscorer/")
sys.path.append(GEOSCORER_DIR)
from geoscorer_wrapper import ContextSegmentMergerWrapper
from spatial_utils import shift_sparse_voxel_to_origin, densify
class Geoscorer(object):
"""
A model class that provides geoscorer functionality.
This is distinct from the wrapper itself because I see the wrapper
becoming more specialized as we add more functionality and this object
possibly becoming a process or holding multiple wrappers.
"""
def __init__(self, merger_model_path=None):
if merger_model_path is not None:
logging.info("Geoscorer using merger_model_path={}".format(merger_model_path))
self.merger_model = ContextSegmentMergerWrapper(merger_model_path)
else:
raise Exception("specify a geoscorer model")
self.radius = self.merger_model.context_sl // 2
self.seg_sl = self.merger_model.seg_sl
self.blacklist = ["BETWEEN", "INSIDE", "AWAY", "NEAR"]
# Define the circumstances where we can use geoscorer
def use(self, steps, repeat_num, rel_dir):
if repeat_num > 1 or steps is not None:
return False
if rel_dir is None or rel_dir in self.blacklist:
return False
return True
def produce_segment_pos_in_context(self, segment, context, brc):
# Offset puts us right outside of the bottom right corner
# c_offset = [sum(x) for x in zip(brc, (-1, -1, -1))]
c_offset = brc
context_p = self._process_context(context)
segment_p = self._process_segment(segment)
bottom_right_coord = self._seg_context_processed_to_coord(segment_p, context_p, c_offset)
return bottom_right_coord
def _seg_context_processed_to_coord(self, segment, context, context_off):
local_coord = self.merger_model.segment_context_to_pos(segment, context)
global_coord = [sum(x) for x in zip(local_coord, context_off)]
return global_coord
def _process_context(self, context):
c_tensor = torch.from_numpy(context[:, :, :, 0]).long().to(device="cuda")
return c_tensor
def _process_segment(self, segment):
"""
Takes a segment, described as a list of tuples of the form:
((x, y, z), (block_id, ?))
Returns an 8x8x8 block with the segment shifted to the origin of its bounds.
"""
shifted_seg, _ = shift_sparse_voxel_to_origin(segment)
sl = self.seg_sl
c = self.seg_sl // 2
p, _ = densify(shifted_seg, [sl, sl, sl], center=[c, c, c], useid=True)
s_tensor = torch.from_numpy(p).long().to(device="cuda")
return s_tensor
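    # Illustrative usage sketch (not called in this module); the model path is a
    # placeholder and the block values are made up:
    # segment = [((10, 64, 7), (1, 0)), ((10, 65, 7), (1, 0))]    # sparse voxels
    # gs = Geoscorer(merger_model_path="/path/to/merger_model.pth")
    # seg_tensor = gs._process_segment(segment)   # seg_sl**3 LongTensor on cuda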
| craftassist-master | python/craftassist/voxel_models/geoscorer.py |
"""
Copyright (c) Facebook, Inc. and its affiliates.
"""
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import matplotlib
import plotly.graph_objs as go
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import visdom
import pickle
import os
import torch
GEOSCORER_DIR = os.path.dirname(os.path.realpath(__file__))
MC_DIR = os.path.join(GEOSCORER_DIR, "../../../")
A = Axes3D # to make flake happy :(
def draw_color_hash(schematic, vis, title="", threshold=0.05, win=None, bins=3):
# schematic is DxHxW, each entry an index into hash bin
clrs = []
schematic = schematic.cpu()
X = torch.nonzero(schematic)
clrs = np.zeros((X.shape[0], 3))
for i in range(X.shape[0]):
r = schematic[X[i][0], X[i][1], X[i][2]]
r = r - 1
clrs[i][2] = r % bins
r = r - clrs[i][2]
clrs[i][1] = r / bins % bins
r = r - clrs[i][1] * bins
clrs[i][0] = r / bins ** 2
clrs = (256 * clrs / bins).astype("int64")
w = vis.scatter(
X=X.numpy(),
win=win,
opts={
"markercolor": clrs,
"markersymbol": "square",
"markersize": 15,
"title": title,
"camera": dict(eye=dict(x=2, y=0.1, z=2)),
},
)
vis._send({"win": w, "camera": dict(eye=dict(x=2, y=0.1, z=2))})
return w
def draw_rgb(schematic, vis, title="", threshold=0.05, win=None, colorio=2):
clrs = []
schematic = schematic.cpu()
szs = schematic.shape
X = torch.nonzero(schematic[:3, :, :, :].norm(2, 0) > threshold)
U = schematic.view(szs[0], -1).t()
X_lin = szs[2] * szs[3] * X[:, 0] + szs[3] * X[:, 1] + X[:, 2]
clrs = U[X_lin]
clrs = torch.clamp(clrs, 0, 1)
if clrs.shape[1] == 1:
clrs = clrs.repeat(1, 3)
clrs = clrs / 2
colors = (256 * clrs[:, 0:3]).long().numpy()
w = vis.scatter(
X=X,
win=win,
opts={
"markercolor": colors,
"markersymbol": "square",
"markersize": 15,
"title": title,
"camera": dict(eye=dict(x=2, y=0.1, z=2)),
},
)
vis._send({"win": w, "camera": dict(eye=dict(x=2, y=0.1, z=2))})
return w
def cuboid_data(pos, size=(1, 1, 1)):
# code taken from
# https://stackoverflow.com/a/35978146/4124317
# suppose axis direction: x: to left; y: to inside; z: to upper
# get the (left, outside, bottom) point
o = [a - b / 2 for a, b in zip(pos, size)]
# get the length, width, and height
l, w, h = size
x = [
[o[0], o[0] + l, o[0] + l, o[0], o[0]],
[o[0], o[0] + l, o[0] + l, o[0], o[0]],
[o[0], o[0] + l, o[0] + l, o[0], o[0]],
[o[0], o[0] + l, o[0] + l, o[0], o[0]],
]
y = [
[o[1], o[1], o[1] + w, o[1] + w, o[1]],
[o[1], o[1], o[1] + w, o[1] + w, o[1]],
[o[1], o[1], o[1], o[1], o[1]],
[o[1] + w, o[1] + w, o[1] + w, o[1] + w, o[1] + w],
]
z = [
[o[2], o[2], o[2], o[2], o[2]],
[o[2] + h, o[2] + h, o[2] + h, o[2] + h, o[2] + h],
[o[2], o[2], o[2] + h, o[2] + h, o[2]],
[o[2], o[2], o[2] + h, o[2] + h, o[2]],
]
return np.array(x), np.array(y), np.array(z)
def plotCubeAt(pos=(0, 0, 0), color=(0, 1, 0, 1), ax=None):
# Plotting a cube element at position pos
if ax is not None:
X, Y, Z = cuboid_data(pos)
ax.plot_surface(X, Y, Z, color=color, rstride=1, cstride=1, alpha=1)
class SchematicPlotter:
def __init__(self, viz):
self.viz = viz
ims = pickle.load(
open(os.path.join(MC_DIR, "minecraft_specs/block_images/block_data"), "rb")
)
colors = []
alpha = []
self.bid_to_index = {}
self.index_to_color = {}
self.bid_to_color = {}
count = 0
for b, I in ims["bid_to_image"].items():
I = I.reshape(1024, 4)
if all(I[:, 3] < 0.2):
colors = (0, 0, 0)
else:
colors = I[I[:, 3] > 0.2, :3].mean(axis=0) / 256.0
alpha = I[:, 3].mean() / 256.0
self.bid_to_color[b] = (colors[0], colors[1], colors[2], alpha)
self.bid_to_index[b] = count
self.index_to_color[count] = (colors[0], colors[1], colors[2], alpha)
count = count + 1
def drawMatplot(self, schematic, n=1, title=""):
fig = plt.figure()
ax = fig.gca(projection="3d")
ax.set_aspect("equal")
if type(schematic) is np.ndarray:
for i in range(schematic.shape[0]):
for j in range(schematic.shape[1]):
for k in range(schematic.shape[2]):
if schematic[i, j, k, 0] > 0:
c = self.bid_to_color.get(tuple(schematic[i, j, k, :]))
if c:
plotCubeAt(pos=(i, k, j), color=c, ax=ax) # x, z, y
else:
for b in schematic:
if b[1][0] > 0:
c = self.bid_to_color.get(b[1])
if c:
plotCubeAt(pos=(b[0][0], b[0][2], b[0][1]), color=c, ax=ax) # x, z, y
plt.title(title)
visrotate(n, ax, self.viz)
return fig, ax
def drawGeoscorerPlotly(self, schematic):
x = []
y = []
z = []
id = []
if type(schematic) is torch.Tensor:
sizes = list(schematic.size())
for i in range(sizes[0]):
for j in range(sizes[1]):
for k in range(sizes[2]):
if schematic[i, j, k] > 0:
x.append(i)
y.append(j)
z.append(k)
id.append(schematic[i, j, k].item())
elif type(schematic) is np.ndarray:
for i in range(schematic.shape[0]):
for j in range(schematic.shape[1]):
for k in range(schematic.shape[2]):
if schematic[i, j, k, 0] > 0:
c = self.bid_to_color.get(tuple(schematic[i, j, k, :]))
if c:
x.append(i)
y.append(j)
z.append(k)
id.append(i + j + k)
else:
for b in schematic:
if b[1][0] > 0:
c = self.bid_to_color.get(b[1])
if c:
x.append(b[0][0])
y.append(b[0][2])
z.append(b[0][1])
                        id.append(b[0][0] + b[0][1] + b[0][2])
trace1 = go.Scatter3d(
x=np.asarray(x).transpose(),
y=np.asarray(y).transpose(),
z=np.asarray(z).transpose(),
mode="markers",
marker=dict(
size=5,
symbol="square",
color=id,
colorscale="Viridis",
line=dict(color="rgba(217, 217, 217, 1.0)", width=0),
opacity=1.0,
),
)
data = [trace1]
layout = go.Layout(margin=dict(l=0, r=0, b=0, t=0))
fig = go.Figure(data=data, layout=layout)
self.viz.plotlyplot(fig)
return fig
def drawPlotly(self, schematic, title="", ptype="scatter"):
x = []
y = []
z = []
id = []
clrs = []
if type(schematic) is torch.Tensor:
sizes = list(schematic.size())
for i in range(sizes[0]):
for j in range(sizes[1]):
for k in range(sizes[2]):
if schematic[i, j, k] > 0:
x.append(i)
y.append(j)
z.append(k)
id.append(schematic[i, j, k].item())
elif type(schematic) is np.ndarray:
for i in range(schematic.shape[0]):
for j in range(schematic.shape[1]):
for k in range(schematic.shape[2]):
if schematic[i, j, k, 0] > 0:
c = self.bid_to_color.get(tuple(schematic[i, j, k, :]))
if c:
x.append(i)
y.append(j)
z.append(k)
id.append(i + j + k)
clrs.append(c)
else:
for b in schematic:
if b[1][0] > 0:
c = self.bid_to_color.get(b[1])
if c:
x.append(b[0][0])
y.append(b[0][2])
z.append(b[0][1])
                        id.append(b[0][0] + b[0][1] + b[0][2])
clrs.append(c)
# clrs.append(self.bid_to_index[b[1]])
if ptype == "scatter":
X = torch.Tensor([x, y, z]).t()
if len(clrs) == 0:
raise Exception("all 0 input?")
colors = (256 * torch.Tensor(clrs)[:, 0:3]).long().numpy()
w = self.viz.scatter(
X=X,
opts={
"markercolor": colors,
"markersymbol": "square",
"markersize": 15,
"title": title,
"camera": dict(eye=dict(x=2, y=0.1, z=2)),
},
)
# layout = go.Layout(camera =dict(eye=dict(x=2, y=.1, z=2)))
self.viz._send({"win": w, "camera": dict(eye=dict(x=2, y=0.1, z=2))})
return w
else:
maxid = max(clrs)
clr_set = set(clrs)
cmap = [
[
c / maxid,
"rgb({},{},{})".format(
self.index_to_color[c][0],
self.index_to_color[c][1],
                        self.index_to_color[c][2],
),
]
for c in clr_set
]
trace1 = go.Volume(
x=np.asarray(x).transpose(),
y=np.asarray(y).transpose(),
z=np.asarray(z).transpose(),
value=np.asarray(clrs).transpose(),
isomin=0.1,
isomax=0.8,
colorscale=cmap,
opacity=0.1, # needs to be small to see through all surfaces
surface_count=21, # needs to be a large number for good volume rendering
)
data = [trace1]
layout = go.Layout(margin=dict(l=0, r=0, b=0, t=0))
fig = go.Figure(data=data, layout=layout)
self.viz.plotlyplot(fig)
return fig
def visrotate(n, ax, viz):
for angle in range(45, 405, 360 // n):
ax.view_init(30, angle)
plt.draw()
viz.matplot(plt)
if __name__ == "__main__":
import sys
import argparse
parser = argparse.ArgumentParser()
parser.add_argument(
"--dataset",
type=str,
default="shapes",
help="which\
dataset to visualize (shapes|segments)",
)
opts = parser.parse_args()
CRAFTASSIST_DIR = os.path.join(GEOSCORER_DIR, "../")
sys.path.append(CRAFTASSIST_DIR)
vis = visdom.Visdom(server="http://localhost")
sp = SchematicPlotter(vis)
# fig, ax = sp.drawMatplot(schematic, 4, "yo")
if opts.dataset == "shapes":
import shape_dataset as sdata
num_examples = 3
num_neg = 3
dataset = sdata.SegmentCenterShapeData(
nexamples=num_examples, for_vis=True, useid=True, shift_max=10, nneg=num_neg
)
for n in range(num_examples):
curr_data = dataset[n]
sp.drawPlotly(curr_data[0])
for i in range(num_neg):
sp.drawPlotly(curr_data[i + 1])
elif opts.dataset == "segments":
import inst_seg_dataset as idata
num_examples = 1
num_neg = 1
dataset = idata.SegmentCenterInstanceData(
nexamples=num_examples, shift_max=10, nneg=num_neg
)
for n in range(num_examples):
curr_data = dataset[n]
sp.drawPlotly(curr_data[0])
for i in range(num_neg):
sp.drawPlotly(curr_data[i + 1])
else:
raise Exception("Unknown dataset: {}".format(opts.dataset))
"""
oldc = clrs[0]
clrs[0] = 0
maxid = max(clrs)
clr_set = set(clrs)
cmap = [[c/maxid, "rgb({},{},{})".format(self.index_to_color[c][0],
self.index_to_color[c][1],
self.index_to_color[c][0])]
for c in clr_set]
# clrs[0] = oldc
trace1 = go.Scatter3d(
x=np.asarray(x).transpose(),
y=np.asarray(y).transpose(),
z=np.asarray(z).transpose(),
mode="markers",
marker=dict(
size=15,
symbol="square",
color=clrs,
# color=id,
colorscale=cmap,
# colorscale="Viridis",
line=dict(color="rgba(217, 217, 217, 1.0)", width=0),
opacity=1.0,
),
)
"""
| craftassist-master | python/craftassist/voxel_models/plot_voxels.py |
import pickle
import argparse
import glob
import os
import numpy as np
from typing import List, Dict, Set, Tuple
from pathlib import Path
from copy import deepcopy
from tqdm import tqdm
def open_house_schematic(house_directory: Path) -> np.ndarray:
with open(Path(house_directory) / "schematic.npy", "rb") as file:
return np.load(file)
def get_unique_pairs(house_dir: Path) -> Set:
try:
pairs = set()
schematic = open_house_schematic(house_dir)
# House schematic is in yzx format
# (instance schematics are in xyz).
for y in range(schematic.shape[0]):
for z in range(schematic.shape[1]):
for x in range(schematic.shape[2]):
pair = (int(schematic[y, z, x, 0]), int(schematic[y, z, x, 1]))
pairs.add(pair)
return pairs
except FileNotFoundError:
print(f"schematic not found at {house_dir}")
return set()
def make_id_vocabulary(houses_data_path: Path) -> Dict[Tuple[int, int], int]:
all_houses = glob.glob(str(houses_data_path / "houses" / "*"))
all_sets = map(get_unique_pairs, tqdm(all_houses))
block_meta_pairs: Set[Tuple[int, int]] = set()
for s in all_sets:
block_meta_pairs = block_meta_pairs.union(s)
vocabulary = {pair: i for i, pair in enumerate(block_meta_pairs)}
return vocabulary
def make_new_item(house_data_path: Path, item: List, vocabulary) -> List:
instance_schematic: np.ndarray = item[0]
house_name = item[-1]
house_schematic = open_house_schematic(house_data_path / "houses" / house_name)
assert house_schematic.shape[0] == instance_schematic.shape[1]
assert house_schematic.shape[1] == instance_schematic.shape[2]
assert house_schematic.shape[2] == instance_schematic.shape[0]
new_schematic = instance_schematic.astype(np.int16)
for x in range(instance_schematic.shape[0]):
for y in range(instance_schematic.shape[1]):
for z in range(instance_schematic.shape[2]):
pair = (int(house_schematic[y, z, x, 0]), int(house_schematic[y, z, x, 1]))
new_schematic[x, y, z] = vocabulary[pair]
new_item = list(deepcopy(item))
new_item[0] = new_schematic
return tuple(new_item)
def create_new_seg_ds(
    house_data_path: Path, segmentation_data: List, vocabulary: Dict[Tuple[int, int], int]
) -> List:
new_seg_data = []
for item in segmentation_data:
new_item = make_new_item(house_data_path, item, vocabulary)
new_seg_data.append(new_item)
return new_seg_data
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--house-data-dir", "--house", type=str, required=True)
parser.add_argument("--segmentation-data-dir", "--seg", type=str, required=True)
parser.add_argument("--out-dir", "--out", type=str, required=True)
parser.add_argument("--vocabulary-in", "--vin", type=str, required=False)
parser.add_argument("--vocabulary-out", "--vout", type=str, required=False)
args = parser.parse_args()
    assert (
        args.vocabulary_in is not None or args.vocabulary_out is not None
    ), "Must specify vin or vout"
house_data_dir = Path(args.house_data_dir)
segmentation_data_dir = Path(args.segmentation_data_dir)
out_dir = Path(args.out_dir)
os.makedirs(out_dir, exist_ok=True)
vocab_in = args.vocabulary_in
if vocab_in:
with open(vocab_in, "rb") as file:
vocabulary = pickle.load(file)
else:
vocabulary = make_id_vocabulary(house_data_dir)
with open(args.vocabulary_out, "wb") as file:
pickle.dump(vocabulary, file)
for ds_name in ["training_data.pkl", "validation_data.pkl"]:
in_path = segmentation_data_dir / ds_name
out_path = out_dir / ds_name
with open(in_path, "rb") as file:
seg_data = pickle.load(file)
new_ds = create_new_seg_ds(house_data_dir, seg_data, vocabulary)
with open(out_path, "wb") as file:
pickle.dump(new_ds, file)
| craftassist-master | python/craftassist/voxel_models/make_seg_ds.py |
"""
Copyright (c) Facebook, Inc. and its affiliates.
"""
import os
import argparse
import sys
from data_loaders import InstSegData
import torch
import torch.nn as nn
import torch.optim as optim
import instseg_models as models
##################################################
# for debugging
##################################################
def print_slices(model, H, r, c, n, data):
x, y = data[n]
x = x.unsqueeze(0).cuda()
yhat = model(x).squeeze()
print(x[0, c - r : c + r, H, c - r : c + r].cpu())
print(y[c - r : c + r, H, c - r : c + r])
_, mm = yhat.max(0)
print(mm[c - r : c + r, H, c - r : c + r].cpu())
def blocks_from_data(data, n):
x, y = data[n]
ids = x.nonzero()
idl = ids.tolist()
blocks = [((b[0], b[1], b[2]), (x[b[0], b[1], b[2]].item() + 1, 0)) for b in idl]
return x, y, blocks
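# Note: the "+ 1" undoes the "s -= 1" offset applied in make_example_from_raw
# (see data_loaders.py), so the returned tuples carry original block ids; the
# meta value is not recoverable here and is emitted as 0.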
def watcher_output(S, n, data):
x, y, blocks = blocks_from_data(data, n)
class_stats = {}
for i in range(29):
class_stats[train_data.classes["idx2name"][i]] = len((y == i).nonzero())
# print(train_data.classes['idx2name'][i], len((y==i).nonzero()))
a = S._watch_single_object(blocks)
return class_stats, a
##################################################
# training loop
##################################################
def validate(model, validation_data):
pass
def train_epoch(model, DL, loss, optimizer, args):
model.train()
losses = []
for b in DL:
x = b[0]
s = b[1].unsqueeze(1).float()
y = b[2].float()
masks = b[3].float()
if args.cuda:
x = x.cuda()
s = s.cuda()
y = y.cuda()
masks = masks.cuda()
model.train()
yhat = model(x, s)
# loss is expected to not reduce
preloss = loss(yhat, y)
u = torch.zeros_like(masks).uniform_(0, 1)
idx = u.view(-1).gt((1 - args.sample_empty_prob)).nonzero().squeeze()
masks.view(-1)[idx] = 1
preloss *= masks
l = preloss.sum() / masks.sum()
losses.append(l.item())
        optimizer.zero_grad()
        l.backward()
        optimizer.step()
return losses
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--debug", type=int, default=-1, help="no shuffle, keep only debug num examples"
)
parser.add_argument("--num_labels", type=int, default=50, help="How many top labels to use")
parser.add_argument("--num_epochs", type=int, default=50, help="training epochs")
parser.add_argument("--num_scales", type=int, default=3, help="if 0 use flat ")
parser.add_argument("--augment", default="none", help="none or maxshift:K_underdirt:J")
parser.add_argument("--cuda", action="store_true", help="use cuda")
parser.add_argument("--gpu_id", type=int, default=0, help="which gpu to use")
parser.add_argument("--batchsize", type=int, default=32, help="batch size")
parser.add_argument("--data_dir", default="/checkpoint/aszlam/minecraft/segmentation_data/")
parser.add_argument("--save_model", default="", help="where to save model (nowhere if blank)")
parser.add_argument(
"--load_model", default="", help="from where to load model (nowhere if blank)"
)
parser.add_argument("--save_logs", default="/dev/null", help="where to save logs")
parser.add_argument(
"--hidden_dim", type=int, default=128, help="size of hidden dim in fc layer"
)
parser.add_argument("--embedding_dim", type=int, default=4, help="size of blockid embedding")
parser.add_argument("--lr", type=float, default=0.01, help="step size for net")
parser.add_argument(
"--sample_empty_prob",
type=float,
default=0.01,
help="prob of taking gradients on empty locations",
)
parser.add_argument("--mom", type=float, default=0.0, help="momentum")
parser.add_argument("--ndonkeys", type=int, default=4, help="workers in dataloader")
args = parser.parse_args()
this_dir = os.path.dirname(os.path.realpath(__file__))
parent_dir = os.path.join(this_dir, "../")
sys.path.append(parent_dir)
print("loading train data")
aug = {}
if args.augment != "none":
a = args.augment.split("_")
aug = {t.split(":")[0]: int(t.split(":")[1]) for t in a}
aug["flip_rotate"] = True
if args.debug > 0 and len(aug) > 0:
print("warning debug and augmentation together?")
train_data = InstSegData(
args.data_dir + "training_data.pkl", nexamples=args.debug, augment=aug
)
shuffle = True
if args.debug > 0:
shuffle = False
print("making dataloader")
rDL = torch.utils.data.DataLoader(
train_data,
batch_size=args.batchsize,
shuffle=shuffle,
pin_memory=True,
drop_last=True,
num_workers=args.ndonkeys,
)
print("making model")
args.load = False
if args.load_model != "":
args.load = True
if args.num_scales == 0:
model = models.FlatInstSegNet(args)
else:
model = models.MsInstSegNet(args)
bce = nn.BCEWithLogitsLoss(reduction="none")
if args.cuda:
model.cuda()
bce.cuda()
optimizer = optim.Adagrad(model.parameters(), lr=args.lr)
# optimizer = optim.Adam(model.parameters(), lr=args.lr)
print("training")
for m in range(args.num_epochs):
losses = train_epoch(model, rDL, bce, optimizer, args)
print(" \nEpoch {} loss: {}".format(m, sum(losses) / len(losses)))
if args.save_model != "":
model.save(args.save_model)
| craftassist-master | python/craftassist/voxel_models/instance_segmentation/train_instance_segmentation.py |
"""
Copyright (c) Facebook, Inc. and its affiliates.
"""
import pickle
import numpy as np
import torch
from torch.utils import data as tds
import random
def get_rectanguloid_mask(y, fat=1):
M = y.nonzero().max(0)[0].tolist()
m = y.nonzero().min(0)[0].tolist()
M = [min(M[i] + fat, y.shape[i] - 1) for i in range(3)]
m = [max(v - fat, 0) for v in m]
mask = torch.zeros_like(y)
mask[m[0] : M[0], m[1] : M[1], m[2] : M[2]] = 1
return mask
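# Illustrative example (shapes are made up, not part of the training pipeline):
# y = torch.zeros(8, 8, 8)
# y[2:5, 3:5, 1:4] = 1
# mask = get_rectanguloid_mask(y, fat=1)
# # mask is 1 on the rectanguloid spanned by the nonzero voxels of y, grown by
# # roughly `fat` voxels per side (clipped to the tensor bounds), 0 elsewhere.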
def underdirt(schematic, labels=None, max_shift=0, nothing_id=0):
# todo fancier dirt!
# FIXME!!!! label as ground where appropriate
shift = torch.randint(max_shift + 1, (1,)).item()
if shift > 0:
new_schematic = torch.LongTensor(schematic.size())
new_schematic[:, shift:, :] = schematic[:, :-shift, :]
new_schematic[:, :shift, :] = 3
new_labels = None
if labels is not None:
new_labels = torch.LongTensor(labels.size())
new_labels[:, shift:, :] = labels[:, :-shift, :]
new_labels[:, :shift, :] = nothing_id
return new_schematic, new_labels
else:
return schematic, labels
def flip_rotate(c, l=None, idx=None):
"""
Randomly transform the cube for more data.
The transformation is chosen from:
0. original
1. x-z plane rotation 90
2. x-z plane rotation 180
3. x-z plane rotation 270
4. x-axis flip
5. z-axis flip
"""
idx = np.random.choice(range(6)) if (idx is None) else idx
l_ = l
if idx == 0:
c_ = c
l_ = l
elif idx >= 1 and idx <= 3: # rotate
npc = c.numpy()
npc = np.rot90(npc, idx, axes=(0, 2)) # rotate on the x-z plane
c_ = torch.from_numpy(npc.copy())
if l is not None:
npl = l.numpy()
npl = np.rot90(npl, idx, axes=(0, 2)) # rotate on the x-z plane
l_ = torch.from_numpy(npl.copy())
else: # flip
npc = c.numpy()
npc = np.flip(npc, axis=(idx - 4) * 2) # 0 or 2
c_ = torch.from_numpy(npc.copy())
if l is not None:
npl = l.numpy()
npl = np.flip(npl, axis=(idx - 4) * 2) # 0 or 2
l_ = torch.from_numpy(npl.copy())
return c_, l_, idx
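# Deterministic use (pass idx explicitly instead of sampling one of the 6 options):
# c_, l_, used_idx = flip_rotate(cube, l=labels, idx=2)  # 180 degree x-z rotation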
def pad_to_sidelength(schematic, labels=None, nothing_id=0, sidelength=32):
szs = list(schematic.size())
szs = np.add(szs, -sidelength)
pad = []
# this is all backwards bc pytorch pad semantics :(
for s in szs:
if s >= 0:
pad.append(0)
else:
pad.append(-s)
pad.append(0)
schematic = torch.nn.functional.pad(schematic, pad[::-1])
if labels is not None:
labels = torch.nn.functional.pad(labels, pad[::-1], value=nothing_id)
return schematic, labels
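# Note on pad[::-1]: torch.nn.functional.pad takes (left, right) pairs starting
# from the *last* dimension, so the per-dimension list built above in forward
# order is reversed before the call. E.g. a (30, 32, 28) schematic with
# sidelength 32 gives pad = [2, 0, 0, 0, 4, 0] and pad[::-1] = [0, 4, 0, 0, 0, 2],
# i.e. pad dim 2 by 4 at its end and dim 0 by 2 at its end.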
# TODO cut outliers
# FIXME this should be better
def fit_in_sidelength(
schematic, center_on_labels=False, labels=None, nothing_id=0, sl=32, max_shift=0
):
schematic, labels = pad_to_sidelength(
schematic, labels=labels, nothing_id=nothing_id, sidelength=sl
)
if center_on_labels:
nz = labels.nonzero()
else:
nz = schematic.nonzero()
m, _ = nz.median(0)
min_y, _ = nz.min(0)
min_y = min_y[1]
xshift = max(torch.randint(-max_shift, max_shift + 1, (1,)).item() - m[0].item() + sl // 2, 0)
zshift = max(torch.randint(-max_shift, max_shift + 1, (1,)).item() - m[2].item() + sl // 2, 0)
new_schematic = torch.LongTensor(sl, sl, sl).fill_(1)
new_schematic[xshift:, : sl - min_y, zshift:] = schematic[
: sl - xshift, min_y:sl, : sl - zshift
]
new_labels = None
if labels is not None:
new_labels = torch.LongTensor(sl, sl, sl).fill_(nothing_id)
new_labels[xshift:, : sl - min_y, zshift:] = labels[: sl - xshift, min_y:sl, : sl - zshift]
return new_schematic, new_labels, (xshift, -min_y, zshift)
def make_example_from_raw(
schematic, labels=None, center_on_labels=False, augment={}, nothing_id=0, sl=32
):
max_shift = augment.get("max_shift", 0)
s, l, o = fit_in_sidelength(
schematic,
labels=labels,
center_on_labels=center_on_labels,
nothing_id=nothing_id,
max_shift=max_shift,
)
if len(augment) > 0:
if augment.get("flip_rotate", False):
s, l, _ = flip_rotate(s, l=l)
m = augment.get("underdirt")
if m is not None:
# really should fix offset here.....TODO
s, l = underdirt(s, labels=l, max_shift=m, nothing_id=nothing_id)
s[s == 0] = 1
s -= 1
return s, l, o
class InstSegData(tds.Dataset):
def __init__(
self,
data_path,
nexamples=-1,
sidelength=32,
classes=None,
augment={},
min_inst_size=1,
mask_fat=1,
useid=True,
):
self.sidelength = sidelength
self.useid = useid
self.examples = []
self.inst_data = pickle.load(open(data_path, "rb"))
self.nexamples = nexamples
self.augment = augment
self.mask_fat = mask_fat
if self.nexamples < 0:
self.nexamples = len(self.inst_data)
else:
self.nexamples = min(len(self.inst_data), self.nexamples)
def __getitem__(self, index):
x = self.inst_data[index]
has_label = [i for i in range(len(x[2])) if x[2][i] != "none"]
i = random.choice(has_label)
labels = (x[1] == i).astype("uint8")
labels = torch.from_numpy(labels)
s, l, o = make_example_from_raw(
torch.from_numpy(x[0]),
labels=labels,
sl=self.sidelength,
augment=self.augment,
center_on_labels=True,
)
seed = random.choice(l.nonzero().tolist())
seed_oh = l.clone().zero_()
seed_oh[seed[0], seed[1], seed[2]] = 1
mask = get_rectanguloid_mask(l, fat=self.mask_fat)
return s, seed_oh, l, mask
def __len__(self):
return self.nexamples
# def drawme(s, islabel=False):
# ss = s.clone()
# if not islabel:
# ss += 1
# ss[ss == 1] = 0
# else:
# # fixme (4), also need to swap
# ss[ss == 4] = 0
# fig, ax = sp.draw((torch.stack([ss, ss.clone().zero_()], 3)).numpy(), 4, "yo")
if __name__ == "__main__":
# import sys
# import visdom
# sys.path.append("/private/home/aszlam/fairinternal/minecraft/python/craftassist/geoscorer/")
# import plot_voxels
S = InstSegData("/checkpoint/aszlam/minecraft/segmentation_data/training_data.pkl")
# viz = visdom.Visdom(server="http://localhost")
# sp = plot_voxels.SchematicPlotter(viz)
# def plot(i):
# h = S[i]
# z = torch.zeros(h[0].size()).long()
# schematic = torch.stack([h[0], z], 3)
# fig, ax = sp.draw(schematic.numpy(), 4, "yo")
# return fig, ax, h
| craftassist-master | python/craftassist/voxel_models/instance_segmentation/data_loaders.py |
"""
Copyright (c) Facebook, Inc. and its affiliates.
"""
import numpy as np
import torch
import torch.nn as nn
from data_loaders import make_example_from_raw
def conv3x3x3(in_planes, out_planes, stride=1, bias=True):
"""3x3x3 convolution with padding"""
return nn.Conv3d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=bias)
def conv3x3x3up(in_planes, out_planes, bias=True):
"""3x3x3 convolution with padding"""
return nn.ConvTranspose3d(
in_planes, out_planes, stride=2, kernel_size=3, padding=1, output_padding=1
)
def convbn(in_planes, out_planes, stride=1, bias=True):
return nn.Sequential(
(conv3x3x3(in_planes, out_planes, stride=stride, bias=bias)),
nn.BatchNorm3d(out_planes),
nn.ReLU(inplace=True),
)
def convbnup(in_planes, out_planes, bias=True):
return nn.Sequential(
(conv3x3x3up(in_planes, out_planes, bias=bias)),
nn.BatchNorm3d(out_planes),
nn.ReLU(inplace=True),
)
class InstSegNet(nn.Module):
def __init__(self, opts):
super(InstSegNet, self).__init__()
if opts.load:
if opts.load_model != "":
self.load(opts.load_model)
else:
raise ("loading from file specified but no load_filepath specified")
else:
self.opts = opts
self._build()
def forward(self, x):
raise NotImplementedError
def save(self, filepath):
self.cpu()
sds = {}
sds["opts"] = self.opts
sds["state_dict"] = self.state_dict()
torch.save(sds, filepath)
if self.opts.cuda:
self.cuda()
def load(self, filepath):
sds = torch.load(filepath)
self.opts = sds["opts"]
print("loading from file, using opts")
print(self.opts)
self._build()
self.load_state_dict(sds["state_dict"])
self.zero_grad()
class FlatInstSegNet(InstSegNet):
def __init__(self, opts):
super(FlatInstSegNet, self).__init__(opts)
def _build(self):
opts = self.opts
embedding_dim = getattr(opts, "embedding_dim", 8)
num_words = getattr(opts, "num_words", 255)
num_layers = getattr(opts, "num_layers", 4)
hidden_dim = getattr(opts, "hidden_dim", 64)
self.embedding_dim = embedding_dim
self.embedding = nn.Embedding(num_words, embedding_dim)
self.layers = nn.ModuleList()
self.num_layers = num_layers
self.layers.append(
nn.Sequential(
nn.Conv3d(embedding_dim + 1, hidden_dim, kernel_size=5, padding=2),
nn.BatchNorm3d(hidden_dim),
nn.ReLU(inplace=True),
)
)
for i in range(num_layers - 1):
self.layers.append(
nn.Sequential(
nn.Conv3d(hidden_dim + 1, hidden_dim, kernel_size=5, padding=2),
nn.BatchNorm3d(hidden_dim),
nn.ReLU(inplace=True),
)
)
self.out = nn.Conv3d(hidden_dim, 1, kernel_size=5, padding=2)
def forward(self, x, seed_oh):
# FIXME when pytorch is ready for this, embedding
# backwards is soooooo slow
# z = self.embedding(x)
szs = list(x.size())
x = x.view(-1)
z = self.embedding.weight.index_select(0, x)
szs.append(self.embedding_dim)
z = z.view(torch.Size(szs))
z = z.permute(0, 4, 1, 2, 3).contiguous()
for i in range(self.num_layers):
z = torch.cat((z, seed_oh), 1)
z = self.layers[i](z)
return self.out(z).squeeze()
# num_scales = 3:
# o --> o
# ^ |
# | v
# o --> o --> o
# ^ |
# | v
# o --> o --> o --> o
# ^ |
# | v
# o --> o --> o --> o --> o --> o --> o --> o --> o
# * * * * *
#
class MsInstSegNet(InstSegNet):
def __init__(self, opts):
super(MsInstSegNet, self).__init__(opts)
def _build(self):
opts = self.opts
embedding_dim = getattr(opts, "embedding_dim", 8)
num_words = getattr(opts, "num_words", 255)
num_layers_per_scale = getattr(opts, "num_layers_per_scale", 1)
hidden_dim = getattr(opts, "hidden_dim", 64)
num_scales = getattr(opts, "num_scales", 3)
num_cleanup = getattr(opts, "num_cleanup", 3)
self.embedding_dim = embedding_dim
self.embedding = nn.Embedding(num_words, embedding_dim)
self.start = convbn(embedding_dim + 1, hidden_dim)
self.scales = nn.ModuleList()
self.downsamplers = nn.ModuleList()
self.upsamplers = nn.ModuleList()
self.cleanup = nn.ModuleList()
for i in range(num_scales):
scale = nn.ModuleList()
if i != 0:
self.downsamplers.append(convbn(hidden_dim, hidden_dim, stride=2))
self.upsamplers.append(convbnup(hidden_dim, hidden_dim))
for j in range(num_layers_per_scale * (num_scales - i)):
d = hidden_dim
e = d
if i == 0:
e = e + 1 # keep the seed around
scale.append(convbn(e, d))
self.scales.append(scale)
for i in range(num_cleanup):
self.cleanup.append(convbn(hidden_dim, hidden_dim))
self.out = nn.Conv3d(hidden_dim, 1, kernel_size=5, padding=2)
def forward(self, x, seed_oh):
# FIXME when pytorch is ready for this, embedding
# backwards is soooooo slow
# z = self.embedding(x)
nscales = len(self.scales)
szs = list(x.size())
x = x.view(-1)
z = self.embedding.weight.index_select(0, x)
szs.append(self.embedding_dim)
z = z.view(torch.Size(szs))
z = z.permute(0, 4, 1, 2, 3).contiguous()
z = self.start(torch.cat((z, seed_oh), 1))
scales = []
v = 0 # flake...
for i in range(nscales):
if i > 0:
u = self.downsamplers[i - 1](v)
else:
u = z
for j in range(len(self.scales[i])):
m = self.scales[i][j]
if i == 0:
u = torch.cat((u, seed_oh), 1)
u = m(u)
if j == 0:
v = u.clone()
scales.append(u)
for i in range(nscales - 2, -1, -1):
scales[i] = scales[i] + self.upsamplers[i](scales[i + 1])
z = scales[0]
for m in self.cleanup:
z = m(z)
return self.out(z).squeeze()
class Opt:
pass
############################NOT DONE!!!!!
class InstSegWrapper:
def __init__(self, model, threshold=-1.0, blocks_only=True, cuda=False):
if type(model) is str:
opts = Opt()
opts.load = True
opts.load_model = model
model = InstSegNet(opts)
        self.model = model
        self.threshold = threshold
        self.blocks_only = blocks_only
        self.cuda = cuda
if self.cuda:
model.cuda()
else:
model.cpu()
@torch.no_grad()
def segment_object(self, blocks):
self.model.eval()
blocks = torch.from_numpy(blocks)[:, :, :, 0]
blocks, _, o = make_example_from_raw(blocks)
blocks = blocks.unsqueeze(0)
if self.cuda:
blocks = blocks.cuda()
y = self.model(blocks)
_, mids = y.squeeze().max(0)
locs = mids.nonzero()
locs = locs.tolist()
if self.blocks_only:
return {
tuple(np.subtract(l, o)): mids[l[0], l[1], l[2]].item()
for l in locs
if blocks[0, l[0], l[1], l[2]] > 0
}
else:
return {tuple(ll for ll in l): mids[l[0], l[1], l[2]].item() for l in locs}
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument(
"--hidden_dim", type=int, default=128, help="size of hidden dim in fc layer"
)
parser.add_argument("--embedding_dim", type=int, default=16, help="size of blockid embedding")
parser.add_argument("--num_words", type=int, default=256, help="number of blocks")
parser.add_argument("--num_classes", type=int, default=20, help="number of blocks")
args = parser.parse_args()
args.load = False
N = MsInstSegNet(args)
| craftassist-master | python/craftassist/voxel_models/instance_segmentation/instseg_models.py |
import random
import sys
import argparse
sys.path.append("/private/home/rebeccaqian/minecraft/python/craftassist/")
import minecraft_specs
from shape_helpers import SHAPE_NAMES
ID_DELIM = "^"
BLOCK_NAMES = [v for k, v in minecraft_specs.get_block_data()["bid_to_name"].items() if k[0] < 256]
COLOR_NAMES = [
"aqua",
"black",
"blue",
"fuchsia",
"green",
"gray",
"lime",
"maroon",
"navy",
"olive",
"purple",
"red",
"silver",
"teal",
"white",
"yellow",
"orange",
"brown",
"sienna",
"pink",
"light yellow",
"dark yellow",
"dark yellow",
"gold",
"gold",
]
# COLOR_DATA = minecraft_specs.get_colour_data()
def build_lf(ref_obj_dict, modify_dict):
action_dict = {"action_type": "MODIFY", "modify_dict": modify_dict}
if ref_obj_dict is not None:
action_dict["reference_object"] = ref_obj_dict
y = {"dialogue_type": "HUMAN_GIVE_COMMAND", "action_sequence": [action_dict]}
return y
def replace_with_span(d, split_text):
if type(d) is dict:
for k, v in d.items():
if type(v) is str:
vsplit = v.split()
identifier = vsplit[0].split(ID_DELIM)
if len(identifier) > 1:
identifier = identifier[1]
has_id = [i for i, word in enumerate(split_text) if identifier in word]
span = [0, [min(has_id), max(has_id)]]
d[k] = {"span": span}
for i in has_id:
split_text[i] = split_text[i].strip(ID_DELIM + identifier)
else:
replace_with_span(v, split_text)
else:
return
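# Worked example of the span replacement (the ^id value is illustrative):
# split_text = ["make", "the", "cube^0.71", "thicker"]
# d = {"filters": {"has_name": "cube^0.71"}}
# replace_with_span(d, split_text)
# # d          -> {"filters": {"has_name": {"span": [0, [2, 2]]}}}
# # split_text -> ["make", "the", "cube", "thicker"]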
def get_target_object():
shape_name = random.choice(SHAPE_NAMES).lower()
shape_name_split = shape_name.split("_")
rid = str(random.random())
object_text = " ".join([v + ID_DELIM + rid for v in shape_name_split])
ref_obj = {"filters": {"has_name": object_text}}
ref_obj_text = "the " + object_text
return ref_obj, ref_obj_text
def get_target_location():
loc_dict = {"location": {"location_type": "SPEAKER_LOOK"}}
loc_text = "there"
return loc_dict, loc_text
def get_block():
rid = str(random.random())
if random.random() < 0.5:
csplit = random.choice(COLOR_NAMES).split()
colour = " ".join([w + ID_DELIM + rid for w in csplit])
block_dict = {"has_colour": colour}
block_text = colour + " blocks"
else:
bsplit = random.choice(BLOCK_NAMES).split()
blockname = " ".join([w + ID_DELIM + rid for w in bsplit])
block_dict = {"has_name": blockname}
block_text = blockname
return block_dict, block_text
# THICKEN/SCALE/RIGIDMOTION/REPLACE/FILL
class ModifyTemplates:
def __init__(self):
pass
def generate(self):
pass
class ThickenTemplates(ModifyTemplates):
def __init__(self, opts):
pass
def generate(self):
ref_obj, ref_obj_text = get_target_object()
modify_text = "make " + ref_obj_text
if random.random() > 0.5:
modify_text += " thicker"
modify_dict = {"modify_type": "THICKER"}
else:
modify_text += " thinner"
modify_dict = {"modify_type": "THINNER"}
return modify_text, modify_dict, ref_obj_text, ref_obj
class ScaleTemplates(ModifyTemplates):
def __init__(self, opts):
self.not_makephrase = 0.5
def generate(self):
ref_obj, ref_obj_text = get_target_object()
s = random.choice(
["WIDER", "NARROWER", "TALLER", "SHORTER", "SKINNIER", "FATTER", "BIGGER", "SMALLER"]
)
modify_dict = {"modify_type": "SCALE", "categorical_scale_factor": s}
modify_text = "make " + ref_obj_text + " " + s.lower()
if random.random() < self.not_makephrase:
if s == "WIDER":
modify_text = "widen " + ref_obj_text
elif s == "NARROWER":
modify_text = "narrow " + ref_obj_text
elif s == "SHORTER":
modify_text = "shorten " + ref_obj_text
elif s == "FATTER":
modify_text = "fatten " + ref_obj_text
elif s == "BIGGER":
modify_text = (
random.choice(["upscale ", "grow ", "increase the size of "]) + ref_obj_text
)
elif s == "SMALLER":
modify_text = "shrink " + ref_obj_text
return modify_text, modify_dict, ref_obj_text, ref_obj
class RigidmotionTemplates(ModifyTemplates):
def __init__(self, opts):
self.opts = opts
def generate(self):
ref_obj, ref_obj_text = get_target_object()
modify_dict = {"modify_type": "RIGIDMOTION"}
if random.random() < self.opts.translate_prob:
loc_dict, loc_text = get_target_location()
modify_dict["location"] = loc_dict
modify_text = random.choice(["move ", "put "]) + ref_obj_text + " " + loc_text
else:
if random.random() < self.opts.flip_prob:
modify_dict["mirror"] = True
modify_text = random.choice(["flip ", "mirror "]) + ref_obj_text
else:
d = random.choice(["LEFT", "RIGHT", "AROUND"])
modify_dict["categorical_angle"] = d
modify_text = random.choice(["rotate ", "turn "]) + ref_obj_text + " " + d.lower()
return modify_text, modify_dict, ref_obj_text, ref_obj
class ReplaceTemplates(ModifyTemplates):
def __init__(self, opts):
self.opts = opts
def generate(self):
ref_obj, ref_obj_text = get_target_object()
modify_dict = {"modify_type": "REPLACE"}
new_block_dict, new_block_text = get_block()
t = random.choice(["make |", "replace with", "swap with", "change to"]).split()
if random.random() < self.opts.old_blocktype:
# TODO "all"
old_block_dict, old_block_text = get_block()
modify_text = (
t[0] + " the " + old_block_text + " " + t[1].strip("|") + " " + new_block_text
)
modify_dict["old_block"] = old_block_dict
if random.random() > 0.5:
modify_text += " in the " + ref_obj_text
else:
ref_obj = None
else:
# TODO geom *and* blocktype, every n
d = random.choice(["LEFT", "RIGHT", "TOP", "BOTTOM", "FRONT", "BACK"])
fraction = random.choice(["QUARTER", "HALF", ""])
if fraction == "":
modify_dict["replace_geometry"] = {"relative_direction": d.lower()}
modify_text = (
t[0]
+ " the "
+ d.lower()
+ " of "
+ ref_obj_text
+ " "
+ t[1].strip("|")
+ " "
+ new_block_text
)
else:
modify_dict["replace_geometry"] = {"relative_direction": d.lower()}
modify_text = (
t[0]
+ " the "
+ d.lower()
+ " "
+ fraction.lower()
+ " of "
+ ref_obj_text
+ " "
+ t[1].strip("|")
+ " "
+ new_block_text
)
modify_dict["new_block"] = new_block_dict
return modify_text, modify_dict, ref_obj_text, ref_obj
class FillTemplates(ModifyTemplates):
def __init__(self, opts):
pass
def generate(self):
ref_obj, ref_obj_text = get_target_object()
if random.random() > 0.5:
modify_text = "fill up the " + ref_obj_text
modify_dict = {"modify_type": "FILL"}
if random.random() > 0.5:
new_block_dict, new_block_text = get_block()
modify_dict["new_block"] = new_block_dict
modify_text += " with " + new_block_text
else:
modify_text = "hollow out the " + ref_obj_text
modify_dict = {"modify_type": "HOLLOW"}
return modify_text, modify_dict, ref_obj_text, ref_obj
class TemplateHolder:
def __init__(self, opts):
# TODO
# self.gen_weights = opts.gen_weights
self.templates = {
"thicken": ThickenTemplates(opts),
"scale": ScaleTemplates(opts),
"rigidmotion": RigidmotionTemplates(opts),
"replace": ReplaceTemplates(opts),
"fill": FillTemplates(opts),
}
def generate(self):
modify_text, modify_dict, ref_obj_text, ref_obj_dict = random.choice(
list(self.templates.values())
).generate()
split_modify_text = modify_text.split()
replace_with_span(modify_dict, split_modify_text)
if ref_obj_dict:
replace_with_span(ref_obj_dict, split_modify_text)
modify_text = " ".join(split_modify_text)
return modify_text, build_lf(ref_obj_dict, modify_dict)
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"--target_dir",
default="/checkpoint/rebeccaqian/datasets/modify_templates/",
type=str,
help="where to write modify data",
)
    parser.add_argument(
        "--translate_prob", default=0.25, type=float, help="probability of a translation rigidmotion"
    )
    parser.add_argument(
        "--flip_prob", default=0.1, type=float, help="probability of a mirror flip rigidmotion"
    )
    parser.add_argument(
        "--old_blocktype", default=0.25, type=float, help="probability of replacing by old block type"
    )
parser.add_argument("-N", default=100, type=int, help="number of samples to generate")
opts = parser.parse_args()
T = TemplateHolder(opts)
data = []
for i in range(opts.N):
data.append(T.generate())
f = open(opts.target_dir + "templated_modify.txt", "w")
for d in data:
cmd, action_dict = d
f.write("{}|{}\n".format(cmd, action_dict))
if __name__ == "__main__":
main()
| craftassist-master | python/craftassist/voxel_models/modify/st_templates.py |
if __name__ == "__main__":
import os
import sys
import torch
import argparse
import conv_models as models
from shape_transform_dataloader import ModifyData
from voxel_models.plot_voxels import SchematicPlotter, draw_rgb # noqa
import visdom
from tqdm import tqdm
parser = argparse.ArgumentParser()
parser.add_argument("--num_examples", type=int, default=1000000, help="num examples to encode")
parser.add_argument("--color_io", action="store_true", help="input uses color-alpha")
parser.add_argument(
"--max_meta", type=int, default=20, help="allow that many meta values when hashing idmeta"
)
parser.add_argument("--gpu_id", type=int, default=0, help="which gpu to use")
parser.add_argument("--data_dir", default="")
parser.add_argument("--model_filepath", default="", help="from where to load model")
parser.add_argument(
"--load_dictionary",
default="/private/home/aszlam/junk/word_modify_word_ids.pk",
help="where to get word dict",
)
parser.add_argument("--lr", type=float, default=0.01, help="step size for net")
parser.add_argument(
"--sbatch", action="store_true", help="cluster run mode, no visdom, formatted stdout"
)
parser.add_argument("--ndonkeys", type=int, default=8, help="workers in dataloader")
args = parser.parse_args()
if not args.sbatch:
vis = visdom.Visdom(server="http://localhost")
sp = SchematicPlotter(vis)
this_dir = os.path.dirname(os.path.realpath(__file__))
parent_dir = os.path.join(this_dir, "../")
sys.path.append(parent_dir)
print("loading train data")
################# FIXME!!!!
args.allow_same = False
args.debug = -1
args.words_length = 12
args.sidelength = 32
args.nexamples = args.num_examples + 10000
args.tform_weights = {
"thicker": 1.0,
"scale": 1.0,
"rotate": 1.0,
"replace_by_block": 1.0,
# 'replace_by_n': 1.0,
"replace_by_halfspace": 1.0,
"fill": 1.0,
}
train_data = ModifyData(args, dictionary=args.load_dictionary)
num_workers = args.ndonkeys
print("making dataloader")
rDL = torch.utils.data.DataLoader(
train_data,
batch_size=32,
shuffle=False,
pin_memory=True,
drop_last=True,
num_workers=args.ndonkeys,
)
print("making models")
args.num_words = train_data.padword + 1
args.word_padding_idx = train_data.padword
model = models.AE(args, args.model_filepath)
model.cuda()
model.eval()
X = None
it = iter(rDL)
with torch.no_grad():
for i in tqdm(range(args.num_examples // 32 + 1)):
            b = next(it)
words = b[0]
x = b[1]
y = b[2]
x = x.cuda()
y = y.cuda()
z = model(x)
if X is None:
szs = model.hidden_state.shape
X = torch.zeros(args.num_examples, szs[1], szs[2], szs[3], szs[4])
Y = torch.zeros(args.num_examples, szs[1], szs[2], szs[3], szs[4])
all_words = torch.LongTensor(args.num_examples, 12)
c = min((i + 1) * 32, args.num_examples)
X[i * 32 : c] = model.hidden_state
z = model(y)
Y[i * 32 : c] = model.hidden_state
all_words[i * 32 : c] = words
| craftassist-master | python/craftassist/voxel_models/modify/encode_many.py |
"""
Copyright (c) Facebook, Inc. and its affiliates.
"""
import torch
import torch.nn as nn
import numpy as np
import os
import pickle
THIS_DIR = os.path.dirname(os.path.realpath(__file__))
MC_DIR = os.path.join(THIS_DIR, "../../../../")
def model_filename_from_opts(opts, savedir=None, uid=None):
filler = "]["
name = "["
if opts.ae:
name += "ae"
name += filler
name = name + "hdim" + str(opts.hidden_dim) + filler
name = name + "edim" + str(opts.embedding_dim) + filler
# name = name + "lrp" + str(opts.lr_patience) + filler
name = name + "lrd" + str(opts.lr_decay) + filler
name = name + "res" + str(opts.residual_connection) + filler
name = name + "num_layers" + str(opts.num_layers) + filler
name = name + "color_io" + str(opts.color_io) + filler
name = name + "color_hash" + str(opts.color_hash) + filler
name = name + "sl" + str(opts.sidelength) + filler
name = name + "sigmoid" + str(opts.last_layer_sigmoid) + filler
name = name + "lr" + str(opts.lr) + filler
name = name + opts.optim_type
if uid is not None and uid != "":
name = name + filler + uid
name = name + "].pth"
if savedir is not None:
name = os.path.join(savedir, name)
return name
def get_colors():
ims = pickle.load(open(os.path.join(MC_DIR, "minecraft_specs/block_images/block_data"), "rb"))
colors = {}
for b, I in ims["bid_to_image"].items():
I = I.reshape(1024, 4)
c = np.zeros(4)
if not all(I[:, 3] < 0.2):
c[:3] = I[I[:, 3] > 0.2, :3].mean(axis=0) / 256.0
c[3] = I[:, 3].mean() / 256.0
colors[b] = c
return colors
def build_rgba_embed(max_meta, color_io=2):
if color_io > 1:
edim = 4
else:
edim = 1
embedding = nn.Embedding(256 * max_meta, edim)
embedding.weight.requires_grad = False
colors = get_colors()
for b, c in colors.items():
u = c
if color_io == 1:
u = (c.mean(),)
elif color_io == 0:
u = (1,)
bid = b[1] + max_meta * b[0]
if bid >= 256 * max_meta:
continue
embedding.weight[bid][0] = u[0]
if color_io > 1:
embedding.weight[bid][1] = u[1]
embedding.weight[bid][2] = u[2]
embedding.weight[bid][3] = u[3]
return embedding
def fake_embedding_fwd(x, embedding_weights):
embedding_dim = embedding_weights.shape[1]
szs = list(x.size())
x = x.view(-1)
z = embedding_weights.index_select(0, x)
szs.append(embedding_dim)
z = z.view(torch.Size(szs))
z = z.permute(0, 4, 1, 2, 3).contiguous()
return z
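# Sketch of the intended equivalence (shapes are illustrative):
# x = torch.randint(0, 256 * 20, (2, 32, 32, 32))    # B x H x W x D block ids
# z = fake_embedding_fwd(x, embedding.weight)        # B x edim x H x W x D
# # same result as embedding(x).permute(0, 4, 1, 2, 3).contiguous(), but done
# # via index_select to sidestep the slow Embedding backward noted in the
# # FIXME comments in the forward() methods below.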
def compressed_onehot_distribution(x, allowed_idxs, pool=False):
"""x is a B x H x W x D LongTensor of indices;
if not pool, returns a tensor of the same size, with indices mapped to 0:len(allowed_idxs)-1
if pool, maps to onehot, and pools, returning B x len(allowed_idxs) x H x W x D"""
k = len(allowed_idxs)
vals, sidxs = allowed_idxs.sort()
r = torch.arange(0, len(vals), dtype=allowed_idxs.dtype, device=allowed_idxs.device)
u = torch.zeros(vals[-1].item() + 1, dtype=allowed_idxs.dtype, device=allowed_idxs.device)
u[vals] = r
mapped_x = u[x]
if pool:
weight = torch.eye(k, device=x.device)
onehot = fake_embedding_fwd(mapped_x, weight)
return torch.nn.functional.avg_pool3d(onehot, pool, stride=pool)
else:
return mapped_x
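# Worked example (pool=False): with allowed_idxs = tensor([0, 7, 42]), voxels
# holding id 42 map to 2, id 7 to 1 and id 0 to 0; with pool=2 the mapped ids
# are one-hot encoded to B x 3 x H x W x D and average-pooled with a stride-2
# window, giving per-cell block-type frequencies.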
def color_hash(x, nbins=3):
# x is assumed to be Nx4, and each entry is 0<=x[i]<= 1
q = (x[:, :3] * (nbins - 0.001)).floor().to(dtype=torch.long)
b = x[:, 3] < 0.02
q[b] = 0
b = 1 - b.to(dtype=torch.long)
return b + q[:, 0] * nbins ** 2 + q[:, 1] * nbins + q[:, 2]
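# Worked example with nbins=3: a visible voxel with rgba (0.9, 0.1, 0.5, 1.0)
# falls in bins (2, 0, 1), so its hash is 1 + 2*9 + 0*3 + 1 = 20; voxels with
# alpha < 0.02 hash to 0. Values therefore lie in [0, nbins**3], which matches
# the `color_hash ** 3 + 1` output channels used by the models below.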
class ConvNLL(nn.Module):
def __init__(self, max_meta=20, subsample_zeros=-1):
super(ConvNLL, self).__init__()
self.embedding = build_rgba_embed(max_meta, color_io=2)
self.subsample_zeros = subsample_zeros
self.nll = nn.NLLLoss()
        self.lsm = nn.LogSoftmax(dim=1)
def cuda(self):
self.embedding.cuda()
def forward(self, gold, scores, nbins):
gold = gold.view(-1)
embedded_gold = self.embedding.weight.index_select(0, gold)
hashed_eg = color_hash(embedded_gold, nbins=nbins)
if self.subsample_zeros > 0:
mask = (hashed_eg == 0).float()
n = torch.rand(hashed_eg.shape[0], device=mask.device)
mask = mask - n
keep_nz_idx = torch.nonzero(mask < self.subsample_zeros).view(-1)
scores = scores.permute(0, 2, 3, 4, 1).contiguous()
szs = list(scores.size())
scores = scores.view(-1, szs[-1])
if self.subsample_zeros > 0:
scores = scores[keep_nz_idx]
hashed_eg = hashed_eg[keep_nz_idx]
return self.nll(self.lsm(scores), hashed_eg)
""" does a batch nce over B x c x H x W x D
draws negatives from self.embedder
"""
class ConvDistributionMatch(nn.Module):
def __init__(self, embedding, pool=False, subsample_zeros=-1):
super(ConvDistributionMatch, self).__init__()
self.pool = pool
self.embedding = embedding
self.K = self.embedding.weight.shape[0]
self.lsm = nn.LogSoftmax(dim=1)
self.lsm.to(self.embedding.weight.device)
self.subsample_zeros = subsample_zeros
def forward(self, gold, z, allowed_idxs):
pool = self.pool
mapped_gold = compressed_onehot_distribution(gold, allowed_idxs, pool=pool)
if not pool:
mapped_gold = mapped_gold.view(-1)
else:
mapped_gold = mapped_gold.permute(0, 2, 3, 4, 1).contiguous()
szs = list(mapped_gold.size())
mapped_gold = mapped_gold.view(-1, szs[-1])
self.mapped_gold = mapped_gold
# FIXME will break with pool
if self.subsample_zeros > 0:
mask = (mapped_gold == 0).float()
n = torch.rand(mapped_gold.shape[0], device=mask.device)
mask = mask - n
keep_nz_idx = torch.nonzero(mask < self.subsample_zeros).view(-1)
weight = self.embedding.weight.index_select(0, allowed_idxs)
k = weight.shape[0]
d = weight.shape[1]
scores = nn.functional.conv3d(z, weight.view(k, d, 1, 1, 1))
self.scores = scores
scores = scores.permute(0, 2, 3, 4, 1).contiguous()
szs = list(scores.size())
scores = scores.view(-1, szs[-1])
if self.subsample_zeros > 0:
scores = scores[keep_nz_idx]
mapped_gold = mapped_gold[keep_nz_idx]
if pool:
kl = nn.KLDivLoss()
return kl(self.lsm(scores), mapped_gold)
else:
# nll_weight = torch.ones(len(allowed_idxs), device=weight.device)
# nll_weight[0] = 0.01
# nll = nn.NLLLoss(weight=nll_weight)
nll = nn.NLLLoss()
return nll(self.lsm(scores), mapped_gold)
# this will need ot be fixed when we have relative directions!!!!
class SimpleWordEmbedder(nn.Module):
def __init__(self, opts):
super(SimpleWordEmbedder, self).__init__()
self.embedding = nn.Embedding(
opts.num_words, opts.hidden_dim, padding_idx=opts.word_padding_idx
)
def forward(self, words):
return self.embedding(words).mean(1)
class SimpleBase(nn.Module):
def __init__(self, opts, filepath=None):
super(SimpleBase, self).__init__()
self.loaded_from = None
if not filepath and opts.load_model_dir != "":
filepath = model_filename_from_opts(
opts, savedir=opts.load_model_dir, uid=opts.save_model_uid
)
if filepath:
try:
self.load(filepath)
self.loaded_from = filepath
except:
if opts.load_strict:
raise ("tried to load from " + filepath + " but failed")
else:
print("warning: tried to load from " + filepath + " but failed")
print("starting new model")
self.opts = opts
self._build()
else:
self.opts = opts
self._build()
def _build(self):
pass
def save(self, filepath):
self.cpu()
sds = {}
sds["opts"] = self.opts
sds["state_dict"] = self.state_dict()
torch.save(sds, filepath)
if self.opts.cuda:
self.cuda()
def load(self, filepath):
sds = torch.load(filepath)
self.opts = sds["opts"]
print("loading from file, using opts")
print(self.opts)
self._build()
self.load_state_dict(sds["state_dict"])
self.zero_grad()
class SimpleConv(SimpleBase):
def __init__(self, opts, pool=False):
opts.pool = pool
super(SimpleConv, self).__init__(opts)
def _build(self):
opts = self.opts
if hasattr(opts, "pool"):
self.pool = opts.pool
else:
self.pool = None
self.max_meta = max(opts.max_meta, 20)
self.num_blocks = 256 * self.max_meta
num_blocks = self.num_blocks
embedding_dim = opts.embedding_dim
num_layers = opts.num_layers
hidden_dim = opts.hidden_dim
self.embedding_dim = embedding_dim
if opts.color_io >= 0:
self.embedding = build_rgba_embed(self.max_meta, color_io=opts.color_io)
self.embedding_dim = self.embedding.weight.shape[1]
else:
self.embedding_dim = embedding_dim
self.embedding = nn.Embedding(num_blocks, embedding_dim)
self.layers = nn.ModuleList()
self.num_layers = num_layers
self.layers.append(
nn.Sequential(
nn.Conv3d(self.embedding_dim, hidden_dim, kernel_size=5, padding=2),
nn.BatchNorm3d(hidden_dim),
nn.ReLU(inplace=True),
)
)
self.gate_layers = nn.ModuleList()
for i in range(num_layers - 1):
self.layers.append(
nn.Sequential(
nn.Conv3d(hidden_dim, hidden_dim, kernel_size=5, padding=2),
nn.BatchNorm3d(hidden_dim),
nn.ReLU(inplace=True),
)
)
if self.opts.color_hash > 0:
self.out = nn.Conv3d(hidden_dim, self.opts.color_hash ** 3 + 1, kernel_size=1)
else:
self.out = nn.Conv3d(hidden_dim, self.embedding_dim, kernel_size=1)
self.lvar_embedder = nn.Embedding(opts.num_lvars, hidden_dim)
self.words_embedder = SimpleWordEmbedder(opts)
# TODO attention everywhere...
def forward(self, blocks_array, words, lvars):
words_embeddings = self.words_embedder(words)
lvar_embeddings = self.lvar_embedder(lvars)
# FIXME when pytorch is ready for this, embedding
# backwards is soooooo slow
# z = self.embedding(x)
z = fake_embedding_fwd(blocks_array, self.embedding.weight)
if self.pool:
z = torch.nn.functional.avg_pool3d(z, self.pool, stride=self.pool)
# words_embeddings should be a batchsize x hidden_dim vector
# z = z + words_embeddings.view(wszs).expand(szs)
# z = z + lvar_embeddings.view(wszs).expand(szs)
for i in range(self.num_layers):
oz = z.clone()
z = self.layers[i](z)
szs = list(z.size())
wszs = szs.copy()
wszs[2] = 1
wszs[3] = 1
wszs[4] = 1
z = z + words_embeddings.view(wszs).expand(szs)
z = z + lvar_embeddings.view(wszs).expand(szs)
if self.opts.residual_connection > 0 and oz.shape[1] == z.shape[1]:
z = z + oz
return self.out(z)
class AE(SimpleBase):
def __init__(self, opts, filepath=None):
super(AE, self).__init__(opts, filepath=filepath)
def _build(self):
opts = self.opts
self.do_sigmoid = opts.last_layer_sigmoid == 1
self.max_meta = max(opts.max_meta, 20)
self.num_blocks = 256 * self.max_meta
num_blocks = self.num_blocks
embedding_dim = opts.embedding_dim
num_layers = opts.num_layers
if opts.color_io >= 0:
self.embedding = build_rgba_embed(self.max_meta, color_io=opts.color_io)
self.embedding_dim = self.embedding.weight.shape[1]
else:
self.embedding_dim = embedding_dim
self.embedding = nn.Embedding(num_blocks, embedding_dim)
self.layers = nn.ModuleList()
self.num_layers = num_layers
current_dim = self.embedding_dim
for i in range(num_layers):
if i == 0:
hdim = self.opts.hidden_dim
else:
hdim = 2 * current_dim
self.layers.append(
nn.Sequential(
nn.Conv3d(current_dim, hdim, kernel_size=5, stride=2, padding=2),
nn.BatchNorm3d(hdim),
nn.ReLU(inplace=True),
)
)
current_dim = hdim
for i in range(num_layers):
self.layers.append(
nn.Sequential(
nn.ConvTranspose3d(
current_dim,
current_dim // 2,
kernel_size=5,
stride=2,
padding=2,
output_padding=1,
),
nn.BatchNorm3d(current_dim // 2),
nn.ReLU(inplace=True),
)
)
current_dim = current_dim // 2
if self.opts.color_hash > 0:
self.pre_out = nn.Conv3d(current_dim, self.opts.color_hash ** 3 + 1, kernel_size=1)
else:
self.pre_out = nn.Conv3d(current_dim, self.embedding_dim, kernel_size=1)
# TODO attention everywhere...
def forward(self, blocks_array):
# FIXME when pytorch is ready for this, embedding
# backwards is soooooo slow
# z = self.embedding(x)
z = fake_embedding_fwd(blocks_array, self.embedding.weight)
self.input_embed = z.clone()
for i in range(self.num_layers):
z = self.layers[i](z)
self.hidden_state = z
for i in range(self.num_layers, 2 * self.num_layers):
z = self.layers[i](z)
z = self.pre_out(z)
if self.do_sigmoid and self.opts.color_hash < 0:
return torch.sigmoid(z)
else:
return z
class ConvGenerator(nn.Module):
def __init__(self, opts):
super(ConvGenerator, self).__init__()
self.opts = opts
self.hidden_dim = opts.hidden_dim
self.zdim = opts.zdim
self.do_sigmoid = opts.last_layer_sigmoid == 1
self.layers = nn.ModuleList()
self.num_layers = opts.num_layers
self.expected_output_size = opts.expected_output_size
self.base_grid = opts.expected_output_size // 2 ** self.num_layers
current_dim = self.hidden_dim
self.layers.append(nn.Linear(self.zdim, self.hidden_dim * self.base_grid ** 3))
for i in range(self.num_layers):
self.layers.append(
nn.Sequential(
nn.ConvTranspose3d(
current_dim,
current_dim // 2,
kernel_size=5,
stride=2,
padding=2,
output_padding=1,
),
nn.BatchNorm3d(current_dim // 2),
nn.ReLU(inplace=True),
)
)
current_dim = current_dim // 2
self.pre_out = nn.Conv3d(current_dim, 4, kernel_size=1)
def forward(self, z, c=None):
z = self.layers[0](z)
szs = z.shape
z = z.view(szs[0], -1, self.base_grid, self.base_grid, self.base_grid)
for i in range(self.num_layers):
z = self.layers[i + 1](z)
z = self.pre_out(z)
if self.do_sigmoid:
return torch.sigmoid(z)
else:
return z
class ConvDiscriminator(nn.Module):
def __init__(self, opts):
super(ConvDiscriminator, self).__init__()
self.opts = opts
self.zdim = opts.zdim
self.do_sigmoid = opts.last_layer_sigmoid == 1
self.layers = nn.ModuleList()
self.num_layers = opts.num_layers
self.expected_input_size = opts.expected_input_size
self.layers = nn.ModuleList()
current_dim = 4 # RGBA
for i in range(self.num_layers):
if i == 0:
hdim = self.opts.hidden_dim
else:
hdim = 2 * current_dim
self.layers.append(
nn.Sequential(
nn.Conv3d(current_dim, hdim, kernel_size=5, stride=2, padding=2),
nn.BatchNorm3d(hdim),
nn.ReLU(inplace=True),
)
)
current_dim = hdim
self.base_grid = opts.expected_input_size // 2 ** self.num_layers
self.pre_out = nn.Linear(current_dim * self.base_grid ** 3, 1)
def forward(self, z, c=None):
for i in range(self.num_layers):
z = self.layers[i](z)
z = z.view(z.shape[0], -1)
z = self.pre_out(z)
return torch.tanh(z)
class GAN(SimpleBase):
def __init__(self, opts):
super(GAN, self).__init__(opts)
def _build(self):
self.D = ConvDiscriminator(self.opts)
self.G = ConvGenerator(self.opts)
def forward(self, x, mode="D"):
if mode == "D":
return self.D(x)
else:
return self.G(x)
class Opt:
pass
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument(
"--hidden_dim", type=int, default=128, help="size of hidden dim in fc layer"
)
parser.add_argument("--embedding_dim", type=int, default=16, help="size of blockid embedding")
parser.add_argument("--num_words", type=int, default=256, help="number of blocks")
parser.add_argument("--num_classes", type=int, default=20, help="number of blocks")
args = parser.parse_args()
# N = SemSegNet(args)
| craftassist-master | python/craftassist/voxel_models/modify/conv_models.py |
if __name__ == "__main__":
import os
import sys
import torch
import argparse
from shape_transform_dataloader import ModifyData
from tqdm import tqdm
parser = argparse.ArgumentParser()
parser.add_argument("--num_examples", type=int, default=1000000, help="num examples to build")
parser.add_argument("--data_dir", default="")
parser.add_argument("--max_meta", type=int, default=20, help="max meta")
parser.add_argument("--sidelength", type=int, default=32, help="sidelength for dataloader")
parser.add_argument(
"--load_dictionary",
default="/private/home/aszlam/junk/word_modify_word_ids.pk",
help="where to get word dict",
)
parser.add_argument("--ndonkeys", type=int, default=8, help="workers in dataloader")
args = parser.parse_args()
this_dir = os.path.dirname(os.path.realpath(__file__))
parent_dir = os.path.join(this_dir, "../")
sys.path.append(parent_dir)
################# FIXME!!!!
args.allow_same = False
args.debug = -1
args.words_length = 12
args.sidelength = args.sidelength
args.nexamples = args.num_examples + 10000
args.tform_weights = {
"thicker": 1.0,
"scale": 1.0,
"rotate": 1.0,
"replace_by_block": 1.0,
# 'replace_by_n': 1.0,
"replace_by_halfspace": 1.0,
"fill": 1.0,
}
train_data = ModifyData(args, dictionary=args.load_dictionary)
num_workers = args.ndonkeys
print("making dataloader")
rDL = torch.utils.data.DataLoader(
train_data,
batch_size=32,
shuffle=False,
pin_memory=True,
drop_last=True,
num_workers=args.ndonkeys,
)
X = torch.zeros(
args.num_examples, args.sidelength, args.sidelength, args.sidelength, dtype=torch.int
)
Y = torch.zeros(
args.num_examples, args.sidelength, args.sidelength, args.sidelength, dtype=torch.int
)
words = torch.zeros(args.num_examples, args.words_length)
it = iter(rDL)
for i in tqdm(range(args.num_examples // 32 + 1)):
        b = next(it)
c = min((i + 1) * 32, args.num_examples)
X[i * 32 : c] = b[1]
Y[i * 32 : c] = b[2]
words[i * 32 : c] = b[0]
| craftassist-master | python/craftassist/voxel_models/modify/build_static_dataset.py |
"""
Copyright (c) Facebook, Inc. and its affiliates.
"""
import os
print(os.getcwd())
import sys
sys.path = [""] + sys.path
from shape_transform_dataloader import ModifyData
import torch
# import torch.nn as nn
import torch.optim as optim
import conv_models as models
# predict allowed blocks
# quantize to nearest in allowed set
def format_stats(stats_dict):
status = "STATS :: epoch@{} | loss@{}".format(stats_dict["epoch"], stats_dict["loss"])
return status
# FIXME allow restarting optimizer via opts
def get_optimizer(args, model, lr=None, allow_load=True):
if not lr:
lr = args.lr
sd = None
if allow_load and args.load_model_dir != "" and model.loaded_from is not None:
fname = os.path.basename(model.loaded_from)
fdir = os.path.dirname(model.loaded_from)
optimizer_path = os.path.join(fdir, "optim." + fname)
try:
sd = torch.load(optimizer_path)
except:
print("warning, unable to load optimizer from ")
print(optimizer_path)
print("restarting optimzier")
if args.optim_type == "adam":
optimizer = optim.Adam(model.parameters(), lr=lr)
elif args.optim_type == "adagrad":
optimizer = optim.Adagrad(model.parameters(), lr=lr)
else:
optimizer = optim.SGD(model.parameters(), lr=lr)
if sd:
try:
optimizer.load_state_dict(sd)
except:
print("warning, optimizer from ")
print(optimizer_path)
print("appears corrupted, restarting optimzier")
return optimizer
def lower_lr(model, args, optimizer, new_lr):
new_optimizer = get_optimizer(args, model, lr=new_lr, allow_load=False)
sd = optimizer.state_dict()
new_optimizer.load_state_dict(sd)
return new_optimizer
def save_optimizer(optimizer, fpath):
fname = os.path.basename(fpath)
fdir = os.path.dirname(fpath)
optimizer_path = os.path.join(fdir, "optim." + fname)
torch.save(optimizer.state_dict(), optimizer_path)
##################################################
# for debugging
##################################################
def draw_all(n):
sp.drawPlotly(unhash_volume(b[1][n], 20)) # noqa
draw_color_hash(maxi[n], vis) # noqa
sp.drawPlotly(unhash_volume(b[2][n], 20), title=args._train_data.print_text(b[0][n])) # noqa
def unhash_volume(x, max_meta):
meta = x % max_meta
bid = (x - meta) // max_meta
return torch.stack((bid, meta), 3).cpu().numpy()
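# get_im scores each voxel feature in z against the embeddings of the allowed block ids
# (a 1x1x1 convolution, i.e. a per-voxel dot product with each allowed embedding) and
# returns, for every voxel, the allowed block id with the highest score.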
def get_im(z, embedding, allowed_idxs):
weight = embedding.weight.index_select(0, allowed_idxs)
k = weight.shape[0]
d = weight.shape[1]
scores = torch.nn.functional.conv3d(z, weight.view(k, d, 1, 1, 1))
scores = scores.permute(0, 2, 3, 4, 1).contiguous()
maxs, maxi = scores.max(4)
return allowed_idxs[maxi]
def f_get_im_and_draw(w, z, embedding, allowed_idxs, i, train_data):
B = get_im(z, embedding, allowed_idxs)
text = " ".join(
[
train_data.dictionary["i2w"][l.item()]
for l in w[i]
if l.item() < len(train_data.dictionary["i2w"])
]
)
idm = unhash_volume(B[i], train_data.max_meta)
sp.drawPlotly(idm, title=" ".join(text))
##################################################
# training loop
##################################################
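# find_lvars enumerates every value of the discrete latent variable, runs the model once
# per value, and returns, for each example in the batch, the index of the latent value
# with the lowest recorded loss.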
def find_lvars(model, x, y, words, loss_fn, args):
with torch.no_grad():
losses = torch.zeros(x.shape[0], args.num_lvars, device=x.device)
for i in range(args.num_lvars):
lvars = torch.zeros(x.shape[0], 1, device=x.device, dtype=torch.long).fill_(i)
z = model(x, words, lvars)
if args.color_io >= 0 and args.color_hash < 0:
l = torch.nn.functional.mse_loss(z, model.input_embed)
elif args.color_hash > 0:
l = loss_fn(y, z, args.color_hash)
losses[:, i] = l
minval, mini = losses.min(1)
return mini
def validate(model, validation_data):
pass
def train_epoch(model, DL, loss_fn, optimizer, args):
model.train()
losses = []
for b in DL:
optimizer.zero_grad()
words = b[0]
x = b[1]
y = b[2]
if args.cuda:
words = words.cuda()
x = x.cuda()
y = y.cuda()
model.train()
allowed_idxs = torch.unique(y)
if args.ae:
z = model(y)
else:
lvars = find_lvars(model, x, y, words, loss_fn, args)
z = model(x, words, lvars)
if args.color_io >= 0 and args.color_hash < 0:
l = torch.nn.functional.mse_loss(z, model.input_embed)
elif args.color_hash > 0:
l = loss_fn(y, z, args.color_hash)
else:
l = loss_fn(y, z, allowed_idxs)
losses.append(l.item())
l.backward()
optimizer.step()
return losses
def main(args):
print("loading train data")
################# FIXME!!!!
args.allow_same = False
args.nexamples = 1024
args.tform_weights = {
"thicker": 1.0,
"scale": 1.0,
"rotate": 1.0,
"replace_by_block": 1.0,
# 'replace_by_n': 1.0,
"replace_by_halfspace": 1.0,
"fill": 1.0,
}
train_data = ModifyData(args, dictionary=args.load_dictionary)
shuffle = True
if args.debug > 0:
num_workers = 0
shuffle = False
else:
num_workers = args.ndonkeys
print("making dataloader")
rDL = torch.utils.data.DataLoader(
train_data,
batch_size=args.batchsize,
shuffle=shuffle,
pin_memory=True,
drop_last=True,
num_workers=num_workers,
)
print("making models")
args.num_words = train_data.padword + 1
args.word_padding_idx = train_data.padword
###########################################
# args.pool = 8 #!!!!!!!!!!!!!!!!!!!!!!!!!!!!
args.pool = None
###########################################
if args.ae:
model = models.AE(args)
else:
model = models.SimpleConv(args, pool=args.pool)
if args.cuda:
model.cuda()
else:
print("warning: no cuda")
############
if args.color_hash > 0:
loss_fn = models.ConvNLL(max_meta=args.max_meta)
if args.cuda:
loss_fn.cuda()
else:
loss_fn = models.ConvDistributionMatch(model.embedding, subsample_zeros=0.01)
############
optimizer = get_optimizer(args, model)
args._model = model
args._train_data = train_data
print("training")
minloss = 1000000
badcount = 0
lr = args.lr
for m in range(args.num_epochs):
losses = train_epoch(model, rDL, loss_fn, optimizer, args)
mean_epoch_loss = sum(losses) / len(losses)
status = format_stats({"epoch": m, "loss": mean_epoch_loss})
print(status)
if args.save_model_dir != "":
fpath = models.model_filename_from_opts(
args, savedir=args.save_model_dir, uid=args.save_model_uid
)
model.save(fpath)
save_optimizer(optimizer, fpath)
if mean_epoch_loss < 0.99 * minloss:
minloss = mean_epoch_loss
badcount = 0
else:
badcount = badcount + 1
if badcount > args.lr_patience and args.lr_decay < 1.0:
lr = args.lr_decay * lr
optimizer = lower_lr(model, args, optimizer, lr)
print("lowered lr to " + str(lr))
badcount = 0
return model, train_data, rDL
if __name__ == "__main__":
import argparse
from voxel_models.plot_voxels import SchematicPlotter, draw_rgb, draw_color_hash # noqa
import visdom
parser = argparse.ArgumentParser()
parser.add_argument(
"--debug", type=int, default=-1, help="no shuffle, keep only debug num examples"
)
parser.add_argument("--num_examples", type=int, default=1024, help="num examples in an epoch")
parser.add_argument("--num_epochs", type=int, default=1500, help="training epochs")
parser.add_argument("--last_layer_sigmoid", type=int, default=1, help="do sigmoid if 1")
parser.add_argument(
"--color_hash", type=int, default=-1, help="if > 0 hash color cube into bins"
)
parser.add_argument("--num_lvars", type=int, default=10, help="number of random vars")
parser.add_argument(
"--lr_patience", type=int, default=8, help="how many epochs to wait before decreasing lr"
)
parser.add_argument("--lr_decay", type=float, default=1.0, help="lr decrease multiple")
parser.add_argument("--num_layers", type=int, default=4, help="num conv layers")
# parser.add_argument("--augment", default="none", help="none or maxshift:K_underdirt:J")
parser.add_argument("--sidelength", type=int, default=32, help="sidelength for dataloader")
parser.add_argument(
"--color_io",
type=int,
default=2,
help="if 2 input uses color-alpha, 1 intensity, 0 bw, -1 emebdding",
)
parser.add_argument("--ae", action="store_true", help="plain ae")
parser.add_argument("--cuda", action="store_true", help="use cuda")
parser.add_argument(
"--max_meta", type=int, default=20, help="allow that many meta values when hashing idmeta"
)
parser.add_argument("--gpu_id", type=int, default=0, help="which gpu to use")
parser.add_argument("--batchsize", type=int, default=32, help="batch size")
parser.add_argument("--data_dir", default="")
parser.add_argument(
"--save_model_dir", default="", help="where to save model (nowhere if blank)"
)
parser.add_argument(
"--load_model_dir", default="", help="from where to load model (nowhere if blank)"
)
parser.add_argument(
"--load_strict",
action="store_true",
help="error if model to load doesn't exist. if false just builds new",
)
parser.add_argument(
"--save_model_uid", default="", help="unique identifier on top of options-name"
)
parser.add_argument(
"--words_length", type=int, default=12, help="sentence pad length. FIXME?"
)
parser.add_argument("--optim_type", default="adam", help="optim type, adam, adagrad, sgd")
parser.add_argument("--save_logs", default="/dev/null", help="where to save logs")
parser.add_argument(
"--residual_connection", type=int, default=0, help="if bigger than 0 use resnet-style"
)
parser.add_argument("--hidden_dim", type=int, default=64, help="size of hidden dim")
parser.add_argument("--embedding_dim", type=int, default=16, help="size of blockid embedding")
parser.add_argument(
"--load_dictionary",
default="/private/home/aszlam/junk/word_modify_word_ids.pk",
help="where to get word dict",
)
parser.add_argument("--lr", type=float, default=0.01, help="step size for net")
parser.add_argument(
"--sbatch", action="store_true", help="cluster run mode, no visdom, formatted stdout"
)
parser.add_argument(
"--sample_empty_prob",
type=float,
default=0.01,
help="prob of taking gradients on empty locations",
)
parser.add_argument("--ndonkeys", type=int, default=8, help="workers in dataloader")
args = parser.parse_args()
if not args.sbatch:
vis = visdom.Visdom(server="http://localhost")
sp = SchematicPlotter(vis)
this_dir = os.path.dirname(os.path.realpath(__file__))
parent_dir = os.path.join(this_dir, "../")
sys.path.append(parent_dir)
    grandparent_dir = os.path.join(this_dir, "../../")
    sys.path.append(grandparent_dir)
main(args)
| craftassist-master | python/craftassist/voxel_models/modify/train_conv_model.py |
#########################################################
# TAKEN FROM https://github.com/LMescheder/GAN_stability/
#########################################################
# coding: utf-8
import torch
from torch.nn import functional as F
import torch.utils.data
import torch.utils.data.distributed
from torch import autograd
class Trainer(object):
def __init__(
self, generator, discriminator, g_optimizer, d_optimizer, gan_type, reg_type, reg_param
):
self.generator = generator
self.discriminator = discriminator
self.g_optimizer = g_optimizer
self.d_optimizer = d_optimizer
self.gan_type = gan_type
self.reg_type = reg_type
self.reg_param = reg_param
def generator_trainstep(self, y, z):
# assert(y.size(0) == z.size(0))
toggle_grad(self.generator, True)
toggle_grad(self.discriminator, False)
self.generator.train()
self.discriminator.train()
self.g_optimizer.zero_grad()
x_fake = self.generator(z, y)
d_fake = self.discriminator(x_fake, y)
gloss = self.compute_loss(d_fake, 1)
gloss.backward()
self.g_optimizer.step()
return gloss.item()
def discriminator_trainstep(self, x_real, y, z):
toggle_grad(self.generator, False)
toggle_grad(self.discriminator, True)
self.generator.train()
self.discriminator.train()
self.d_optimizer.zero_grad()
# On real data
x_real.requires_grad_()
d_real = self.discriminator(x_real, y)
dloss_real = self.compute_loss(d_real, 1)
if self.reg_type == "real" or self.reg_type == "real_fake":
dloss_real.backward(retain_graph=True)
reg = self.reg_param * compute_grad2(d_real, x_real).mean()
reg.backward()
else:
dloss_real.backward()
# On fake data
with torch.no_grad():
x_fake = self.generator(z, y)
x_fake.requires_grad_()
d_fake = self.discriminator(x_fake, y)
dloss_fake = self.compute_loss(d_fake, 0)
if self.reg_type == "fake" or self.reg_type == "real_fake":
dloss_fake.backward(retain_graph=True)
reg = self.reg_param * compute_grad2(d_fake, x_fake).mean()
reg.backward()
else:
dloss_fake.backward()
if self.reg_type == "wgangp":
reg = self.reg_param * self.wgan_gp_reg(x_real, x_fake, y)
reg.backward()
elif self.reg_type == "wgangp0":
reg = self.reg_param * self.wgan_gp_reg(x_real, x_fake, y, center=0.0)
reg.backward()
self.d_optimizer.step()
toggle_grad(self.discriminator, False)
# Output
dloss = dloss_real + dloss_fake
if self.reg_type == "none":
reg = torch.tensor(0.0)
return dloss.item(), reg.item()
def compute_loss(self, d_out, target):
targets = d_out.new_full(size=d_out.size(), fill_value=target)
if self.gan_type == "standard":
loss = F.binary_cross_entropy_with_logits(d_out, targets)
elif self.gan_type == "wgan":
loss = (2 * target - 1) * d_out.mean()
else:
raise NotImplementedError
return loss
def wgan_gp_reg(self, x_real, x_fake, y, center=1.0):
batch_size = y.size(0)
eps = torch.rand(batch_size, device=y.device).view(batch_size, 1, 1, 1)
x_interp = (1 - eps) * x_real + eps * x_fake
x_interp = x_interp.detach()
x_interp.requires_grad_()
d_out = self.discriminator(x_interp, y)
reg = (compute_grad2(d_out, x_interp).sqrt() - center).pow(2).mean()
return reg
# Utility functions
def toggle_grad(model, requires_grad):
for p in model.parameters():
p.requires_grad_(requires_grad)
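# compute_grad2 returns the squared norm of the gradient of the discriminator output with
# respect to its input, one value per example; it is used in discriminator_trainstep above
# for the "real"/"fake" (R1/R2-style) penalties and in wgan_gp_reg for the WGAN-GP penalty.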
def compute_grad2(d_out, x_in):
batch_size = x_in.size(0)
grad_dout = autograd.grad(
outputs=d_out.sum(), inputs=x_in, create_graph=True, retain_graph=True, only_inputs=True
)[0]
grad_dout2 = grad_dout.pow(2)
assert grad_dout2.size() == x_in.size()
reg = grad_dout2.view(batch_size, -1).sum(1)
return reg
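# update_average keeps an exponential moving average of the source model's parameters in
# the target model: p_tgt <- beta * p_tgt + (1 - beta) * p_src.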
def update_average(model_tgt, model_src, beta):
toggle_grad(model_src, False)
toggle_grad(model_tgt, False)
param_dict_src = dict(model_src.named_parameters())
for p_name, p_tgt in model_tgt.named_parameters():
p_src = param_dict_src[p_name]
assert p_src is not p_tgt
p_tgt.copy_(beta * p_tgt + (1.0 - beta) * p_src)
| craftassist-master | python/craftassist/voxel_models/modify/gan_trainer.py |
"""
Copyright (c) Facebook, Inc. and its affiliates.
"""
import os
from shape_transform_dataloader import ModifyData
import torch
from torch import distributions
# import torch.nn as nn
import torch.optim as optim
import conv_models as models
from gan_trainer import Trainer
# predict allowed blocks
# quantize to nearest in allowed set
def format_stats(stats_dict):
status = "STATS :: epoch@{} | gloss@{} | dloss@{}".format(
stats_dict["epoch"], stats_dict["gloss"], stats_dict["dloss"]
)
return status
# FIXME allow restarting optimizer via opts
def get_optimizer(args, model):
sds = None
if args.load_model_dir != "" and model.loaded_from is not None:
fname = os.path.basename(model.loaded_from)
fdir = os.path.dirname(model.loaded_from)
optimizer_path = os.path.join(fdir, "optim." + fname)
try:
sds = torch.load(optimizer_path)
sd_g = sds["g"]
sd_d = sds["d"]
except:
print("warning, unable to load optimizer from ")
print(optimizer_path)
print("restarting optimzier")
if args.optim_type == "adam":
optimizer_d = optim.Adam(model.D.parameters(), lr=args.lr_d)
optimizer_g = optim.Adam(model.G.parameters(), lr=args.lr_g)
elif args.optim_type == "adagrad":
optimizer_d = optim.Adagrad(model.D.parameters(), lr=args.lr_d)
optimizer_g = optim.Adagrad(model.G.parameters(), lr=args.lr_g)
elif args.optim_type == "rmsprop":
optimizer_d = optim.RMSprop(model.D.parameters(), lr=args.lr_d, alpha=0.99, eps=1e-8)
optimizer_g = optim.RMSprop(model.G.parameters(), lr=args.lr_g, alpha=0.99, eps=1e-8)
else:
optimizer_d = optim.SGD(model.D.parameters(), lr=args.lr_d)
optimizer_g = optim.SGD(model.G.parameters(), lr=args.lr_g)
if sds:
try:
optimizer_d.load_state_dict(sd_d)
optimizer_g.load_state_dict(sd_g)
except:
print("warning, optimizer from ")
print(optimizer_path)
print("appears corrupted, restarting optimzier")
return optimizer_d, optimizer_g
def save_optimizer(optimizer_d, optimizer_g, fpath):
fname = os.path.basename(fpath)
fdir = os.path.dirname(fpath)
optimizer_path = os.path.join(fdir, "optim." + fname)
torch.save({"d": optimizer_d.state_dict(), "g": optimizer_g.state_dict()}, optimizer_path)
##################################################
# for debugging
##################################################
def unhash_volume(x, max_meta):
meta = x % max_meta
bid = (x - meta) // max_meta
return torch.stack((bid, meta), 3).cpu().numpy()
def get_im(z, embedding, allowed_idxs):
weight = embedding.weight.index_select(0, allowed_idxs)
k = weight.shape[0]
d = weight.shape[1]
scores = torch.nn.functional.conv3d(z, weight.view(k, d, 1, 1, 1))
scores = scores.permute(0, 2, 3, 4, 1).contiguous()
maxs, maxi = scores.max(4)
return allowed_idxs[maxi]
def f_get_im_and_draw(w, z, embedding, allowed_idxs, i, train_data):
B = get_im(z, embedding, allowed_idxs)
text = " ".join(
[
train_data.dictionary["i2w"][l.item()]
for l in w[i]
if l.item() < len(train_data.dictionary["i2w"])
]
)
idm = unhash_volume(B[i], train_data.max_meta)
sp.drawPlotly(idm, title=" ".join(text))
##################################################
# training loop
##################################################
def get_zdist(dist_name, dim, device=None):
# Get distribution
if dist_name == "uniform":
low = -torch.ones(dim, device=device)
high = torch.ones(dim, device=device)
zdist = distributions.Uniform(low, high)
elif dist_name == "gauss":
mu = torch.zeros(dim, device=device)
scale = torch.ones(dim, device=device)
zdist = distributions.Normal(mu, scale)
else:
raise NotImplementedError
# Add dim attribute
zdist.dim = dim
return zdist
def validate(model, validation_data):
pass
def train_epoch(model, DL, trainer, args):
model.train()
losses = {"g": [], "d": []}
for b in DL:
words = b[0]
x = b[1]
y = b[2]
if args.cuda:
words = words.cuda()
x = x.cuda()
y = y.cuda()
zdist = get_zdist("gauss", args.zdim, device=x.device)
x_real = models.fake_embedding_fwd(y, trainer.rgba_embedding.weight)
z = zdist.sample((args.batchsize,))
# Discriminator updates
dloss, reg = trainer.discriminator_trainstep(x_real, None, z)
losses["d"].append(dloss)
# Generators updates
# if ((it + 1) % args.d_steps) == 0:
z = zdist.sample((args.batchsize,))
gloss = trainer.generator_trainstep(None, z)
losses["g"].append(gloss)
return losses
def get_data(args):
print("loading train data")
################# FIXME!!!!
args.allow_same = False
args.max_meta = 20
args.sidelength = 16
args.expected_input_size = args.sidelength
args.expected_output_size = args.sidelength
args.nexamples = 1024
args.tform_weights = {
"thicker": 1.0,
"scale": 1.0,
"rotate": 1.0,
"replace_by_block": 1.0,
# 'replace_by_n': 1.0,
"replace_by_halfspace": 1.0,
"fill": 1.0,
}
train_data = ModifyData(args, dictionary=args.load_dictionary)
shuffle = True
if args.debug > 0:
num_workers = 0
shuffle = False
else:
num_workers = args.ndonkeys
print("making dataloader")
rDL = torch.utils.data.DataLoader(
train_data,
batch_size=args.batchsize,
shuffle=shuffle,
pin_memory=True,
drop_last=True,
num_workers=num_workers,
)
return rDL, train_data
def main(args):
rDL, train_data = get_data(args)
print("making models")
model = models.GAN(args)
rgba_embedding = models.build_rgba_embed(args.max_meta)
if args.cuda:
model.cuda()
rgba_embedding = rgba_embedding.cuda()
else:
print("warning: no cuda")
optimizer_d, optimizer_g = get_optimizer(args, model)
trainer = Trainer(
model.G,
model.D,
optimizer_g,
optimizer_d,
gan_type="standard",
reg_type="real",
reg_param=args.reg_param,
)
trainer.rgba_embedding = rgba_embedding
args._model = model
args._rgba_embedding = rgba_embedding
print("training")
win_name = None
for m in range(args.num_epochs):
losses = train_epoch(model, rDL, trainer, args)
status = format_stats(
{
"epoch": m,
"gloss": sum(losses["g"]) / len(losses["g"]),
"dloss": sum(losses["d"]) / len(losses["d"]),
}
)
print(status)
if args.save_model_dir != "":
fpath = models.model_filename_from_opts(
args, savedir=args.save_model_dir, uid=args.save_model_uid
)
model.save(fpath)
save_optimizer(optimizer_d, optimizer_g, fpath)
if args.visualize:
zdist = get_zdist("gauss", args.zdim, device=model.G.layers[0].weight.device)
z = zdist.sample((4,))
with torch.no_grad():
u = model.G(z)
win_name = draw_rgb(u[0], vis, threshold=0.1, win=win_name, title=str(m))
return model, train_data, rDL
if __name__ == "__main__":
import argparse
from voxel_models.plot_voxels import SchematicPlotter, draw_rgb # noqa
import visdom
parser = argparse.ArgumentParser()
parser.add_argument(
"--debug", type=int, default=-1, help="no shuffle, keep only debug num examples"
)
parser.add_argument("--visualize", action="store_true", help="draw pictures")
parser.add_argument("--num_examples", type=int, default=1024, help="num examples in an epoch")
parser.add_argument("--num_epochs", type=int, default=1500, help="training epochs")
parser.add_argument("--last_layer_sigmoid", type=int, default=1, help="do sigmoid if 1")
parser.add_argument("--num_layers", type=int, default=3, help="numm conv layers")
# parser.add_argument("--augment", default="none", help="none or maxshift:K_underdirt:J")
parser.add_argument("--cuda", action="store_true", help="use cuda")
parser.add_argument("--gpu_id", type=int, default=0, help="which gpu to use")
parser.add_argument("--zdim", type=int, default=256, help="hidden variable size")
parser.add_argument("--batchsize", type=int, default=32, help="batch size")
parser.add_argument("--data_dir", default="")
parser.add_argument(
"--save_model_dir", default="", help="where to save model (nowhere if blank)"
)
parser.add_argument(
"--load_model_dir", default="", help="from where to load model (nowhere if blank)"
)
parser.add_argument(
"--load_strict",
action="store_true",
help="error if model to load doesn't exist. if false just builds new",
)
parser.add_argument(
"--save_model_uid", default="", help="unique identifier on top of options-name"
)
parser.add_argument(
"--words_length", type=int, default=12, help="sentence pad length. FIXME?"
)
parser.add_argument(
"--optim_type", default="rmsprop", help="optim type, rmsprop, adam, adagrad, sgd"
)
parser.add_argument("--hidden_dim", type=int, default=64, help="size of hidden dim")
parser.add_argument(
"--load_dictionary",
default="/private/home/aszlam/junk/word_modify_word_ids.pk",
help="where to get word dict",
)
parser.add_argument("--reg_param", type=float, default=10.0, help="reg_param")
parser.add_argument("--lr_g", type=float, default=0.0001, help="step size for net")
parser.add_argument("--lr_d", type=float, default=0.0001, help="step size for net")
parser.add_argument("--lr_anneal", type=float, default=1.0, help="step multiplier on anneal")
parser.add_argument("--lr_anneal_every", type=int, default=150000, help="when to anneal")
parser.add_argument("--d_steps", type=int, default=1, help="when to anneal")
parser.add_argument(
"--sbatch", action="store_true", help="cluster run mode, no visdom, formatted stdout"
)
parser.add_argument("--ndonkeys", type=int, default=8, help="workers in dataloader")
args = parser.parse_args()
if not args.sbatch:
vis = visdom.Visdom(server="http://localhost")
sp = SchematicPlotter(vis)
main(args)
| craftassist-master | python/craftassist/voxel_models/modify/train_gan_model.py |
from datetime import datetime
import argparse
import os
import stat
parser = argparse.ArgumentParser()
parser.add_argument("--sweep_config_path", default="", help="path to sweep config")
parser.add_argument("--sweep_scripts_output_dir", default="", help="where to put script")
parser.add_argument("--sweep_name", default="", help="name of sweep")
parser.add_argument("--output_dir", default="", help="where to put job_output")
parser.add_argument(
"--append_date", action="store_false", help="append date to output dir and job name"
)
parser.add_argument("--partition", default="learnfair", help="name of partition")
opts = parser.parse_args()
assert opts.sweep_scripts_output_dir != ""
now = datetime.now()
nowstr = now.strftime("_%m_%d_%H_%M")
job_name = opts.sweep_name
output_dir = opts.output_dir
scripts_dir = opts.sweep_scripts_output_dir
if opts.append_date:
job_name = job_name + nowstr
output_dir = os.path.join(output_dir, job_name)
scripts_dir = os.path.join(scripts_dir, job_name)
os.makedirs(output_dir, exist_ok=True)
os.makedirs(scripts_dir, exist_ok=True)
with open(opts.sweep_config_path) as f:
args = {}
for l in f.readlines():
if len(l) > 0 and l[0] != "#":
w = l.split("=")
argname = w[0].strip()
if len(w) == 1:
argvals_text = [""]
else:
argvals_text = w[1].strip().split(",")
args[argname] = [av.strip() for av in argvals_text]
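# The parser above expects one "argname = val1, val2, ..." entry per line, skips '#'
# comment lines, and treats a bare argname as a flag. An illustrative (hypothetical)
# sweep config could look like:
#   # sweep over learning rate and hidden size
#   lr = 0.01, 0.001
#   hidden_dim = 64, 128
#   num_epochs = 500
#   cuda
# Multi-valued arguments are swept over (grid search below); single-valued arguments and
# bare flags are passed unchanged to every job.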
# grid search... TODO random
varying_args = [""]
static_args = " "
for argname in args.keys():
new_arglist = []
if len(args[argname]) > 1:
for a in varying_args:
for argval in args[argname]:
new_arglist.append(a + " --" + argname + " " + argval)
varying_args = new_arglist.copy()
else:
static_args = static_args + " --" + argname + " " + args[argname][0]
all_arglists = []
for a in varying_args:
all_arglists.append(a + static_args)
errpaths = []
outpaths = []
modelpaths = []
for i in range(len(all_arglists)):
uid = job_name + "_P" + str(i)
body = "#! /bin/bash \n"
body += "#SBATCH --job-name=" + uid + "\n"
body += "#SBATCH --output=" + os.path.join(output_dir, str(i) + ".out") + "\n"
outpaths.append(os.path.join(output_dir, str(i) + ".out"))
body += "#SBATCH --error=" + os.path.join(output_dir, str(i) + ".err") + "\n"
errpaths.append(os.path.join(output_dir, str(i) + ".err"))
body += "#SBATCH --partition=" + opts.partition + "\n"
body += "#SBATCH --nodes=1 \n"
body += "#SBATCH --ntasks-per-node=1 \n"
body += "#SBATCH --gres=gpu:1 \n"
body += "#SBATCH --cpus-per-task=10 \n"
body += "#SBATCH --signal=B:USR1@60 #Signal is sent to batch script itself \n"
body += "#SBATCH --open-mode=append \n"
body += "#SBATCH --time=4320 \n"
body += "\n"
# body += st #FIXME
body += "\n"
body += "module purge\n"
# TODO make a env loader...
body += "module load NCCL/2.2.13-cuda.9.0 \n"
body += "module load anaconda3/5.0.1 \n"
body += "source activate /private/home/kavyasrinet/.conda/envs/minecraft_env\n"
body += "cd /private/home/aszlam/fairinternal/minecraft/python/craftassist\n"
#######
body += "/private/home/kavyasrinet/.conda/envs/minecraft_env/bin/python voxel_models/modify/train_conv_model.py"
body += all_arglists[i]
body += " --sbatch --save_model_uid " + job_name + "_" + str(i)
scriptname = os.path.join(scripts_dir, str(i) + ".sh")
g = open(scriptname, "w")
g.write(body)
g.close()
st = os.stat(scriptname)
os.chmod(scriptname, st.st_mode | stat.S_IEXEC)
g = open(os.path.join(scripts_dir, "master"), "w")
g.write("#! /bin/sh \n")
for i in range(len(all_arglists)):
g.write("# opts :: " + varying_args[i] + " :: " + outpaths[i] + "\n")
for i in range(len(all_arglists)):
g.write("echo " + "'" + varying_args[i] + " :: " + outpaths[i] + "'" + "\n")
for i in range(len(all_arglists)):
g.write("sbatch " + str(i) + ".sh &\n")
g.close()
| craftassist-master | python/craftassist/voxel_models/modify/build_sweep_scripts.py |
import numpy as np
import random
import torch
from torch.utils import data as tds
import pickle
import shape_transforms
import shape_helpers as sh
from build_utils import blocks_list_to_npy
import minecraft_specs
from block_data import COLOR_BID_MAP
BLOCK_DATA = minecraft_specs.get_block_data()
# FIXME....
NEW_BLOCK_CHOICES = [idm for v in COLOR_BID_MAP.values() for idm in v]
BID_TO_COLOR = {idm: c for c in COLOR_BID_MAP.keys() for idm in COLOR_BID_MAP[c]}
##############################################
# WARNING: all npy arrays in this file are xyz
# not yzx
def rand():
return torch.rand(1).item()
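# Each template below returns (transform fn, instruction text, data): data["tform_data"]
# holds the kwargs passed to the transform, and data["inverse"] tells _generate_item to
# swap the (input, target) schematics so the text describes going from input to target.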
def thicker(schematic):
data = {}
data["tform_data"] = {"delta": int(np.floor(2 * rand()) + 1)}
# FIXME prob
if rand() > 0.5:
thick_or_thin = "thicker"
data["inverse"] = False
else:
thick_or_thin = "thinner"
data["inverse"] = True
text = "make it " + thick_or_thin
tform = shape_transforms.thicker
return tform, text, data
def scale(schematic):
data = {}
data["tform_data"] = {}
d = random.choice(
["wider", "narrower", "taller", "shorter", "fatter", "skinnier", "bigger", "smaller"]
)
text = "make it " + d
if d == "wider" or d == "narrower":
# FIXME prob
scale_factor = rand() + 1.0
if rand() > 0.5:
data["tform_data"]["lams"] = (1.0, 1.0, scale_factor)
else:
data["tform_data"]["lams"] = (scale_factor, 1.0, 1.0)
if d == "wider":
data["inverse"] = False
else:
data["inverse"] = True
elif d == "fatter" or d == "skinnier":
scale_factor = rand() + 1.0
data["tform_data"]["lams"] = (scale_factor, 1.0, scale_factor)
if d == "fatter":
data["inverse"] = False
else:
data["inverse"] = True
elif d == "taller" or d == "shorter":
scale_factor = rand() + 1.0
data["tform_data"]["lams"] = (1.0, scale_factor, 1.0)
if d == "taller":
data["inverse"] = False
else:
data["inverse"] = True
elif d == "bigger" or d == "smaller":
scale_factor = rand() + 1.0
data["tform_data"]["lams"] = (scale_factor, scale_factor, scale_factor)
if d == "bigger":
data["inverse"] = False
else:
data["inverse"] = True
else:
print(d)
raise Exception("what?")
tform = shape_transforms.scale_sparse
return tform, text, data
def rotate(schematic):
data = {}
angle = random.choice([90, -90])
data["tform_data"] = {"angle": angle}
data["inverse"] = False
if angle == 90:
ccw = "clockwise"
else:
ccw = "counter-clockwise"
text = "rotate it " + ccw
tform = shape_transforms.rotate
return tform, text, data
def replace_by_block(schematic):
data = {}
data["inverse"] = False
new_color = None
# FIXME prob
if rand() > 0.5:
new_color = random.choice(list(COLOR_BID_MAP.keys()))
new_idm = random.choice(COLOR_BID_MAP[new_color])
else:
new_idm = random.choice(NEW_BLOCK_CHOICES)
data["tform_data"] = {"new_idm": new_idm}
if rand() > 0.25:
idx = tuple(random.choice(np.transpose(schematic[:, :, :, 0].nonzero())))
idm = tuple(schematic[idx].squeeze())
if rand() > 0.5 or not (BID_TO_COLOR.get(idm)):
block_name = BLOCK_DATA["bid_to_name"][idm]
else:
block_name = BID_TO_COLOR[idm] + " blocks"
text = "change all the " + block_name + " to "
data["tform_data"]["current_idm"] = idm
else:
data["tform_data"]["every_n"] = 1
text = "change all the blocks to "
if new_color:
text = text + new_color + " blocks "
else:
text = text + BLOCK_DATA["bid_to_name"][new_idm]
tform = shape_transforms.replace_by_blocktype
return tform, text, data
# TODO middle...
# TODO look vector and sides + front + back
def replace_by_halfspace(schematic):
data = {}
data["inverse"] = False
new_color = None
# FIXME prob
if rand() > 0.5:
new_color = random.choice(list(COLOR_BID_MAP.keys()))
new_idm = random.choice(COLOR_BID_MAP[new_color])
else:
new_idm = random.choice(NEW_BLOCK_CHOICES)
data["tform_data"] = {"new_idm": new_idm}
geometry = {}
geometry["offset"] = np.array(schematic.shape[:3]) / 2 + 0.5
text = "make the "
if rand() > 0.5:
nz = np.transpose(schematic[:, :, :, 0].nonzero())
mins = np.min(nz, axis=0)
maxs = np.max(nz, axis=0)
amount = "quarter "
geometry["threshold"] = (maxs[1] - mins[1]) / 4
else:
amount = "half "
geometry["threshold"] = 0.0
if rand() > 0.5:
text = text + "top " + amount
geometry["v"] = np.array((0, 1.0, 0))
else:
text = text + "bottom " + amount
geometry["v"] = np.array((0, -1.0, 0))
data["tform_data"]["geometry"] = geometry
if new_color:
text = text + new_color
else:
text = text + BLOCK_DATA["bid_to_name"][new_idm]
tform = shape_transforms.replace_by_halfspace
return tform, text, data
def fill(schematic):
data = {}
data["tform_data"] = {}
if rand() > 0.5:
data["inverse"] = False
text = "fill it up"
else:
data["inverse"] = True
text = "hollow it out"
tform = shape_transforms.fill_flat
return tform, text, data
def get_schematic():
shape_name = random.choice(sh.SHAPE_NAMES)
opts = sh.SHAPE_HELPERS[shape_name]()
opts["bid"] = sh.bid()
blocks = sh.SHAPE_FNS[shape_name](**opts)
if len(blocks) == 0:
import ipdb
ipdb.set_trace()
return blocks
class ModifyData(tds.Dataset):
def __init__(self, opts, dictionary=None):
self.opts = opts
self.templates = {
"thicker": thicker,
"scale": scale,
"rotate": rotate,
"replace_by_block": replace_by_block,
# 'replace_by_n': replace_by_n,
"replace_by_halfspace": replace_by_halfspace,
"fill": fill,
}
self.debug = opts.debug
self.stored = []
self.template_sampler = torch.distributions.Categorical(
torch.Tensor(list(opts.tform_weights.values()))
)
self.tform_names = list(opts.tform_weights.keys())
self.nexamples = opts.nexamples
self.sidelength = opts.sidelength
self.allow_same = opts.allow_same
self.words_length = opts.words_length
self.max_meta = opts.max_meta
self.dictionary = dictionary
if self.dictionary:
if type(self.dictionary) is str:
with open(dictionary, "rb") as f:
self.dictionary = pickle.load(f)
self.unkword = len(self.dictionary["w2i"])
self.padword = len(self.dictionary["w2i"]) + 1
# for debug...
def print_text(self, word_tensor):
# words is the tensor output of indexing the dataset
words = ""
for i in range(word_tensor.shape[0]):
w = word_tensor[i].item()
if w == self.padword:
break
else:
words = words + self.dictionary["i2w"][w] + " "
return words
# TODO deal with reshifting back?
def generate_task(self):
size_fits = False
while not size_fits:
schematic, _ = blocks_list_to_npy(get_schematic(), xyz=True)
if max(schematic.shape) < self.sidelength:
size_fits = True
schematic = shape_transforms.moment_at_center(schematic, self.sidelength)
tform_name = self.tform_names[self.template_sampler.sample()]
tform, text, task_data = self.templates[tform_name](schematic)
return tform, text, task_data, schematic
def maybe_words_to_tensor(self, text):
words = text.split()
if self.dictionary:
wt = torch.LongTensor(self.words_length).fill_(self.padword)
for i in range(len(words)):
wt[i] = self.dictionary["w2i"].get(words[i], self.unkword)
return wt
else:
return words
def maybe_hash_idm(self, x):
if self.max_meta > 0:
return x[:, :, :, 1] + self.max_meta * x[:, :, :, 0]
else:
return x
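    # e.g. with max_meta = 20, the (block id, meta) pair (1, 5) hashes to 5 + 20 * 1 = 25;
    # unhashing recovers meta = 25 % 20 = 5 and bid = (25 - 5) // 20 = 1.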
def __getitem__(self, index):
if self.debug > 0:
if len(self.stored) == 0:
for i in range(self.debug):
self.stored.append(self._generate_item(0))
return self.stored[index]
else:
return self._generate_item(0)
def _generate_item(self, index):
change = False
size_fits = False
        # loop until the transformed schematic both fits in the grid and differs from the original
        while not (change and size_fits):
tform, text, task_data, schematic = self.generate_task()
new_schematic = tform(schematic, **task_data["tform_data"])
if max(new_schematic.shape) <= self.sidelength:
size_fits = True
new_schematic = shape_transforms.moment_at_center(new_schematic, self.sidelength)
diff = (schematic - new_schematic).any()
if self.allow_same:
change = True
if not diff:
text = ["no", "change"]
else:
if diff:
change = True
w = self.maybe_words_to_tensor(text)
schematic = self.maybe_hash_idm(torch.LongTensor(schematic))
new_schematic = self.maybe_hash_idm(torch.LongTensor(new_schematic))
if task_data["inverse"]:
return w, new_schematic, schematic
else:
return w, schematic, new_schematic
def __len__(self):
if self.debug > 0:
return self.debug
else:
return self.nexamples
if __name__ == "__main__":
from voxel_models.plot_voxels import SchematicPlotter
import visdom
import argparse
vis = visdom.Visdom(server="http://localhost")
sp = SchematicPlotter(vis)
parser = argparse.ArgumentParser()
parser.add_argument("--nexamples", type=int, default=100, help="size of epoch")
parser.add_argument("--sidelength", type=int, default=32, help="size of epoch")
parser.add_argument(
"--save_dict_file",
default="/private/home/aszlam/junk/word_modify_word_ids.pk",
help="where to save word dict",
)
parser.add_argument(
"--examples_for_dict",
type=int,
default=-1,
help="if bigger than 0, uses that many examples to build the word dict and save it",
)
parser.add_argument(
"--words_length", type=int, default=12, help="sentence pad length. FIXME?"
)
opts = parser.parse_args()
opts.allow_same = False
opts.max_meta = -1
opts.tform_weights = {
"thicker": 1.0,
"scale": 1.0,
"rotate": 1.0,
"replace_by_block": 1.0,
# 'replace_by_n': 1.0,
"replace_by_halfspace": 1.0,
"fill": 1.0,
}
opts.debug = False
M = ModifyData(opts)
def sample_and_draw():
text, old_schematic, new_schematic = M[0]
sp.drawMatplot(old_schematic)
sp.drawMatplot(new_schematic, title=" ".join(text))
return text, old_schematic, new_schematic
# for i in range(100):
# print(i)
# text, old_schematic, new_schematic = M[0]
# for i in range(10):
# a, b, c = sample_and_draw()
w2i = {}
i2w = {}
wcount = 0
for i in range(opts.examples_for_dict):
text, a, b = M[0]
for w in text:
if w2i.get(w) is None:
w2i[w] = wcount
i2w[wcount] = w
wcount += 1
print("new word! " + str(wcount) + " " + w)
if len(w2i) > 0:
with open(opts.save_dict_file, "wb") as f:
pickle.dump({"w2i": w2i, "i2w": i2w}, f)
| craftassist-master | python/craftassist/voxel_models/modify/shape_transform_dataloader.py |
import torch
import torch.utils.data
import torchvision
from box_ops import box_cxcywh_to_xyxy
from datasets.lvis import LvisDetectionBase
from pycocotools import mask as coco_mask
from pycocotools.coco import COCO
def convert_to_coco_api(ds):
coco_ds = COCO()
# annotation IDs need to start at 1, not 0, see torchvision issue #1530
ann_id = 1
dataset = {"images": [], "categories": [], "annotations": []}
categories = set()
for img_idx in range(len(ds)):
# find better way to get target
# targets = ds.get_annotations(img_idx)
img, targets = ds.get_in_coco_format(img_idx)
image_id = targets["image_id"].item()
img_dict = {}
img_dict["id"] = image_id
height, width = targets["orig_size"].tolist()
img_dict["height"] = height
img_dict["width"] = width
dataset["images"].append(img_dict)
bboxes = targets["boxes"]
# the boxes are in 0-1 format, in cxcywh format
# let's convert it into the format expected by COCO api
bboxes = box_cxcywh_to_xyxy(bboxes)
bboxes = bboxes * torch.tensor([width, height, width, height], dtype=torch.float32)
bboxes[:, 2:] -= bboxes[:, :2]
bboxes = bboxes.tolist()
labels = targets["labels"].tolist()
areas = targets["area"].tolist()
iscrowd = targets["iscrowd"].tolist()
if "masks" in targets:
masks = targets["masks"]
# make masks Fortran contiguous for coco_mask
masks = masks.permute(0, 2, 1).contiguous().permute(0, 2, 1)
if "keypoints" in targets:
keypoints = targets["keypoints"]
keypoints = keypoints.reshape(keypoints.shape[0], -1).tolist()
num_objs = len(bboxes)
for i in range(num_objs):
ann = {}
ann["image_id"] = image_id
ann["bbox"] = bboxes[i]
ann["category_id"] = labels[i]
categories.add(labels[i])
ann["area"] = areas[i]
ann["iscrowd"] = iscrowd[i]
ann["id"] = ann_id
if "masks" in targets:
ann["segmentation"] = coco_mask.encode(masks[i].numpy())
if "keypoints" in targets:
ann["keypoints"] = keypoints[i]
ann["num_keypoints"] = sum(k != 0 for k in keypoints[i][2::3])
dataset["annotations"].append(ann)
ann_id += 1
dataset["categories"] = [{"id": i} for i in sorted(categories)]
coco_ds.dataset = dataset
coco_ds.createIndex()
return coco_ds
def get_coco_api_from_dataset(dataset):
for _ in range(10):
# if isinstance(dataset, torchvision.datasets.CocoDetection):
# break
if isinstance(dataset, torch.utils.data.Subset):
dataset = dataset.dataset
if isinstance(dataset, LvisDetectionBase):
return dataset.lvis
if isinstance(dataset, torchvision.datasets.CocoDetection):
return dataset.coco
return convert_to_coco_api(dataset)
class PrepareInstance(object):
CLASSES = (
"aeroplane",
"bicycle",
"bird",
"boat",
"bottle",
"bus",
"car",
"cat",
"chair",
"cow",
"diningtable",
"dog",
"horse",
"motorbike",
"person",
"pottedplant",
"sheep",
"sofa",
"train",
"tvmonitor",
)
def __call__(self, image, target):
anno = target["annotation"]
h, w = anno["size"]["height"], anno["size"]["width"]
boxes = []
classes = []
area = []
iscrowd = []
objects = anno["object"]
if not isinstance(objects, list):
objects = [objects]
for obj in objects:
bbox = obj["bndbox"]
bbox = [int(bbox[n]) - 1 for n in ["xmin", "ymin", "xmax", "ymax"]]
boxes.append(bbox)
classes.append(self.CLASSES.index(obj["name"]))
iscrowd.append(int(obj["difficult"]))
area.append((bbox[2] - bbox[0]) * (bbox[3] - bbox[1]))
boxes = torch.as_tensor(boxes, dtype=torch.float32).reshape(-1, 4)
classes = torch.as_tensor(classes)
area = torch.as_tensor(area)
iscrowd = torch.as_tensor(iscrowd)
image_id = anno["filename"][:-4]
image_id = torch.as_tensor([int(image_id)])
target = {}
target["boxes"] = boxes
target["labels"] = classes
target["image_id"] = image_id
# useful metadata
target["orig_size"] = torch.as_tensor([int(h), int(w)])
target["size"] = torch.as_tensor([int(h), int(w)])
# for conversion to coco api
target["area"] = area
target["iscrowd"] = iscrowd
return image, target
| craftassist-master | python/craftassist/voxel_models/detection-transformer/to_coco_api.py |
import random
import torch
import torchvision.transforms.functional as F
import torchvision.transforms as T
from torchvision.ops.misc import interpolate
from box_ops import box_xyxy_to_cxcywh
import PIL
def crop(image, target, region):
cropped_image = F.crop(image, *region)
target = target.copy()
i, j, h, w = region
# should we do something wrt the original size?
target["size"] = torch.tensor([h, w])
fields = ["labels", "area", "iscrowd"]
if "boxes" in target:
boxes = target["boxes"]
max_size = torch.as_tensor([w, h], dtype=torch.float32)
cropped_boxes = boxes - torch.as_tensor([j, i, j, i])
cropped_boxes = torch.min(cropped_boxes.reshape(-1, 2, 2), max_size)
cropped_boxes = cropped_boxes.clamp(min=0)
area = (cropped_boxes[:, 1, :] - cropped_boxes[:, 0, :]).prod(dim=1)
target["boxes"] = cropped_boxes.reshape(-1, 4)
target["area"] = area
fields.append("boxes")
if "masks" in target:
# FIXME should we update the area here if there are no boxes?
target["masks"] = target["masks"][:, i : i + h, j : j + w]
fields.append("masks")
# remove elements for which the boxes or masks that have zero area
if "boxes" in target or "masks" in target:
# favor boxes selection when defining which elements to keep
# this is compatible with previous implementation
if "boxes" in target:
cropped_boxes = target["boxes"].reshape(-1, 2, 2)
keep = torch.all(cropped_boxes[:, 1, :] > cropped_boxes[:, 0, :], dim=1)
else:
keep = target["masks"].flatten(1).any(1)
for field in fields:
target[field] = target[field][keep]
return cropped_image, target
def hflip(image, target):
flipped_image = F.hflip(image)
w, h = image.size
target = target.copy()
if "boxes" in target:
boxes = target["boxes"]
boxes = boxes[:, [2, 1, 0, 3]] * torch.as_tensor([-1, 1, -1, 1]) + torch.as_tensor(
[w, 0, w, 0]
)
target["boxes"] = boxes
if "masks" in target:
target["masks"] = target["masks"].flip(-1)
return flipped_image, target
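# Worked example for the box flip above: with image width w = 100, a box
# (x0, y0, x1, y1) = (10, 20, 30, 40) maps to (100 - 30, 20, 100 - 10, 40) = (70, 20, 90, 40).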
def resize(image, target, size, max_size=None):
# size can be min_size (scalar) or (w, h) tuple
def get_size_with_aspect_ratio(image_size, size, max_size=None):
w, h = image_size
if max_size is not None:
min_original_size = float(min((w, h)))
max_original_size = float(max((w, h)))
if max_original_size / min_original_size * size > max_size:
size = int(round(max_size * min_original_size / max_original_size))
if (w <= h and w == size) or (h <= w and h == size):
return (h, w)
if w < h:
ow = size
oh = int(size * h / w)
else:
oh = size
ow = int(size * w / h)
return (oh, ow)
def get_size(image_size, size, max_size=None):
if isinstance(size, (list, tuple)):
return size[::-1]
else:
return get_size_with_aspect_ratio(image_size, size, max_size)
size = get_size(image.size, size, max_size)
rescaled_image = F.resize(image, size)
if target is None:
return rescaled_image, None
ratios = tuple(float(s) / float(s_orig) for s, s_orig in zip(rescaled_image.size, image.size))
ratio_width, ratio_height = ratios
target = target.copy()
if "boxes" in target:
boxes = target["boxes"]
scaled_boxes = boxes * torch.as_tensor(
[ratio_width, ratio_height, ratio_width, ratio_height]
)
target["boxes"] = scaled_boxes
area = target["area"]
scaled_area = area * (ratio_width * ratio_height)
target["area"] = scaled_area
h, w = size
target["size"] = torch.tensor([h, w])
if "masks" in target:
target["masks"] = (
interpolate(target["masks"][:, None].float(), size, mode="nearest")[:, 0] > 0.5
)
return rescaled_image, target
def pad(image, target, padding):
# assumes that we only pad on the bottom right corners
padded_image = F.pad(image, (0, 0, padding[0], padding[1]))
if target is None:
return padded_image, None
target = target.copy()
# should we do something wrt the original size?
target["size"] = torch.tensor(padded_image[::-1])
if "masks" in target:
target["masks"] = torch.nn.functional.pad(target["masks"], (0, padding[0], 0, padding[1]))
return padded_image, target
class RandomCrop(object):
def __init__(self, size):
self.size = size
def __call__(self, img, target):
region = T.RandomCrop.get_params(img, self.size)
return crop(img, target, region)
class RandomSizeCrop(object):
def __init__(self, min_size: int, max_size: int):
self.min_size = min_size
self.max_size = max_size
def __call__(self, img: PIL.Image.Image, target: dict):
w = random.randint(self.min_size, min(img.width, self.max_size))
h = random.randint(self.min_size, min(img.height, self.max_size))
region = T.RandomCrop.get_params(img, [h, w])
return crop(img, target, region)
class CenterCrop(object):
def __init__(self, size):
self.size = size
def __call__(self, img, target):
image_width, image_height = img.size
crop_height, crop_width = self.size
crop_top = int(round((image_height - crop_height) / 2.0))
crop_left = int(round((image_width - crop_width) / 2.0))
return crop(img, target, (crop_top, crop_left, crop_height, crop_width))
class RandomHorizontalFlip(object):
def __init__(self, p=0.5):
self.p = p
def __call__(self, img, target):
if random.random() < self.p:
return hflip(img, target)
return img, target
class RandomResize(object):
def __init__(self, sizes, max_size=None):
assert isinstance(sizes, (list, tuple))
self.sizes = sizes
self.max_size = max_size
def __call__(self, img, target=None):
size = random.choice(self.sizes)
return resize(img, target, size, self.max_size)
class RandomPad(object):
def __init__(self, max_pad):
self.max_pad = max_pad
def __call__(self, img, target):
pad_x = random.randint(0, self.max_pad)
pad_y = random.randint(0, self.max_pad)
return pad(img, target, (pad_x, pad_y))
class RandomSelect(object):
"""
Randomly selects between transforms1 and transforms2,
with probability p for transforms1 and (1 - p) for transforms2
"""
def __init__(self, transforms1, transforms2, p=0.5):
self.transforms1 = transforms1
self.transforms2 = transforms2
self.p = p
def __call__(self, img, target):
if random.random() < self.p:
return self.transforms1(img, target)
return self.transforms2(img, target)
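# Illustrative (hypothetical) usage composing the transforms in this file; the sizes and
# normalization constants below are assumptions, not values taken from this repo:
#   transform = Compose([
#       RandomHorizontalFlip(),
#       RandomSelect(
#           RandomResize([480, 512, 544], max_size=1333),
#           Compose([RandomResize([400, 500, 600]), RandomSizeCrop(384, 600)]),
#       ),
#       ToTensor(),
#       Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
#   ])
#   img, target = transform(img, target)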
class ToTensor(object):
def __call__(self, img, target):
return F.to_tensor(img), target
class RandomErasing(object):
def __init__(self, *args, **kwargs):
self.eraser = T.RandomErasing(*args, **kwargs)
def __call__(self, img, target):
return self.eraser(img), target
class Normalize(object):
def __init__(self, mean, std):
self.mean = mean
self.std = std
def __call__(self, image, target=None):
image = F.normalize(image, mean=self.mean, std=self.std)
if target is None:
return image, None
target = target.copy()
h, w = image.shape[-2:]
if "boxes" in target:
boxes = target["boxes"]
boxes = box_xyxy_to_cxcywh(boxes)
boxes = boxes / torch.tensor([w, h, w, h], dtype=torch.float32)
target["boxes"] = boxes
return image, target
class RemoveDifficult(object):
def __init__(self, enabled=False):
self.remove_difficult = enabled
def __call__(self, image, target=None):
if target is None:
return image, None
target = target.copy()
keep = ~target["iscrowd"].to(torch.bool) | (not self.remove_difficult)
if "boxes" in target:
target["boxes"] = target["boxes"][keep]
target["labels"] = target["labels"][keep]
target["iscrowd"] = target["iscrowd"][keep]
return image, target
class Compose(object):
def __init__(self, transforms):
self.transforms = transforms
def __call__(self, image, target):
for t in self.transforms:
image, target = t(image, target)
return image, target
def __repr__(self):
format_string = self.__class__.__name__ + "("
for t in self.transforms:
format_string += "\n"
format_string += " {0}".format(t)
format_string += "\n)"
return format_string
| craftassist-master | python/craftassist/voxel_models/detection-transformer/transforms.py |
import argparse
import builtins
import datetime
import json
import os
import random
import time
from pathlib import Path
import numpy as np
import torch
from torch.utils.data import DataLoader, DistributedSampler
from torch.optim.lr_scheduler import StepLR, MultiStepLR
import datasets
import to_coco_api
import utils
from datasets import build_dataset
from engine import evaluate, train_one_epoch
from models import build_model
def get_args_parser():
parser = argparse.ArgumentParser("Set transformer detector", add_help=False)
parser.add_argument("--lr", default=1e-4, type=float)
parser.add_argument("--lr_backbone", default=1e-5, type=float)
parser.add_argument("--batch_size", default=2, type=int)
parser.add_argument("--weight_decay", default=1e-4, type=float)
parser.add_argument("--epochs", default=300, type=int)
parser.add_argument("--lr_drop", default=200, type=int)
parser.add_argument("--optimizer", default="adam", type=str)
parser.add_argument(
"--clip_max_norm", default=0.1, type=float, help="gradient clipping max norm"
)
parser.add_argument(
"--eval_skip", default=1, type=int, help='do evaluation every "eval_skip" frames'
)
parser.add_argument("--schedule", default="step", type=str, choices=("step", "multistep"))
# model params
parser.add_argument("--model_file", default="model_parallel")
parser.add_argument(
"--mask_model", default="none", type=str, choices=("none", "smallconv", "v2")
)
parser.add_argument("--dropout", default=0.1, type=float)
parser.add_argument("--nheads", default=8, type=int)
parser.add_argument("--enc_layers", default=6, type=int)
parser.add_argument("--dec_layers", default=6, type=int)
parser.add_argument("--dim_feedforward", default=248, type=int)
parser.add_argument("--hidden_dim", default=384, type=int)
parser.add_argument(
"--set_loss",
default="hungarian",
type=str,
choices=("sequential", "hungarian", "lexicographical"),
)
parser.add_argument("--set_cost_class", default=1, type=float)
parser.add_argument("--set_cost_bbox", default=5, type=float)
parser.add_argument("--set_cost_giou", default=1, type=float)
parser.add_argument("--mask_loss_coef", default=1, type=float)
parser.add_argument("--dice_loss_coef", default=1, type=float)
parser.add_argument("--bbox_loss_coef", default=5, type=float)
parser.add_argument("--giou_loss_coef", default=1, type=float)
parser.add_argument("--backbone", default="semseg", type=str)
# parser.add_argument('--backbone', default='resnet50', type=str)
parser.add_argument("--position_embedding", default="v2", type=str, choices=("v1", "v2", "v3"))
parser.add_argument("--resample_features_to_size", default=-1, type=int)
parser.add_argument("--eos_coef", default=0.1, type=float)
parser.add_argument("--num_queries", default=99, type=int)
parser.add_argument("--pre_norm", action="store_true")
parser.add_argument("--aux_loss", action="store_true")
parser.add_argument("--pass_pos_and_query", action="store_true")
parser.add_argument("--dilation", action="store_true")
# dataset parameters
parser.add_argument("--dataset_file", default="coco")
parser.add_argument("--remove_difficult", action="store_true")
parser.add_argument(
"--crowdfree", action="store_true", help="Remove crowd images from training on COCO"
)
parser.add_argument("--masks", action="store_true")
parser.add_argument("--output-dir", default="", help="path where to save, empty for no saving")
parser.add_argument("--device", default="cuda", help="device to use for training / testing")
parser.add_argument("--seed", default=42, type=int)
parser.add_argument("--resume", default="", help="resume from checkpoint")
parser.add_argument("--start-epoch", default=0, type=int, metavar="N", help="start epoch")
parser.add_argument("--eval", action="store_true")
parser.add_argument("--num_workers", default=2, type=int)
# distributed training parameters
parser.add_argument(
"--world-size", default=1, type=int, help="number of distributed processes"
)
parser.add_argument(
"--dist-url", default="env://", help="url used to set up distributed training"
)
return parser
def main(args):
utils.init_distributed_mode(args)
print("git:\n {}\n".format(utils.get_sha()))
if args.mask_model != "none":
args.masks = True
print(args)
device = torch.device(args.device)
# fix the seed for reproducibility
seed = args.seed + utils.get_rank()
torch.manual_seed(seed)
np.random.seed(seed)
random.seed(seed)
model, criterion, postprocessor = build_model(args)
postprocessor.rescale_to_orig_size = True # for evaluation
model.to(device)
model_without_ddp = model
if args.distributed:
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
model_without_ddp = model.module
n_parameters = builtins.sum(p.numel() for p in model.parameters() if p.requires_grad)
print("number of params:", n_parameters)
# optimizer = torch.optim.Adam(model.parameters())
param_dicts = [
{"params": [p for n, p in model_without_ddp.named_parameters() if "backbone" not in n]},
{
"params": [p for n, p in model_without_ddp.named_parameters() if "backbone" in n],
"lr": args.lr_backbone,
},
]
if args.optimizer == "sgd":
optimizer = torch.optim.SGD(
param_dicts, lr=args.lr, momentum=0.9, weight_decay=args.weight_decay
)
elif args.optimizer in ["adam", "adamw"]:
optimizer = torch.optim.AdamW(param_dicts, lr=args.lr, weight_decay=args.weight_decay)
else:
raise RuntimeError(f"Unsupported optimizer {args.optimizer}")
if args.schedule == "step":
lr_scheduler = StepLR(optimizer, args.lr_drop)
elif args.schedule == "multistep":
milestones = list(range(args.lr_drop, args.epochs, 50))
lr_scheduler = MultiStepLR(optimizer, gamma=0.5, milestones=milestones)
dataset_train = build_dataset(image_set="trainval", args=args)
dataset_val = build_dataset(image_set="test", args=args)
if args.distributed:
sampler_train = DistributedSampler(dataset_train)
sampler_val = DistributedSampler(dataset_val, shuffle=False)
else:
sampler_train = torch.utils.data.RandomSampler(dataset_train)
sampler_val = torch.utils.data.SequentialSampler(dataset_val)
batch_sampler_train = torch.utils.data.BatchSampler(
sampler_train, args.batch_size, drop_last=True
)
data_loader_train = DataLoader(
dataset_train,
batch_sampler=batch_sampler_train,
collate_fn=utils.collate_fn,
num_workers=args.num_workers,
)
data_loader_val = DataLoader(
dataset_val,
args.batch_size,
sampler=sampler_val,
drop_last=False,
collate_fn=utils.collate_fn,
num_workers=args.num_workers,
)
if args.dataset_file == "coco_panoptic":
# We also evaluate AP during panoptic training, on original coco DS
coco_val = datasets.coco.build("val", args)
base_ds = to_coco_api.get_coco_api_from_dataset(coco_val)
else:
base_ds = None # to_coco_api.get_coco_api_from_dataset(dataset_val)
output_dir = Path(args.output_dir)
if args.resume:
checkpoint = torch.load(args.resume, map_location="cpu")
model_without_ddp.load_state_dict(checkpoint["model"])
optimizer.load_state_dict(checkpoint["optimizer"])
lr_scheduler.load_state_dict(checkpoint["lr_scheduler"])
args.start_epoch = checkpoint["epoch"] + 1
if args.eval:
test_stats, coco_evaluator = evaluate(
model,
criterion,
postprocessor,
data_loader_val,
base_ds,
device,
eval_bbox=True,
eval_masks=args.masks,
)
if args.output_dir:
utils.save_on_master(coco_evaluator.coco_eval["bbox"].eval, output_dir / "eval.pth")
return
print("Start training")
start_time = time.time()
for epoch in range(args.start_epoch, args.epochs):
if args.distributed:
sampler_train.set_epoch(epoch)
train_stats = train_one_epoch(
model, criterion, data_loader_train, optimizer, device, epoch, args.clip_max_norm
)
lr_scheduler.step()
if args.output_dir:
checkpoint_paths = [output_dir / "checkpoint.pth"]
# extra checkpoint before LR drop and every 100 epochs
if (epoch + 1) % args.lr_drop == 0 or (epoch + 1) % 100 == 0:
checkpoint_paths.append(output_dir / f"checkpoint{epoch:04}.pth")
for checkpoint_path in checkpoint_paths:
utils.save_on_master(
{
"model": model_without_ddp.state_dict(),
"optimizer": optimizer.state_dict(),
"lr_scheduler": lr_scheduler.state_dict(),
"epoch": epoch,
"args": args,
},
checkpoint_path,
)
# if epoch % args.eval_skip == 0:
# test_stats, coco_evaluator = evaluate(
# model, criterion, postprocessor, data_loader_val, base_ds, device, eval_bbox=True, eval_masks=args.masks
# )
# else:
# test_stats, coco_evaluator = {}, None
test_stats, coco_evaluator = {}, None
log_stats = {
**{f"train_{k}": v for k, v in train_stats.items()},
**{f"test_{k}": v for k, v in test_stats.items()},
"n_parameters": n_parameters,
}
if args.output_dir and utils.is_main_process():
with (output_dir / "log.txt").open("a") as f:
f.write(json.dumps(log_stats) + "\n")
# for evaluation logs
if coco_evaluator is not None:
os.makedirs(os.path.join(args.output_dir, "eval"), exist_ok=True)
if "bbox" in coco_evaluator.coco_eval:
filenames = ["latest.pth"]
if epoch % 50 == 0:
filenames.append(f"{epoch:03}.pth")
for name in filenames:
torch.save(
coco_evaluator.coco_eval["bbox"].eval, output_dir / "eval" / name
)
with (output_dir / "log_tb.txt").open("a") as f:
f.write(f"TORCHBOARD_METRICS[epoch] = {epoch}\n")
for k, v in vars(args).items():
f.write(f"TORCHBOARD_METRICS[{k}] = {v}")
for key in log_stats:
v = log_stats[key]
if isinstance(v, list):
for i, vi in enumerate(v):
f.write(f"TORCHBOARD_METRICS[{key}_{i}] = {vi}\n")
else:
f.write(f"TORCHBOARD_METRICS[{key}] = {v}\n")
total_time = time.time() - start_time
total_time_str = str(datetime.timedelta(seconds=int(total_time)))
print("Training time {}".format(total_time_str))
if __name__ == "__main__":
parser = argparse.ArgumentParser("Set transformer detector", parents=[get_args_parser()])
args = parser.parse_args()
if args.output_dir:
os.makedirs(args.output_dir, exist_ok=True)
main(args)
| craftassist-master | python/craftassist/voxel_models/detection-transformer/detection.py |
import torch
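# Box utilities: most functions here operate on 3D axis-aligned boxes given either as
# corners (x1, y1, z1, x2, y2, z2) or as center/size (cx, cy, cz, w, h, d); a couple of
# 2D (cx, cy, w, h) helpers are also kept.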
def box_area(boxes):
"""
Computes the area of a set of bounding boxes, which are specified by its
(x1, y1, z1, x2, y2, z2) coordinates.
Arguments:
boxes (Tensor[N, 6]): boxes for which the area will be computed. They
are expected to be in (x1, y1, z1, x2, y2, z2) format
Returns:
area (Tensor[N]): area for each box
"""
return (boxes[:, 3] - boxes[:, 0]) * (boxes[:, 4] - boxes[:, 1]) * (boxes[:, 5] - boxes[:, 2])
def box_cxcywh_to_xyxy(x):
x_c, y_c, w, h = x.unbind(-1)
b = [(x_c - 0.5 * w), (y_c - 0.5 * h), (x_c + 0.5 * w), (y_c + 0.5 * h)]
return torch.stack(b, dim=-1)
def box_xyxy_to_cxcywh(x):
x0, y0, x1, y1 = x.unbind(-1)
b = [(x0 + x1) / 2, (y0 + y1) / 2, (x1 - x0), (y1 - y0)]
return torch.stack(b, dim=-1)
def box_cxcyczwhd_to_xyzxyz(x):
x_c, y_c, z_c, w, h, d = x.unbind(-1)
b = [
(x_c - 0.5 * w),
(y_c - 0.5 * h),
(z_c - 0.5 * d),
(x_c + 0.5 * w),
(y_c + 0.5 * h),
(z_c + 0.5 * d),
]
return torch.stack(b, dim=-1)
def box_xyzxyz_to_cxcyczwhd(x):
    x0, y0, z0, x1, y1, z1 = x.unbind(-1)
b = [(x0 + x1) / 2, (y0 + y1) / 2, (z0 + z1) / 2, (x1 - x0), (y1 - y0), (z1 - z0)]
return torch.stack(b, dim=-1)
# modified from torchvision to also return the union
def box_iou(boxes1, boxes2):
area1 = box_area(boxes1)
area2 = box_area(boxes2)
lt = torch.max(boxes1[:, None, :3], boxes2[:, :3]) # [N,M,3]
rb = torch.min(boxes1[:, None, 3:], boxes2[:, 3:]) # [N,M,3]
whd = (rb - lt).clamp(min=0) # [N,M,3]
inter = whd[:, :, 0] * whd[:, :, 1] * whd[:, :, 2] # [N,M]
union = area1[:, None] + area2 - inter
iou = inter / union
return iou, union
def generalized_box_iou(boxes1, boxes2):
"""
Generalized IoU from https://giou.stanford.edu/
    The boxes should be in [x0, y0, z0, x1, y1, z1] format
Returns a [N, M] pairwise matrix, where N = len(boxes1)
and M = len(boxes2)
"""
    # degenerate boxes give inf / nan results
# so do an early check
assert (boxes1[:, 3:] >= boxes1[:, :3]).all()
assert (boxes2[:, 3:] >= boxes2[:, :3]).all()
iou, union = box_iou(boxes1, boxes2)
lt = torch.min(boxes1[:, None, :3], boxes2[:, :3])
rb = torch.max(boxes1[:, None, 3:], boxes2[:, 3:])
    whd = (rb - lt).clamp(min=0)  # [N,M,3]
area = whd[:, :, 0] * whd[:, :, 1] * whd[:, :, 2]
return iou - (area - union) / area
def masks_to_boxes(masks):
"""Compute the bounding boxes around the provided masks
The masks should be in format [N, H, W] where N is the number of masks, (H, W) are the spatial dimensions.
    Returns a [N, 6] tensor, with the boxes in (x1, y1, z1, x2, y2, z2) format
    """
    if masks.numel() == 0:
        return torch.zeros((0, 6), device=masks.device)
h, w = masks.shape[-2:]
y = torch.arange(0, h, dtype=torch.float)
x = torch.arange(0, w, dtype=torch.float)
y, x = torch.meshgrid(y, x)
x_mask = masks * x.unsqueeze(0)
x_max = x_mask.flatten(1).max(-1)[0]
x_min = x_mask.masked_fill(~(masks.bool()), 1e8).flatten(1).min(-1)[0]
y_mask = masks * y.unsqueeze(0)
y_max = y_mask.flatten(1).max(-1)[0]
y_min = y_mask.masked_fill(~(masks.bool()), 1e8).flatten(1).min(-1)[0]
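    # the input masks are 2D, so the z extent of each box is a fixed placeholder range [0, 50]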
return torch.stack(
[
x_min,
y_min,
torch.full(x_max.shape, 0, dtype=torch.float),
x_max,
y_max,
torch.full(x_max.shape, 50, dtype=torch.float),
],
1,
)
| craftassist-master | python/craftassist/voxel_models/detection-transformer/box_ops.py |
import argparse
import os
import uuid
from pathlib import Path
import detection
import submitit
def parse_args():
detection_parser = detection.get_args_parser()
parser = argparse.ArgumentParser("Submitit for detection", parents=[detection_parser])
parser.add_argument(
"--partition", default="learnfair", type=str, help="Partition where to submit"
)
parser.add_argument(
"--ngpus", default=8, type=int, help="Number of gpus to request on each node"
)
parser.add_argument("--nodes", default=4, type=int, help="Number of nodes to request")
parser.add_argument("--timeout", default=60, type=int, help="Duration of the job")
parser.add_argument(
"--job_dir", default="", type=str, help="Job dir. Leave empty for automatic."
)
parser.add_argument("--use_volta32", action="store_true", help="Big models? Use this")
parser.add_argument(
"--mail", default="", type=str, help="Email this user when the job finishes if specified"
)
parser.add_argument(
"--comment",
default="",
type=str,
help="Comment to pass to scheduler, e.g. priority message",
)
return parser.parse_args()
def get_shared_folder() -> Path:
user = os.getenv("USER")
if Path("/checkpoint/").is_dir():
p = Path(f"/checkpoint/{user}/experiments")
p.mkdir(exist_ok=True)
return p
raise RuntimeError("No shared folder available")
def get_init_file():
    # Init file must not exist, but its parent dir must exist.
os.makedirs(str(get_shared_folder()), exist_ok=True)
init_file = get_shared_folder() / f"{uuid.uuid4().hex}_init"
if init_file.exists():
os.remove(str(init_file))
return init_file
class Trainer(object):
def __init__(self, args):
self.args = args
def __call__(self):
import detection
self._setup_gpu_args()
detection.main(self.args)
def checkpoint(self):
import os
import submitit
self.args.dist_url = get_init_file().as_uri()
checkpoint_file = os.path.join(self.args.output_dir, "checkpoint.pth")
if os.path.exists(checkpoint_file):
self.args.resume = checkpoint_file
print("Requeuing ", self.args)
empty_trainer = type(self)(self.args)
return submitit.helpers.DelayedSubmission(empty_trainer)
def _setup_gpu_args(self):
import submitit
from pathlib import Path
job_env = submitit.JobEnvironment()
self.args.output_dir = Path(str(self.args.output_dir).replace("%j", str(job_env.job_id)))
self.args.gpu = job_env.local_rank
self.args.rank = job_env.global_rank
self.args.world_size = job_env.num_tasks
print(f"Process group: {job_env.num_tasks} tasks, rank: {job_env.global_rank}")
def main():
args = parse_args()
if args.job_dir == "":
args.job_dir = get_shared_folder() / "%j"
# Note that the folder will depend on the job_id, to easily track experiments
executor = submitit.AutoExecutor(folder=args.job_dir, max_num_timeout=30)
# executor = submitit.LocalExecutor(folder=get_shared_folder() / "%j")
# cluster setup is defined by environment variables
num_gpus_per_node = args.ngpus
nodes = args.nodes
partition = args.partition
timeout_min = args.timeout
kwargs = {}
if args.use_volta32:
kwargs["constraint"] = "volta32gb"
if args.comment:
kwargs["comment"] = args.comment
executor.update_parameters(
mem_gb=40 * num_gpus_per_node,
gpus_per_node=num_gpus_per_node,
tasks_per_node=num_gpus_per_node, # one task per GPU
cpus_per_task=10,
nodes=nodes,
timeout_min=timeout_min, # max is 60 * 72
# Below are cluster dependent parameters
hostgroup="fblearner_ash_bigsur_fair",
partition=partition,
signal_delay_s=120,
**kwargs,
)
executor.update_parameters(name="detectransformer")
if args.mail:
executor.update_parameters(
additional_parameters={"mail-user": args.mail, "mail-type": "END"}
)
args.dist_url = get_init_file().as_uri()
args.output_dir = args.job_dir
trainer = Trainer(args)
job = executor.submit(trainer)
print("Submitted job_id:", job.job_id)
# job.task(0).result()
if __name__ == "__main__":
main()
| craftassist-master | python/craftassist/voxel_models/detection-transformer/run_with_submitit.py |
import unittest
import torch
import box_ops
class Tester(unittest.TestCase):
def test_box_cxcywh_to_xyxy(self):
t = torch.rand(10, 4)
r = box_ops.box_xyxy_to_cxcywh(box_ops.box_cxcywh_to_xyxy(t))
self.assertTrue((t - r).abs().max() < 1e-5)
if __name__ == "__main__":
unittest.main()
| craftassist-master | python/craftassist/voxel_models/detection-transformer/test_box_ops.py |
import copy
import datetime
from collections import OrderedDict, defaultdict
import numpy as np
import torch
import torch._six
import pycocotools.mask as mask_util
import utils
from datasets.lvis import LVIS
#################################################################
# From LVIS, with following changes:
# * fixed LVISEval constructor to accept empty dt
# * Removed logger
# * LVIS results supporst numpy inputs
#################################################################
class Params:
def __init__(self, iou_type):
"""Params for LVIS evaluation API."""
self.img_ids = []
self.cat_ids = []
# np.arange causes trouble. the data point on arange is slightly
# larger than the true value
        self.iou_thrs = np.linspace(
            0.5, 0.95, int(np.round((0.95 - 0.5) / 0.05)) + 1, endpoint=True
        )
        self.rec_thrs = np.linspace(
            0.0, 1.00, int(np.round((1.00 - 0.0) / 0.01)) + 1, endpoint=True
        )
self.max_dets = 300
self.area_rng = [
[0 ** 2, 1e5 ** 2],
[0 ** 2, 32 ** 2],
[32 ** 2, 96 ** 2],
[96 ** 2, 1e5 ** 2],
]
self.area_rng_lbl = ["all", "small", "medium", "large"]
self.use_cats = 1
        # We bin categories into three bins based on how many images of the
        # training set the category is present in.
# r: Rare : < 10
# c: Common : >= 10 and < 100
# f: Frequent: >= 100
self.img_count_lbl = ["r", "c", "f"]
self.iou_type = iou_type
class LVISResults(LVIS):
def __init__(self, lvis_gt, results, max_dets=300):
"""Constructor for LVIS results.
Args:
lvis_gt (LVIS class instance, or str containing path of
annotation file)
results (str containing path of result file or a list of dicts)
max_dets (int): max number of detections per image. The official
value of max_dets for LVIS is 300.
"""
super(LVISResults, self).__init__()
assert isinstance(lvis_gt, LVIS)
self.dataset["images"] = [img for img in lvis_gt.dataset["images"]]
if isinstance(results, str):
result_anns = self._load_json(results)
elif type(results) == np.ndarray:
result_anns = self.loadNumpyAnnotations(results)
else:
result_anns = results
if max_dets >= 0:
result_anns = self.limit_dets_per_image(result_anns, max_dets)
if len(result_anns) > 0 and "bbox" in result_anns[0]:
self.dataset["categories"] = copy.deepcopy(lvis_gt.dataset["categories"])
for id, ann in enumerate(result_anns):
x1, y1, w, h = ann["bbox"]
x2 = x1 + w
y2 = y1 + h
if "segmentation" not in ann:
ann["segmentation"] = [[x1, y1, x1, y2, x2, y2, x2, y1]]
ann["area"] = w * h
ann["id"] = id + 1
elif len(result_anns) > 0 and "segmentation" in result_anns[0]:
self.dataset["categories"] = copy.deepcopy(lvis_gt.dataset["categories"])
for id, ann in enumerate(result_anns):
# Only support compressed RLE format as segmentation results
ann["area"] = mask_util.area(ann["segmentation"])
if "bbox" not in ann:
ann["bbox"] = mask_util.toBbox(ann["segmentation"])
ann["id"] = id + 1
self.dataset["annotations"] = result_anns
self._create_index()
# #FIXME: disabling this check for now
# img_ids_in_result = [ann["image_id"] for ann in result_anns]
# assert set(img_ids_in_result) == (
# set(img_ids_in_result) & set(self.get_img_ids())
# ), "Results do not correspond to current LVIS set."
def limit_dets_per_image(self, anns, max_dets):
img_ann = defaultdict(list)
for ann in anns:
img_ann[ann["image_id"]].append(ann)
for img_id, _anns in img_ann.items():
if len(_anns) <= max_dets:
continue
_anns = sorted(_anns, key=lambda ann: ann["score"], reverse=True)
img_ann[img_id] = _anns[:max_dets]
return [ann for anns in img_ann.values() for ann in anns]
def get_top_results(self, img_id, score_thrs):
ann_ids = self.get_ann_ids(img_ids=[img_id])
anns = self.load_anns(ann_ids)
return list(filter(lambda ann: ann["score"] > score_thrs, anns))
class LVISEval:
def __init__(self, lvis_gt, lvis_dt=None, iou_type="segm"):
"""Constructor for LVISEval.
Args:
lvis_gt (LVIS class instance, or str containing path of annotation file)
lvis_dt (LVISResult class instance, or str containing path of result file,
or list of dict)
iou_type (str): segm or bbox evaluation
"""
if iou_type not in ["bbox", "segm"]:
raise ValueError("iou_type: {} is not supported.".format(iou_type))
if isinstance(lvis_gt, LVIS):
self.lvis_gt = lvis_gt
elif isinstance(lvis_gt, str):
self.lvis_gt = LVIS(lvis_gt)
else:
raise TypeError("Unsupported type {} of lvis_gt.".format(lvis_gt))
if isinstance(lvis_dt, LVISResults):
self.lvis_dt = lvis_dt
elif isinstance(lvis_dt, (str, list)):
self.lvis_dt = LVISResults(self.lvis_gt, lvis_dt)
elif lvis_dt is not None:
raise TypeError("Unsupported type {} of lvis_dt.".format(lvis_dt))
# per-image per-category evaluation results
self.eval_imgs = defaultdict(list)
self.eval = {} # accumulated evaluation results
self._gts = defaultdict(list) # gt for evaluation
self._dts = defaultdict(list) # dt for evaluation
self.params = Params(iou_type=iou_type) # parameters
self.results = OrderedDict()
self.stats = []
self.ious = {} # ious between all gts and dts
self.params.img_ids = sorted(self.lvis_gt.get_img_ids())
self.params.cat_ids = sorted(self.lvis_gt.get_cat_ids())
def _to_mask(self, anns, lvis):
for ann in anns:
rle = lvis.ann_to_rle(ann)
ann["segmentation"] = rle
def _prepare(self):
"""Prepare self._gts and self._dts for evaluation based on params."""
cat_ids = self.params.cat_ids if self.params.cat_ids else None
gts = self.lvis_gt.load_anns(
self.lvis_gt.get_ann_ids(img_ids=self.params.img_ids, cat_ids=cat_ids)
)
dts = self.lvis_dt.load_anns(
self.lvis_dt.get_ann_ids(img_ids=self.params.img_ids, cat_ids=cat_ids)
)
# convert ground truth to mask if iou_type == 'segm'
if self.params.iou_type == "segm":
self._to_mask(gts, self.lvis_gt)
self._to_mask(dts, self.lvis_dt)
# set ignore flag
for gt in gts:
if "ignore" not in gt:
gt["ignore"] = 0
for gt in gts:
self._gts[gt["image_id"], gt["category_id"]].append(gt)
# For federated dataset evaluation we will filter out all dt for an
# image which belong to categories not present in gt and not present in
# the negative list for an image. In other words detector is not penalized
# for categories about which we don't have gt information about their
# presence or absence in an image.
img_data = self.lvis_gt.load_imgs(ids=self.params.img_ids)
# per image map of categories not present in image
img_nl = {d["id"]: d["neg_category_ids"] for d in img_data}
# per image list of categories present in image
img_pl = defaultdict(set)
for ann in gts:
img_pl[ann["image_id"]].add(ann["category_id"])
        # per image map of categories which have missing gt. For these
        # categories we don't penalize the detector for false positives.
self.img_nel = {d["id"]: d["not_exhaustive_category_ids"] for d in img_data}
for dt in dts:
img_id, cat_id = dt["image_id"], dt["category_id"]
if cat_id not in img_nl[img_id] and cat_id not in img_pl[img_id]:
continue
self._dts[img_id, cat_id].append(dt)
self.freq_groups = self._prepare_freq_group()
def _prepare_freq_group(self):
freq_groups = [[] for _ in self.params.img_count_lbl]
cat_data = self.lvis_gt.load_cats(self.params.cat_ids)
for idx, _cat_data in enumerate(cat_data):
frequency = _cat_data["frequency"]
freq_groups[self.params.img_count_lbl.index(frequency)].append(idx)
return freq_groups
def evaluate(self):
"""
Run per image evaluation on given images and store results
(a list of dict) in self.eval_imgs.
"""
self.params.img_ids = list(np.unique(self.params.img_ids))
if self.params.use_cats:
cat_ids = self.params.cat_ids
else:
cat_ids = [-1]
self._prepare()
self.ious = {
(img_id, cat_id): self.compute_iou(img_id, cat_id)
for img_id in self.params.img_ids
for cat_id in cat_ids
}
# loop through images, area range, max detection number
self.eval_imgs = [
self.evaluate_img(img_id, cat_id, area_rng)
for cat_id in cat_ids
for area_rng in self.params.area_rng
for img_id in self.params.img_ids
]
def _get_gt_dt(self, img_id, cat_id):
"""Create gt, dt which are list of anns/dets. If use_cats is true
only anns/dets corresponding to tuple (img_id, cat_id) will be
used. Else, all anns/dets in image are used and cat_id is not used.
"""
if self.params.use_cats:
gt = self._gts[img_id, cat_id]
dt = self._dts[img_id, cat_id]
else:
            gt = [_ann for _cat_id in self.params.cat_ids for _ann in self._gts[img_id, _cat_id]]
            dt = [_ann for _cat_id in self.params.cat_ids for _ann in self._dts[img_id, _cat_id]]
return gt, dt
def compute_iou(self, img_id, cat_id):
gt, dt = self._get_gt_dt(img_id, cat_id)
if len(gt) == 0 and len(dt) == 0:
return []
# Sort detections in decreasing order of score.
idx = np.argsort([-d["score"] for d in dt], kind="mergesort")
dt = [dt[i] for i in idx]
iscrowd = [int(False)] * len(gt)
if self.params.iou_type == "segm":
ann_type = "segmentation"
elif self.params.iou_type == "bbox":
ann_type = "bbox"
else:
raise ValueError("Unknown iou_type for iou computation.")
gt = [g[ann_type] for g in gt]
dt = [d[ann_type] for d in dt]
# compute iou between each dt and gt region
# will return array of shape len(dt), len(gt)
ious = mask_util.iou(dt, gt, iscrowd)
return ious
def evaluate_img(self, img_id, cat_id, area_rng):
"""Perform evaluation for single category and image."""
gt, dt = self._get_gt_dt(img_id, cat_id)
if len(gt) == 0 and len(dt) == 0:
return None
        # Add another field _ignore to only consider anns based on area range.
for g in gt:
if g["ignore"] or (g["area"] < area_rng[0] or g["area"] > area_rng[1]):
g["_ignore"] = 1
else:
g["_ignore"] = 0
# Sort gt ignore last
gt_idx = np.argsort([g["_ignore"] for g in gt], kind="mergesort")
gt = [gt[i] for i in gt_idx]
# Sort dt highest score first
dt_idx = np.argsort([-d["score"] for d in dt], kind="mergesort")
dt = [dt[i] for i in dt_idx]
# load computed ious
ious = (
self.ious[img_id, cat_id][:, gt_idx]
if len(self.ious[img_id, cat_id]) > 0
else self.ious[img_id, cat_id]
)
num_thrs = len(self.params.iou_thrs)
num_gt = len(gt)
num_dt = len(dt)
# Array to store the "id" of the matched dt/gt
gt_m = np.zeros((num_thrs, num_gt))
dt_m = np.zeros((num_thrs, num_dt))
gt_ig = np.array([g["_ignore"] for g in gt])
dt_ig = np.zeros((num_thrs, num_dt))
for iou_thr_idx, iou_thr in enumerate(self.params.iou_thrs):
if len(ious) == 0:
break
for dt_idx, _dt in enumerate(dt):
iou = min([iou_thr, 1 - 1e-10])
# information about best match so far (m=-1 -> unmatched)
# store the gt_idx which matched for _dt
m = -1
for gt_idx, _ in enumerate(gt):
# if this gt already matched continue
if gt_m[iou_thr_idx, gt_idx] > 0:
continue
# if _dt matched to reg gt, and on ignore gt, stop
if m > -1 and gt_ig[m] == 0 and gt_ig[gt_idx] == 1:
break
# continue to next gt unless better match made
if ious[dt_idx, gt_idx] < iou:
continue
# if match successful and best so far, store appropriately
iou = ious[dt_idx, gt_idx]
m = gt_idx
# No match found for _dt, go to next _dt
if m == -1:
continue
# if gt to ignore for some reason update dt_ig.
# Should not be used in evaluation.
dt_ig[iou_thr_idx, dt_idx] = gt_ig[m]
# _dt match found, update gt_m, and dt_m with "id"
dt_m[iou_thr_idx, dt_idx] = gt[m]["id"]
gt_m[iou_thr_idx, m] = _dt["id"]
# For LVIS we will ignore any unmatched detection if that category was
# not exhaustively annotated in gt.
dt_ig_mask = [
d["area"] < area_rng[0]
or d["area"] > area_rng[1]
or d["category_id"] in self.img_nel[d["image_id"]]
for d in dt
]
dt_ig_mask = np.array(dt_ig_mask).reshape((1, num_dt)) # 1 X num_dt
dt_ig_mask = np.repeat(dt_ig_mask, num_thrs, 0) # num_thrs X num_dt
# Based on dt_ig_mask ignore any unmatched detection by updating dt_ig
dt_ig = np.logical_or(dt_ig, np.logical_and(dt_m == 0, dt_ig_mask))
# store results for given image and category
return {
"image_id": img_id,
"category_id": cat_id,
"area_rng": area_rng,
"dt_ids": [d["id"] for d in dt],
"gt_ids": [g["id"] for g in gt],
"dt_matches": dt_m,
"gt_matches": gt_m,
"dt_scores": [d["score"] for d in dt],
"gt_ignore": gt_ig,
"dt_ignore": dt_ig,
}
def accumulate(self):
"""Accumulate per image evaluation results and store the result in
self.eval.
"""
if not self.eval_imgs:
print("Warning: Please run evaluate first.")
if self.params.use_cats:
cat_ids = self.params.cat_ids
else:
cat_ids = [-1]
num_thrs = len(self.params.iou_thrs)
num_recalls = len(self.params.rec_thrs)
num_cats = len(cat_ids)
num_area_rngs = len(self.params.area_rng)
num_imgs = len(self.params.img_ids)
# -1 for absent categories
precision = -np.ones((num_thrs, num_recalls, num_cats, num_area_rngs))
recall = -np.ones((num_thrs, num_cats, num_area_rngs))
# Initialize dt_pointers
dt_pointers = {}
for cat_idx in range(num_cats):
dt_pointers[cat_idx] = {}
for area_idx in range(num_area_rngs):
dt_pointers[cat_idx][area_idx] = {}
# Per category evaluation
for cat_idx in range(num_cats):
Nk = cat_idx * num_area_rngs * num_imgs
for area_idx in range(num_area_rngs):
Na = area_idx * num_imgs
E = [self.eval_imgs[Nk + Na + img_idx] for img_idx in range(num_imgs)]
# Remove elements which are None
E = [e for e in E if e is not None]
if len(E) == 0:
continue
# Append all scores: shape (N,)
dt_scores = np.concatenate([e["dt_scores"] for e in E], axis=0)
dt_ids = np.concatenate([e["dt_ids"] for e in E], axis=0)
dt_idx = np.argsort(-dt_scores, kind="mergesort")
dt_scores = dt_scores[dt_idx]
dt_ids = dt_ids[dt_idx]
dt_m = np.concatenate([e["dt_matches"] for e in E], axis=1)[:, dt_idx]
dt_ig = np.concatenate([e["dt_ignore"] for e in E], axis=1)[:, dt_idx]
gt_ig = np.concatenate([e["gt_ignore"] for e in E])
# num gt anns to consider
num_gt = np.count_nonzero(gt_ig == 0)
if num_gt == 0:
continue
tps = np.logical_and(dt_m, np.logical_not(dt_ig))
fps = np.logical_and(np.logical_not(dt_m), np.logical_not(dt_ig))
                tp_sum = np.cumsum(tps, axis=1).astype(dtype=np.float64)
                fp_sum = np.cumsum(fps, axis=1).astype(dtype=np.float64)
dt_pointers[cat_idx][area_idx] = {"dt_ids": dt_ids, "tps": tps, "fps": fps}
for iou_thr_idx, (tp, fp) in enumerate(zip(tp_sum, fp_sum)):
tp = np.array(tp)
fp = np.array(fp)
num_tp = len(tp)
rc = tp / num_gt
if num_tp:
recall[iou_thr_idx, cat_idx, area_idx] = rc[-1]
else:
recall[iou_thr_idx, cat_idx, area_idx] = 0
# np.spacing(1) ~= eps
pr = tp / (fp + tp + np.spacing(1))
pr = pr.tolist()
# Replace each precision value with the maximum precision
# value to the right of that recall level. This ensures
                    # that the calculated AP value will be less susceptible
# to small variations in the ranking.
for i in range(num_tp - 1, 0, -1):
if pr[i] > pr[i - 1]:
pr[i - 1] = pr[i]
rec_thrs_insert_idx = np.searchsorted(rc, self.params.rec_thrs, side="left")
pr_at_recall = [0.0] * num_recalls
try:
for _idx, pr_idx in enumerate(rec_thrs_insert_idx):
pr_at_recall[_idx] = pr[pr_idx]
except Exception:
pass
precision[iou_thr_idx, :, cat_idx, area_idx] = np.array(pr_at_recall)
self.eval = {
"params": self.params,
"counts": [num_thrs, num_recalls, num_cats, num_area_rngs],
"date": datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
"precision": precision,
"recall": recall,
"dt_pointers": dt_pointers,
}
def _summarize(self, summary_type, iou_thr=None, area_rng="all", freq_group_idx=None):
aidx = [
idx for idx, _area_rng in enumerate(self.params.area_rng_lbl) if _area_rng == area_rng
]
if summary_type == "ap":
s = self.eval["precision"]
if iou_thr is not None:
tidx = np.where(iou_thr == self.params.iou_thrs)[0]
s = s[tidx]
if freq_group_idx is not None:
s = s[:, :, self.freq_groups[freq_group_idx], aidx]
else:
s = s[:, :, :, aidx]
else:
s = self.eval["recall"]
if iou_thr is not None:
tidx = np.where(iou_thr == self.params.iou_thrs)[0]
s = s[tidx]
s = s[:, :, aidx]
if len(s[s > -1]) == 0:
mean_s = -1
else:
mean_s = np.mean(s[s > -1])
return mean_s
def summarize(self):
"""Compute and display summary metrics for evaluation results."""
if not self.eval:
raise RuntimeError("Please run accumulate() first.")
max_dets = self.params.max_dets
self.results["AP"] = self._summarize("ap")
self.results["AP50"] = self._summarize("ap", iou_thr=0.50)
self.results["AP75"] = self._summarize("ap", iou_thr=0.75)
self.results["APs"] = self._summarize("ap", area_rng="small")
self.results["APm"] = self._summarize("ap", area_rng="medium")
self.results["APl"] = self._summarize("ap", area_rng="large")
self.results["APr"] = self._summarize("ap", freq_group_idx=0)
self.results["APc"] = self._summarize("ap", freq_group_idx=1)
self.results["APf"] = self._summarize("ap", freq_group_idx=2)
self.stats = np.zeros((9,))
self.stats[0] = self._summarize("ap")
self.stats[1] = self._summarize("ap", iou_thr=0.50)
self.stats[2] = self._summarize("ap", iou_thr=0.75)
self.stats[3] = self._summarize("ap", area_rng="small")
self.stats[4] = self._summarize("ap", area_rng="medium")
self.stats[5] = self._summarize("ap", area_rng="large")
self.stats[6] = self._summarize("ap", freq_group_idx=0)
self.stats[7] = self._summarize("ap", freq_group_idx=1)
self.stats[8] = self._summarize("ap", freq_group_idx=2)
key = "AR@{}".format(max_dets)
self.results[key] = self._summarize("ar")
for area_rng in ["small", "medium", "large"]:
key = "AR{}@{}".format(area_rng[0], max_dets)
self.results[key] = self._summarize("ar", area_rng=area_rng)
self.print_results()
def run(self):
"""Wrapper function which calculates the results."""
self.evaluate()
self.accumulate()
self.summarize()
def print_results(self):
template = (
" {:<18} {} @[ IoU={:<9} | area={:>6s} | maxDets={:>3d} catIds={:>3s}] = {:0.3f}"
)
for key, value in self.results.items():
max_dets = self.params.max_dets
if "AP" in key:
title = "Average Precision"
_type = "(AP)"
else:
title = "Average Recall"
_type = "(AR)"
if len(key) > 2 and key[2].isdigit():
iou_thr = float(key[2:]) / 100
iou = "{:0.2f}".format(iou_thr)
else:
iou = "{:0.2f}:{:0.2f}".format(self.params.iou_thrs[0], self.params.iou_thrs[-1])
if len(key) > 2 and key[2] in ["r", "c", "f"]:
cat_group_name = key[2]
else:
cat_group_name = "all"
if len(key) > 2 and key[2] in ["s", "m", "l"]:
area_rng = key[2]
else:
area_rng = "all"
print(template.format(title, _type, iou, area_rng, max_dets, cat_group_name, value))
def get_results(self):
if not self.results:
print("Warning: results is empty. Call run().")
return self.results
#################################################################
# end of straight copy from lvis, just fixing constructor
#################################################################
class LvisEvaluator(object):
def __init__(self, lvis_gt, iou_types):
assert isinstance(iou_types, (list, tuple))
# lvis_gt = copy.deepcopy(lvis_gt)
self.lvis_gt = lvis_gt
self.iou_types = iou_types
self.coco_eval = {}
for iou_type in iou_types:
self.coco_eval[iou_type] = LVISEval(lvis_gt, iou_type=iou_type)
self.img_ids = []
self.eval_imgs = {k: [] for k in iou_types}
def update(self, predictions):
img_ids = list(np.unique(list(predictions.keys())))
self.img_ids.extend(img_ids)
for iou_type in self.iou_types:
results = self.prepare(predictions, iou_type)
lvis_dt = LVISResults(self.lvis_gt, results)
lvis_eval = self.coco_eval[iou_type]
lvis_eval.lvis_dt = lvis_dt
lvis_eval.params.img_ids = list(img_ids)
lvis_eval.evaluate()
eval_imgs = lvis_eval.eval_imgs
eval_imgs = np.asarray(eval_imgs).reshape(
len(lvis_eval.params.cat_ids),
len(lvis_eval.params.area_rng),
len(lvis_eval.params.img_ids),
)
self.eval_imgs[iou_type].append(eval_imgs)
def synchronize_between_processes(self):
for iou_type in self.iou_types:
self.eval_imgs[iou_type] = np.concatenate(self.eval_imgs[iou_type], 2)
create_common_lvis_eval(
self.coco_eval[iou_type], self.img_ids, self.eval_imgs[iou_type]
)
def accumulate(self):
for lvis_eval in self.coco_eval.values():
lvis_eval.accumulate()
def summarize(self):
for iou_type, lvis_eval in self.coco_eval.items():
print("IoU metric: {}".format(iou_type))
lvis_eval.summarize()
def prepare(self, predictions, iou_type):
if iou_type == "bbox":
return self.prepare_for_lvis_detection(predictions)
elif iou_type == "segm":
return self.prepare_for_lvis_segmentation(predictions)
elif iou_type == "keypoints":
return self.prepare_for_lvis_keypoint(predictions)
else:
raise ValueError("Unknown iou type {}".format(iou_type))
def prepare_for_lvis_detection(self, predictions):
lvis_results = []
for original_id, prediction in predictions.items():
if len(prediction) == 0:
continue
boxes = prediction["boxes"]
boxes = convert_to_xywh(boxes).tolist()
scores = prediction["scores"].tolist()
labels = prediction["labels"].tolist()
lvis_results.extend(
[
{
"image_id": original_id,
"category_id": labels[k],
"bbox": box,
"score": scores[k],
}
for k, box in enumerate(boxes)
]
)
return lvis_results
def prepare_for_lvis_segmentation(self, predictions):
lvis_results = []
for original_id, prediction in predictions.items():
if len(prediction) == 0:
continue
scores = prediction["scores"]
labels = prediction["labels"]
masks = prediction["masks"]
masks = masks > 0.5
scores = prediction["scores"].tolist()
labels = prediction["labels"].tolist()
rles = [
mask_util.encode(np.array(mask[0, :, :, np.newaxis], dtype=np.uint8, order="F"))[0]
for mask in masks
]
for rle in rles:
rle["counts"] = rle["counts"].decode("utf-8")
lvis_results.extend(
[
{
"image_id": original_id,
"category_id": labels[k],
"segmentation": rle,
"score": scores[k],
}
for k, rle in enumerate(rles)
]
)
return lvis_results
def convert_to_xywh(boxes):
xmin, ymin, xmax, ymax = boxes.unbind(1)
return torch.stack((xmin, ymin, xmax - xmin, ymax - ymin), dim=1)
def merge(img_ids, eval_imgs):
all_img_ids = utils.all_gather(img_ids)
all_eval_imgs = utils.all_gather(eval_imgs)
merged_img_ids = []
for p in all_img_ids:
merged_img_ids.extend(p)
merged_eval_imgs = []
for p in all_eval_imgs:
merged_eval_imgs.append(p)
merged_img_ids = np.array(merged_img_ids)
merged_eval_imgs = np.concatenate(merged_eval_imgs, 2)
# keep only unique (and in sorted order) images
merged_img_ids, idx = np.unique(merged_img_ids, return_index=True)
merged_eval_imgs = merged_eval_imgs[..., idx]
return merged_img_ids, merged_eval_imgs
def create_common_lvis_eval(lvis_eval, img_ids, eval_imgs):
img_ids, eval_imgs = merge(img_ids, eval_imgs)
img_ids = list(img_ids)
eval_imgs = list(eval_imgs.flatten())
lvis_eval.eval_imgs = eval_imgs
lvis_eval.params.img_ids = img_ids
| craftassist-master | python/craftassist/voxel_models/detection-transformer/lvis_eval.py |
import json
import numpy as np
import copy
import torch
import torch._six
from pycocotools.cocoeval import COCOeval
from pycocotools.coco import COCO
import pycocotools.mask as mask_util
from collections import defaultdict
import utils
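# COCO-style evaluation for 3D detections: predicted corner boxes are converted to
# (x, y, z, w, h, d) with convert_to_xyzwhd before being handed to pycocotools.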
class CocoEvaluator(object):
def __init__(self, coco_gt, iou_types):
assert isinstance(iou_types, (list, tuple))
coco_gt = copy.deepcopy(coco_gt)
self.coco_gt = coco_gt
self.iou_types = iou_types
self.coco_eval = {}
for iou_type in iou_types:
self.coco_eval[iou_type] = COCOeval(coco_gt, iouType=iou_type)
self.img_ids = []
self.eval_imgs = {k: [] for k in iou_types}
def update(self, predictions):
img_ids = list(np.unique(list(predictions.keys())))
self.img_ids.extend(img_ids)
for iou_type in self.iou_types:
results = self.prepare(predictions, iou_type)
coco_dt = loadRes(self.coco_gt, results) if results else COCO()
coco_eval = self.coco_eval[iou_type]
coco_eval.cocoDt = coco_dt
coco_eval.params.imgIds = list(img_ids)
img_ids, eval_imgs = evaluate(coco_eval)
self.eval_imgs[iou_type].append(eval_imgs)
def synchronize_between_processes(self):
for iou_type in self.iou_types:
self.eval_imgs[iou_type] = np.concatenate(self.eval_imgs[iou_type], 2)
create_common_coco_eval(
self.coco_eval[iou_type], self.img_ids, self.eval_imgs[iou_type]
)
def accumulate(self):
for coco_eval in self.coco_eval.values():
coco_eval.accumulate()
def summarize(self):
for iou_type, coco_eval in self.coco_eval.items():
print("IoU metric: {}".format(iou_type))
coco_eval.summarize()
def prepare(self, predictions, iou_type):
if iou_type == "bbox":
return self.prepare_for_coco_detection(predictions)
elif iou_type == "segm":
return self.prepare_for_coco_segmentation(predictions)
elif iou_type == "keypoints":
return self.prepare_for_coco_keypoint(predictions)
else:
raise ValueError("Unknown iou type {}".format(iou_type))
def prepare_for_coco_detection(self, predictions):
coco_results = []
for original_id, prediction in predictions.items():
if len(prediction) == 0:
continue
boxes = prediction["boxes"]
boxes = convert_to_xyzwhd(boxes).tolist()
scores = prediction["scores"].tolist()
labels = prediction["labels"].tolist()
coco_results.extend(
[
{
"image_id": original_id,
"category_id": labels[k],
"bbox": box,
"score": scores[k],
}
for k, box in enumerate(boxes)
]
)
return coco_results
def prepare_for_coco_segmentation(self, predictions):
coco_results = []
for original_id, prediction in predictions.items():
if len(prediction) == 0:
continue
scores = prediction["scores"]
labels = prediction["labels"]
masks = prediction["masks"]
masks = masks > 0.5
scores = prediction["scores"].tolist()
labels = prediction["labels"].tolist()
rles = [
mask_util.encode(np.array(mask[0, :, :, np.newaxis], dtype=np.uint8, order="F"))[0]
for mask in masks
]
for rle in rles:
rle["counts"] = rle["counts"].decode("utf-8")
coco_results.extend(
[
{
"image_id": original_id,
"category_id": labels[k],
"segmentation": rle,
"score": scores[k],
}
for k, rle in enumerate(rles)
]
)
return coco_results
def prepare_for_coco_keypoint(self, predictions):
coco_results = []
for original_id, prediction in predictions.items():
if len(prediction) == 0:
continue
boxes = prediction["boxes"]
boxes = convert_to_xyzwhd(boxes).tolist()
scores = prediction["scores"].tolist()
labels = prediction["labels"].tolist()
keypoints = prediction["keypoints"]
keypoints = keypoints.flatten(start_dim=1).tolist()
coco_results.extend(
[
{
"image_id": original_id,
"category_id": labels[k],
"keypoints": keypoint,
"score": scores[k],
}
for k, keypoint in enumerate(keypoints)
]
)
return coco_results
def convert_to_xyzwhd(boxes):
xmin, ymin, zmin, xmax, ymax, zmax = boxes.unbind(1)
return torch.stack((xmin, ymin, zmin, xmax - xmin, ymax - ymin, zmax - zmin), dim=1)
def merge(img_ids, eval_imgs):
all_img_ids = utils.all_gather(img_ids)
all_eval_imgs = utils.all_gather(eval_imgs)
merged_img_ids = []
for p in all_img_ids:
merged_img_ids.extend(p)
merged_eval_imgs = []
for p in all_eval_imgs:
merged_eval_imgs.append(p)
merged_img_ids = np.array(merged_img_ids)
merged_eval_imgs = np.concatenate(merged_eval_imgs, 2)
# keep only unique (and in sorted order) images
merged_img_ids, idx = np.unique(merged_img_ids, return_index=True)
merged_eval_imgs = merged_eval_imgs[..., idx]
return merged_img_ids, merged_eval_imgs
def create_common_coco_eval(coco_eval, img_ids, eval_imgs):
img_ids, eval_imgs = merge(img_ids, eval_imgs)
img_ids = list(img_ids)
eval_imgs = list(eval_imgs.flatten())
coco_eval.evalImgs = eval_imgs
coco_eval.params.imgIds = img_ids
coco_eval._paramsEval = copy.deepcopy(coco_eval.params)
#################################################################
# From pycocotools, just removed the prints and fixed
# a Python3 bug about unicode not defined
#################################################################
# Ideally, pycocotools wouldn't have hard-coded prints
# so that we could avoid copy-pasting those two functions
def createIndex(self):
# create index
# print('creating index...')
anns, cats, imgs = {}, {}, {}
imgToAnns, catToImgs = defaultdict(list), defaultdict(list)
if "annotations" in self.dataset:
for ann in self.dataset["annotations"]:
imgToAnns[ann["image_id"]].append(ann)
anns[ann["id"]] = ann
if "images" in self.dataset:
for img in self.dataset["images"]:
imgs[img["id"]] = img
if "categories" in self.dataset:
for cat in self.dataset["categories"]:
cats[cat["id"]] = cat
if "annotations" in self.dataset and "categories" in self.dataset:
for ann in self.dataset["annotations"]:
catToImgs[ann["category_id"]].append(ann["image_id"])
# print('index created!')
# create class members
self.anns = anns
self.imgToAnns = imgToAnns
self.catToImgs = catToImgs
self.imgs = imgs
self.cats = cats
maskUtils = mask_util
def loadRes(self, resFile):
"""
Load result file and return a result api object.
:param resFile (str) : file name of result file
:return: res (obj) : result api object
"""
res = COCO()
res.dataset["images"] = [img for img in self.dataset["images"]]
# print('Loading and preparing results...')
# tic = time.time()
if isinstance(resFile, torch._six.string_classes):
anns = json.load(open(resFile))
elif type(resFile) == np.ndarray:
anns = self.loadNumpyAnnotations(resFile)
else:
anns = resFile
    assert type(anns) == list, "results is not an array of objects"
annsImgIds = [ann["image_id"] for ann in anns]
assert set(annsImgIds) == (
set(annsImgIds) & set(self.getImgIds())
), "Results do not correspond to current coco set"
if "caption" in anns[0]:
imgIds = set([img["id"] for img in res.dataset["images"]]) & set(
[ann["image_id"] for ann in anns]
)
res.dataset["images"] = [img for img in res.dataset["images"] if img["id"] in imgIds]
for id, ann in enumerate(anns):
ann["id"] = id + 1
elif "bbox" in anns[0] and not anns[0]["bbox"] == []:
res.dataset["categories"] = copy.deepcopy(self.dataset["categories"])
for id, ann in enumerate(anns):
bb = ann["bbox"]
x1, x2, y1, y2 = [bb[0], bb[0] + bb[2], bb[1], bb[1] + bb[3]]
if "segmentation" not in ann:
ann["segmentation"] = [[x1, y1, x1, y2, x2, y2, x2, y1]]
ann["area"] = bb[2] * bb[3]
ann["id"] = id + 1
ann["iscrowd"] = 0
elif "segmentation" in anns[0]:
res.dataset["categories"] = copy.deepcopy(self.dataset["categories"])
for id, ann in enumerate(anns):
# now only support compressed RLE format as segmentation results
ann["area"] = maskUtils.area(ann["segmentation"])
if "bbox" not in ann:
ann["bbox"] = maskUtils.toBbox(ann["segmentation"])
ann["id"] = id + 1
ann["iscrowd"] = 0
elif "keypoints" in anns[0]:
res.dataset["categories"] = copy.deepcopy(self.dataset["categories"])
for id, ann in enumerate(anns):
s = ann["keypoints"]
x = s[0::3]
y = s[1::3]
x1, x2, y1, y2 = np.min(x), np.max(x), np.min(y), np.max(y)
ann["area"] = (x2 - x1) * (y2 - y1)
ann["id"] = id + 1
ann["bbox"] = [x1, y1, x2 - x1, y2 - y1]
# print('DONE (t={:0.2f}s)'.format(time.time()- tic))
res.dataset["annotations"] = anns
createIndex(res)
return res
def evaluate(self):
"""
Run per image evaluation on given images and store results (a list of dict) in self.evalImgs
:return: None
"""
# tic = time.time()
# print('Running per image evaluation...')
p = self.params
# add backward compatibility if useSegm is specified in params
if p.useSegm is not None:
p.iouType = "segm" if p.useSegm == 1 else "bbox"
print("useSegm (deprecated) is not None. Running {} evaluation".format(p.iouType))
# print('Evaluate annotation type *{}*'.format(p.iouType))
p.imgIds = list(np.unique(p.imgIds))
if p.useCats:
p.catIds = list(np.unique(p.catIds))
p.maxDets = sorted(p.maxDets)
self.params = p
self._prepare()
# loop through images, area range, max detection number
catIds = p.catIds if p.useCats else [-1]
if p.iouType == "segm" or p.iouType == "bbox":
computeIoU = self.computeIoU
elif p.iouType == "keypoints":
computeIoU = self.computeOks
self.ious = {
(imgId, catId): computeIoU(imgId, catId) for imgId in p.imgIds for catId in catIds
}
evaluateImg = self.evaluateImg
maxDet = p.maxDets[-1]
evalImgs = [
evaluateImg(imgId, catId, areaRng, maxDet)
for catId in catIds
for areaRng in p.areaRng
for imgId in p.imgIds
]
# this is NOT in the pycocotools code, but could be done outside
evalImgs = np.asarray(evalImgs).reshape(len(catIds), len(p.areaRng), len(p.imgIds))
self._paramsEval = copy.deepcopy(self.params)
# toc = time.time()
# print('DONE (t={:0.2f}s).'.format(toc-tic))
return p.imgIds, evalImgs
#################################################################
# end of straight copy from pycocotools, just removing the prints
#################################################################
| craftassist-master | python/craftassist/voxel_models/detection-transformer/coco_eval.py |
import math
import sys
from typing import Iterable
import torch
import utils
from coco_eval import CocoEvaluator
from datasets.lvis import LVIS
from lvis_eval import LvisEvaluator
def train_one_epoch(
model: torch.nn.Module,
criterion: torch.nn.Module,
data_loader: Iterable,
optimizer: torch.optim.Optimizer,
device: torch.device,
epoch: int,
max_norm: float = 0,
):
model.train()
criterion.train()
metric_logger = utils.MetricLogger(delimiter=" ")
metric_logger.add_meter("lr", utils.SmoothedValue(window_size=1, fmt="{value:.6f}"))
metric_logger.add_meter("class_error", utils.SmoothedValue(window_size=1, fmt="{value:.2f}"))
header = "Epoch: [{}]".format(epoch)
print_freq = 10
for samples, targets in metric_logger.log_every(data_loader, print_freq, header):
# print('--------------------')
samples = samples.to(device)
targets = [{k: v.to(device) for k, v in t.items()} for t in targets]
outputs = model(samples)
print("------ outputs 1---------")
print(outputs["pred_masks"].shape)
loss_dict = criterion(outputs, targets)
weight_dict = criterion.weight_dict
losses = sum(loss_dict[k] * weight_dict[k] for k in loss_dict.keys() if k in weight_dict)
# reduce losses over all GPUs for logging purposes
loss_dict_reduced = utils.reduce_dict(loss_dict)
loss_dict_reduced_unscaled = {f"{k}_unscaled": v for k, v in loss_dict_reduced.items()}
loss_dict_reduced_scaled = {
k: v * weight_dict[k] for k, v in loss_dict_reduced.items() if k in weight_dict
}
losses_reduced_scaled = sum(loss_dict_reduced_scaled.values())
loss_value = losses_reduced_scaled.item()
if not math.isfinite(loss_value):
print("Loss is {}, stopping training".format(loss_value))
print(loss_dict_reduced)
sys.exit(1)
optimizer.zero_grad()
losses.backward()
if max_norm > 0:
torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm)
optimizer.step()
metric_logger.update(
loss=loss_value, **loss_dict_reduced_scaled, **loss_dict_reduced_unscaled
)
metric_logger.update(class_error=loss_dict_reduced["class_error"])
metric_logger.update(lr=optimizer.param_groups[0]["lr"])
# gather the stats from all processes
metric_logger.synchronize_between_processes()
print("Averaged stats:", metric_logger)
return {k: meter.global_avg for k, meter in metric_logger.meters.items()}
@torch.no_grad()
def evaluate(model, criterion, postprocessor, data_loader, base_ds, device, eval_bbox, eval_masks):
model.eval()
criterion.eval()
metric_logger = utils.MetricLogger(delimiter=" ")
metric_logger.add_meter("class_error", utils.SmoothedValue(window_size=1, fmt="{value:.2f}"))
header = "Test:"
iou_types = []
if eval_masks:
iou_types += ["segm"]
if eval_bbox:
iou_types += ["bbox"]
iou_types = tuple(iou_types)
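    # pick the evaluator that matches the ground-truth dataset API (LVIS vs. plain COCO)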
if isinstance(base_ds, LVIS):
coco_evaluator = LvisEvaluator(base_ds, iou_types) if eval_bbox or eval_masks else None
else:
coco_evaluator = CocoEvaluator(base_ds, iou_types) if eval_bbox or eval_masks else None
# coco_evaluator.coco_eval[iou_types[0]].params.iouThrs = [0, 0.1, 0.5, 0.75]
for samples, targets in metric_logger.log_every(data_loader, 10, header):
samples = samples.to(device)
targets = [{k: v.to(device) for k, v in t.items()} for t in targets]
outputs = model(samples)
loss_dict = criterion(outputs, targets)
weight_dict = criterion.weight_dict
# reduce losses over all GPUs for logging purposes
loss_dict_reduced = utils.reduce_dict(loss_dict)
loss_dict_reduced_scaled = {
k: v * weight_dict[k] for k, v in loss_dict_reduced.items() if k in weight_dict
}
loss_dict_reduced_unscaled = {f"{k}_unscaled": v for k, v in loss_dict_reduced.items()}
metric_logger.update(
loss=sum(loss_dict_reduced_scaled.values()),
**loss_dict_reduced_scaled,
**loss_dict_reduced_unscaled,
)
metric_logger.update(class_error=loss_dict_reduced["class_error"])
results = postprocessor(outputs, targets)
res = {target["image_id"].item(): output for target, output in zip(targets, results)}
if coco_evaluator is not None:
coco_evaluator.update(res)
# gather the stats from all processes
metric_logger.synchronize_between_processes()
print("Averaged stats:", metric_logger)
if coco_evaluator is not None:
coco_evaluator.synchronize_between_processes()
# accumulate predictions from all images
if coco_evaluator is not None:
coco_evaluator.accumulate()
coco_evaluator.summarize()
stats = {k: meter.global_avg for k, meter in metric_logger.meters.items()}
if coco_evaluator is not None:
if eval_bbox:
stats["coco_eval_bbox"] = coco_evaluator.coco_eval["bbox"].stats.tolist()
if eval_masks:
stats["coco_eval_masks"] = coco_evaluator.coco_eval["segm"].stats.tolist()
return stats, coco_evaluator
| craftassist-master | python/craftassist/voxel_models/detection-transformer/engine.py |
from __future__ import print_function
from collections import defaultdict, deque
import datetime
import pickle
import subprocess
import time
import torch
import torch.distributed as dist
import os
class SmoothedValue(object):
"""Track a series of values and provide access to smoothed values over a
window or the global series average.
"""
def __init__(self, window_size=20, fmt=None):
if fmt is None:
fmt = "{median:.4f} ({global_avg:.4f})"
self.deque = deque(maxlen=window_size)
self.total = 0.0
self.count = 0
self.fmt = fmt
def update(self, value, n=1):
self.deque.append(value)
self.count += n
self.total += value * n
def synchronize_between_processes(self):
"""
Warning: does not synchronize the deque!
"""
if not is_dist_avail_and_initialized():
return
t = torch.tensor([self.count, self.total], dtype=torch.float64, device="cuda")
dist.barrier()
dist.all_reduce(t)
t = t.tolist()
self.count = int(t[0])
self.total = t[1]
@property
def median(self):
d = torch.tensor(list(self.deque))
return d.median().item()
@property
def avg(self):
d = torch.tensor(list(self.deque), dtype=torch.float32)
return d.mean().item()
@property
def global_avg(self):
return self.total / self.count
@property
def max(self):
return max(self.deque)
@property
def value(self):
return self.deque[-1]
def __str__(self):
return self.fmt.format(
median=self.median,
avg=self.avg,
global_avg=self.global_avg,
max=self.max,
value=self.value,
)
def all_gather(data):
"""
Run all_gather on arbitrary picklable data (not necessarily tensors)
Args:
data: any picklable object
Returns:
list[data]: list of data gathered from each rank
"""
world_size = get_world_size()
if world_size == 1:
return [data]
# serialized to a Tensor
buffer = pickle.dumps(data)
storage = torch.ByteStorage.from_buffer(buffer)
tensor = torch.ByteTensor(storage).to("cuda")
# obtain Tensor size of each rank
local_size = torch.tensor([tensor.numel()], device="cuda")
size_list = [torch.tensor([0], device="cuda") for _ in range(world_size)]
dist.all_gather(size_list, local_size)
size_list = [int(size.item()) for size in size_list]
max_size = max(size_list)
# receiving Tensor from all ranks
# we pad the tensor because torch all_gather does not support
# gathering tensors of different shapes
tensor_list = []
for _ in size_list:
tensor_list.append(torch.empty((max_size,), dtype=torch.uint8, device="cuda"))
if local_size != max_size:
padding = torch.empty(size=(max_size - local_size,), dtype=torch.uint8, device="cuda")
tensor = torch.cat((tensor, padding), dim=0)
dist.all_gather(tensor_list, tensor)
data_list = []
for size, tensor in zip(size_list, tensor_list):
buffer = tensor.cpu().numpy().tobytes()[:size]
data_list.append(pickle.loads(buffer))
return data_list
def reduce_dict(input_dict, average=True):
"""
Args:
input_dict (dict): all the values will be reduced
average (bool): whether to do average or sum
Reduce the values in the dictionary from all processes so that all processes
have the averaged results. Returns a dict with the same fields as
input_dict, after reduction.
"""
world_size = get_world_size()
if world_size < 2:
return input_dict
with torch.no_grad():
names = []
values = []
# sort the keys so that they are consistent across processes
for k in sorted(input_dict.keys()):
names.append(k)
values.append(input_dict[k])
values = torch.stack(values, dim=0)
dist.all_reduce(values)
if average:
values /= world_size
reduced_dict = {k: v for k, v in zip(names, values)}
return reduced_dict
class MetricLogger(object):
def __init__(self, delimiter="\t"):
self.meters = defaultdict(SmoothedValue)
self.delimiter = delimiter
def update(self, **kwargs):
for k, v in kwargs.items():
if isinstance(v, torch.Tensor):
v = v.item()
assert isinstance(v, (float, int))
self.meters[k].update(v)
def __getattr__(self, attr):
if attr in self.meters:
return self.meters[attr]
if attr in self.__dict__:
return self.__dict__[attr]
raise AttributeError("'{}' object has no attribute '{}'".format(type(self).__name__, attr))
def __str__(self):
loss_str = []
for name, meter in self.meters.items():
loss_str.append("{}: {}".format(name, str(meter)))
return self.delimiter.join(loss_str)
def synchronize_between_processes(self):
for meter in self.meters.values():
meter.synchronize_between_processes()
def add_meter(self, name, meter):
self.meters[name] = meter
def log_every(self, iterable, print_freq, header=None):
i = 0
if not header:
header = ""
start_time = time.time()
end = time.time()
iter_time = SmoothedValue(fmt="{avg:.4f}")
data_time = SmoothedValue(fmt="{avg:.4f}")
space_fmt = ":" + str(len(str(len(iterable)))) + "d"
if torch.cuda.is_available():
log_msg = self.delimiter.join(
[
header,
"[{0" + space_fmt + "}/{1}]",
"eta: {eta}",
"{meters}",
"time: {time}",
"data: {data}",
"max mem: {memory:.0f}",
]
)
else:
log_msg = self.delimiter.join(
[
header,
"[{0" + space_fmt + "}/{1}]",
"eta: {eta}",
"{meters}",
"time: {time}",
"data: {data}",
]
)
MB = 1024.0 * 1024.0
for obj in iterable:
data_time.update(time.time() - end)
yield obj
iter_time.update(time.time() - end)
if i % print_freq == 0 or i == len(iterable) - 1:
eta_seconds = iter_time.global_avg * (len(iterable) - i)
eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
if torch.cuda.is_available():
print(
log_msg.format(
i,
len(iterable),
eta=eta_string,
meters=str(self),
time=str(iter_time),
data=str(data_time),
memory=torch.cuda.max_memory_allocated() / MB,
)
)
else:
print(
log_msg.format(
i,
len(iterable),
eta=eta_string,
meters=str(self),
time=str(iter_time),
data=str(data_time),
)
)
i += 1
end = time.time()
total_time = time.time() - start_time
total_time_str = str(datetime.timedelta(seconds=int(total_time)))
print(
"{} Total time: {} ({:.4f} s / it)".format(
header, total_time_str, total_time / len(iterable)
)
)
def get_sha():
cwd = os.path.dirname(os.path.abspath(__file__))
def _run(command):
return subprocess.check_output(command, cwd=cwd).decode("ascii").strip()
sha = "N/A"
diff = "clean"
branch = "N/A"
try:
sha = _run(["git", "rev-parse", "HEAD"])
subprocess.check_output(["git", "diff"], cwd=cwd)
diff = _run(["git", "diff-index", "HEAD"])
diff = "has uncommited changes" if diff else "clean"
branch = _run(["git", "rev-parse", "--abbrev-ref", "HEAD"])
except Exception:
pass
message = f"sha: {sha}, status: {diff}, branch: {branch}"
return message
def collate_fn(batch):
batch = list(zip(*batch))
batch[0] = NestedTensor.from_tensor_list(batch[0])
return tuple(batch)
class NestedTensor(object):
def __init__(self, tensors, mask):
self.tensors = tensors
self.mask = mask
def to(self, *args, **kwargs):
cast_tensor = self.tensors.to(*args, **kwargs)
cast_mask = self.mask.to(*args, **kwargs) if self.mask is not None else None
return type(self)(cast_tensor, cast_mask)
def decompose(self):
return self.tensors, self.mask
@classmethod
def from_tensor_list(cls, tensor_list):
# TODO make this more general
# print('---- nest tensor size ----')
# print(tensor_list[1].shape)
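        # pad every sample to the largest spatial size in the batch; mask entries are
        # True wherever padding was added, False over real content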
if tensor_list[0].ndim == 3:
# TODO make it support different-sized images
max_size = tuple(max(s) for s in zip(*[img.shape for img in tensor_list]))
# min_size = tuple(min(s) for s in zip(*[img.shape for img in tensor_list]))
batch_shape = (len(tensor_list),) + max_size
b, h, w, d = batch_shape
dtype = tensor_list[0].dtype
device = tensor_list[0].device
tensor = torch.zeros(batch_shape, dtype=dtype, device=device)
mask = torch.ones((b, h, w, d), dtype=torch.bool, device=device)
for img, pad_img, m in zip(tensor_list, tensor, mask):
pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img)
m[: img.shape[0], : img.shape[1], : img.shape[2]] = False
elif tensor_list[0].ndim == 4:
max_size = tuple(max(s) for s in zip(*[img.shape for img in tensor_list]))
# min_size = tuple(min(s) for s in zip(*[img.shape for img in tensor_list]))
batch_shape = (len(tensor_list),) + max_size
b, c, h, w, d = batch_shape
dtype = tensor_list[0].dtype
device = tensor_list[0].device
tensor = torch.zeros(batch_shape, dtype=dtype, device=device)
mask = torch.ones((b, c, h, w, d), dtype=torch.bool, device=device)
for img, pad_img, m in zip(tensor_list, tensor, mask):
pad_img[: img.shape[0], : img.shape[1], : img.shape[2], : img.shape[3]].copy_(img)
m[: img.shape[0], : img.shape[1], : img.shape[2], : img.shape[3]] = False
else:
raise ValueError("not supported")
return cls(tensor, mask)
def __repr__(self):
return repr(self.tensors)
def setup_for_distributed(is_master):
"""
This function disables printing when not in master process
"""
import builtins as __builtin__
builtin_print = __builtin__.print
def print(*args, **kwargs):
force = kwargs.pop("force", False)
if is_master or force:
builtin_print(*args, **kwargs)
__builtin__.print = print
def is_dist_avail_and_initialized():
if not dist.is_available():
return False
if not dist.is_initialized():
return False
return True
def get_world_size():
if not is_dist_avail_and_initialized():
return 1
return dist.get_world_size()
def get_rank():
if not is_dist_avail_and_initialized():
return 0
return dist.get_rank()
def is_main_process():
return get_rank() == 0
def save_on_master(*args, **kwargs):
if is_main_process():
torch.save(*args, **kwargs)
def init_distributed_mode(args):
if "RANK" in os.environ and "WORLD_SIZE" in os.environ:
args.rank = int(os.environ["RANK"])
args.world_size = int(os.environ["WORLD_SIZE"])
args.gpu = int(os.environ["LOCAL_RANK"])
elif "SLURM_PROCID" in os.environ:
args.rank = int(os.environ["SLURM_PROCID"])
args.gpu = args.rank % torch.cuda.device_count()
else:
print("Not using distributed mode")
args.distributed = False
return
args.distributed = True
torch.cuda.set_device(args.gpu)
args.dist_backend = "nccl"
print("| distributed init (rank {}): {}".format(args.rank, args.dist_url), flush=True)
torch.distributed.init_process_group(
backend=args.dist_backend,
init_method=args.dist_url,
world_size=args.world_size,
rank=args.rank,
)
torch.distributed.barrier()
setup_for_distributed(args.rank == 0)
def accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
if target.numel() == 0:
return [torch.zeros([], device=output.device)]
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0)
res.append(correct_k.mul_(100.0 / batch_size))
return res
| craftassist-master | python/craftassist/voxel_models/detection-transformer/utils.py |
import torch
import numpy as np
import pandas as pd
from pathlib import Path
import seaborn as sns
import matplotlib.pyplot as plt
def plot_logs(logs, fields=("class_error", "loss_bbox_unscaled", "mAP"), ewm_col=0):
dfs = [pd.read_json(Path(p) / "log.txt", lines=True) for p in logs]
fig, axs = plt.subplots(ncols=len(fields), figsize=(16, 5))
for df, color in zip(dfs, sns.color_palette(n_colors=len(logs))):
for j, field in enumerate(fields):
if field == "loss_huber" and "test_loss_huber" not in df.keys():
field = "loss_bbox_unscaled"
if field == "mAP":
coco_eval = (
                    pd.DataFrame(np.stack(df.test_coco_eval.dropna().values)[:, 1])
.ewm(com=ewm_col)
.mean()
)
axs[j].plot(coco_eval, c=color)
else:
df.interpolate().ewm(com=ewm_col).mean().plot(
y=[f"train_{field}", f"test_{field}"],
ax=axs[j],
color=[color] * 2,
style=["-", "--"],
)
for ax, field in zip(axs, fields):
ax.legend([Path(p).name for p in logs])
ax.set_title(field)
def plot_precision_recall(files, naming_scheme="iter"):
if naming_scheme == "exp_id":
# name becomes exp_id
names = [f.parts[-3] for f in files]
elif naming_scheme == "iter":
names = [f.stem for f in files]
else:
raise ValueError(f"not supported {naming_scheme}")
fig, axs = plt.subplots(ncols=2, figsize=(16, 5))
for f, color, name in zip(files, sns.color_palette("Blues", n_colors=len(files)), names):
data = torch.load(f)
# precision is n_iou, n_points, n_cat, n_area, max_det
precision = data["precision"]
recall = data["params"].recThrs
scores = data["scores"]
# take precision for all classes, all areas and 100 detections
precision = precision[0, :, :, 0, -1].mean(1)
scores = scores[0, :, :, 0, -1].mean(1)
prec = precision.mean()
rec = data["recall"][0, :, 0, -1].mean()
print(
f"{naming_scheme} {name}: mAP@50={prec * 100: 05.1f}, "
+ f"score={scores.mean():0.3f}, "
+ f"f1={2 * prec * rec / (prec + rec + 1e-8):0.3f}"
)
axs[0].plot(recall, precision, c=color)
axs[1].plot(recall, scores, c=color)
axs[0].set_title("Precision / Recall")
axs[0].legend(names)
axs[1].set_title("Scores / Recall")
axs[1].legend(names)
return fig, axs
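def _demo_plot_logs():
    # Hedged sketch (not part of the original file): plot_logs expects run
    # directories that each contain a log.txt with one JSON record per line;
    # the paths below are hypothetical placeholders.
    logs = ["runs/exp_a", "runs/exp_b"]
    plot_logs(logs, fields=("class_error", "loss_bbox_unscaled", "mAP"))
    plt.show()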
| craftassist-master | python/craftassist/voxel_models/detection-transformer/plot_utils.py |
import bisect
from torch.utils.data.dataset import ConcatDataset as _ConcatDataset
class ConcatDataset(_ConcatDataset):
"""
Same as torch.utils.data.dataset.ConcatDataset, but exposes an extra
method
"""
def get_idxs(self, idx):
dataset_idx = bisect.bisect_right(self.cumulative_sizes, idx)
if dataset_idx == 0:
sample_idx = idx
else:
sample_idx = idx - self.cumulative_sizes[dataset_idx - 1]
return dataset_idx, sample_idx
def get_in_coco_format(self, idx: int):
dataset_idx, sample_idx = self.get_idxs(idx)
return self.datasets[dataset_idx].get_in_coco_format(sample_idx)
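def _demo_concat_get_idxs():
    # Hedged example (not part of the original file): get_idxs maps a global
    # index to (dataset index, local index) via the cumulative sizes, so
    # index 3 of a [3-item, 2-item] concatenation is item 0 of the second set.
    ds = ConcatDataset([[10, 11, 12], [20, 21]])
    return ds.get_idxs(3)  # -> (1, 0)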
from .voc import VOCDetection
from .voc2012 import VOCDetection2012
from .voc import make_voc_transforms
def build(image_set, args):
ds_2007 = VOCDetection(
image_set=image_set, transforms=make_voc_transforms(image_set, args.remove_difficult)
)
if image_set == "test":
return ds_2007
ds_2012 = VOCDetection2012(
image_set=image_set, transforms=make_voc_transforms(image_set, args.remove_difficult)
)
return ConcatDataset([ds_2007, ds_2012])
| craftassist-master | python/craftassist/voxel_models/detection-transformer/datasets/voc2007_2012.py |
import copy
import os
import torch
import torch.utils.data
import torchvision
import transforms as T
from pycocotools import mask as coco_mask
class FilterAndRemapCocoCategories(object):
def __init__(self, categories, remap=True):
self.categories = categories
self.remap = remap
def __call__(self, image, target):
anno = target["annotations"]
anno = [obj for obj in anno if obj["category_id"] in self.categories]
if not self.remap:
target["annotations"] = anno
return image, target
anno = copy.deepcopy(anno)
for obj in anno:
obj["category_id"] = self.categories.index(obj["category_id"])
target["annotations"] = anno
return image, target
def convert_coco_poly_to_mask(segmentations, height, width):
masks = []
for polygons in segmentations:
rles = coco_mask.frPyObjects(polygons, height, width)
mask = coco_mask.decode(rles)
if len(mask.shape) < 3:
mask = mask[..., None]
mask = torch.as_tensor(mask, dtype=torch.uint8)
mask = mask.any(dim=2)
masks.append(mask)
if masks:
masks = torch.stack(masks, dim=0)
else:
masks = torch.zeros((0, height, width), dtype=torch.uint8)
return masks
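def _demo_convert_coco_poly_to_mask():
    # Hedged example (not part of the original file): one square polygon in
    # COCO [x1, y1, x2, y2, ...] format becomes a single (H, W) binary mask.
    square = [[1.0, 1.0, 4.0, 1.0, 4.0, 4.0, 1.0, 4.0]]
    masks = convert_coco_poly_to_mask([square], height=6, width=6)
    return masks.shape  # -> torch.Size([1, 6, 6])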
class ConvertCocoPolysToMask(object):
def __init__(self, return_masks=False):
self.return_masks = return_masks
def __call__(self, image, target):
w, h = image.size
image_id = target["image_id"]
image_id = torch.tensor([image_id])
anno = target["annotations"]
anno = [obj for obj in anno if "iscrowd" not in obj or obj["iscrowd"] == 0]
boxes = [obj["bbox"] for obj in anno]
# guard against no boxes via resizing
boxes = torch.as_tensor(boxes, dtype=torch.float32).reshape(-1, 4)
boxes[:, 2:] += boxes[:, :2]
boxes[:, 0::2].clamp_(min=0, max=w)
boxes[:, 1::2].clamp_(min=0, max=h)
classes = [obj["category_id"] for obj in anno]
classes = torch.tensor(classes, dtype=torch.int64)
if self.return_masks:
segmentations = [obj["segmentation"] for obj in anno]
masks = convert_coco_poly_to_mask(segmentations, h, w)
keypoints = None
if anno and "keypoints" in anno[0]:
keypoints = [obj["keypoints"] for obj in anno]
keypoints = torch.as_tensor(keypoints, dtype=torch.float32)
num_keypoints = keypoints.shape[0]
if num_keypoints:
keypoints = keypoints.view(num_keypoints, -1, 3)
keep = (boxes[:, 3] > boxes[:, 1]) & (boxes[:, 2] > boxes[:, 0])
boxes = boxes[keep]
classes = classes[keep]
if self.return_masks:
masks = masks[keep]
if keypoints is not None:
keypoints = keypoints[keep]
target = {}
target["boxes"] = boxes
target["labels"] = classes
if self.return_masks:
target["masks"] = masks
target["image_id"] = image_id
if keypoints is not None:
target["keypoints"] = keypoints
# for conversion to coco api
area = torch.tensor([obj["area"] for obj in anno])
iscrowd = torch.tensor([obj["iscrowd"] if "iscrowd" in obj else 0 for obj in anno])
target["area"] = area[keep]
target["iscrowd"] = iscrowd[keep]
target["orig_size"] = torch.as_tensor([int(h), int(w)])
target["size"] = torch.as_tensor([int(h), int(w)])
return image, target
class CocoDetection(torchvision.datasets.CocoDetection):
def __init__(self, img_folder, ann_file, transforms, return_masks):
super(CocoDetection, self).__init__(img_folder, ann_file)
self._transforms = transforms
self.prepare = ConvertCocoPolysToMask(return_masks)
def __getitem__(self, idx):
img, target = super(CocoDetection, self).__getitem__(idx)
image_id = self.ids[idx]
target = dict(image_id=image_id, annotations=target)
img, target = self.prepare(img, target)
if self._transforms is not None:
img, target = self._transforms(img, target)
return img, target
def get_in_coco_format(self, idx):
img, target = super(CocoDetection, self).__getitem__(idx)
image_id = self.ids[idx]
target = dict(image_id=image_id, annotations=target)
img, target = self.prepare(img, target)
if self._transforms is not None:
img, target = self._transforms(img, target)
return img, target
def make_coco_transforms(image_set):
normalize = T.Compose(
[T.ToTensor(), T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]
)
scales = [480, 512, 544, 576, 608, 640, 672, 704, 736, 768, 800]
transform_train = T.Compose(
[
T.RandomHorizontalFlip(),
T.RandomSelect(
T.RandomResize(scales, max_size=1333),
T.Compose(
[
T.RandomResize([400, 500, 600]),
T.RandomSizeCrop(384, 600),
T.RandomResize(scales, max_size=1333),
]
),
),
normalize,
]
)
transform_val = T.Compose([T.RandomResize([800], max_size=1333), normalize])
transforms = {
"train": transform_train,
"trainval": transform_train,
"val": transform_val,
"test": transform_val,
}
return transforms[image_set]
def build(image_set, args):
root = "/datasets01/COCO/022719"
if args.crowdfree:
# has cleaned up training set, val set is unchanged
root_ann = "/checkpoint/szagoruyko/detection_transformer_shared/coco_instances_crowdfree"
else:
root_ann = root
mode = "instances"
anno_file_template = "{}_{}2017.json"
PATHS = {
"train": (
"train2017",
os.path.join("annotations", anno_file_template.format(mode, "train")),
),
"val": ("val2017", os.path.join("annotations", anno_file_template.format(mode, "val"))),
# this is a hack, change in the future
"trainval": (
"train2017",
os.path.join("annotations", anno_file_template.format(mode, "train")),
),
"test": ("val2017", os.path.join("annotations", anno_file_template.format(mode, "val"))),
}
img_folder, ann_file = PATHS[image_set]
img_folder = os.path.join(root, img_folder)
ann_file = os.path.join(root_ann, ann_file)
dataset = CocoDetection(
img_folder, ann_file, transforms=make_coco_transforms(image_set), return_masks=args.masks
)
return dataset
| craftassist-master | python/craftassist/voxel_models/detection-transformer/datasets/coco.py |
"""
Copyright (c) Facebook, Inc. and its affiliates.
"""
import pickle
import numpy as np
import torch
from torch.utils import data as tds
from copy import deepcopy
def underdirt(schematic, labels=None, max_shift=0, nothing_id=0):
# todo fancier dirt!
# FIXME!!!! label as ground where appropriate
shift = torch.randint(max_shift + 1, (1,)).item()
if shift > 0:
new_schematic = torch.LongTensor(schematic.size())
new_schematic[:, shift:, :] = schematic[:, :-shift, :]
new_schematic[:, :shift, :] = 3
new_labels = None
if labels is not None:
new_labels = torch.LongTensor(labels.size())
new_labels[:, shift:, :] = labels[:, :-shift, :]
new_labels[:, :shift, :] = nothing_id
return new_schematic, new_labels
else:
return schematic, labels
def flip_rotate(c, l=None, idx=None):
"""
Randomly transform the cube for more data.
The transformation is chosen from:
0. original
1. x-z plane rotation 90
2. x-z plane rotation 180
3. x-z plane rotation 270
4. x-axis flip
5. z-axis flip
"""
idx = np.random.choice(range(6)) if (idx is None) else idx
l_ = l
if idx == 0:
c_ = c
l_ = l
elif idx >= 1 and idx <= 3: # rotate
npc = c.numpy()
npc = np.rot90(npc, idx, axes=(0, 2)) # rotate on the x-z plane
c_ = torch.from_numpy(npc.copy())
if l is not None:
npl = l.numpy()
npl = np.rot90(npl, idx, axes=(0, 2)) # rotate on the x-z plane
l_ = torch.from_numpy(npl.copy())
else: # flip
npc = c.numpy()
npc = np.flip(npc, axis=(idx - 4) * 2) # 0 or 2
c_ = torch.from_numpy(npc.copy())
if l is not None:
npl = l.numpy()
npl = np.flip(npl, axis=(idx - 4) * 2) # 0 or 2
l_ = torch.from_numpy(npl.copy())
return c_, l_, idx
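def _demo_flip_rotate():
    # Hedged example (not part of the original file): idx=2 is a 180-degree
    # rotation in the x-z plane, so applying it twice recovers the original.
    cube = torch.arange(27).view(3, 3, 3)
    once, _, _ = flip_rotate(cube, idx=2)
    twice, _, _ = flip_rotate(once, idx=2)
    return bool((twice == cube).all())  # -> True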
def pad_to_sidelength(schematic, labels=None, nothing_id=0, sidelength=32):
szs = list(schematic.size())
szs = np.add(szs, -sidelength)
pad = []
# this is all backwards bc pytorch pad semantics :(
for s in szs:
if s >= 0:
pad.append(0)
else:
pad.append(-s)
pad.append(0)
schematic = torch.nn.functional.pad(schematic, pad[::-1])
if labels is not None:
labels = torch.nn.functional.pad(labels, pad[::-1], value=nothing_id)
return schematic, labels
# TODO cut outliers
# TODO simplify
def fit_in_sidelength(schematic, labels=None, nothing_id=0, sl=32, max_shift=0):
schematic, labels = pad_to_sidelength(
schematic, labels=labels, nothing_id=nothing_id, sidelength=sl
)
nz = schematic.nonzero()
m, _ = nz.median(0)
min_y, _ = nz.min(0)
min_y = min_y[1]
xshift = max(torch.randint(-max_shift, max_shift + 1, (1,)).item() - m[0].item() + sl // 2, 0)
zshift = max(torch.randint(-max_shift, max_shift + 1, (1,)).item() - m[2].item() + sl // 2, 0)
new_schematic = torch.LongTensor(sl, sl, sl).fill_(1)
new_schematic[xshift:, : sl - min_y, zshift:] = schematic[
: sl - xshift, min_y:sl, : sl - zshift
]
new_labels = None
if labels is not None:
new_labels = torch.LongTensor(sl, sl, sl).fill_(nothing_id)
new_labels[xshift:, : sl - min_y, zshift:] = labels[: sl - xshift, min_y:sl, : sl - zshift]
return new_schematic, new_labels, (xshift, -min_y, zshift)
def make_example_from_raw(schematic, labels=None, augment={}, nothing_id=0, sl=32):
max_shift = augment.get("max_shift", 0)
s, l, o = fit_in_sidelength(
schematic, labels=labels, nothing_id=nothing_id, max_shift=max_shift
)
if len(augment) > 0:
if augment.get("flip_rotate", False):
s, l, _ = flip_rotate(s, l=l)
m = augment.get("underdirt")
if m is not None:
# really should fix offset here.....TODO
s, l = underdirt(s, labels=l, max_shift=m, nothing_id=nothing_id)
s[s == 0] = 1
s -= 1
return s, l, o
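def _demo_make_example_from_raw():
    # Hedged example (not part of the original file): a small raw schematic is
    # padded and shifted into a fixed sidelength cube; the returned offset
    # records the (x, y, z) shift that was applied.
    schematic = torch.ones(5, 5, 5, dtype=torch.long) * 4
    labels = torch.zeros(5, 5, 5, dtype=torch.long)
    s, l, offset = make_example_from_raw(schematic, labels=labels, sl=32)
    return s.shape, l.shape, offset  # -> two (32, 32, 32) tensors and the shift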
def swallow_classes(classes, predator, prey_classes, class_map):
new_classes = deepcopy(classes)
apex = class_map.get(predator, predator)
for prey in prey_classes:
class_map[prey] = apex
new_classes["name2count"][predator] += new_classes["name2count"][prey]
del new_classes["name2count"][prey]
for prey in prey_classes:
for s, t in class_map.items():
if t == prey:
class_map[s] = apex
return new_classes, class_map
def organize_classes(classes, min_occurence):
class_map = {}
new_classes = deepcopy(classes)
for cname in classes["name2count"]:
# hacky, should stem this properly
if cname[-1] == "s" and classes["name2count"].get(cname[:-1]) is not None:
new_classes, class_map = swallow_classes(new_classes, cname[:-1], [cname], class_map)
small_classes = []
for cname, count in new_classes["name2count"].items():
if count < min_occurence:
small_classes.append(cname)
if "none" in small_classes:
small_classes.remove("none")
new_classes, class_map = swallow_classes(new_classes, "none", small_classes, class_map)
if "nothing" in new_classes["name2idx"]:
new_classes, class_map = swallow_classes(new_classes, "none", ["nothing"], class_map)
counts = sorted(list(new_classes["name2count"].items()), key=lambda x: x[1], reverse=True)
new_classes["name2idx"]["none"] = 0
new_classes["idx2name"].append("none")
for i in range(len(counts)):
cname = counts[i][0]
if cname != "none":
new_classes["name2idx"][cname] = i
new_classes["idx2name"].append(cname)
return new_classes, class_map
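def _demo_organize_classes():
    # Hedged example (not part of the original file): plural names are folded
    # into their singular form and rare classes are folded into "none",
    # mirroring how the house dataset builds its label set.
    classes = {
        "name2idx": {},
        "idx2name": [],
        "name2count": {"none": 50, "roof": 10, "roofs": 2, "window": 1},
    }
    merged, class_map = organize_classes(classes, min_occurence=3)
    # merged["name2count"] -> {"none": 51, "roof": 12}
    # class_map -> {"roofs": "roof", "window": "none"}
    return merged, class_map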
def create_shape_dataset(min=10, max=30, nexamples=0):
    # Stub: SemSegShapeData calls this with (min, max, nexamples) when no
    # shape_data_path is given, but on-the-fly shape generation is not
    # implemented in this file.
    raise NotImplementedError(
        "create_shape_dataset is a stub; pass shape_data_path to load shapes from disk"
    )
class SemSegData(tds.Dataset):
def __init__(
self,
shape_data_path=None,
house_data_path=None,
nexamples=0,
shape_pct=0.5,
sidelength=32,
min_size=10,
max_size=30,
classes=None,
augment={},
house_min_class_occurence=250,
shape_min_class_occurence=1,
useid=True,
shape_save_path=None,
):
self.sidelength = sidelength
self.useid = useid
self.examples = []
self.nexamples = nexamples
self.augment = augment
print("create semsegdata")
# create combined dataset of shapes and houses
shape_nexamples = int(nexamples * shape_pct)
shape_dataset = SemSegShapeData(
data_path=shape_data_path,
nexamples=shape_nexamples,
sidelength=sidelength,
min_size=min_size,
max_size=max_size,
classes=classes,
augment=augment,
min_class_occurence=shape_min_class_occurence,
useid=useid,
save_path=shape_save_path,
)
house_nexamples = nexamples - shape_nexamples
house_dataset = SemSegHouseData(
data_path=house_data_path,
nexamples=house_nexamples,
sidelength=sidelength,
classes=classes,
augment=augment,
min_class_occurence=house_min_class_occurence,
useid=useid,
)
self.classes = {"name2idx": {}, "idx2name": [], "name2count": {}}
house_classes = house_dataset.get_classes()
shape_classes = shape_dataset.get_classes()
self.classes = deepcopy(house_classes)
print(self.classes)
print(shape_classes)
for cname in shape_classes["name2idx"]:
if cname not in self.classes["name2idx"]:
new_idx = len(self.classes["name2idx"])
self.classes["name2idx"][cname] = new_idx
self.classes["idx2name"].append(cname)
self.classes["name2count"][cname] = shape_classes["name2count"].get(cname, 0)
else:
if cname in self.classes["name2count"]:
self.classes["name2count"][cname] += shape_classes["name2count"].get(cname, 0)
else:
self.classes["name2count"][cname] = shape_classes["name2count"].get(cname, 0)
self.inst_data = shape_dataset.inst_data + house_dataset.inst_data
if classes is None:
class_map = {}
for cname in self.classes["name2idx"]:
class_map[cname] = cname
for data in self.inst_data:
for cname in data[2]:
if cname not in class_map:
class_map[cname] = "none"
else:
new_classes = deepcopy(classes)
new_classes["name2count"] = {}
new_classes["name2count"]["none"] = 0
class_map = {}
for cname in new_classes["name2idx"]:
class_map[cname] = cname
for data in self.inst_data:
for cname in data[2]:
mapped_name = "none"
if cname in class_map:
mapped_name = class_map[cname]
if mapped_name not in new_classes["name2count"]:
new_classes["name2count"][mapped_name] = 0
new_classes["name2count"][mapped_name] += 1
class_map[cname] = mapped_name
self.classes = new_classes
# this should be 0...
self.nothing_id = self.classes["name2idx"]["none"]
c = self.classes["name2idx"]
for i in range(len(shape_dataset.inst_data)):
shape_dataset.inst_data[i] = list(shape_dataset.inst_data[i])
x = shape_dataset.inst_data[i]
x[1].apply_(lambda z: c[shape_classes["idx2name"][z]] if z > 0 else self.nothing_id)
for i in range(len(house_dataset.inst_data)):
house_dataset.inst_data[i] = list(house_dataset.inst_data[i])
x = house_dataset.inst_data[i]
x[1].apply_(lambda z: c[house_classes["idx2name"][z]] if z > 0 else self.nothing_id)
self.inst_data = shape_dataset.inst_data + house_dataset.inst_data
self.nexamples = len(self.inst_data)
print(
"Generated {} examples consisting of {} shapes and {} houses.".format(
len(self.inst_data), len(shape_dataset.inst_data), len(house_dataset.inst_data)
)
)
#
def get_classes(self):
return self.classes
def set_classes(self, classes):
self.classes = classes
def __getitem__(self, index):
x = self.inst_data[index]
s, l, _ = make_example_from_raw(
x[0], labels=x[1], nothing_id=self.nothing_id, sl=self.sidelength, augment=self.augment
)
return s, l
def __len__(self):
return self.nexamples
class SemSegShapeData(tds.Dataset):
def __init__(
self,
data_path=None,
nexamples=-1,
sidelength=32,
min_size=10,
max_size=30,
classes=None,
augment={},
min_class_occurence=1,
useid=True,
save_path=None,
):
self.sidelength = sidelength
self.useid = useid
self.examples = []
self.nexamples = nexamples
self.augment = augment
if data_path is not None:
self.inst_data = pickle.load(open(data_path, "rb"))
else:
self.inst_data = create_shape_dataset(min=min_size, max=max_size, nexamples=nexamples)
if save_path is not None:
with open(save_path, "wb") as f:
pickle.dump(self.inst_data, f)
print("Save generated shape data to {}".format(save_path))
if self.nexamples < 0:
self.nexamples = len(self.inst_data)
else:
self.nexamples = min(len(self.inst_data), self.nexamples)
self.inst_data = self.inst_data[: self.nexamples]
# TODO separate training and validation data
if classes is None:
classes = {"name2idx": {}, "idx2name": [], "name2count": {}}
for i in range(len(self.inst_data)):
for cname in self.inst_data[i][2]:
if classes["name2count"].get(cname) is None:
classes["name2count"][cname] = 1
else:
classes["name2count"][cname] += 1
if classes["name2count"].get("none") is None:
classes["name2count"]["none"] = 1
merged_classes, class_map = organize_classes(classes, min_class_occurence)
for cname in merged_classes["name2idx"]:
class_map[cname] = cname
self.classes = merged_classes
else:
new_classes = deepcopy(classes)
new_classes["name2count"] = {}
new_classes["name2count"]["none"] = 0
class_map = {}
for cname in new_classes["name2idx"]:
class_map[cname] = cname
for data in self.inst_data:
for cname in data[2]:
mapped_name = "none"
if cname in class_map:
mapped_name = class_map[cname]
if mapped_name not in new_classes["name2count"]:
new_classes["name2count"][mapped_name] = 0
new_classes["name2count"][mapped_name] += 1
class_map[cname] = mapped_name
self.classes = new_classes
# this should be 0...
self.nothing_id = self.classes["name2idx"]["none"]
c = self.classes["name2idx"]
for i in range(len(self.inst_data)):
self.inst_data[i] = list(self.inst_data[i])
x = self.inst_data[i]
x[0] = torch.from_numpy(x[0]).long()
x[1] = torch.from_numpy(x[1]).long()
x[1].apply_(lambda z: c[class_map[x[2][z]]] if z > 0 else self.nothing_id)
#
def get_classes(self):
return self.classes
def set_classes(self, classes):
self.classes = classes
def __getitem__(self, index):
x = self.inst_data[index]
s, l, _ = make_example_from_raw(
x[0], labels=x[1], nothing_id=self.nothing_id, sl=self.sidelength, augment=self.augment
)
return s, l
def __len__(self):
return self.nexamples
class SemSegHouseData(tds.Dataset):
def __init__(
self,
data_path,
nexamples=-1,
sidelength=32,
classes=None,
augment={},
min_class_occurence=250,
useid=True,
):
self.sidelength = sidelength
self.useid = useid
self.examples = []
self.inst_data = []
if data_path:
self.inst_data = pickle.load(open(data_path, "rb"))
self.nexamples = nexamples
self.augment = augment
if self.nexamples < 0:
self.nexamples = len(self.inst_data)
else:
self.nexamples = min(len(self.inst_data), self.nexamples)
self.inst_data = self.inst_data[: self.nexamples]
# print("------- inst data -------")
# print(self.inst_data)
# TODO separate training and validation data
if classes is None:
classes = {"name2idx": {}, "idx2name": [], "name2count": {}}
for i in range(len(self.inst_data)):
for cname in self.inst_data[i][2]:
if classes["name2count"].get(cname) is None:
classes["name2count"][cname] = 1
else:
classes["name2count"][cname] += 1
if classes["name2count"].get("none") is None:
classes["name2count"]["none"] = 1
merged_classes, class_map = organize_classes(classes, min_class_occurence)
for cname in merged_classes["name2idx"]:
class_map[cname] = cname
self.classes = merged_classes
else:
new_classes = deepcopy(classes)
new_classes["name2count"] = {}
new_classes["name2count"]["none"] = 0
class_map = {}
for cname in new_classes["name2idx"]:
class_map[cname] = cname
for data in self.inst_data:
for cname in data[2]:
mapped_name = "none"
if cname in class_map:
mapped_name = class_map[cname]
if mapped_name not in new_classes["name2count"]:
new_classes["name2count"][mapped_name] = 0
new_classes["name2count"][mapped_name] += 1
class_map[cname] = mapped_name
self.classes = new_classes
# this should be 0...
self.nothing_id = self.classes["name2idx"]["none"]
print("class No.: {}".format(len(self.classes["name2idx"])))
# c = self.classes["name2idx"]
for i in range(len(self.inst_data)):
self.inst_data[i] = list(self.inst_data[i])
x = self.inst_data[i]
x[0] = torch.from_numpy(x[0]).long()
x[1] = torch.from_numpy(x[1]).long()
# x[1].apply_(lambda z: c[class_map[x[2][z]]] if z > 0 else self.nothing_id)
self.class_map = class_map
#
def get_classes(self):
return self.classes
def set_classes(self, classes):
self.classes = classes
def __getitem__(self, index):
x = self.inst_data[index]
s, l, _ = make_example_from_raw(
x[0], labels=x[1], nothing_id=self.nothing_id, sl=self.sidelength, augment=self.augment
)
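        # note: the augmented (s, l) computed above are not used below; this
        # __getitem__ builds its masks/boxes from the raw labels and returns
        # the raw schematic x[0]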
w, h, d = x[1].shape
inst_len = len(x[2]) - 1 # ignore nothing
masks = torch.zeros((inst_len, w, h, d), dtype=torch.uint8)
boxes = []
labels = []
for i, inst_name in enumerate(x[2][1:]):
cls_id = self.classes["name2idx"][self.class_map[inst_name]]
idx = x[1] == cls_id
masks[i][idx] = 1
idx = idx.nonzero()
values, indices = idx.min(dim=0)
x_min, y_min, z_min = values
values, indices = idx.max(dim=0)
x_max, y_max, z_max = values
box = (x_min, y_min, z_min, x_max, y_max, z_max)
boxes.append(box)
labels.append(cls_id)
boxes = torch.tensor(boxes, dtype=torch.float32)
labels = torch.tensor(labels)
size = torch.tensor((d, h, w))
data = {
"masks": masks,
"boxes": boxes,
"labels": labels,
"size": size,
"orig_size": size,
"image_id": torch.tensor(index),
}
return x[0], data
def __len__(self):
return self.nexamples
def build(image_set, args):
data_path = "/checkpoint/aszlam/minecraft/segmentation_data/training_data.pkl"
nexamples = 100
house_dataset = SemSegHouseData(data_path=data_path, nexamples=nexamples)
return house_dataset
| craftassist-master | python/craftassist/voxel_models/detection-transformer/datasets/house.py |
import importlib
def build_dataset(image_set, args):
# what a hack
mod = importlib.import_module("datasets." + args.dataset_file)
return mod.build(image_set, args)
| craftassist-master | python/craftassist/voxel_models/detection-transformer/datasets/__init__.py |
import json
import os
import time
from collections import defaultdict
import torchvision
from PIL import Image
import pycocotools.mask as mask_utils
import transforms as T
from .coco import ConvertCocoPolysToMask
def _isArrayLike(obj):
return hasattr(obj, "__iter__") and hasattr(obj, "__len__")
class LVIS:
def __init__(self, annotation_path=None):
"""Class for reading and visualizing annotations.
Args:
annotation_path (str): location of annotation file
"""
self.anns = {}
self.cats = {}
self.imgs = {}
self.img_ann_map = defaultdict(list)
self.cat_img_map = defaultdict(list)
self.dataset = {}
if annotation_path is not None:
print("Loading annotations.")
tic = time.time()
self.dataset = self._load_json(annotation_path)
print("Done (t={:0.2f}s)".format(time.time() - tic))
assert type(self.dataset) == dict, "Annotation file format {} not supported.".format(
type(self.dataset)
)
self._create_index()
def _load_json(self, path):
with open(path, "r") as f:
return json.load(f)
def _create_index(self):
print("Creating index.")
self.img_ann_map = defaultdict(list)
self.cat_img_map = defaultdict(list)
self.anns = {}
self.cats = {}
self.imgs = {}
for ann in self.dataset["annotations"]:
self.img_ann_map[ann["image_id"]].append(ann)
self.anns[ann["id"]] = ann
for img in self.dataset["images"]:
self.imgs[img["id"]] = img
for cat in self.dataset["categories"]:
self.cats[cat["id"]] = cat
for ann in self.dataset["annotations"]:
self.cat_img_map[ann["category_id"]].append(ann["image_id"])
print("Index created.")
def get_ann_ids(self, img_ids=None, cat_ids=None, area_rng=None):
"""Get ann ids that satisfy given filter conditions.
Args:
img_ids (int array): get anns for given imgs
cat_ids (int array): get anns for given cats
area_rng (float array): get anns for a given area range. e.g [0, inf]
Returns:
ids (int array): integer array of ann ids
"""
if img_ids is not None:
img_ids = img_ids if _isArrayLike(img_ids) else [img_ids]
if cat_ids is not None:
cat_ids = cat_ids if _isArrayLike(cat_ids) else [cat_ids]
anns = []
if img_ids is not None:
for img_id in img_ids:
anns.extend(self.img_ann_map[img_id])
else:
anns = self.dataset["annotations"]
# return early if no more filtering required
if cat_ids is None and area_rng is None:
return [_ann["id"] for _ann in anns]
cat_ids = set(cat_ids)
if area_rng is None:
area_rng = [0, float("inf")]
ann_ids = [
_ann["id"]
for _ann in anns
if _ann["category_id"] in cat_ids
and _ann["area"] > area_rng[0]
and _ann["area"] < area_rng[1]
]
return ann_ids
def get_cat_ids(self):
"""Get all category ids.
Returns:
ids (int array): integer array of category ids
"""
return list(self.cats.keys())
def get_img_ids(self):
"""Get all img ids.
Returns:
ids (int array): integer array of image ids
"""
return list(self.imgs.keys())
def _load_helper(self, _dict, ids):
if ids is None:
return list(_dict.values())
elif _isArrayLike(ids):
return [_dict[id] for id in ids]
else:
return [_dict[ids]]
def load_anns(self, ids=None):
"""Load anns with the specified ids. If ids=None load all anns.
Args:
ids (int array): integer array of annotation ids
Returns:
anns (dict array) : loaded annotation objects
"""
return self._load_helper(self.anns, ids)
def load_cats(self, ids):
"""Load categories with the specified ids. If ids=None load all
categories.
Args:
ids (int array): integer array of category ids
Returns:
cats (dict array) : loaded category dicts
"""
return self._load_helper(self.cats, ids)
def load_imgs(self, ids):
"""Load categories with the specified ids. If ids=None load all images.
Args:
ids (int array): integer array of image ids
Returns:
imgs (dict array) : loaded image dicts
"""
return self._load_helper(self.imgs, ids)
def download(self, save_dir, img_ids=None):
"""Download images from mscoco.org server.
Args:
save_dir (str): dir to save downloaded images
img_ids (int array): img ids of images to download
"""
imgs = self.load_imgs(img_ids)
if not os.path.exists(save_dir):
os.makedirs(save_dir)
for img in imgs:
file_name = os.path.join(save_dir, img["file_name"])
if not os.path.exists(file_name):
from urllib.request import urlretrieve
urlretrieve(img["coco_url"], file_name)
def ann_to_rle(self, ann):
"""Convert annotation which can be polygons, uncompressed RLE to RLE.
Args:
ann (dict) : annotation object
Returns:
ann (rle)
"""
img_data = self.imgs[ann["image_id"]]
h, w = img_data["height"], img_data["width"]
segm = ann["segmentation"]
if isinstance(segm, list):
# polygon -- a single object might consist of multiple parts
# we merge all parts into one mask rle code
rles = mask_utils.frPyObjects(segm, h, w)
rle = mask_utils.merge(rles)
elif isinstance(segm["counts"], list):
# uncompressed RLE
rle = mask_utils.frPyObjects(segm, h, w)
else:
# rle
rle = ann["segmentation"]
return rle
def ann_to_mask(self, ann):
"""Convert annotation which can be polygons, uncompressed RLE, or RLE
to binary mask.
Args:
ann (dict) : annotation object
Returns:
binary mask (numpy 2D array)
"""
rle = self.ann_to_rle(ann)
return mask_utils.decode(rle)
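def _demo_lvis_usage():
    # Hedged sketch (not part of the original file): typical read-only usage
    # of the LVIS wrapper above; the annotation path is a hypothetical
    # placeholder, not a real file in this repo.
    lvis = LVIS("lvis_v0.5_val.json")
    img_ids = lvis.get_img_ids()
    ann_ids = lvis.get_ann_ids(img_ids=img_ids[:1], area_rng=[0, float("inf")])
    return lvis.load_anns(ann_ids)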
class LvisDetectionBase(torchvision.datasets.VisionDataset):
def __init__(self, root, annFile, transform=None, target_transform=None, transforms=None):
super(LvisDetectionBase, self).__init__(root, transforms, transform, target_transform)
self.lvis = LVIS(annFile)
self.ids = list(sorted(self.lvis.imgs.keys()))
def __getitem__(self, index):
"""
Args:
index (int): Index
Returns:
tuple: Tuple (image, target). target is the object returned by ``coco.loadAnns``.
"""
lvis = self.lvis
img_id = self.ids[index]
ann_ids = lvis.get_ann_ids(img_ids=img_id)
target = lvis.load_anns(ann_ids)
path = lvis.load_imgs(img_id)[0]["file_name"]
img = Image.open(os.path.join(self.root, path)).convert("RGB")
if self.transforms is not None:
img, target = self.transforms(img, target)
return img, target
def __len__(self):
return len(self.ids)
class LvisDetection(LvisDetectionBase):
def __init__(self, img_folder, ann_file, transforms, return_masks):
super(LvisDetection, self).__init__(img_folder, ann_file)
self._transforms = transforms
self.prepare = ConvertCocoPolysToMask(return_masks)
def __getitem__(self, idx):
img, target = super(LvisDetection, self).__getitem__(idx)
image_id = self.ids[idx]
target = dict(image_id=image_id, annotations=target)
img, target = self.prepare(img, target)
if self._transforms is not None:
img, target = self._transforms(img, target)
return img, target
def get_in_coco_format(self, idx):
img, target = super(LvisDetection, self).__getitem__(idx)
image_id = self.ids[idx]
target = dict(image_id=image_id, annotations=target)
img, target = self.prepare(img, target)
if self._transforms is not None:
img, target = self._transforms(img, target)
return img, target
def make_lvis_transforms(image_set):
normalize = T.Compose(
[T.ToTensor(), T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]
)
scales = [480, 512, 544, 576, 608, 640, 672, 704, 736, 768, 800]
transform_train = T.Compose(
[
T.RandomHorizontalFlip(),
T.RandomSelect(
T.RandomResize(scales, max_size=1333),
T.Compose(
[
T.RandomResize([400, 500, 600]),
T.RandomCrop((384, 384)),
T.RandomResize(scales, max_size=1333),
]
),
),
normalize,
]
)
transform_val = T.Compose([T.RandomResize([800], max_size=1333), normalize])
transforms = {
"train": transform_train,
"trainval": transform_train,
"val": transform_val,
"test": transform_val,
}
return transforms[image_set]
def build(image_set, args):
anno_root = "/datasets01/lvis/020520/"
anno_file_template = "lvis_v0.5_{}.json"
PATHS = {
"train": ("/datasets01/COCO/022719/train2017", anno_file_template.format("train")),
"val": ("/datasets01/COCO/060817/val2014", anno_file_template.format("val")),
# this is a hack, change in the future
"trainval": ("/datasets01/COCO/022719/train2017", anno_file_template.format("train")),
"test": ("/datasets01/COCO/060817/val2014", anno_file_template.format("val")),
}
img_folder, ann_file = PATHS[image_set]
# img_folder = os.path.join(root, img_folder)
ann_file = os.path.join(anno_root, ann_file)
dataset = LvisDetection(
img_folder, ann_file, transforms=make_lvis_transforms(image_set), return_masks=args.masks
)
return dataset
| craftassist-master | python/craftassist/voxel_models/detection-transformer/datasets/lvis.py |
from .voc import VOCDetection
from typing import Iterable
import to_coco_api
VOC_PATH = "/datasets01/VOC/060817/"
class VOCDetection2012(VOCDetection):
def __init__(self, image_set: str = "train", transforms: Iterable = None):
super(VOCDetection, self).__init__(
VOC_PATH, image_set=image_set, year="2012", download=False
)
self.prepare = to_coco_api.PrepareInstance()
self._transforms = transforms
from .voc import make_voc_transforms
def build(image_set, args):
# if we only use voc2012, then we need to adapt trainval and test to
# VOC2012 constraints
if image_set == "test":
image_set = "val"
if image_set == "trainval":
image_set = "train"
return VOCDetection2012(
image_set=image_set, transforms=make_voc_transforms(image_set, args.remove_difficult)
)
| craftassist-master | python/craftassist/voxel_models/detection-transformer/datasets/voc2012.py |
import json
import os
import numpy as np
import torch
from PIL import Image
import transforms as T
from box_ops import masks_to_boxes
from panopticapi.utils import rgb2id
class CocoPanoptic:
def __init__(self, img_folder, ann_folder, ann_file, transforms=None):
with open(ann_file, "r") as f:
self.coco = json.load(f)
# sort 'images' field so that they are aligned with 'annotations'
# i.e., in alphabetical order
self.coco["images"] = sorted(self.coco["images"], key=lambda x: x["id"])
# sanity check
for img, ann in zip(self.coco["images"], self.coco["annotations"]):
assert img["file_name"][:-4] == ann["file_name"][:-4]
self.img_folder = img_folder
self.ann_folder = ann_folder
self.transforms = transforms
def __getitem__(self, idx):
ann_info = self.coco["annotations"][idx]
img_path = os.path.join(self.img_folder, ann_info["file_name"].replace(".png", ".jpg"))
ann_path = os.path.join(self.ann_folder, ann_info["file_name"])
img = Image.open(img_path).convert("RGB")
masks = np.asarray(Image.open(ann_path), dtype=np.uint32)
masks = rgb2id(masks)
ids = np.array([ann["id"] for ann in ann_info["segments_info"]])
masks = masks == ids[:, None, None]
masks = torch.as_tensor(masks, dtype=torch.uint8)
labels = torch.tensor(
[ann["category_id"] for ann in ann_info["segments_info"]], dtype=torch.int64
)
target = {}
target["image_id"] = torch.tensor([ann_info["image_id"]])
target["masks"] = masks
target["labels"] = labels
w, h = img.size
target["boxes"] = masks_to_boxes(masks)
target["size"] = torch.as_tensor([int(h), int(w)])
target["orig_size"] = torch.as_tensor([int(h), int(w)])
for name in ["iscrowd", "area"]:
target[name] = torch.tensor([ann[name] for ann in ann_info["segments_info"]])
if self.transforms is not None:
img, target = self.transforms(img, target)
# print('img')
# print(img)
# print('target')
# print(target)
return img, target
def __len__(self):
return len(self.coco["images"])
def get_height_and_width(self, idx):
img_info = self.coco["images"][idx]
height = img_info["height"]
width = img_info["width"]
return height, width
def make_coco_panoptic_transforms(image_set):
normalize = T.Compose(
[
T.ToTensor(),
# T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
]
)
# scales = [480, 512, 544, 576, 608, 640, 672, 704, 736, 768, 800]
transform_train = T.Compose(
[
# T.RandomHorizontalFlip(),
# T.RandomSelect(
# T.RandomResize(scales, max_size=1333),
# T.Compose([
# T.RandomResize([400, 500, 600]),
# T.RandomSizeCrop(384, 600),
# T.RandomResize(scales, max_size=1333),
# ])
# ),
normalize
]
)
transform_val = T.Compose([T.RandomResize([800], max_size=1333), normalize])
transforms = {
"train": transform_train,
"trainval": transform_train,
"val": transform_val,
"test": transform_val,
}
return transforms[image_set]
def build(image_set, args):
img_folder_root = "/datasets01/COCO/022719"
ann_folder_root = "/datasets01/COCO/060419"
mode = "panoptic"
anno_file_template = "{}_{}2017.json"
PATHS = {
"train": (
"train2017",
os.path.join("annotations", anno_file_template.format(mode, "train")),
),
"val": ("val2017", os.path.join("annotations", anno_file_template.format(mode, "val"))),
# this is a hack, change in the future
"trainval": (
"train2017",
os.path.join("annotations", anno_file_template.format(mode, "train")),
),
"test": ("val2017", os.path.join("annotations", anno_file_template.format(mode, "val"))),
}
img_folder, ann_file = PATHS[image_set]
img_folder_path = os.path.join(img_folder_root, img_folder)
ann_folder = os.path.join(ann_folder_root, "{}_{}".format(mode, img_folder))
ann_file = os.path.join(ann_folder_root, ann_file)
dataset = CocoPanoptic(
img_folder_path, ann_folder, ann_file, transforms=make_coco_panoptic_transforms(image_set)
)
return dataset
| craftassist-master | python/craftassist/voxel_models/detection-transformer/datasets/coco_panoptic.py |
import torchvision
from typing import Iterable
import to_coco_api
import transforms as T
VOC_PATH = "/checkpoint/szagoruyko/detection_transformer_shared/datasets01"
class VOCDetection(torchvision.datasets.VOCDetection):
def __init__(self, image_set: str = "train", transforms: Iterable = None):
super().__init__(VOC_PATH, image_set=image_set, year="2007", download=False)
self.prepare = to_coco_api.PrepareInstance()
self._transforms = transforms
def __getitem__(self, idx: int):
image, target = super().__getitem__(idx)
image, target = self.prepare(image, target)
if self._transforms is not None:
image, target = self._transforms(image, target)
return image, target
def get_in_coco_format(self, idx: int):
image, target = super().__getitem__(idx)
image, target = self.prepare(image, target)
if self._transforms is not None:
image, target = self._transforms(image, target)
return image, target
def make_voc_transforms(image_set, remove_difficult):
normalize = T.Compose(
[T.ToTensor(), T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]
)
transform_train = T.Compose(
[
T.RandomHorizontalFlip(),
T.RandomSelect(
T.RandomResize([400, 500, 600], max_size=1000),
T.Compose(
[
T.RandomResize([400, 500, 600]),
T.RandomCrop((384, 384)),
T.RandomResize([400, 500, 600], max_size=1000),
]
),
),
normalize,
T.RemoveDifficult(remove_difficult),
]
)
transform_val = T.Compose([T.RandomResize([600], max_size=1000), normalize])
transforms = {
"train": transform_train,
"trainval": transform_train,
"val": transform_val,
"test": transform_val,
}
return transforms[image_set]
def build(image_set, args):
return VOCDetection(
image_set=image_set, transforms=make_voc_transforms(image_set, args.remove_difficult)
)
| craftassist-master | python/craftassist/voxel_models/detection-transformer/datasets/voc.py |
import copy
import torch
import torch.nn.functional as F
from torch import nn
class Transformer(nn.Module):
def __init__(
self,
d_model=512,
nhead=8,
num_encoder_layers=6,
num_decoder_layers=6,
dim_feedforward=2048,
dropout=0.1,
activation="relu",
normalize_before=False,
return_intermediate_dec=False,
pass_pos_and_query=True,
):
super().__init__()
self.pass_pos_and_query = pass_pos_and_query
# TODO: force norm after in encoder for backcomp
encoder_layer = TransformerEncoderLayer(
d_model, nhead, dim_feedforward, dropout, activation, normalize_before=False
)
encoder_norm = nn.LayerNorm(d_model) if normalize_before else None
self.encoder = TransformerEncoder(encoder_layer, num_encoder_layers, encoder_norm)
decoder_layer = TransformerDecoderLayer(
d_model, nhead, dim_feedforward, dropout, activation, normalize_before
)
decoder_norm = nn.LayerNorm(d_model)
self.decoder = TransformerDecoder(
decoder_layer,
num_decoder_layers,
decoder_norm,
return_intermediate=return_intermediate_dec,
)
self._reset_parameters()
self.d_model = d_model
self.nhead = nhead
def _reset_parameters(self):
for p in self.parameters():
if p.dim() > 1:
nn.init.xavier_uniform_(p)
def forward(self, src, mask, query_embed, pos_embed):
        # flatten NxCxHxWxD to HWDxNxC
bs, c, h, w, d = src.shape
src = src.flatten(2).permute(2, 0, 1)
pos_embed = pos_embed.flatten(2).permute(2, 0, 1)
query_embed = query_embed.unsqueeze(1).repeat(1, bs, 1)
mask = mask.flatten(1)
# print('==> src shape: {}'.format(src.shape))
# print('==> pos emb shape: {}'.format(pos_embed.shape))
if self.pass_pos_and_query:
tgt = torch.zeros_like(query_embed)
else:
src, tgt, query_embed, pos_embed = src + 0.1 * pos_embed, query_embed, None, None
# src, tgt, query_embed, pos_embed = src , query_embed, None, None
# cnt = 0
# for obj in gc.get_objects():
# try:
# if torch.is_tensor(obj) or (hasattr(obj, 'data') and torch.is_tensor(obj.data)):
# cnt += 1
# except:
# pass
# print(f'active tensor num: {cnt}')
memory = self.encoder(src, src_key_padding_mask=mask, pos=pos_embed)
# print('==> memory size: {}'.format(memory.size()))
# cnt = 0
# for obj in gc.get_objects():
# try:
# if torch.is_tensor(obj) or (hasattr(obj, 'data') and torch.is_tensor(obj.data)):
# cnt += 1
# except:
# pass
# print(f'active tensor num: {cnt}')
hs = self.decoder(
tgt, memory, memory_key_padding_mask=mask, pos=pos_embed, query_pos=query_embed
)
# print('==> hs size: {}'.format(hs.size()))
# cnt = 0
# for obj in gc.get_objects():
# try:
# if torch.is_tensor(obj) or (hasattr(obj, 'data') and torch.is_tensor(obj.data)):
# cnt += 1
# except:
# pass
# print(f'active tensor num: {cnt}')
# return None
return hs.transpose(1, 2), memory.permute(1, 2, 0).view(bs, c, h, w, d)
# def forward(self, src, mask, query_embed, pos_embed):
# # flatten NxCxHxW to HWxNxC
# bs, c, h, w, d = src.shape
# memory = src.view(h * w * d, bs, c)
# hs = src.flatten()[: 3 * 100 * bs * c].view(3, 100, bs, c)
# # hs = torch.zeros((3, 100, bs, c)).cuda() + 0.5
# return hs.transpose(1, 2), memory.permute(1, 2, 0).view(bs, c, h, w, d)
class TransformerEncoder(nn.Module):
def __init__(self, encoder_layer, num_layers, norm=None):
super().__init__()
self.layers = _get_clones(encoder_layer, num_layers)
self.num_layers = num_layers
self.norm = norm
def forward(self, src, mask=None, src_key_padding_mask=None, pos=None):
output = src
for i in range(self.num_layers):
output = self.layers[i](
output, src_mask=mask, src_key_padding_mask=src_key_padding_mask, pos=pos
)
if self.norm:
output = self.norm(output)
return output
class TransformerDecoder(nn.Module):
def __init__(self, decoder_layer, num_layers, norm=None, return_intermediate=False):
super().__init__()
self.layers = _get_clones(decoder_layer, num_layers)
self.num_layers = num_layers
self.norm = norm
self.return_intermediate = return_intermediate
def forward(
self,
tgt,
memory,
tgt_mask=None,
memory_mask=None,
tgt_key_padding_mask=None,
memory_key_padding_mask=None,
pos=None,
query_pos=None,
):
output = tgt
intermediate = []
for i in range(self.num_layers):
output = self.layers[i](
output,
memory,
tgt_mask=tgt_mask,
memory_mask=memory_mask,
tgt_key_padding_mask=tgt_key_padding_mask,
memory_key_padding_mask=memory_key_padding_mask,
pos=pos,
query_pos=query_pos,
)
if self.return_intermediate:
act = output if self.layers[i].normalize_before else self.norm(output)
intermediate.append(act)
if self.norm:
output = self.norm(output)
if self.return_intermediate:
intermediate.pop()
intermediate.append(output)
if self.return_intermediate:
return torch.stack(intermediate)
return output
class TransformerEncoderLayer(nn.Module):
def __init__(
self,
d_model,
nhead,
dim_feedforward=2048,
dropout=0.1,
activation="relu",
normalize_before=False,
):
super().__init__()
self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
# Implementation of Feedforward model
self.linear1 = nn.Linear(d_model, dim_feedforward)
self.dropout = nn.Dropout(dropout)
self.linear2 = nn.Linear(dim_feedforward, d_model)
self.norm1 = nn.LayerNorm(d_model)
self.norm2 = nn.LayerNorm(d_model)
self.dropout1 = nn.Dropout(dropout)
self.dropout2 = nn.Dropout(dropout)
self.activation = _get_activation_fn(activation)
self.normalize_before = normalize_before
@staticmethod
def with_pos_embed(tensor, pos):
return tensor if pos is None else tensor + pos
def forward_post(self, src, src_mask=None, src_key_padding_mask=None, pos=None):
q = k = self.with_pos_embed(src, pos)
src2 = self.self_attn(
q, k, value=src, attn_mask=src_mask, key_padding_mask=src_key_padding_mask
)[0]
src = src + self.dropout1(src2)
src = self.norm1(src)
src2 = self.linear2(self.dropout(self.activation(self.linear1(src))))
src = src + self.dropout2(src2)
src = self.norm2(src)
return src
def forward_pre(self, src, src_mask, src_key_padding_mask=None, pos=None):
src2 = self.norm1(src)
q = k = self.with_pos_embed(src2, pos)
src2 = self.self_attn(
q, k, value=src2, attn_mask=src_mask, key_padding_mask=src_key_padding_mask
)[0]
src = src + self.dropout1(src2)
src2 = self.norm2(src)
src2 = self.linear2(self.dropout(self.activation(self.linear1(src2))))
src = src + self.dropout2(src2)
return src
def forward(self, src, src_mask, src_key_padding_mask=None, pos=None):
if self.normalize_before:
return self.forward_pre(src, src_mask, src_key_padding_mask, pos)
return self.forward_post(src, src_mask, src_key_padding_mask, pos)
class TransformerDecoderLayer(nn.Module):
def __init__(
self,
d_model,
nhead,
dim_feedforward=2048,
dropout=0.1,
activation="relu",
normalize_before=False,
):
super().__init__()
self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
self.multihead_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
# Implementation of Feedforward model
self.linear1 = nn.Linear(d_model, dim_feedforward)
self.dropout = nn.Dropout(dropout)
self.linear2 = nn.Linear(dim_feedforward, d_model)
self.norm1 = nn.LayerNorm(d_model)
self.norm2 = nn.LayerNorm(d_model)
self.norm3 = nn.LayerNorm(d_model)
self.dropout1 = nn.Dropout(dropout)
self.dropout2 = nn.Dropout(dropout)
self.dropout3 = nn.Dropout(dropout)
self.activation = _get_activation_fn(activation)
self.normalize_before = normalize_before
@staticmethod
def with_pos_embed(tensor, pos):
return tensor if pos is None else tensor + pos
def forward_post(
self,
tgt,
memory,
tgt_mask=None,
memory_mask=None,
tgt_key_padding_mask=None,
memory_key_padding_mask=None,
pos=None,
query_pos=None,
):
q = k = self.with_pos_embed(tgt, query_pos)
tgt2 = self.self_attn(
q, k, value=tgt, attn_mask=tgt_mask, key_padding_mask=tgt_key_padding_mask
)[0]
tgt = tgt + self.dropout1(tgt2)
tgt = self.norm1(tgt)
tgt2 = self.multihead_attn(
query=self.with_pos_embed(tgt, query_pos),
key=self.with_pos_embed(memory, pos),
value=memory,
attn_mask=memory_mask,
key_padding_mask=memory_key_padding_mask,
)[0]
tgt = tgt + self.dropout2(tgt2)
tgt = self.norm2(tgt)
tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt))))
tgt = tgt + self.dropout3(tgt2)
tgt = self.norm3(tgt)
return tgt
def forward_pre(
self,
tgt,
memory,
tgt_mask=None,
memory_mask=None,
tgt_key_padding_mask=None,
memory_key_padding_mask=None,
pos=None,
query_pos=None,
):
tgt2 = self.norm1(tgt)
q = k = self.with_pos_embed(tgt2, query_pos)
tgt2 = self.self_attn(
q, k, value=tgt2, attn_mask=tgt_mask, key_padding_mask=tgt_key_padding_mask
)[0]
tgt = tgt + self.dropout1(tgt2)
tgt2 = self.norm2(tgt)
tgt2 = self.multihead_attn(
query=self.with_pos_embed(tgt2, query_pos),
key=self.with_pos_embed(memory, pos),
value=memory,
attn_mask=memory_mask,
key_padding_mask=memory_key_padding_mask,
)[0]
tgt = tgt + self.dropout2(tgt2)
tgt2 = self.norm3(tgt)
tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt2))))
tgt = tgt + self.dropout3(tgt2)
return tgt
def forward(
self,
tgt,
memory,
tgt_mask=None,
memory_mask=None,
tgt_key_padding_mask=None,
memory_key_padding_mask=None,
pos=None,
query_pos=None,
):
if self.normalize_before:
return self.forward_pre(
tgt,
memory,
tgt_mask,
memory_mask,
tgt_key_padding_mask,
memory_key_padding_mask,
pos,
query_pos,
)
return self.forward_post(
tgt,
memory,
tgt_mask,
memory_mask,
tgt_key_padding_mask,
memory_key_padding_mask,
pos,
query_pos,
)
def _get_clones(module, N):
return nn.ModuleList([copy.deepcopy(module) for i in range(N)])
def build_transformer(args):
return Transformer(
d_model=args.hidden_dim,
dropout=args.dropout,
nhead=args.nheads,
dim_feedforward=args.dim_feedforward,
num_encoder_layers=args.enc_layers,
num_decoder_layers=args.dec_layers,
normalize_before=args.pre_norm,
return_intermediate_dec=True,
pass_pos_and_query=args.pass_pos_and_query,
)
def _get_activation_fn(activation):
"""Return an activation function given a string"""
if activation == "relu":
return F.relu
if activation == "gelu":
return F.gelu
if activation == "glu":
return F.glu
raise RuntimeError(f"activation should be relu/gelu, not {activation}.")
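def _demo_transformer_shapes():
    # Hedged example (not part of the original file): shows the shapes the 3D
    # variant above expects and returns; all sizes are arbitrary and chosen
    # only to keep the example small.
    d_model, bs, h, w, d, num_queries = 32, 2, 4, 4, 4, 10
    model = Transformer(
        d_model=d_model,
        nhead=4,
        num_encoder_layers=1,
        num_decoder_layers=1,
        dim_feedforward=64,
        return_intermediate_dec=True,
    )
    src = torch.rand(bs, d_model, h, w, d)
    mask = torch.zeros(bs, h, w, d, dtype=torch.bool)
    query_embed = torch.rand(num_queries, d_model)
    pos_embed = torch.rand(bs, d_model, h, w, d)
    hs, memory = model(src, mask, query_embed, pos_embed)
    return hs.shape, memory.shape
    # -> (num_decoder_layers, bs, num_queries, d_model) and (bs, d_model, h, w, d)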
| craftassist-master | python/craftassist/voxel_models/detection-transformer/models/detr.py |
from itertools import zip_longest
import torch
from scipy.optimize import linear_sum_assignment
from torch import nn
from box_ops import generalized_box_iou, box_cxcyczwhd_to_xyzxyz
def prepare_outputs(outputs):
"""
Change convention from outputs = {scores[N], boxes[N]}
into a [{scores[0], boxes[0]}, ..., {scores[N], boxes[N]}]
"""
return [dict(zip_longest(outputs, t)) for t in zip_longest(*outputs.values())]
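def _demo_prepare_outputs():
    # Hedged example (not part of the original file): a batched dict of
    # tensors is re-sliced into one dict per batch element, which is what the
    # matchers below iterate over.
    outputs = {
        "pred_scores": torch.rand(2, 5, 4),  # (batch, queries, classes)
        "pred_boxes": torch.rand(2, 5, 6),  # (batch, queries, box params)
    }
    per_image = prepare_outputs(outputs)
    return len(per_image), per_image[0]["pred_scores"].shape  # -> 2, torch.Size([5, 4])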
class HungarianMatcher(nn.Module):
def __init__(self, cost_class, cost_bbox, cost_giou):
super().__init__()
self.cost_class = cost_class
self.cost_bbox = cost_bbox
self.cost_giou = cost_giou
assert cost_class != 0 or cost_bbox != 0 or cost_giou != 0, "all costs cant be 0"
@torch.no_grad()
def forward(self, outputs, targets):
indices = []
outputs = outputs.copy()
outputs["pred_scores"] = outputs["pred_logits"].softmax(dim=-1)
outputs = prepare_outputs(outputs)
for out, tgt in zip(outputs, targets):
cost = self._get_cost_matrix(out, tgt)
src_idx, tgt_idx = linear_sum_assignment(cost.cpu())
src_idx, tgt_idx = torch.as_tensor(src_idx), torch.as_tensor(tgt_idx)
indices.append((src_idx, tgt_idx))
return indices
def _get_cost_matrix(self, out, tgt):
out_prob, out_bbox = out["pred_scores"], out["pred_boxes"]
tgt_ids, tgt_bbox = tgt["labels"], tgt["boxes"]
cost_class = -out_prob[:, tgt_ids]
cost_bbox = torch.cdist(out_bbox, tgt_bbox, p=1)
cost_giou = -generalized_box_iou(
box_cxcyczwhd_to_xyzxyz(out_bbox), box_cxcyczwhd_to_xyzxyz(tgt_bbox)
)
cost = (
self.cost_bbox * cost_bbox + self.cost_class * cost_class + self.cost_giou * cost_giou
)
return cost
class SequentialMatcher(nn.Module):
def forward(self, outputs, targets):
return [(torch.arange(len(tgt["labels"])),) * 2 for tgt in targets]
class LexicographicalMatcher(nn.Module):
def __init__(self, lexic="acb"):
super().__init__()
self.lexic = lexic
def forward(self, outputs, targets):
indices = []
for tgt in targets:
tgt_cls, tgt_box = tgt["labels"], tgt["boxes"]
area = tgt_box[:, 2] * tgt_box[:, 3]
if self.lexic == "acb":
search_list = [
(-a, cl, b)
for cl, a, b in zip(tgt_cls.tolist(), area.tolist(), tgt_box.tolist())
]
else:
search_list = [
(cl, -a, b)
for cl, a, b in zip(tgt_cls.tolist(), area.tolist(), tgt_box.tolist())
]
# argsort from https://stackoverflow.com/questions/3382352/equivalent-of-numpy-argsort-in-basic-python
j = sorted(range(len(search_list)), key=search_list.__getitem__)
j = torch.as_tensor(j, dtype=torch.int64)
i = torch.arange(len(j), dtype=j.dtype)
indices.append((i, j))
return indices
def build_matcher(args):
if args.set_loss == "sequential":
matcher = SequentialMatcher()
elif args.set_loss == "hungarian":
matcher = HungarianMatcher(
cost_class=args.set_cost_class,
cost_bbox=args.set_cost_bbox,
cost_giou=args.set_cost_giou,
)
elif args.set_loss == "lexicographical":
matcher = LexicographicalMatcher()
else:
raise ValueError(
f"Only sequential, lexicographical and hungarian accepted, got {args.set_loss}"
)
return matcher
| craftassist-master | python/craftassist/voxel_models/detection-transformer/models/matcher.py |
"""
Copyright (c) Facebook, Inc. and its affiliates.
"""
import numpy as np
import torch
import torch.nn as nn
def underdirt(schematic, labels=None, max_shift=0, nothing_id=0):
# todo fancier dirt!
# FIXME!!!! label as ground where appropriate
shift = torch.randint(max_shift + 1, (1,)).item()
if shift > 0:
new_schematic = torch.LongTensor(schematic.size())
new_schematic[:, shift:, :] = schematic[:, :-shift, :]
new_schematic[:, :shift, :] = 3
new_labels = None
if labels is not None:
new_labels = torch.LongTensor(labels.size())
new_labels[:, shift:, :] = labels[:, :-shift, :]
new_labels[:, :shift, :] = nothing_id
return new_schematic, new_labels
else:
return schematic, labels
def flip_rotate(c, l=None, idx=None):
"""
Randomly transform the cube for more data.
The transformation is chosen from:
0. original
1. x-z plane rotation 90
2. x-z plane rotation 180
3. x-z plane rotation 270
4. x-axis flip
5. z-axis flip
"""
idx = np.random.choice(range(6)) if (idx is None) else idx
l_ = l
if idx == 0:
c_ = c
l_ = l
elif idx >= 1 and idx <= 3: # rotate
npc = c.numpy()
npc = np.rot90(npc, idx, axes=(0, 2)) # rotate on the x-z plane
c_ = torch.from_numpy(npc.copy())
if l is not None:
npl = l.numpy()
npl = np.rot90(npl, idx, axes=(0, 2)) # rotate on the x-z plane
l_ = torch.from_numpy(npl.copy())
else: # flip
npc = c.numpy()
npc = np.flip(npc, axis=(idx - 4) * 2) # 0 or 2
c_ = torch.from_numpy(npc.copy())
if l is not None:
npl = l.numpy()
npl = np.flip(npl, axis=(idx - 4) * 2) # 0 or 2
l_ = torch.from_numpy(npl.copy())
return c_, l_, idx
def pad_to_sidelength(schematic, labels=None, nothing_id=0, sidelength=32):
szs = list(schematic.size())
szs = np.add(szs, -sidelength)
pad = []
# this is all backwards bc pytorch pad semantics :(
for s in szs:
if s >= 0:
pad.append(0)
else:
pad.append(-s)
pad.append(0)
schematic = torch.nn.functional.pad(schematic, pad[::-1])
if labels is not None:
labels = torch.nn.functional.pad(labels, pad[::-1], value=nothing_id)
return schematic, labels
# TODO simplify
def fit_in_sidelength(schematic, labels=None, nothing_id=0, sl=32, max_shift=0):
schematic, labels = pad_to_sidelength(
schematic, labels=labels, nothing_id=nothing_id, sidelength=sl
)
nz = schematic.nonzero()
m, _ = nz.median(0)
min_y, _ = nz.min(0)
min_y = min_y[1]
xshift = max(torch.randint(-max_shift, max_shift + 1, (1,)).item() - m[0].item() + sl // 2, 0)
zshift = max(torch.randint(-max_shift, max_shift + 1, (1,)).item() - m[2].item() + sl // 2, 0)
new_schematic = torch.LongTensor(sl, sl, sl).fill_(1)
new_schematic[xshift:, : sl - min_y, zshift:] = schematic[
: sl - xshift, min_y:sl, : sl - zshift
]
new_labels = None
if labels is not None:
new_labels = torch.LongTensor(sl, sl, sl).fill_(nothing_id)
new_labels[xshift:, : sl - min_y, zshift:] = labels[: sl - xshift, min_y:sl, : sl - zshift]
return new_schematic, new_labels, (xshift, -min_y, zshift)
def make_example_from_raw(schematic, labels=None, augment={}, nothing_id=0, sl=32):
max_shift = augment.get("max_shift", 0)
s, l, o = fit_in_sidelength(
schematic, labels=labels, nothing_id=nothing_id, max_shift=max_shift
)
if len(augment) > 0:
if augment.get("flip_rotate", False):
s, l, _ = flip_rotate(s, l=l)
m = augment.get("underdirt")
if m is not None:
# really should fix offset here.....TODO
s, l = underdirt(s, labels=l, max_shift=m, nothing_id=nothing_id)
s[s == 0] = 1
s -= 1
return s, l, o
class SemSegNet(nn.Module):
def __init__(self, classes=None):
super(SemSegNet, self).__init__()
# if opts.load:
# if opts.load_model != "":
# self.load(opts.load_model)
# else:
# raise ("loading from file specified but no load_filepath specified")
# else:
# self._build()
# self.classes = classes
self._build()
self.classes = classes
    def _build(self):
        # These hyperparameters were presumably read from an opts object in an
        # earlier version (hence the commented-out opts handling in __init__);
        # they are hard-coded here.
        embedding_dim = 4
        num_words = 256
        num_layers = 4  # 32x32x32 input
        hidden_dim = 128
self.embedding_dim = embedding_dim
self.embedding = nn.Embedding(num_words, embedding_dim)
self.layers = nn.ModuleList()
self.num_layers = num_layers
self.layers.append(
nn.Sequential(
nn.Conv3d(embedding_dim, hidden_dim, kernel_size=5, padding=2),
nn.BatchNorm3d(hidden_dim),
nn.ReLU(inplace=True),
)
)
for i in range(num_layers - 1):
if i == 0:
self.layers.append(
nn.Sequential(
nn.Conv3d(hidden_dim, hidden_dim, kernel_size=5, padding=2),
nn.BatchNorm3d(hidden_dim),
nn.ReLU(inplace=True),
)
)
else:
self.layers.append(
nn.Sequential(
nn.Conv3d(hidden_dim, hidden_dim, kernel_size=5, stride=2, padding=2),
nn.BatchNorm3d(hidden_dim),
nn.ReLU(inplace=True),
)
)
# self.out = nn.Conv3d(hidden_dim, opts.num_classes, kernel_size=1)
# self.lsm = nn.LogSoftmax(dim=1)
# def forward(self, x):
# shape = list(x.size())
# shape.insert(1, 128)
# ret = torch.zeros(shape).cuda() + 0.5
# return ret
def forward(self, x):
# FIXME when pytorch is ready for this, embedding
# backwards is soooooo slow
# z = self.embedding(x)
# print('x size==> {}'.format(x.size()))
szs = list(x.size())
x = x.view(-1)
# print('x view size==> {}'.format(x.size()))
# print('embed size==> {}'.format(self.embedding.weight.size()))
z = self.embedding.weight.index_select(0, x)
# print('z size==> {}'.format(z.size()))
szs.append(self.embedding_dim)
z = z.view(torch.Size(szs))
# print('z view size==> {}'.format(z.size()))
z = z.permute(0, 4, 1, 2, 3).contiguous()
# print('z permute size==> {}'.format(z.size()))
for i in range(self.num_layers):
z = self.layers[i](z)
# print('layer {} : z fc after size==> {}'.format(i, z.size()))
# out = self.out(z)
# print('out size==> {}'.format(out.size()))
# rtr = self.lsm(out)
# print('return size==> {}'.format(z.size()))
return z
    def save(self, filepath):
        self.cpu()
        sds = {}
        # self.opts may not exist, since the opts-based constructor path is
        # commented out in __init__; guard the accesses so save() still works
        sds["opts"] = getattr(self, "opts", None)
        sds["classes"] = self.classes
        sds["state_dict"] = self.state_dict()
        torch.save(sds, filepath)
        if getattr(self, "opts", None) is not None and self.opts.cuda:
            self.cuda()
def load(self, filepath):
sds = torch.load(filepath)
self.opts = sds["opts"]
print("loading from file, using opts")
print(self.opts)
self._build()
self.load_state_dict(sds["state_dict"])
self.zero_grad()
self.classes = sds["classes"]
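def _demo_semsegnet_shapes():
    # Hedged example (not part of the original file): the network embeds block
    # ids and returns a feature volume; with the hard-coded settings, the two
    # stride-2 convolutions reduce a 32^3 input to 8^3.
    net = SemSegNet()
    blocks = torch.randint(0, 256, (1, 32, 32, 32))
    feats = net(blocks)
    return feats.shape  # -> torch.Size([1, 128, 8, 8, 8])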
class Opt:
pass
class SemSegWrapper:
def __init__(self, model, threshold=-1.0, blocks_only=True, cuda=False):
if type(model) is str:
opts = Opt()
opts.load = True
opts.load_model = model
model = SemSegNet(opts)
self.model = model
self.cuda = cuda
if self.cuda:
model.cuda()
else:
model.cpu()
self.classes = model.classes
# threshold for relevance; unused rn
self.threshold = threshold
# if true only label non-air blocks
self.blocks_only = blocks_only
# this is used by the semseg_process
i2n = self.classes["idx2name"]
self.tags = [(c, self.classes["name2count"][c]) for c in i2n]
assert self.classes["name2idx"]["none"] == 0
@torch.no_grad()
def segment_object(self, blocks):
self.model.eval()
blocks = torch.from_numpy(blocks)[:, :, :, 0]
blocks, _, o = make_example_from_raw(blocks)
blocks = blocks.unsqueeze(0)
if self.cuda:
blocks = blocks.cuda()
y = self.model(blocks)
_, mids = y.squeeze().max(0)
locs = mids.nonzero()
locs = locs.tolist()
if self.blocks_only:
return {
tuple(np.subtract(l, o)): mids[l[0], l[1], l[2]].item()
for l in locs
if blocks[0, l[0], l[1], l[2]] > 0
}
else:
return {tuple(ll for ll in l): mids[l[0], l[1], l[2]].item() for l in locs}
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument(
"--hidden_dim", type=int, default=128, help="size of hidden dim in fc layer"
)
parser.add_argument("--embedding_dim", type=int, default=16, help="size of blockid embedding")
parser.add_argument("--num_words", type=int, default=256, help="number of blocks")
parser.add_argument("--num_classes", type=int, default=20, help="number of blocks")
args = parser.parse_args()
N = SemSegNet(args)
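    # quick forward-shape check (added illustrative sketch): note that the current
    # SemSegNet constructor ignores these command-line opts and uses the sizes
    # hard-coded in _build()
    x = torch.zeros(1, 32, 32, 32, dtype=torch.long)
    y = N(x)
    print(y.shape)  # e.g. torch.Size([1, 128, 8, 8, 8]) after the strided conv layers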
| craftassist-master | python/craftassist/voxel_models/detection-transformer/models/semseg.py |
from collections import OrderedDict
import torch
import torch.nn.functional as F
import torchvision
from torch import nn
from torchvision.models._utils import IntermediateLayerGetter
from utils import NestedTensor
from .position_encoding import build_position_encoding
from .semseg import SemSegNet
class FrozenBatchNorm2d(torch.nn.Module):
"""
BatchNorm2d where the batch statistics and the affine parameters
are fixed
"""
def __init__(self, n):
super(FrozenBatchNorm2d, self).__init__()
self.register_buffer("weight", torch.ones(n))
self.register_buffer("bias", torch.zeros(n))
self.register_buffer("running_mean", torch.zeros(n))
self.register_buffer("running_var", torch.ones(n))
def _load_from_state_dict(
self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs
):
num_batches_tracked_key = prefix + "num_batches_tracked"
if num_batches_tracked_key in state_dict:
del state_dict[num_batches_tracked_key]
super(FrozenBatchNorm2d, self)._load_from_state_dict(
state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs
)
def forward(self, x):
# move reshapes to the beginning
# to make it fuser-friendly
w = self.weight.reshape(1, -1, 1, 1)
b = self.bias.reshape(1, -1, 1, 1)
rv = self.running_var.reshape(1, -1, 1, 1)
rm = self.running_mean.reshape(1, -1, 1, 1)
eps = 1e-5
scale = w * (rv + eps).rsqrt()
bias = b - rm * scale
return x * scale + bias
class BackboneBase(nn.Module):
def __init__(
self,
backbone: nn.Module,
train_backbone: bool,
num_channels: int,
return_interm_layers: bool,
):
super().__init__()
for name, parameter in backbone.named_parameters():
if (
not train_backbone
or "layer2" not in name
and "layer3" not in name
and "layer4" not in name
):
parameter.requires_grad_(False)
if return_interm_layers:
return_layers = {"layer1": "0", "layer2": "1", "layer3": "2", "layer4": "3"}
else:
return_layers = {"layer4": 0}
self.body = IntermediateLayerGetter(backbone, return_layers=return_layers)
print("backbone body")
print(self.body)
self.num_channels = num_channels
def forward(self, tensor_list):
xs = self.body(tensor_list.tensors)
out = OrderedDict()
for name, x in xs.items():
mask = F.interpolate(tensor_list.mask[None].float(), size=x.shape[-2:]).bool()[0]
out[name] = NestedTensor(x, mask)
print("backbone out")
print(out)
return out
class Backbone(BackboneBase):
"""ResNet backbone with frozen BatchNorm."""
def __init__(
self, name: str, train_backbone: bool, return_interm_layers: bool, dilation: bool
):
backbone = getattr(torchvision.models, name)(
replace_stride_with_dilation=[False, False, dilation],
pretrained=True,
norm_layer=FrozenBatchNorm2d,
)
num_channels = 512 if name in ("resnet18", "resnet34") else 2048
super().__init__(backbone, train_backbone, num_channels, return_interm_layers)
class GroupNorm32(torch.nn.GroupNorm):
def __init__(self, num_channels, num_groups=32, **kargs):
super().__init__(num_groups, num_channels, **kargs)
class GroupNormBackbone(BackboneBase):
"""ResNet backbone with GroupNorm with 32 channels."""
def __init__(
self, name: str, train_backbone: bool, return_interm_layers: bool, dilation: bool
):
name_map = {
"resnet50-gn": ("resnet50", "/checkpoint/szagoruyko/imagenet/22014122/checkpoint.pth"),
"resnet101-gn": (
"resnet101",
"/checkpoint/szagoruyko/imagenet/22080524/checkpoint.pth",
),
}
backbone = getattr(torchvision.models, name_map[name][0])(
replace_stride_with_dilation=[False, False, dilation],
pretrained=False,
norm_layer=GroupNorm32,
)
checkpoint = torch.load(name_map[name][1], map_location="cpu")
state_dict = {k[7:]: p for k, p in checkpoint["model"].items()}
backbone.load_state_dict(state_dict)
num_channels = 512 if name_map[name][0] in ("resnet18", "resnet34") else 2048
super().__init__(backbone, train_backbone, num_channels, return_interm_layers)
class SemSegBackBone(nn.Module):
def __init__(self):
super().__init__()
self.backbone = SemSegNet()
def forward(self, tensor_list):
xs = self.backbone(tensor_list.tensors.long())
out = OrderedDict()
x = xs
name = "default"
mask = F.interpolate(tensor_list.mask[None].float(), size=x.shape[-3:]).bool()[0]
out[name] = NestedTensor(x, mask)
# for name, x in xs.items():
# mask = F.interpolate(tensor_list.mask[None].float(), size=x.shape[-2:]).bool()[0]
# out[name] = NestedTensor(x, mask)
return out
class Joiner(nn.Sequential):
def __init__(self, backbone, position_embedding):
super().__init__(backbone, position_embedding)
def forward(self, tensor_list):
xs = self[0](tensor_list)
out = []
pos = []
# print(type(xs))
for name, x in xs.items():
out.append(x)
# position encoding
pos.append(self[1](x).to(x.tensors.dtype))
return out, pos
def build_backbone(args):
position_embedding = build_position_encoding(args)
train_backbone = args.lr_backbone > 0
return_interm_layers = args.masks
if args.backbone in ("resnet50-gn", "resnet101-gn"):
backbone = GroupNormBackbone(
args.backbone, train_backbone, return_interm_layers, args.dilation
)
    elif args.backbone in ("semseg",):
backbone = SemSegBackBone()
else:
backbone = Backbone(args.backbone, train_backbone, return_interm_layers, args.dilation)
model = Joiner(backbone, position_embedding)
# 128 for semseg net, should pass arg here instead of hard coding
model.num_channels = 128 # backbone.num_channels
return model
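if __name__ == "__main__":
    # rough usage sketch (added; run as `python -m models.backbone` so the relative
    # import above resolves). Only the semseg branch is exercised, so no pretrained
    # resnet download is needed; the argument names mirror build_backbone above and
    # the values are illustrative assumptions. hidden_dim=384 splits evenly across
    # the three position-encoding axes (N_steps = 384 // 3 = 128).
    from types import SimpleNamespace
    args = SimpleNamespace(
        backbone="semseg",
        position_embedding="v2",
        hidden_dim=384,
        lr_backbone=0,
        masks=False,
        dilation=False,
    )
    model = build_backbone(args)
    print(model.num_channels)  # 128, hard-coded for the semseg backbone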
| craftassist-master | python/craftassist/voxel_models/detection-transformer/models/backbone.py |
import math
import torch
from torch import nn
class PositionEmbedding(nn.Module):
"""
This is a more standard version of the position embedding, very similar to the one
used by the Attention is all you need paper, generalized to work on images.
"""
def __init__(self, num_pos_feats=16, temperature=10000, normalize=False, scale=None):
super().__init__()
self.num_pos_feats = num_pos_feats
self.temperature = temperature
self.normalize = normalize
if scale is not None and normalize is False:
raise ValueError("normalize should be True if scale is passed")
if scale is None:
scale = 2 * math.pi
self.scale = scale
def forward(self, tensor_list):
x = tensor_list.tensors
mask = tensor_list.mask
# print('mask size')
# print(mask.shape)
not_mask = ~mask
z_embed = not_mask.cumsum(1, dtype=torch.float32)
y_embed = not_mask.cumsum(2, dtype=torch.float32)
x_embed = not_mask.cumsum(3, dtype=torch.float32)
# print('x_embed size: {}'.format(x_embed.shape))
# print('y_embed size: {}'.format(y_embed.shape))
# print('z_embed size: {}'.format(z_embed.shape))
if self.normalize:
eps = 1e-6
z_embed = z_embed / (z_embed[:, -1:, :, :] + eps) * self.scale
y_embed = y_embed / (y_embed[:, :, -1:, :] + eps) * self.scale
x_embed = x_embed / (x_embed[:, :, :, -1:] + eps) * self.scale
dim_t = torch.arange(self.num_pos_feats, dtype=torch.float32, device=x.device)
dim_t = self.temperature ** (3 * (dim_t // 3) / self.num_pos_feats)
# print('dim_t size: {}'.format(dim_t.shape))
# print("pos_X 111: {}".format(x_embed[:, :, :, :, None].shape))
pos_x = x_embed[:, :, :, :, None] / dim_t
pos_y = y_embed[:, :, :, :, None] / dim_t
pos_z = z_embed[:, :, :, :, None] / dim_t
# print('pos_x size 1: {}'.format(pos_x.shape))
pos_x = torch.stack(
(pos_x[:, :, :, :, 0::2].sin(), pos_x[:, :, :, :, 1::2].cos()), dim=5
).flatten(4)
pos_y = torch.stack(
(pos_y[:, :, :, :, 0::2].sin(), pos_y[:, :, :, :, 1::2].cos()), dim=5
).flatten(4)
        pos_z = torch.stack(
            (pos_z[:, :, :, :, 0::2].sin(), pos_z[:, :, :, :, 1::2].cos()), dim=5
        ).flatten(4)
# print('pos_x size 2: {}'.format(pos_x.shape))
pos = torch.cat((pos_z, pos_y, pos_x), dim=4).permute(0, 4, 1, 2, 3)
return pos
# class PositionEmbedding(nn.Module):
# """
# This is a more standard version of the position embedding, very similar to the one
# used by the Attention is all you need paper, generalized to work on images.
# """
# def __init__(self, num_pos_feats=64, temperature=10000, normalize=False, scale=None):
# super().__init__()
# self.num_pos_feats = num_pos_feats
# self.temperature = temperature
# self.normalize = normalize
# if scale is not None and normalize is False:
# raise ValueError("normalize should be True if scale is passed")
# if scale is None:
# scale = 2 * math.pi
# self.scale = scale
# def forward(self, tensor_list):
# x = tensor_list.tensors
# mask = tensor_list.mask
# not_mask = ~mask
# y_embed = not_mask.cumsum(1, dtype=torch.float32)
# x_embed = not_mask.cumsum(2, dtype=torch.float32)
# print('x_embed size: {}'.format(x_embed.shape))
# if self.normalize:
# eps = 1e-6
# y_embed = y_embed / (y_embed[:, -1:, :] + eps) * self.scale
# x_embed = x_embed / (x_embed[:, :, -1:] + eps) * self.scale
# dim_t = torch.arange(self.num_pos_feats, dtype=torch.float32, device=x.device)
# dim_t = self.temperature ** (2 * (dim_t // 2) / self.num_pos_feats)
# print('dim_t size: {}'.format(dim_t.shape))
# pos_x = x_embed[:, :, :, None] / dim_t
# pos_y = y_embed[:, :, :, None] / dim_t
# print('pos_x size 1: {}'.format(pos_x.shape))
# pos_x = torch.stack((pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4).flatten(3)
# pos_y = torch.stack((pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4).flatten(3)
# print('pos_x size 2: {}'.format(pos_x.shape))
# pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2)
# return pos
class PositionEmbeddingLearned(nn.Module):
"""
Absolute pos embedding, learned.
"""
def __init__(self, num_pos_feats=256):
super().__init__()
self.row_embed = nn.Embedding(50, num_pos_feats)
self.col_embed = nn.Embedding(50, num_pos_feats)
self.reset_parameters()
def reset_parameters(self):
nn.init.uniform_(self.row_embed.weight)
nn.init.uniform_(self.col_embed.weight)
def forward(self, tensor_list):
x = tensor_list.tensors
h, w = x.shape[-2:]
i = torch.arange(w, device=x.device)
j = torch.arange(h, device=x.device)
x_emb = self.col_embed(i)
y_emb = self.row_embed(j)
pos = (
torch.cat(
[x_emb.unsqueeze(0).repeat(h, 1, 1), y_emb.unsqueeze(1).repeat(1, w, 1)], dim=-1
)
.permute(2, 0, 1)
.unsqueeze(0)
.repeat(x.shape[0], 1, 1, 1)
)
return pos
def build_position_encoding(args):
N_steps = args.hidden_dim // 3
if args.position_embedding == "v2":
# TODO find a better way of exposing other arguments
position_embedding = PositionEmbedding(N_steps, normalize=True)
elif args.position_embedding == "v3":
position_embedding = PositionEmbeddingLearned(N_steps)
else:
raise ValueError(f"not supported {args.position_embedding}")
return position_embedding
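if __name__ == "__main__":
    # quick shape check (added sketch): the sinusoidal 3-D embedding only needs an
    # object exposing .tensors and .mask, so a SimpleNamespace stands in for the
    # project's NestedTensor here; all sizes are illustrative
    from types import SimpleNamespace
    t = SimpleNamespace(
        tensors=torch.zeros(2, 8, 4, 4, 4), mask=torch.zeros(2, 4, 4, 4, dtype=torch.bool)
    )
    pe = PositionEmbedding(num_pos_feats=16, normalize=True)
    print(pe(t).shape)  # torch.Size([2, 48, 4, 4, 4]): 3 * num_pos_feats channels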
| craftassist-master | python/craftassist/voxel_models/detection-transformer/models/position_encoding.py |
import importlib
def build_model(args):
# what a hack
mod = importlib.import_module("models." + args.model_file)
return mod.build(args)
| craftassist-master | python/craftassist/voxel_models/detection-transformer/models/__init__.py |
import torch
import torch.nn.functional as F
from torch import nn
from torchvision.ops import misc as misc_ops
import box_ops
# TODO need to do proper packaging as this is getting confusing
from utils import NestedTensor, accuracy, get_world_size, is_dist_avail_and_initialized
from .backbone import build_backbone
from .common import MLP
from .detr import build_transformer
from .loss_utils import dice_loss, sigmoid_focal_loss
from .mask_heads import DETRmask
from .matcher import build_matcher
class DETR(nn.Module):
def __init__(self, backbone, transformer, num_classes, num_queries, aux_loss=False):
super().__init__()
self.num_queries = num_queries
self.transformer = transformer
hidden_dim = transformer.d_model
self.class_embed = nn.Linear(hidden_dim, num_classes + 1)
self.bbox_embed = MLP(hidden_dim, hidden_dim, 6, 3)
self.query_embed = nn.Embedding(num_queries, hidden_dim)
self.input_proj = nn.Conv3d(backbone.num_channels, hidden_dim, kernel_size=1)
self.backbone = backbone
self.aux_loss = aux_loss
def forward(self, samples: NestedTensor):
print("... DTER Forwarding ... ")
print(samples.tensors.shape)
if not isinstance(samples, NestedTensor):
samples = NestedTensor.from_tensor_list(samples)
features, pos = self.backbone(samples)
src, mask = features[-1].decompose()
# (6, bs, num_queries, hidden_dim)
hs = self.transformer(self.input_proj(src), mask, self.query_embed.weight, pos[-1])[0]
print("---- hs size ----")
print(hs.shape)
outputs_class = self.class_embed(hs)
outputs_coord = self.bbox_embed(hs).sigmoid()
out = {"pred_logits": outputs_class[-1], "pred_boxes": outputs_coord[-1]}
if self.aux_loss:
out["aux_outputs"] = [
{"pred_logits": a, "pred_boxes": b}
for a, b in zip(outputs_class[:-1], outputs_coord[:-1])
]
return out
class SetCriterion(nn.Module):
def __init__(self, num_classes, matcher, weight_dict, eos_coef, losses):
super().__init__()
self.num_classes = num_classes
self.matcher = matcher
self.weight_dict = weight_dict
self.eos_coef = eos_coef
self.losses = losses
empty_weight = torch.ones(self.num_classes + 1)
empty_weight[-1] = self.eos_coef
self.register_buffer("empty_weight", empty_weight)
def match(self, outputs, targets):
assert len(outputs["pred_logits"]) == len(targets)
return self.matcher(outputs, targets)
def loss_labels(self, outputs, targets, indices, num_boxes, log=True):
assert "pred_logits" in outputs
src_logits = outputs["pred_logits"]
# class loss
target_classes_o = [t["labels"][J] for t, (_, J) in zip(targets, indices)]
target_classes = torch.full(
src_logits.shape[:2], self.num_classes, dtype=torch.int64, device=src_logits.device
)
# TODO optimize this
for k, (I, _) in enumerate(indices):
target_classes[k][I] = target_classes_o[k]
loss_ce = F.cross_entropy(
src_logits.flatten(0, 1), target_classes.flatten(0, 1), self.empty_weight
)
losses = {"loss_ce": loss_ce}
if log:
# TODO this should probably be a separate loss, not hacked in
# this one here
idx = self._get_src_permutation_idx(indices)
ordered_src_logits = src_logits[idx]
target_classes_o = torch.cat(target_classes_o)
losses["class_error"] = (
100 - accuracy(ordered_src_logits.detach(), target_classes_o)[0]
)
return losses
@torch.no_grad()
def loss_cardinality(self, outputs, targets, indices, num_boxes):
"""
Not really a loss, but well :-)
No gradients anyway
"""
pred_logits = outputs["pred_logits"]
device = pred_logits.device
tgt_lengths = torch.as_tensor([len(v["labels"]) for v in targets], device=device)
card_pred = (pred_logits.argmax(-1) != pred_logits.shape[-1] - 1).sum(1)
card_err = F.l1_loss(card_pred.float(), tgt_lengths.float())
losses = {"cardinality_error": card_err}
return losses
def loss_boxes(self, outputs, targets, indices, num_boxes):
assert "pred_boxes" in outputs
# print('------ outputs ---------')
# print(outputs['pred_logits'].shape)
idx = self._get_src_permutation_idx(indices)
src_boxes = outputs["pred_boxes"][idx]
target_boxes = torch.cat([t["boxes"][i] for t, (_, i) in zip(targets, indices)], dim=0)
loss_bbox = F.l1_loss(src_boxes, target_boxes, reduction="none")
losses = {}
losses["loss_bbox"] = loss_bbox.sum() / (num_boxes * 4)
if "loss_giou" in self.weight_dict:
loss_giou = 1 - torch.diag(
box_ops.generalized_box_iou(
box_ops.box_cxcyczwhd_to_xyzxyz(src_boxes),
box_ops.box_cxcyczwhd_to_xyzxyz(target_boxes),
)
)
losses["loss_giou"] = loss_giou.sum() / num_boxes
return losses
def loss_masks(self, outputs, targets, indices, num_boxes):
assert "pred_masks" in outputs
# print('---- loss masks ----')
src_idx = self._get_src_permutation_idx(indices)
tgt_idx = self._get_tgt_permutation_idx(indices)
src_masks = outputs["pred_masks"]
# print('---- src masks ----')
# print(src_masks[0][0])
# print('---- targets ----')
# print(len(targets))
# print(targets[0]['masks'].shape)
# print(targets[0]['labels'].shape)
# TODO use valid to mask invalid areas due to padding in loss
target_masks, valid = NestedTensor.from_tensor_list(
[t["masks"] for t in targets]
).decompose()
target_masks = target_masks.to(src_masks)
src_masks = src_masks[src_idx]
src_masks = misc_ops.interpolate(
src_masks[:, None], size=target_masks.shape[-3:], mode="trilinear", align_corners=False
)
src_masks = src_masks[:, 0].flatten(1)
target_masks = target_masks[tgt_idx].flatten(1)
losses = {
"loss_mask": sigmoid_focal_loss(src_masks, target_masks, num_boxes),
"loss_dice": dice_loss(src_masks, target_masks, num_boxes),
}
return losses
def _get_src_permutation_idx(self, indices):
# permute predictions following indices
batch_idx = torch.cat(
[torch.full((len(src),), i, dtype=torch.int64) for i, (src, _) in enumerate(indices)]
)
src_idx = torch.cat([src for (src, _) in indices])
return batch_idx, src_idx
def _get_tgt_permutation_idx(self, indices):
# permute targets following indices
batch_idx = torch.cat(
[torch.full((len(tgt),), i, dtype=torch.int64) for i, (_, tgt) in enumerate(indices)]
)
tgt_idx = torch.cat([tgt for (_, tgt) in indices])
return batch_idx, tgt_idx
def get_loss(self, loss, outputs, targets, indices, num_boxes, **kwargs):
loss_map = {
"labels": self.loss_labels,
"cardinality": self.loss_cardinality,
"boxes": self.loss_boxes,
"masks": self.loss_masks,
}
assert loss in loss_map, f"do you really want to compute {loss} loss?"
return loss_map[loss](outputs, targets, indices, num_boxes, **kwargs)
def forward(self, outputs, targets):
outputs_without_aux = {k: v for k, v in outputs.items() if k != "aux_outputs"}
indices = self.matcher(outputs_without_aux, targets)
num_boxes = sum([len(t["labels"]) for t in targets])
num_boxes = torch.as_tensor(
[num_boxes], dtype=torch.float, device=next(iter(outputs.values())).device
)
if is_dist_avail_and_initialized():
torch.distributed.all_reduce(num_boxes)
num_boxes = torch.clamp(num_boxes / get_world_size(), min=1).item()
losses = {}
for loss in self.losses:
losses.update(self.get_loss(loss, outputs, targets, indices, num_boxes))
if "aux_outputs" in outputs:
for i, aux_outputs in enumerate(outputs["aux_outputs"]):
indices = self.matcher(aux_outputs, targets)
for loss in self.losses:
if loss == "masks":
continue
kwargs = {}
if loss == "labels":
kwargs = {"log": False}
l_dict = self.get_loss(
loss, aux_outputs, targets, indices, num_boxes, **kwargs
)
l_dict = {k + f"_{i}": v for k, v in l_dict.items()}
losses.update(l_dict)
return losses
class PostProcess(nn.Module):
def __init__(self, rescale_to_orig_size=False, threshold=0.3):
super().__init__()
self.rescale_to_orig_size = rescale_to_orig_size
self.threshold = threshold
def forward(self, outputs, targets):
out_logits, out_bbox = outputs["pred_logits"], outputs["pred_boxes"]
# convert to [x0, y0, x1, y1, z0, z1] format
boxes = []
field = "orig_size" if self.rescale_to_orig_size else "size"
out_bbox = box_ops.box_cxcyczwhd_to_xyzxyz(out_bbox)
for b, t in zip(out_bbox, targets):
img_d, img_h, img_w = t[field].tolist()
b = b * torch.tensor(
[img_w, img_h, img_d, img_w, img_h, img_d], dtype=torch.float32, device=b.device
)
boxes.append(b)
prob = F.softmax(out_logits, -1)
scores, labels = prob[..., :-1].max(-1)
results = [
{"scores": s, "labels": l, "boxes": b} for s, l, b in zip(scores, labels, boxes)
]
if "pred_masks" in outputs:
max_h = max([tgt["size"][0] for tgt in targets])
max_w = max([tgt["size"][1] for tgt in targets])
outputs_masks = outputs["pred_masks"]
outputs_masks = outputs_masks.squeeze(2)
outputs_masks = F.interpolate(
outputs_masks, size=(max_h, max_w), mode="bilinear", align_corners=False
).sigmoid()
outputs_masks = (outputs_masks > self.threshold).byte().cpu().detach()
out_masks = outputs_masks
for i, (cur_mask, t) in enumerate(zip(out_masks, targets)):
img_h, img_w = t["size"][0], t["size"][1]
results[i]["masks"] = cur_mask[:, :img_h, :img_w].unsqueeze(1)
if self.rescale_to_orig_size:
results[i]["masks"] = F.interpolate(
results[i]["masks"].float(),
size=tuple(t["orig_size"].tolist()),
mode="nearest",
).byte()
return results
def build(args):
num_classes = 20 if args.dataset_file != "coco" else 91
if args.dataset_file == "lvis":
num_classes = 1235
if args.dataset_file == "coco_panoptic":
num_classes = 250 # TODO: what is correct number? would be nice to refactor this anyways
device = torch.device(args.device)
assert not args.masks or args.mask_model != "none"
backbone = build_backbone(args)
transformer = build_transformer(args)
model = DETR(
backbone,
transformer,
num_classes=num_classes,
num_queries=args.num_queries,
aux_loss=args.aux_loss,
)
if args.mask_model != "none":
model = DETRmask(model, mask_head=args.mask_model)
matcher = build_matcher(args)
weight_dict = {"loss_ce": 1, "loss_bbox": args.bbox_loss_coef}
if args.giou_loss_coef:
weight_dict["loss_giou"] = args.giou_loss_coef
if args.masks:
weight_dict["loss_mask"] = args.mask_loss_coef
weight_dict["loss_dice"] = args.dice_loss_coef
# TODO this is a hack
if args.aux_loss:
aux_weight_dict = {}
for i in range(args.dec_layers - 1):
aux_weight_dict.update({k + f"_{i}": v for k, v in weight_dict.items()})
# print(aux_weight_dict)
weight_dict.update(aux_weight_dict)
losses = ["labels", "boxes", "cardinality"]
if args.masks:
losses += ["masks"]
criterion = SetCriterion(
num_classes,
matcher=matcher,
weight_dict=weight_dict,
eos_coef=args.eos_coef,
losses=losses,
)
criterion.to(device)
postprocessor = PostProcess().to(device)
return model, criterion, postprocessor
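if __name__ == "__main__":
    # tiny illustration (added sketch; run as `python -m models.model_parallel` so the
    # relative imports resolve). cardinality_error measures how far the number of
    # non-"no object" predictions is from the number of ground-truth boxes; it needs
    # no matcher or box utilities, so the other constructor arguments are dummies.
    crit = SetCriterion(num_classes=3, matcher=None, weight_dict={}, eos_coef=0.1, losses=[])
    outputs = {"pred_logits": torch.randn(2, 5, 4)}  # (bs, num_queries, num_classes + 1)
    targets = [{"labels": torch.tensor([0, 2])}, {"labels": torch.tensor([1])}]
    print(crit.loss_cardinality(outputs, targets, indices=None, num_boxes=3))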
| craftassist-master | python/craftassist/voxel_models/detection-transformer/models/model_parallel.py |
from torch import nn
import torch.nn.functional as F
class MLP(nn.Module):
"""Simple feed forward fully connected, with some options
Parameters
----------
input_dim : int
Number of input channels
hidden_dim : int
Number of channels in the hidden layers
output_dim : int
Number of output channels
nb_layers : int
Number of layers
"""
def __init__(self, input_dim, hidden_dim, output_dim, nb_layers=1):
super().__init__()
self.layers = nn.ModuleList()
for i in range(nb_layers):
is_last_layer = i == nb_layers - 1
cur_in = input_dim if i == 0 else hidden_dim
cur_out = output_dim if is_last_layer else hidden_dim
linear = nn.Linear(cur_in, cur_out)
self.layers.append(linear)
def forward(self, x): # pylint: disable=arguments-differ
for i in range(len(self.layers) - 1):
x = self.layers[i](x)
x = F.relu(x)
x = self.layers[-1](x)
return x
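if __name__ == "__main__":
    # minimal usage sketch (added): three linear layers with ReLU in between,
    # mapping 16-dim inputs to 4-dim outputs through a 32-dim hidden size
    import torch
    mlp = MLP(input_dim=16, hidden_dim=32, output_dim=4, nb_layers=3)
    x = torch.randn(8, 16)
    print(mlp(x).shape)  # torch.Size([8, 4])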
| craftassist-master | python/craftassist/voxel_models/detection-transformer/models/common.py |
"""
This file provides the definition of the convolutional heads used to predict masks
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from utils import NestedTensor
class DETRmask(nn.Module):
def __init__(self, detr, mask_head="v2"):
super().__init__()
self.detr = detr
hidden_dim, nheads = detr.transformer.d_model, detr.transformer.nhead
self.bbox_attention = MHAttentionMap(hidden_dim, hidden_dim, nheads, dropout=0)
if mask_head == "smallconv":
maskHead = MaskHeadSmallConv
mask_dim = hidden_dim + nheads
elif mask_head == "v2":
maskHead = MaskHeadV2
mask_dim = hidden_dim
else:
raise RuntimeError(f"Unknown mask model {mask_head}")
self.mask_head = maskHead(mask_dim, [256], hidden_dim)
def forward(self, samples: NestedTensor):
if not isinstance(samples, NestedTensor):
samples = NestedTensor.from_tensor_list(samples)
features, pos = self.detr.backbone(samples)
bs = features[-1].tensors.shape[0]
src, mask = features[-1].decompose()
src_proj = self.detr.input_proj(src)
hs, memory = self.detr.transformer(src_proj, mask, self.detr.query_embed.weight, pos[-1])
outputs_class = self.detr.class_embed(hs)
outputs_coord = self.detr.bbox_embed(hs).sigmoid()
out = {"pred_logits": outputs_class[-1], "pred_boxes": outputs_coord[-1]}
if self.detr.aux_loss:
out["aux_outputs"] = [
{"pred_logits": a, "pred_boxes": b}
for a, b in zip(outputs_class[:-1], outputs_coord[:-1])
]
# FIXME h_boxes takes the last one computed, keep this in mind
bbox_mask = self.bbox_attention(hs[-1], memory, mask=mask)
seg_masks = self.mask_head(src_proj, bbox_mask, [features[-1].tensors])
outputs_seg_masks = seg_masks.view(
bs,
self.detr.num_queries,
seg_masks.shape[-3],
seg_masks.shape[-2],
seg_masks.shape[-1],
)
out["pred_masks"] = outputs_seg_masks
return out
class MaskHeadSmallConv(nn.Module):
"""
Simple convolutional head, using group norm.
Upsampling is done using a FPN approach
"""
def __init__(self, dim, fpn_dims, context_dim):
super().__init__()
inter_dims = [
dim,
context_dim // 2,
context_dim // 4,
context_dim // 8,
context_dim // 16,
context_dim // 64,
]
self.lay1 = torch.nn.Conv3d(dim, dim, 3, padding=1)
self.gn1 = torch.nn.GroupNorm(8, dim)
self.lay2 = torch.nn.Conv3d(dim, inter_dims[1], 3, padding=1)
self.gn2 = torch.nn.GroupNorm(8, inter_dims[1])
self.out_lay = torch.nn.Conv3d(inter_dims[1], 1, 3, padding=1)
self.dim = dim
for m in self.modules():
if isinstance(m, nn.Conv3d):
nn.init.kaiming_uniform_(m.weight, a=1)
nn.init.constant_(m.bias, 0)
def forward(self, x, bbox_mask, fpns):
def expand(tensor, length):
return tensor.unsqueeze(1).repeat(1, int(length), 1, 1, 1, 1).flatten(0, 1)
# print(' @@@@ bbox mask')
# print(bbox_mask.shape)
# print(' @@@@ before maskhead size')
# print(x.shape)
x = torch.cat([expand(x, bbox_mask.shape[1]), bbox_mask.flatten(0, 1)], 1)
x = self.lay1(x)
x = self.gn1(x)
x = F.relu(x)
x = self.lay2(x)
x = self.gn2(x)
x = F.relu(x)
x = self.out_lay(x)
# print(' @@@@ after maskhead size')
# print(x.shape)
return x
class MaskHeadV2(nn.Module):
def __init__(self, dim, fpn_dims, context_dim):
super().__init__()
# inter_dims = [dim, context_dim // 4, context_dim // 16, context_dim // 64, context_dim // 128]
inter_dims = [context_dim // 4, context_dim // 16, context_dim // 64, context_dim // 128]
blocks = []
adapters = []
refiners = []
in_dim = dim
for i in range(2):
out_dim = inter_dims[i]
blocks.append(
nn.Sequential(
nn.Conv2d(in_dim, out_dim, 3, padding=1),
# nn.GroupNorm(8, out_dim),
# nn.ReLU()
)
)
adapters.append(nn.Conv2d(fpn_dims[i], out_dim, 1))
refiners.append(
nn.Sequential(
nn.Conv2d(out_dim, out_dim, 3, padding=1), nn.GroupNorm(8, out_dim), nn.ReLU()
)
)
in_dim = out_dim
self.blocks = nn.ModuleList(blocks)
self.adapters = nn.ModuleList(adapters)
self.refiners = nn.ModuleList(refiners)
self.out_lay = nn.Conv2d(in_dim, 1, 3, padding=1)
self.dim = dim
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_uniform_(m.weight, a=1)
nn.init.constant_(m.bias, 0)
def forward(self, x, bbox_mask, fpns):
# bbox_mask = bbox_mask.mean(2)
bs, num_queries, num_heads = bbox_mask.shape[:3]
for fpn, block, adapter, refiner in zip(fpns, self.blocks, self.adapters, self.refiners):
x = block(x)
adapted_fpn = adapter(fpn)
x = F.interpolate(x, size=adapted_fpn.shape[-2:], mode="nearest")
x = x.reshape((bs, -1) + x.shape[1:]) + adapted_fpn[:, None]
mask = F.interpolate(bbox_mask.flatten(1, 2), size=x.shape[-2:], mode="bilinear")
mask = mask.reshape((bs, num_queries, num_heads) + mask.shape[-2:])
x = x.reshape((bs, -1, num_heads, x.shape[2] // num_heads) + x.shape[3:])
x = x * mask[:, :, :, None]
x = x.flatten(2, 3)
x = x.flatten(0, 1)
x = refiner(x)
x = self.out_lay(x)
return x
class MHAttentionMap(nn.Module):
def __init__(self, query_dim, hidden_dim, num_heads, dropout=0, bias=True):
super().__init__()
self.num_heads = num_heads
self.hidden_dim = hidden_dim
self.dropout = nn.Dropout(dropout)
self.q_linear = nn.Linear(query_dim, hidden_dim, bias=bias)
self.k_linear = nn.Linear(query_dim, hidden_dim, bias=bias)
nn.init.zeros_(self.k_linear.bias)
nn.init.zeros_(self.q_linear.bias)
nn.init.xavier_uniform_(self.k_linear.weight)
nn.init.xavier_uniform_(self.q_linear.weight)
self.normalize_fact = float(hidden_dim / self.num_heads) ** -0.5
def forward(self, q, k, mask=None):
# q: (bs, num_queries, hidden_dim)
        # k: (bs, hidden_dim, h, w, d)
q = self.q_linear(q)
k = F.conv3d(
k, self.k_linear.weight.unsqueeze(-1).unsqueeze(-1).unsqueeze(-1), self.k_linear.bias
)
qh = q.view(q.shape[0], q.shape[1], self.num_heads, self.hidden_dim // self.num_heads)
kh = k.view(
k.shape[0],
self.num_heads,
self.hidden_dim // self.num_heads,
k.shape[-3],
k.shape[-2],
k.shape[-1],
)
weights = torch.einsum("bqnc,bnchwd->bqnhwd", qh * self.normalize_fact, kh)
if mask is not None:
weights.masked_fill_(mask.unsqueeze(1).unsqueeze(1), float("-inf"))
weights = F.softmax(weights.flatten(2), dim=-1).view_as(weights)
weights = self.dropout(weights)
return weights
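if __name__ == "__main__":
    # shape sketch (added): MHAttentionMap produces one attention map over the 3-D
    # feature grid per query and per head; the sizes below are illustrative
    q = torch.randn(2, 5, 64)  # (bs, num_queries, hidden_dim)
    k = torch.randn(2, 64, 4, 4, 4)  # (bs, hidden_dim, h, w, d)
    attn = MHAttentionMap(query_dim=64, hidden_dim=64, num_heads=8)
    print(attn(q, k).shape)  # torch.Size([2, 5, 8, 4, 4, 4])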
| craftassist-master | python/craftassist/voxel_models/detection-transformer/models/mask_heads.py |
"""
This file defines the basic loss functions that are used in the project
"""
import torch.nn.functional as F
def dice_loss(inputs, targets, num_boxes):
"""
Compute the DICE loss, similar to generalized IOU for masks
Args:
inputs: A float tensor of arbitrary shape.
The predictions for each example.
targets: A float tensor with the same shape as inputs. Stores the binary
classification label for each element in inputs
(0 for the negative class and 1 for the positive class).
"""
inputs = inputs.sigmoid()
inputs = inputs.flatten(1)
numerator = 2 * (inputs * targets).sum(1)
denominator = inputs.sum(-1) + targets.sum(-1)
loss = 1 - (numerator + 1) / (denominator + 1)
return loss.sum() / num_boxes
def sigmoid_focal_loss(inputs, targets, num_boxes, alpha: float = 0.25, gamma: float = 2):
"""
Loss used in RetinaNet for dense detection: https://arxiv.org/abs/1708.02002.
Args:
inputs: A float tensor of arbitrary shape.
The predictions for each example.
targets: A float tensor with the same shape as inputs. Stores the binary
classification label for each element in inputs
(0 for the negative class and 1 for the positive class).
        alpha: (optional) Weighting factor in range (0,1) to balance
               positive vs negative examples. Default = 0.25; pass a negative
               value to disable weighting.
gamma: Exponent of the modulating factor (1 - p_t) to
balance easy vs hard examples.
Returns:
Loss tensor
"""
prob = inputs.sigmoid()
ce_loss = F.binary_cross_entropy_with_logits(inputs, targets, reduction="none")
p_t = prob * targets + (1 - prob) * (1 - targets)
loss = ce_loss * ((1 - p_t) ** gamma)
if alpha >= 0:
alpha_t = alpha * targets + (1 - alpha) * (1 - targets)
loss = alpha_t * loss
return loss.mean(1).sum() / num_boxes
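if __name__ == "__main__":
    # illustrative check (added sketch): both losses take raw logits and binary targets
    # of shape (num_boxes, num_points); callers flatten the spatial dims beforehand
    import torch
    logits = torch.randn(3, 10)
    targets = (torch.rand(3, 10) > 0.5).float()
    print(dice_loss(logits, targets, num_boxes=3))
    print(sigmoid_focal_loss(logits, targets, num_boxes=3))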
| craftassist-master | python/craftassist/voxel_models/detection-transformer/models/loss_utils.py |
"""
Copyright (c) Facebook, Inc. and its affiliates.
"""
import os
import sys
import visdom
import torch
VOXEL_MODELS_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)), "../")
sys.path.append(VOXEL_MODELS_DIR)
import plot_voxels as pv
import spatial_utils as su
import training_utils as tu
class GeoscorerDatasetVisualizer:
def __init__(self, dataset):
self.vis = visdom.Visdom(server="http://localhost")
self.sp = pv.SchematicPlotter(self.vis)
self.dataset = dataset
self.vis_index = 0
self.model = None
self.opts = None
def set_model(self, model, opts=None):
self.model = model
if opts:
self.opts = opts
def visualize(self, use_model=False):
if self.vis_index == len(self.dataset):
raise Exception("No more examples to visualize in dataset")
b = self.dataset[self.vis_index]
if "schematic" in b:
self.sp.drawGeoscorerPlotly(b["schematic"])
c_sl = b["context"].size()[0]
self.vis_index += 1
self.sp.drawGeoscorerPlotly(b["context"])
self.sp.drawGeoscorerPlotly(b["seg"])
target_coord = su.index_to_coord(b["target"].item(), c_sl)
combined_voxel = su.combine_seg_context(b["seg"], b["context"], target_coord, seg_mult=3)
self.sp.drawGeoscorerPlotly(combined_voxel)
if use_model:
b = {k: t.unsqueeze(0) for k, t in b.items()}
targets, scores = tu.get_scores_from_datapoint(self.model, b, self.opts)
max_ind = torch.argmax(scores, dim=1)
pred_coord = su.index_to_coord(max_ind, c_sl)
b = {k: t.squeeze(0) for k, t in b.items()}
predicted_voxel = su.combine_seg_context(
b["seg"], b["context"], pred_coord, seg_mult=3
)
self.sp.drawGeoscorerPlotly(predicted_voxel)
| craftassist-master | python/craftassist/voxel_models/geoscorer/visualization_utils.py |
"""
Copyright (c) Facebook, Inc. and its affiliates.
"""
import torch
import torch.optim as optim
import torch.nn as nn
import directional_utils as du
def conv3x3x3(in_planes, out_planes, stride=1, bias=True):
"""3x3x3 convolution with padding"""
return nn.Conv3d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=bias)
def conv3x3x3up(in_planes, out_planes, bias=True):
"""3x3x3 convolution with padding"""
return nn.ConvTranspose3d(
in_planes, out_planes, stride=2, kernel_size=3, padding=1, output_padding=1
)
def convbn(in_planes, out_planes, stride=1, bias=True):
return nn.Sequential(
(conv3x3x3(in_planes, out_planes, stride=stride, bias=bias)),
nn.BatchNorm3d(out_planes),
nn.ReLU(inplace=True),
)
def convbnup(in_planes, out_planes, bias=True):
return nn.Sequential(
(conv3x3x3up(in_planes, out_planes, bias=bias)),
nn.BatchNorm3d(out_planes),
nn.ReLU(inplace=True),
)
# Return an sl x sl x sl x 3 tensor where each length-3 inner tensor is
# the xyz coordinates of that position
def create_xyz_tensor(sl):
incr_t = torch.tensor(range(sl), dtype=torch.float64)
z = incr_t.expand(sl, sl, sl).unsqueeze(3)
y = incr_t.unsqueeze(1).expand(sl, sl, sl).unsqueeze(3)
x = incr_t.unsqueeze(1).unsqueeze(2).expand(sl, sl, sl).unsqueeze(3)
xyz = torch.cat([x, y, z], 3)
return xyz
class ContextEmbeddingNet(nn.Module):
def __init__(self, opts, blockid_embedding):
super(ContextEmbeddingNet, self).__init__()
self.blockid_embedding_dim = opts.get("blockid_embedding_dim", 8)
output_embedding_dim = opts.get("output_embedding_dim", 8)
num_layers = opts.get("num_layers", 4)
hidden_dim = opts.get("hidden_dim", 64)
self.use_direction = opts.get("cont_use_direction", False)
self.use_xyz_from_viewer_look = opts.get("cont_use_xyz_from_viewer_look", False)
self.c_sl = opts.get("context_side_length", 32)
self.xyz = None
input_dim = self.blockid_embedding_dim
if self.use_direction:
input_dim += 5
if self.use_xyz_from_viewer_look:
input_dim += 3
self.xyz = create_xyz_tensor(self.c_sl).view(1, -1, 3)
self.viewer_look = du.get_viewer_look(self.c_sl)
if opts.get("cuda", 0):
self.xyz = self.xyz.cuda()
self.viewer_look = self.viewer_look.cuda()
# A shared embedding for the block id types
self.blockid_embedding = blockid_embedding
# Create model for converting the context into HxWxL D dim representations
self.layers = nn.ModuleList()
# B dim block id -> hidden dim, maintain input size
self.layers.append(
nn.Sequential(
nn.Conv3d(input_dim, hidden_dim, kernel_size=5, padding=2),
nn.BatchNorm3d(hidden_dim),
nn.ReLU(inplace=True),
)
)
# hidden dim -> hidden dim, maintain input size
for i in range(num_layers - 1):
self.layers.append(
nn.Sequential(
nn.Conv3d(hidden_dim, hidden_dim, kernel_size=5, padding=2),
nn.BatchNorm3d(hidden_dim),
nn.ReLU(inplace=True),
)
)
# hidden dim -> spatial embedding dim, maintain input size
self.out = nn.Linear(hidden_dim, output_embedding_dim)
# Input: [context, opt:viewer_pos, opt:viewer_look, opt:direction]
# Returns N x D x H x W x L
def forward(self, b):
bsls = b["context"].size()[1:]
if bsls[0] != self.c_sl or bsls[1] != self.c_sl or bsls[2] != self.c_sl:
raise Exception(
"Size of context should be Nx{}x{}x{} but it is {}".format(
self.c_sl, self.c_sl, self.c_sl, b["context"].size()
)
)
sizes = list(b["context"].size())
x = b["context"].view(-1)
# Get the blockid embedding for each space in the context input
z = self.blockid_embedding.weight.index_select(0, x)
z = z.float()
# Add the embedding dim B
sizes.append(self.blockid_embedding_dim)
# z: N*D x B
if self.use_xyz_from_viewer_look:
n_xyz = self.xyz.expand(sizes[0], -1, -1)
# Input: viewer pos, viewer look (N x 3), n_xyz (N x D x 3)
n_xyz = (
du.get_xyz_viewer_look_coords_batched(b["viewer_pos"], self.viewer_look, n_xyz)
.view(-1, 3)
.float()
)
z = torch.cat([z, n_xyz], 1)
# Add the xyz_look_position to the input size list
sizes[-1] += 3
if self.use_direction:
# direction: N x 5
direction = b["dir_vec"]
d = self.c_sl * self.c_sl * self.c_sl
direction = direction.unsqueeze(1).expand(-1, d, -1).contiguous().view(-1, 5)
direction = direction.float()
z = torch.cat([z, direction], 1)
# Add the direction emb to the input size list
sizes[-1] += 5
z = z.view(torch.Size(sizes))
# N x H x W x L x B ==> N x B x H x W x L
z = z.permute(0, 4, 1, 2, 3).contiguous()
for i in range(len(self.layers)):
z = self.layers[i](z)
z = z.permute(0, 2, 3, 4, 1).contiguous()
return self.out(z)
class SegmentEmbeddingNet(nn.Module):
def __init__(self, opts, blockid_embedding):
super(SegmentEmbeddingNet, self).__init__()
self.blockid_embedding_dim = opts.get("blockid_embedding_dim", 8)
spatial_embedding_dim = opts.get("spatial_embedding_dim", 8)
hidden_dim = opts.get("hidden_dim", 64)
self.s_sl = 8 # TODO make this changeable in model arch
# A shared embedding for the block id types
self.blockid_embedding = blockid_embedding
# Create model for converting the segment into 1 D dim representation
# input size: 8x8x8
self.layers = nn.ModuleList()
# B dim block id -> hidden dim, maintain input size
self.layers.append(
nn.Sequential(
nn.Conv3d(self.blockid_embedding_dim, hidden_dim, kernel_size=5, padding=2),
nn.BatchNorm3d(hidden_dim),
nn.ReLU(inplace=True),
)
)
# hidden dim -> hidden dim
# (maintain input size x2, max pool to half) x 3: 8x8x8 ==> 1x1x1
for i in range(3):
self.layers.append(
nn.Sequential(
nn.Conv3d(hidden_dim, hidden_dim, kernel_size=5, padding=2),
nn.BatchNorm3d(hidden_dim),
nn.ReLU(inplace=True),
nn.Conv3d(hidden_dim, hidden_dim, kernel_size=5, padding=2),
nn.BatchNorm3d(hidden_dim),
nn.ReLU(inplace=True),
nn.MaxPool3d(2, stride=2),
)
)
# hidden dim -> spatial embedding dim, 1x1x1
self.out = nn.Linear(hidden_dim, spatial_embedding_dim)
# Returns N x D x 1 x 1 x 1
def forward(self, b):
bsls = b["seg"].size()[1:]
if bsls[0] != self.s_sl or bsls[1] != self.s_sl or bsls[2] != self.s_sl:
raise Exception("Size of input should be Nx8x8x8 but it is {}".format(b["seg"].size()))
sizes = list(b["seg"].size())
seg = b["seg"].view(-1)
# Get the blockid embedding for each space in the context input
z = self.blockid_embedding.weight.index_select(0, seg)
# Add the embedding dim B
sizes.append(self.blockid_embedding_dim)
z = z.view(torch.Size(sizes))
# N x H x W x L x B ==> N x B x H x W x L
z = z.permute(0, 4, 1, 2, 3).contiguous()
for i in range(len(self.layers)):
z = self.layers[i](z)
z = z.permute(0, 2, 3, 4, 1).contiguous()
return self.out(z)
class SegmentDirectionEmbeddingNet(nn.Module):
def __init__(self, opts):
super(SegmentDirectionEmbeddingNet, self).__init__()
output_embedding_dim = opts.get("output_embedding_dim", 8)
self.use_viewer_pos = opts.get("seg_use_viewer_pos", False)
self.use_direction = opts.get("seg_use_direction", False)
hidden_dim = opts.get("hidden_dim", 64)
num_layers = opts.get("num_seg_dir_layers", 3)
self.seg_input_dim = opts.get("spatial_embedding_dim", 8)
self.c_sl = opts.get("context_side_length", 32)
input_dim = self.seg_input_dim
if self.use_viewer_pos:
input_dim += 3
if self.use_direction:
input_dim += 5
# Create model for converting the segment, viewer info,
self.layers = nn.ModuleList()
self.layers.append(nn.Sequential(nn.Linear(input_dim, hidden_dim), nn.ReLU()))
for i in range(num_layers - 1):
self.layers.append(nn.Sequential(nn.Linear(hidden_dim, hidden_dim), nn.ReLU()))
self.out = nn.Linear(hidden_dim, output_embedding_dim)
# In: batch dict, must have s_embeds, viewer_pos, dir_vec
# Out: N x D x 1 x 1 x 1
def forward(self, b):
if b["s_embeds"].size()[1] != self.seg_input_dim:
raise Exception("The seg spatial embed is wrong size: {}".format(b["s_embeds"].size()))
inp = [b["s_embeds"]]
normalizing_const = self.c_sl * 1.0 / 2.0
if self.use_viewer_pos:
inp.append(b["viewer_pos"].float().div_(normalizing_const))
if self.use_direction:
inp.append(b["dir_vec"].float())
z = torch.cat(inp, 1)
for i in range(len(self.layers)):
z = self.layers[i](z)
return self.out(z).unsqueeze(2).unsqueeze(3).unsqueeze(4)
class ContextSegmentScoringModule(nn.Module):
def __init__(self):
super(ContextSegmentScoringModule, self).__init__()
def forward(self, x):
context_emb = x["c_embeds"] # N x 32 x 32 x 32 x D
seg_emb = x["s_embeds"] # N x 1 x 1 x 1 x D
c_szs = context_emb.size() # N x 32 x 32 x 32 x D
batch_dim = c_szs[0]
emb_dim = c_szs[4]
num_scores = c_szs[1] * c_szs[2] * c_szs[3]
# Prepare context for the dot product
context_emb = context_emb.view(-1, emb_dim, 1) # N*32^3 x D x 1
# Prepare segment for the dot product
seg_emb = seg_emb.view(batch_dim, 1, -1) # N x 1 x D
seg_emb = seg_emb.expand(-1, num_scores, -1).contiguous() # N x 32^3 x D
seg_emb = seg_emb.view(-1, 1, emb_dim) # N*32^3 x 1 x D
# Dot product & reshape
# (K x 1 x D) bmm (K x D x 1) = (K x 1 x 1)
out = torch.bmm(seg_emb, context_emb)
return out.view(batch_dim, -1)
class spatial_emb_loss(nn.Module):
def __init__(self):
super(spatial_emb_loss, self).__init__()
        self.lsm = nn.LogSoftmax(dim=1)
self.crit = nn.NLLLoss()
# format [scores (Nx32^3), targets (N)]
def forward(self, inp):
assert len(inp) == 2
scores = inp[0]
targets = inp[1]
logsuminp = self.lsm(scores)
return self.crit(logsuminp, targets)
class rank_loss(nn.Module):
def __init__(self, margin=0.1, nneg=5):
super(rank_loss, self).__init__()
        self.nneg = nneg
self.margin = margin
self.relu = nn.ReLU()
def forward(self, inp):
# it is expected that the batch is arranged as pos neg neg ... neg pos neg ...
# with self.nneg negs per pos
assert inp.shape[0] % (self.nneg + 1) == 0
inp = inp.view(self.nneg + 1, -1)
pos = inp[0]
neg = inp[1:].contiguous()
errors = self.relu(neg - pos.repeat(self.nneg, 1) + self.margin)
return errors.mean()
class reshape_nll(nn.Module):
def __init__(self, nneg=5):
super(reshape_nll, self).__init__()
self.nneg = nneg
        self.lsm = nn.LogSoftmax(dim=1)
self.crit = nn.NLLLoss()
def forward(self, inp):
# it is expected that the batch is arranged as pos neg neg ... neg pos neg ...
# with self.nneg negs per pos
assert inp.shape[0] % (self.nneg + 1) == 0
inp = inp.view(-1, self.nneg + 1).contiguous()
logsuminp = self.lsm(inp)
o = torch.zeros(inp.size(0), device=inp.device).long()
return self.crit(logsuminp, o)
def get_optim(model_params, opts):
optim_type = opts.get("optim", "adagrad")
lr = opts.get("lr", 0.1)
momentum = opts.get("momentum", 0.0)
betas = (0.9, 0.999)
if optim_type == "adagrad":
return optim.Adagrad(model_params, lr=lr)
elif optim_type == "sgd":
return optim.SGD(model_params, lr=lr, momentum=momentum)
elif optim_type == "adam":
return optim.Adam(model_params, lr=lr, betas=betas)
else:
raise Exception("Undefined optim type {}".format(optim_type))
def create_context_segment_modules(opts):
possible_params = ["context_net", "seg_net", "seg_direction_net"]
# Add all of the modules
emb_dict = torch.nn.Embedding(opts["num_words"], opts["blockid_embedding_dim"])
tms = {
"context_net": ContextEmbeddingNet(opts, emb_dict),
"seg_net": SegmentEmbeddingNet(opts, emb_dict),
"score_module": ContextSegmentScoringModule(),
"lfn": spatial_emb_loss(),
}
if opts.get("seg_direction_net", False):
tms["seg_direction_net"] = SegmentDirectionEmbeddingNet(opts)
# Move everything to the right device
if "cuda" in opts and opts["cuda"]:
emb_dict.cuda()
for n in possible_params:
if n in tms:
tms[n].cuda()
# Setup the optimizer
all_params = []
for n in possible_params:
if n in tms:
all_params.extend(list(tms[n].parameters()))
tms["optimizer"] = get_optim(all_params, opts)
return tms
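if __name__ == "__main__":
    # small self-contained sketch (added for illustration; all sizes are assumptions):
    # score every context voxel against a single segment embedding with the batched
    # dot product in ContextSegmentScoringModule, then compute the spatial
    # cross-entropy loss against random target indices
    bsz, sl, d = 2, 32, 8
    print(create_xyz_tensor(4)[1, 2, 3])  # per-voxel xyz coordinates, here (1, 2, 3)
    scorer = ContextSegmentScoringModule()
    batch = {
        "c_embeds": torch.randn(bsz, sl, sl, sl, d),
        "s_embeds": torch.randn(bsz, 1, 1, 1, d),
    }
    scores = scorer(batch)
    print(scores.shape)  # torch.Size([2, 32768]): one score per context position
    targets = torch.randint(0, sl * sl * sl, (bsz,))
    print(spatial_emb_loss()([scores, targets]))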
| craftassist-master | python/craftassist/voxel_models/geoscorer/models.py |
"""
Copyright (c) Facebook, Inc. and its affiliates.
"""
import torch
import time
import training_utils as tu
def train_epoch(tms, DL, opts):
l = 0
error = 0
count = 0
dlit = iter(DL)
tu.set_modules(tms, train=True)
for i in range(len(DL)):
        b = next(dlit)
targets, scores = tu.get_scores_from_datapoint(tms, b, opts)
loss = tms["lfn"]([scores, targets])
max_ind = torch.argmax(scores, dim=1)
num_correct = sum(max_ind.eq(targets)).item()
error += opts["batchsize"] - num_correct
        # clear gradients from the previous batch before backprop
        tms["optimizer"].zero_grad()
        loss.backward()
        tms["optimizer"].step()
l = l + loss.detach().item()
count = count + 1
return (l / count, error / (count * opts["batchsize"]))
def run_visualization(
vis, tms, opts, checkpoint_path=None, num_examples=2, tsleep=1, loadpath=False
):
if loadpath and checkpoint_path is not None and checkpoint_path != "":
new_tms = tu.get_context_segment_trainer_modules(
opts, checkpoint_path=checkpoint_path, backup=False, verbose=True
)
else:
new_tms = tms
tu.set_modules(new_tms, train=False)
vis.set_model(new_tms)
for n in range(num_examples):
vis.visualize(use_model=True)
time.sleep(tsleep)
if __name__ == "__main__":
parser = tu.get_train_parser()
opts = vars(parser.parse_args())
# Setup the data, models and optimizer
dataset, dataloader = tu.setup_dataset_and_loader(opts)
tms = tu.get_context_segment_trainer_modules(
opts, opts["checkpoint"], backup=opts["backup"], verbose=True
)
# The context and seg net were already moved
if opts["cuda"] == 1:
tms["score_module"].cuda()
tms["lfn"].cuda()
# Setup visualization
vis = None
if opts["visualize_epochs"]:
from visualization_utils import GeoscorerDatasetVisualizer
vis = GeoscorerDatasetVisualizer(dataset)
vis.set_model(tms, opts)
run_visualization(
vis, tms, opts, checkpoint_path=None, num_examples=2, tsleep=1, loadpath=False
)
# Run training
for i in range(opts["nepoch"]):
train_loss, train_error = train_epoch(tms, dataloader, opts)
tu.pretty_log(
"train loss {:<5.4f} error {:<5.2f} {}".format(train_loss, train_error * 100, i)
)
if opts["checkpoint"] != "":
metadata = {"epoch": i, "train_loss": train_loss, "train_error": train_error}
tu.save_checkpoint(tms, metadata, opts, opts["checkpoint"])
if opts["visualize_epochs"]:
run_visualization(vis, tms, opts, opts["checkpoint"], 2, 1, False)
| craftassist-master | python/craftassist/voxel_models/geoscorer/train_spatial_emb.py |
"""
Copyright (c) Facebook, Inc. and its affiliates.
"""
import json
dataset_config = {
"inst_dir": [
{"drop_perc": -1.0, "ground_type": None},
{"drop_perc": -1.0, "ground_type": "flat"},
],
"shape_dir": [
{"ground_type": "flat", "max_shift": None},
{"ground_type": "flat", "max_shift": 6},
{"ground_type": "hilly", "max_shift": 6},
],
"autogen_glue_cubes_dir": [
{"fixed_center": True, "ground_type": None},
{"fixed_center": True, "ground_type": "flat"},
{"fixed_center": True, "ground_type": "hilly"},
],
}
filename = "run_config.json"
with open(filename, "w+") as f:
json.dump(dataset_config, f)
print("dumped", filename)
| craftassist-master | python/craftassist/voxel_models/geoscorer/config_maker.py |
"""
Copyright (c) Facebook, Inc. and its affiliates.
"""
import argparse
from training_utils import get_context_segment_trainer_modules
from spatial_utils import index_to_coord
class ContextSegmentMergerWrapper(object):
"""
Wrapper for the geoscorer
"""
def __init__(self, models_path):
if models_path is None:
raise Exception("Geoscorer wrapper requires a model path")
self.opts = {}
tms = get_context_segment_trainer_modules(
opts=self.opts, checkpoint_path=models_path, backup=False, verbose=False
)
self.context_net = tms["context_net"]
self.seg_net = tms["seg_net"]
self.score_module = tms["score_module"]
self.context_sl = 32
self.seg_sl = 8
self.context_net.eval()
self.seg_net.eval()
self.score_module.eval()
def segment_context_to_pos(self, segment, context):
# Coords are in Z, X, Y, so put segment into same coords
segment = segment.permute(1, 2, 0).contiguous()
batch = {"context": context.unsqueeze(0), "seg": segment.unsqueeze(0)}
batch["c_embeds"] = self.context_net(batch)
batch["s_embeds"] = self.seg_net(batch)
scores = self.score_module(batch)
index = scores[0].flatten().max(0)[1]
target_coord = index_to_coord(index.item(), self.context_sl)
# Then take final coord back into X, Y, Z coords
final_target_coord = (target_coord[2], target_coord[0], target_coord[1])
return final_target_coord
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--models_path", type=str, help="path to geoscorer models")
args = parser.parse_args()
geoscorer = ContextSegmentMergerWrapper(args.models_path)
| craftassist-master | python/craftassist/voxel_models/geoscorer/geoscorer_wrapper.py |
"""
Copyright (c) Facebook, Inc. and its affiliates.
"""
import math
import os
import random
import pickle
import torch
import torch.utils.data
from collections import defaultdict
import spatial_utils as su
import directional_utils as du
def parse_instance_data(inst_data):
parsed_instance_data = []
for h in inst_data:
S, segs, labels, _ = h
segs = segs.astype("int32")
blocks = list(zip(*S.nonzero()))
# First convert the schematic into sparse segment info
offsets = [[i, j, k] for i in range(-1, 2) for j in range(-1, 2) for k in range(-1, 2)]
sizes = [len(segs), len(segs[0]), len(segs[0][0])]
instances = defaultdict(list)
touching = defaultdict(set)
for b in blocks:
b_w_id = [b[0], b[2], b[1]]
b_w_id.append(S[b])
seg_id = segs[b]
instances[seg_id].append(b_w_id)
for off in offsets:
                nc = tuple([coord + o for coord, o in zip(b, off)])
# check out of bounds
if not all(e > 0 for e in nc) or not all([nc[i] < sizes[i] for i in range(3)]):
continue
if segs[nc] != 0 and segs[nc] != seg_id:
touching[seg_id].add(segs[nc])
# Then get the width/height/depth metadata
metadata = {}
for i, blocks in instances.items():
maxs = [0, 0, 0]
mins = [sizes[i] for i in range(3)]
for b in blocks:
maxs = [max(b[i], maxs[i]) for i in range(3)]
mins = [min(b[i], mins[i]) for i in range(3)]
metadata[i] = {"size": [x - n + 1 for n, x in zip(mins, maxs)], "label": labels[i]}
# For now remove houses where there are no touching components
# this is only one house
if len(touching) == 0:
continue
parsed_instance_data.append(
{"segments": instances, "touching": touching, "metadata": metadata}
)
return parsed_instance_data
def parse_segments_into_file(dpath, save_path):
inst_data = pickle.load(open(dpath, "rb"))
parsed_instance_data = parse_instance_data(inst_data)
pickle.dump(parsed_instance_data, open(save_path, "wb+"))
def convert_tuple_to_block(b):
if b[3] < 0 or b[3] > 255:
raise Exception("block id out of bounds")
return ((b[0], b[1], b[2]), (b[3], 0))
def get_seg_context_sparse(seg_data, drop_perc, rand_drop=True):
# first choose a house
sd = random.choice(seg_data)
# then drop some segs
if drop_perc < 0:
drop_perc = random.randint(0, 80) * 1.0 / 100
seg_ids = list(sd["segments"].keys())
random.shuffle(seg_ids)
num_segs = len(seg_ids)
to_keep = math.ceil(num_segs - num_segs * drop_perc)
keep_ids = seg_ids[:to_keep]
# choose a remaining seg to get a connected one
conn_to_target_id = random.choice(keep_ids)
if conn_to_target_id not in sd["touching"]:
conn_to_target_id = random.choice(list(sd["touching"].keys()))
keep_ids.append(conn_to_target_id)
# get a connected seg as target
target_seg_id = random.choice(list(sd["touching"][conn_to_target_id]))
keep_ids = [k for k in keep_ids if k != target_seg_id]
# make segment out of blocks from target_seg
seg_sparse = [convert_tuple_to_block(b) for b in sd["segments"][target_seg_id]]
# make context out of blocks from keep_ids
context_sparse = []
for i in set(keep_ids):
context_sparse += [convert_tuple_to_block(b) for b in sd["segments"][i]]
return seg_sparse, context_sparse
def get_inst_seg_example(seg_data, drop_perc, c_sl, s_sl, use_id, ground_type=None):
seg_sparse, context_sparse = get_seg_context_sparse(seg_data, drop_perc)
target_coord, shift_vec, shifted_seg_sparse = su.convert_sparse_context_seg_to_target_coord_shifted_seg(
context_sparse, seg_sparse, c_sl, s_sl
)
if ground_type is not None:
max_z = max([c[0][2] for c in context_sparse] + [s[0][2] for s in shifted_seg_sparse])
if max_z < c_sl - 1:
context_sparse = [((c[0][0], c[0][1], c[0][2] + 1), c[1]) for c in context_sparse]
shifted_seg_sparse = [
((s[0][0], s[0][1], s[0][2] + 1), s[1]) for s in shifted_seg_sparse
]
target_coord[2] += 1
su.add_ground_to_context(context_sparse, target_coord, flat=True, random_height=False)
schem_sparse = seg_sparse + context_sparse
example = su.convert_sparse_context_seg_target_to_example(
context_sparse, shifted_seg_sparse, target_coord, c_sl, s_sl, use_id, schem_sparse
)
return example
# Returns three tensors: 32x32x32 context, 8x8x8 segment, 1 target
# TODO: Note that 1/7 segments are larger than 8x8x8
# Only 1/70 are larger than 16x16x16, maybe move to this size seg
class SegmentContextInstanceData(torch.utils.data.Dataset):
def __init__(
self,
data_dir="/checkpoint/drotherm/minecraft_dataset/vision_training/training3/",
nexamples=10000,
context_side_length=32,
seg_side_length=8,
useid=False,
use_direction=False,
drop_perc=0.8,
ground_type=None,
):
self.use_direction = use_direction
self.c_sl = context_side_length
self.s_sl = seg_side_length
self.num_examples = nexamples
self.useid = useid
self.drop_perc = drop_perc
self.ground_type = ground_type
# Load the parsed data
parsed_file = os.path.join(data_dir, "training_parsed.pkl")
if not os.path.exists(parsed_file):
print(">> Redo inst seg parse")
data_path = os.path.join(data_dir, "training_data.pkl")
parse_segments_into_file(data_path, parsed_file)
self.seg_data = pickle.load(open(parsed_file, "rb"))
def _get_example(self):
if not self.use_direction:
return get_inst_seg_example(
self.seg_data, self.drop_perc, self.c_sl, self.s_sl, self.useid, self.ground_type
)
else:
example = get_inst_seg_example(
self.seg_data, self.drop_perc, self.c_sl, self.s_sl, self.useid, self.ground_type
)
viewer_pos, viewer_look = du.get_random_viewer_info(self.c_sl)
target_coord = torch.tensor(su.index_to_coord(example["target"], self.c_sl))
example["dir_vec"] = du.get_sampled_direction_vec(
viewer_pos, viewer_look, target_coord
)
example["viewer_pos"] = viewer_pos
return example
def __getitem__(self, index):
return self._get_example()
def __len__(self):
return abs(self.num_examples)
if __name__ == "__main__":
import argparse
from visualization_utils import GeoscorerDatasetVisualizer
parser = argparse.ArgumentParser()
parser.add_argument(
"--use_direction", action="store_true", help="use direction in example creation"
)
parser.add_argument("--useid", action="store_true", help="should we use the block id")
parser.add_argument("--drop_perc", type=float, default=0.8, help="should we use the block id")
parser.add_argument("--ground_type", type=str, default=None, help="ground type")
opts = parser.parse_args()
dataset = SegmentContextInstanceData(
nexamples=3,
use_direction=opts.use_direction,
drop_perc=opts.drop_perc,
useid=opts.useid,
ground_type=opts.ground_type,
)
vis = GeoscorerDatasetVisualizer(dataset)
for n in range(len(dataset)):
vis.visualize()
| craftassist-master | python/craftassist/voxel_models/geoscorer/inst_seg_dataset.py |
"""
Copyright (c) Facebook, Inc. and its affiliates.
"""
import numpy as np
import random
import torch
import torch.utils.data
from shape_dataset import SegmentContextShapeData, SegmentContextShapeDirData
from inst_seg_dataset import SegmentContextInstanceData
from autogen_dataset import SegmentContextGlueCubesData
# Returns three tensors: 32x32x32 context, 8x8x8 segment, 1 target
class SegmentContextData(torch.utils.data.Dataset):
def __init__(
self,
nexamples=100000,
context_side_length=32,
seg_side_length=8,
useid=False,
ratios={"shape": 1.0},
extra_params={},
config=None,
):
self.c_sl = context_side_length
self.s_sl = seg_side_length
self.num_examples = nexamples
self.useid = useid
self.examples = []
self.extra_params = extra_params
self.ds_names = [k for k, p in ratios.items() if p > 0]
self.ds_probs = [ratios[name] for name in self.ds_names]
if sum(self.ds_probs) != 1.0:
raise Exception("Sum of probs must equal 1.0")
if config is None:
self.datasets = dict(
[(name, [self._get_dataset(name, extra_params)]) for name in self.ds_names]
)
else:
self.datasets = {}
for name in self.ds_names:
if name not in config:
continue
self.datasets[name] = []
for params in config[name]:
self.datasets[name].append(self._get_dataset(name, params))
print("Datasets")
for name, dss in self.datasets.items():
print(" ", name, len(dss))
def _get_dataset(self, name, extra_params):
if name == "inst_dir":
drop_perc = extra_params.get("drop_perc", 0.0)
ground_type = extra_params.get("ground_type", None)
return SegmentContextInstanceData(
use_direction=True,
nexamples=self.num_examples,
drop_perc=drop_perc,
context_side_length=self.c_sl,
seg_side_length=self.s_sl,
useid=self.useid,
ground_type=ground_type,
)
if name == "inst":
drop_perc = extra_params.get("drop_perc", 0.0)
return SegmentContextInstanceData(
use_direction=False,
nexamples=self.num_examples,
drop_perc=drop_perc,
context_side_length=self.c_sl,
seg_side_length=self.s_sl,
useid=self.useid,
)
if name == "shape":
return SegmentContextShapeData(
nexamples=self.num_examples,
context_side_length=self.c_sl,
seg_side_length=self.s_sl,
useid=self.useid,
)
if name == "shape_dir":
ground_type = extra_params.get("ground_type", None)
            max_shift = extra_params.get("max_shift", 0)
return SegmentContextShapeDirData(
nexamples=self.num_examples,
context_side_length=self.c_sl,
seg_side_length=self.s_sl,
useid=self.useid,
ground_type=ground_type,
max_shift=max_shift,
)
if name == "autogen_glue_cubes_dir":
type_name = extra_params.get("type_name", "random")
fixed_cube_size = extra_params.get("fixed_cube_size", None)
fixed_center = extra_params.get("fixed_center", False)
ground_type = extra_params.get("ground_type", None)
return SegmentContextGlueCubesData(
nexamples=self.num_examples,
context_side_length=self.c_sl,
seg_side_length=self.s_sl,
useid=self.useid,
type_name=type_name,
use_direction=True,
fixed_cube_size=fixed_cube_size,
fixed_center=fixed_center,
ground_type=ground_type,
)
if name == "autogen_glue_cubes":
type_name = extra_params.get("type_name", "random")
return SegmentContextGlueCubesData(
nexamples=self.num_examples,
context_side_length=self.c_sl,
seg_side_length=self.s_sl,
useid=self.useid,
type_name=type_name,
use_direction=False,
)
raise Exception("No dataset with name {}".format(name))
def _get_example(self):
ds_name = np.random.choice(self.ds_names, p=self.ds_probs)
dataset = random.choice(self.datasets[ds_name])
return dataset._get_example()
def __getitem__(self, index):
return self._get_example()
def __len__(self):
return self.num_examples
if __name__ == "__main__":
from visualization_utils import GeoscorerDatasetVisualizer
dataset = SegmentContextData(
nexamples=3,
useid=False,
ratios={"autogen_glue_cubes": 1.0},
extra_params={"min_seg_size": 6},
)
vis = GeoscorerDatasetVisualizer(dataset)
for n in range(len(dataset)):
vis.visualize()
| craftassist-master | python/craftassist/voxel_models/geoscorer/combined_dataset.py |
"""
Copyright (c) Facebook, Inc. and its affiliates.
"""
import numpy as np
import random
import os
import sys
import argparse
import torch
import string
import json
from shutil import copyfile
from inspect import currentframe, getframeinfo
from datetime import datetime
import models
import combined_dataset as cd
"""
General Training Utils
"""
def pretty_log(log_string):
cf = currentframe().f_back
filename = getframeinfo(cf).filename.split("/")[-1]
print(
"{} {}:{} {}".format(
datetime.now().strftime("%m/%d/%Y %H:%M:%S"), filename, cf.f_lineno, log_string
)
)
sys.stdout.flush()
def prepare_variables(b, opts):
X = b.long()
if opts["cuda"]:
X = X.cuda()
return X
def set_modules(tms, train):
for m in ["context_net", "seg_net", "score_module", "seg_direction_net"]:
if m not in tms:
continue
if train:
tms[m].train()
else:
tms[m].eval()
def multitensor_collate_fxn(x):
regroup_tensors = {n: [] for n in x[0].keys()}
use_names = list(x[0].keys())
for t_dict in x:
use_names = [n for n in use_names if n in t_dict]
for n, t in t_dict.items():
if n not in regroup_tensors:
continue
regroup_tensors[n].append(t.unsqueeze(0))
use_names = set(use_names)
batched_tensors = {
n: torch.cat([t.float() for t in tl])
for n, tl in regroup_tensors.items()
if n in use_names
}
return batched_tensors
def get_dataloader(dataset, opts, collate_fxn):
def init_fn(wid):
np.random.seed(torch.initial_seed() % (2 ** 32))
return torch.utils.data.DataLoader(
dataset,
batch_size=opts["batchsize"],
shuffle=True,
pin_memory=True,
drop_last=True,
num_workers=opts["num_workers"],
worker_init_fn=init_fn,
collate_fn=collate_fxn,
)
def setup_dataset_and_loader(opts):
extra_params = {
"drop_perc": opts.get("drop_perc", 0.0),
"fixed_cube_size": opts.get("fixed_cube_size", None),
"fixed_center": opts.get("fixed_center", False),
"max_shift": opts.get("max_shift", 0),
"ground_type": opts.get("ground_type", None),
}
config = opts.get("dataset_config", None)
if config is not None:
print("loaded config from", config)
with open(config, "r") as f:
config = json.load(f)
dataset = cd.SegmentContextData(
nexamples=opts["epochsize"],
useid=opts["useid"],
extra_params=extra_params,
ratios=parse_dataset_ratios(opts),
config=config,
)
dataloader = get_dataloader(dataset=dataset, opts=opts, collate_fxn=multitensor_collate_fxn)
return dataset, dataloader
def get_scores_from_datapoint(tms, batch, opts):
batch = {k: prepare_variables(t, opts) for k, t in batch.items()}
batch["target"] = batch["target"].squeeze()
tms["optimizer"].zero_grad()
c_embeds = tms["context_net"](batch)
s_embeds = tms["seg_net"](batch)
if "seg_direction_net" in tms:
if s_embeds.dim() > 2:
s_embeds = s_embeds.squeeze()
if s_embeds.dim() == 1:
s_embeds = s_embeds.unsqueeze(0)
batch["s_embeds"] = s_embeds
s_embeds = tms["seg_direction_net"](batch)
scores = tms["score_module"]({"c_embeds": c_embeds, "s_embeds": s_embeds})
return batch["target"], scores
"""
Checkpointing
"""
def check_and_print_opts(curr_opts, old_opts):
mismatches = []
print(">> Options:")
for opt, val in curr_opts.items():
if opt and val:
print(" - {:>20}: {:<30}".format(opt, val))
else:
print(" - {}: {}".format(opt, val))
if old_opts and opt in old_opts and old_opts[opt] != val:
mismatches.append((opt, val, old_opts[opt]))
print("")
if len(mismatches) > 0:
print(">> Mismatching options:")
for m in mismatches:
print(" - {:>20}: new '{:<10}' != old '{:<10}'".format(m[0], m[1], m[2]))
print("")
return True if len(mismatches) > 0 else False
def load_context_segment_checkpoint(checkpoint_path, opts, backup=True, verbose=False):
if not os.path.isfile(checkpoint_path):
check_and_print_opts(opts, None)
return {}
if backup:
random_uid = "".join(
[random.choice(string.ascii_letters + string.digits) for n in range(4)]
)
backup_path = checkpoint_path + ".backup_" + random_uid
copyfile(checkpoint_path, backup_path)
print(">> Backing up checkpoint before loading and overwriting:")
print(" {}\n".format(backup_path))
checkpoint = torch.load(checkpoint_path)
if verbose:
print(">> Loading model from checkpoint {}".format(checkpoint_path))
for opt, val in checkpoint["metadata"].items():
print(" - {:>20}: {:<30}".format(opt, val))
print("")
check_and_print_opts(opts, checkpoint["options"])
checkpoint_opts_dict = checkpoint["options"]
if type(checkpoint_opts_dict) is not dict:
checkpoint_opts_dict = vars(checkpoint_opts_dict)
for opt, val in checkpoint_opts_dict.items():
opts[opt] = val
print(opts)
trainer_modules = models.create_context_segment_modules(opts)
trainer_modules["context_net"].load_state_dict(checkpoint["model_state_dicts"]["context_net"])
trainer_modules["seg_net"].load_state_dict(checkpoint["model_state_dicts"]["seg_net"])
trainer_modules["optimizer"].load_state_dict(checkpoint["optimizer_state_dict"])
if opts.get("seg_direction_net", False):
trainer_modules["seg_direction_net"].load_state_dict(
checkpoint["model_state_dicts"]["seg_direction_net"]
)
return trainer_modules
def get_context_segment_trainer_modules(opts, checkpoint_path=None, backup=False, verbose=False):
trainer_modules = load_context_segment_checkpoint(checkpoint_path, opts, backup, verbose)
if len(trainer_modules) == 0:
trainer_modules = models.create_context_segment_modules(opts)
return trainer_modules
def save_checkpoint(tms, metadata, opts, path):
model_dict = {"context_net": tms["context_net"], "seg_net": tms["seg_net"]}
if opts.get("seg_direction_net", False):
model_dict["seg_direction_net"] = tms["seg_direction_net"]
# Add all models to dicts and move state to cpu
state_dicts = {}
for model_name, model in model_dict.items():
state_dicts[model_name] = model.state_dict()
for n, s in state_dicts[model_name].items():
state_dicts[model_name][n] = s.cpu()
# Save to path
torch.save(
{
"metadata": metadata,
"model_state_dicts": state_dicts,
"optimizer_state_dict": tms["optimizer"].state_dict(),
"options": opts,
},
path,
)
"""
Parser Arguments
"""
def get_train_parser():
parser = argparse.ArgumentParser()
# Base Training Flags
parser.add_argument("--cuda", type=int, default=1, help="0 for cpu")
parser.add_argument("--batchsize", type=int, default=64, help="batchsize")
parser.add_argument(
"--epochsize", type=int, default=1000, help="number of examples in an epoch"
)
parser.add_argument("--nepoch", type=int, default=1000, help="number of epochs")
parser.add_argument("--lr", type=float, default=0.1, help="step size for net")
parser.add_argument(
"--optim", type=str, default="adagrad", help="optim type to use (adagrad|sgd|adam)"
)
parser.add_argument("--momentum", type=float, default=0.0, help="momentum")
parser.add_argument("--checkpoint", default="", help="where to save model")
parser.add_argument("--num_workers", type=int, default=4, help="number of dataloader workers")
parser.add_argument(
"--backup", action="store_true", help="backup the checkpoint path before saving to it"
)
parser.add_argument(
"--visualize_epochs", action="store_true", help="use visdom to visualize progress"
)
# Model Flags
parser.add_argument("--hidden_dim", type=int, default=64, help="size of hidden dim")
parser.add_argument("--num_layers", type=int, default=3, help="num layers")
parser.add_argument(
"--blockid_embedding_dim", type=int, default=8, help="size of blockid embedding"
)
parser.add_argument("--context_sidelength", type=int, default=32, help="size of cube")
parser.add_argument("--useid", action="store_true", help="use blockid")
parser.add_argument(
"--num_words", type=int, default=256, help="number of words for the blockid embeds"
)
# Dataset Flags
parser.add_argument(
"--dataset_ratios", type=str, default="shape:1.0", help="comma separated name:prob"
)
parser.add_argument("--fixed_cube_size", type=int, default=None, help="fixed_cube_size")
parser.add_argument("--fixed_center", action="store_true", help="fixed_center")
parser.add_argument(
"--drop_perc", type=float, default=0.5, help="perc segs to drop from inst_seg"
)
parser.add_argument(
"--max_shift", type=int, default=6, help="max amount to offset shape_dir target"
)
parser.add_argument(
"--ground_type", type=str, default=None, help="ground type to include in datasets"
)
parser.add_argument(
"--dataset_config", type=str, default=None, help="for more complex training"
)
# Directional Placement Flags
parser.add_argument("--spatial_embedding_dim", type=int, default=8, help="size of spatial emb")
parser.add_argument("--output_embedding_dim", type=int, default=8, help="size of output emb")
parser.add_argument("--seg_direction_net", action="store_true", help="use segdirnet module")
parser.add_argument("--seg_use_viewer_pos", action="store_true", help="use viewer pos in seg")
parser.add_argument("--seg_use_direction", action="store_true", help="use direction in seg")
parser.add_argument("--num_seg_dir_layers", type=int, default=3, help="num segdir net layers")
parser.add_argument(
"--cont_use_direction", action="store_true", help="use direction in context"
)
parser.add_argument(
"--cont_use_xyz_from_viewer_look",
action="store_true",
help="use xyz position relative to viewer look in context emb",
)
return parser
def parse_dataset_ratios(opts):
ratios_str = opts["dataset_ratios"]
ratio = {}
try:
l_s = ratios_str.split(",")
print("\n>> Using datasets in the following proportions:")
for t in l_s:
name, prob = t.split(":")
ratio[name] = float(prob)
print(" - {}: {}".format(name, prob))
print("")
except:
raise Exception("Failed to parse the dataset ratio string {}".format(ratios_str))
return ratio
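# Example (illustrative): parse_dataset_ratios({"dataset_ratios": "shape:0.5,inst:0.5"})
# returns {"shape": 0.5, "inst": 0.5}; SegmentContextData later samples one dataset per
# example according to these probabilities.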
| craftassist-master | python/craftassist/voxel_models/geoscorer/training_utils.py |
"""
Copyright (c) Facebook, Inc. and its affiliates.
"""
import numpy as np
import torch
import os
import sys
import random
CRAFTASSIST_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)), "../../")
TEST_DIR = os.path.join(CRAFTASSIST_DIR, "test/")
sys.path.append(CRAFTASSIST_DIR)
sys.path.append(TEST_DIR)
from world import World, Opt, flat_ground_generator
"""
Generic Spatial Utils
"""
def get_bounds(sparse_voxel):
"""
    The input should be either a schematic, i.e. a list of ((x, y, z), (block_id, ?)) tuples,
    or a plain list of (x, y, z) coordinates.
    Returns the bounds as (min_x, max_x, min_y, max_y, min_z, max_z).
"""
if len(sparse_voxel) == 0:
return [0, 0, 0, 0, 0, 0]
# A schematic
if len(sparse_voxel[0]) == 2 and len(sparse_voxel[0][0]) == 3 and len(sparse_voxel[0][1]) == 2:
x, y, z = list(zip(*list(zip(*sparse_voxel))[0]))
    # A list of coordinates
elif len(sparse_voxel[0]) == 3:
x, y, z = list(zip(*sparse_voxel))
else:
raise Exception("Unknown schematic format")
return min(x), max(x), min(y), max(y), min(z), max(z)
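# Example for get_bounds (illustrative):
# get_bounds([((1, 2, 3), (1, 0)), ((4, 0, 2), (2, 0))]) returns (1, 4, 0, 2, 2, 3).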
def get_side_lengths(bounds):
"""
Bounds should be a list of [min_x, max_x, min_y, max_y, min_z, max_z].
Returns a list of the side lengths.
"""
return [x + 1 for x in (bounds[1] - bounds[0], bounds[3] - bounds[2], bounds[5] - bounds[4])]
def get_bounds_and_sizes(sparse_voxel):
bounds = get_bounds(sparse_voxel)
side_lengths = get_side_lengths(bounds)
return bounds, side_lengths
def coord_to_index(coord, sl):
"""
Takes a 3D coordinate in a cube and the cube side length.
Returns index in flattened 3D array.
"""
return coord[0] * sl * sl + coord[1] * sl + coord[2]
def index_to_coord(index, sl):
"""
Takes an index into a flattened 3D array and its side length.
Returns the coordinate in the cube.
"""
coord = []
two_d_slice_size = sl * sl
coord.append(index // two_d_slice_size)
remaining = index % two_d_slice_size
coord.append(remaining // sl)
coord.append(remaining % sl)
return coord
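# Example for coord_to_index / index_to_coord (illustrative): with sl == 32,
# coord_to_index([1, 2, 3], 32) == 1 * 32 * 32 + 2 * 32 + 3 == 1091 and
# index_to_coord(1091, 32) == [1, 2, 3]; the two functions are inverses of each other.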
def shift_sparse_voxel_to_origin(sparse_voxel):
"""
Takes a segment, described as a list of tuples of the form:
((x, y, z), (block_id, ?))
Returns the segment in the same form, shifted to the origin, and the shift vec
"""
bounds = get_bounds(sparse_voxel)
shift_zero_vec = [-bounds[0], -bounds[2], -bounds[4]]
new_voxel = []
for s in sparse_voxel:
new_voxel.append((tuple([sum(x) for x in zip(s[0], shift_zero_vec)]), s[1]))
return new_voxel, shift_zero_vec
# outputs a dense voxel rep (np array) from a sparse one.
# size should be a tuple of (H, W, D) for the desired voxel representation
# useid=True puts the block id into the voxel representation,
# otherwise put a 1
def densify(blocks, size, center=(0, 0, 0), useid=False):
V = np.zeros((size[0], size[1], size[2]), dtype="int32")
offsets = (size[0] // 2 - center[0], size[1] // 2 - center[1], size[2] // 2 - center[2])
for b in blocks:
x = b[0][0] + offsets[0]
y = b[0][1] + offsets[1]
z = b[0][2] + offsets[2]
if x >= 0 and y >= 0 and z >= 0 and x < size[0] and y < size[1] and z < size[2]:
if type(b[1]) is int:
V[x, y, z] = b[1]
else:
V[x, y, z] = b[1][0]
if not useid:
V[V > 0] = 1
return V, offsets
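# Example for densify (illustrative): densify([((0, 0, 0), (1, 0))], (3, 3, 3))
# centers the single block, returning a 3x3x3 array with a 1 at [1, 1, 1] and
# offsets (1, 1, 1); with useid=True the stored value would be the block id instead of 1.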
def get_dense_array_from_sl(sparse_shape, sl, useid):
center = [sl // 2, sl // 2, sl // 2]
    shape_dense, _ = densify(sparse_shape, [sl, sl, sl], center=center, useid=useid)
return shape_dense
"""
Geoscorer Specific Spatial Utils
"""
def combine_seg_context(seg, context, seg_shift, seg_mult=1):
c_sl = context.size()[0]
s_sl = seg.size()[0]
completed_context = context.clone()
# Calculate the region to copy over, sometimes the segment
# falls outside the range of the context bounding box
    cs = tuple(slice(s, min(s + s_sl, c_sl)) for s in seg_shift)
    ss = tuple(slice(0, s_sl - max(0, s + s_sl - c_sl)) for s in seg_shift)
    completed_context[cs] = seg_mult * seg[ss] + context[cs]
return completed_context
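# Example for combine_seg_context (illustrative): with c_sl == 32, s_sl == 8 and
# seg_shift == (28, 0, 0), the copied region covers x in [28, 32) of the context and
# x in [0, 4) of the segment, i.e. the part of the segment falling outside the
# context box is clipped.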
def convert_sparse_context_seg_to_target_coord_shifted_seg(context_sparse, seg_sparse, c_sl, s_sl):
shifted_seg_sparse, shift_vec = shift_sparse_voxel_to_origin(seg_sparse)
target_coord = [-x for x in shift_vec]
return target_coord, shift_vec, shifted_seg_sparse
def convert_sparse_context_seg_target_to_example(
context_sparse, shifted_seg_sparse, target_coord, c_sl, s_sl, useid, schem_sparse=None
):
context_dense = get_dense_array_from_sl(context_sparse, c_sl, useid)
seg_dense = get_dense_array_from_sl(shifted_seg_sparse, s_sl, useid)
target_index = coord_to_index(target_coord, c_sl)
example = {
"context": torch.from_numpy(context_dense),
"seg": torch.from_numpy(seg_dense),
"target": torch.tensor([target_index]),
}
if schem_sparse:
schem_dense = get_dense_array_from_sl(schem_sparse, c_sl, useid)
example["schematic"] = torch.from_numpy(schem_dense)
return example
def convert_sparse_context_seg_to_example(
context_sparse, seg_sparse, c_sl, s_sl, useid, schem_sparse=None
):
context_dense = get_dense_array_from_sl(context_sparse, c_sl, useid)
shifted_seg_sparse, shift_vec = shift_sparse_voxel_to_origin(seg_sparse)
seg_dense = get_dense_array_from_sl(shifted_seg_sparse, s_sl, useid)
target_coord = [-x for x in shift_vec]
target_index = coord_to_index(target_coord, c_sl)
example = {
"context": torch.from_numpy(context_dense),
"seg": torch.from_numpy(seg_dense),
"target": torch.tensor([target_index]),
}
if schem_sparse:
schem_dense = get_dense_array_from_sl(schem_sparse, c_sl, useid)
example["schematic"] = torch.from_numpy(schem_dense)
return example
def add_ground_to_context(context_sparse, target_coord, flat=True, random_height=True):
min_z = min([c[0][2] for c in context_sparse] + [target_coord[2].item()])
max_ground_depth = min_z
if max_ground_depth == 0:
return
if random_height:
ground_depth = random.randint(1, max_ground_depth)
else:
ground_depth = max_ground_depth
pos_z = 63
shift = (-16, pos_z - 1 - ground_depth, -16)
spec = {
"players": [],
"item_stacks": [],
"mobs": [],
"agent": {"pos": (0, pos_z, 0)},
"coord_shift": shift,
}
world_opts = Opt()
world_opts.sl = 32
if flat or max_ground_depth == 1:
spec["ground_generator"] = flat_ground_generator
spec["ground_args"] = {"ground_depth": ground_depth}
else:
world_opts.avg_ground_height = max_ground_depth // 2
world_opts.hill_scale = max_ground_depth // 2
world = World(world_opts, spec)
ground_blocks = []
for l, d in world.blocks_to_dict().items():
shifted_l = tuple([l[i] - shift[i] for i in range(3)])
xzy_to_xyz = [shifted_l[0], shifted_l[2], shifted_l[1]]
ground_blocks.append((xzy_to_xyz, d))
context_sparse += ground_blocks
| craftassist-master | python/craftassist/voxel_models/geoscorer/spatial_utils.py |
"""
Copyright (c) Facebook, Inc. and its affiliates.
"""
import numpy as np
import os
import sys
import random
import torch
import torch.utils.data
CRAFTASSIST_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)), "../../")
sys.path.append(CRAFTASSIST_DIR)
import shapes
import shape_helpers as sh
import spatial_utils as su
import directional_utils as du
# subshapes by everything in a l1 or l2 ball from a point.
# put pairs + triples of shapes in frame, sometimes one partially built
PERM = torch.randperm(256)
r = np.arange(0, 256) / 256
CMAP = np.stack((r, np.roll(r, 80), np.roll(r, 160)))
MIN_SIZE = 4
def get_shape(name="random", max_size=20, opts=None):
if name != "random" and name not in SHAPENAMES:
print(">> Shape name {} not in dict, choosing randomly".format(name))
name = "random"
if name == "random":
name = random.choice(SHAPENAMES)
while name in ("HOLLOW_RECTANGLE", "RECTANGULOID_FRAME", "HOLLOW_TRIANGLE"):
name = random.choice(SHAPENAMES)
if not opts:
opts = SHAPE_HELPERS[name](max_size)
opts["labelme"] = False
return SHAPEFNS[name](**opts), opts, name
def options_cube(max_size):
return {"size": np.random.randint(MIN_SIZE, max_size + 1)}
def options_hollow_cube(max_size):
opts = {}
opts["size"] = np.random.randint(MIN_SIZE, max_size + 1)
if opts["size"] < 5:
opts["thickness"] = 1
else:
opts["thickness"] = np.random.randint(1, opts["size"] - 3)
return opts
def options_rectanguloid(max_size):
return {"size": np.random.randint(MIN_SIZE, max_size + 1, size=3)}
def options_hollow_rectanguloid(max_size):
opts = {}
opts["size"] = np.random.randint(MIN_SIZE, max_size + 1, size=3)
ms = min(opts["size"])
opts["thickness"] = np.random.randint(1, ms - 3 + 1)
return opts
def options_sphere(max_size):
min_r = MIN_SIZE // 2
max_r = max_size // 2
return {"radius": np.random.randint(min_r, max_r + 1)}
def options_spherical_shell(max_size):
min_r = MIN_SIZE // 2
max_r = max_size // 2
opts = {}
if max_r <= 5:
opts["radius"] = np.random.randint(min_r, max_r + 1)
opts["thickness"] = 1
else:
opts["radius"] = np.random.randint(5, max_r + 1)
opts["thickness"] = np.random.randint(1, opts["radius"] - 3)
return opts
# TODO: can we make this work??
def options_square_pyramid(max_size):
min_r = MIN_SIZE
max_r = max_size
opts = {}
opts["radius"] = np.random.randint(min_r, max_r + 1)
opts["slope"] = np.random.rand() * 0.4 + 0.8
fullheight = opts["radius"] * opts["slope"]
opts["height"] = np.random.randint(0.5 * fullheight, fullheight)
return opts
def options_square(max_size):
return {"size": np.random.randint(MIN_SIZE, max_size + 1), "orient": sh.orientation3()}
def options_rectangle(max_size):
return {"size": np.random.randint(MIN_SIZE, max_size + 1, size=2), "orient": sh.orientation3()}
def options_circle(max_size):
min_r = MIN_SIZE // 2
max_r = max_size // 2
return {"radius": np.random.randint(min_r, max_r + 1), "orient": sh.orientation3()}
def options_disk(max_size):
min_r = MIN_SIZE // 2
max_r = max_size // 2
return {"radius": np.random.randint(min_r, max_r + 1), "orient": sh.orientation3()}
def options_triangle(max_size):
return {"size": np.random.randint(MIN_SIZE, max_size + 1), "orient": sh.orientation3()}
def options_dome(max_size):
min_r = MIN_SIZE // 2
max_r = max_size // 2
return {"radius": np.random.randint(min_r, max_r + 1)}
# TODO: can we make this work
def options_arch(max_size):
ms = max(MIN_SIZE + 1, max_size * 2 // 3)
return {"size": np.random.randint(MIN_SIZE, ms), "distance": 2 * np.random.randint(2, 5) + 1}
def options_ellipsoid(max_size):
# these sizes are actually radiuses
min_r = MIN_SIZE // 2
max_r = max_size // 2
return {"size": np.random.randint(min_r, max_r + 1, size=3)}
def options_tower(max_size):
return {"height": np.random.randint(3, max_size + 1), "base": np.random.randint(-4, 6)}
def options_empty(max_size):
return {}
def empty(labelme=False):
num = np.random.randint(1, 64)
S = []
for i in range(num):
pos = np.random.randint(0, 32, 3)
bid = np.random.randint(0, 64)
S.append((pos, bid))
return S, []
# eventually put ground blocks, add 'floating', 'hill', etc.
# TODO hollow is separate tag
SHAPENAMES = sh.SHAPE_NAMES
SHAPENAMES.append("TOWER")
# SHAPENAMES.append("empty")
SHAPEFNS = sh.SHAPE_FNS
SHAPEFNS["TOWER"] = shapes.tower
SHAPEFNS["empty"] = empty
SHAPE_HELPERS = {
"CUBE": options_cube,
"HOLLOW_CUBE": options_hollow_cube,
"RECTANGULOID": options_rectanguloid,
"HOLLOW_RECTANGULOID": options_hollow_rectanguloid,
"SPHERE": options_sphere,
"SPHERICAL_SHELL": options_spherical_shell,
"PYRAMID": options_square_pyramid,
"SQUARE": options_square,
"RECTANGLE": options_rectangle,
"CIRCLE": options_circle,
"DISK": options_disk,
"TRIANGLE": options_triangle,
"DOME": options_dome,
"ARCH": options_arch,
"ELLIPSOID": options_ellipsoid,
"TOWER": options_tower,
"empty": options_empty,
}
################################################################################
################################################################################
def check_l1_dist(a, b, d):
return abs(b[0] - a[0]) <= d[0] and abs(b[1] - a[1]) <= d[1] and abs(b[2] - a[2]) <= d[2]
def get_rectanguloid_subsegment(S, c, max_chunk=10):
bounds, segment_sizes = su.get_bounds_and_sizes(S)
max_dists = []
for s in segment_sizes:
max_side_len = min(s - 1, max_chunk)
max_dist = int(max(max_side_len / 2, 1))
max_dists.append(random.randint(1, max_dist))
return [check_l1_dist(c, b[0], max_dists) for b in S]
def get_random_shape_pt(shape, side_length=None):
sl = side_length
p = random.choice(shape)[0]
if not side_length:
return p
while p[0] >= sl or p[1] >= sl or p[2] >= sl:
p = random.choice(shape)[0]
return p
# Return values:
# shape: array of ((3D coords), (block_id, ??))
# seg: array of bool for whether pos is in seg
def get_shape_segment(max_chunk=10, side_length=None):
shape, _, _ = get_shape()
p = get_random_shape_pt(shape, side_length)
seg = get_rectanguloid_subsegment(shape, p, max_chunk=max_chunk)
s_tries = 0
while all(item for item in seg):
p = get_random_shape_pt(shape, side_length)
seg = get_rectanguloid_subsegment(shape, p, max_chunk=max_chunk)
s_tries += 1
# Get new shape
if s_tries > 3:
shape, _, _ = get_shape()
p = get_random_shape_pt(shape, side_length)
seg = get_rectanguloid_subsegment(shape, p, max_chunk=max_chunk)
s_tries = 0
return shape, seg
def shift_vector_gen(side_length):
shift_max = int(side_length / 2)
for i in range(side_length):
for j in range(side_length):
for k in range(side_length):
yield (i - shift_max, j - shift_max, k - shift_max)
# Returns three tensors: 32x32x32 context, 8x8x8 segment, 1 target
class SegmentContextShapeData(torch.utils.data.Dataset):
def __init__(self, nexamples=100000, context_side_length=32, seg_side_length=8, useid=False):
self.c_sl = context_side_length
self.s_sl = seg_side_length
self.num_examples = nexamples
self.useid = useid
self.examples = []
def _get_example(self):
schem_sparse, seg = get_shape_segment(max_chunk=self.s_sl - 1, side_length=self.c_sl)
seg_inds = set([i for i, use in enumerate(seg) if use])
seg_sparse = [schem_sparse[i] for i in seg_inds]
context_sparse = [b for i, b in enumerate(schem_sparse) if i not in seg_inds]
return su.convert_sparse_context_seg_to_example(
context_sparse, seg_sparse, self.c_sl, self.s_sl, self.useid
)
def __getitem__(self, index):
return self._get_example()
def __len__(self):
return self.num_examples
def get_two_shape_sparse(c_sl, s_sl):
max_s_size = random.randint(MIN_SIZE, s_sl)
max_c_size = c_sl - 2 * max_s_size
c_shape_sparse, _, nc = get_shape("random", max_c_size)
s_shape_sparse, _, ns = get_shape("random", max_s_size)
# move segment to (0,0,0) and bound size
s_bounds, s_sizes = su.get_bounds_and_sizes(s_shape_sparse)
seg_sparse, _ = su.shift_sparse_voxel_to_origin(s_shape_sparse)
    seg_sparse = [b for b in seg_sparse if all([i < s_sl for i in b[0]])]
s_bounds, s_sizes = su.get_bounds_and_sizes(seg_sparse)
# ensure context isn't too big
c_bounds, c_sizes = su.get_bounds_and_sizes(c_shape_sparse)
total_sizes = [c + s * 2 for c, s in zip(c_sizes, s_sizes)]
for i, size in enumerate(total_sizes):
if size > 32:
remove = size - 32
remove_to = c_bounds[i * 2] + remove
c_shape_sparse = [b for b in c_shape_sparse if b[0][i] >= remove_to]
if len(c_shape_sparse) == 0:
raise Exception("There should be something in c_shape_sparse {}".format(c_shape_sparse))
c_bounds, c_sizes = su.get_bounds_and_sizes(c_shape_sparse)
# shift context center to space center
c_center = [sl // 2 for sl in c_sizes]
c_space_center = [c_sl // 2 for _ in range(3)]
sv = [sc - c for c, sc in zip(c_center, c_space_center)]
context_sparse = [
((b[0][0] + sv[0], b[0][1] + sv[1], b[0][2] + sv[2]), b[1]) for b in c_shape_sparse
]
return context_sparse, c_sizes, seg_sparse, s_sizes
def get_shape_dir_target(viewer_pos, dir_vec, c_sizes, s_sizes, c_sl, max_shift=0):
c_space_center = [c_sl // 2 for _ in range(3)]
c_half = [cs // 2 for cs in c_sizes]
c_pos = [c_space_center[i] - c_half[i] for i in range(3)]
target_coord, dim, dr = du.get_rotated_context_to_seg_origin(
viewer_pos, dir_vec, c_pos, c_sizes, s_sizes
)
if any([t > c_sl - 1 or t < 0 for t in target_coord]):
raise Exception("target coord:", target_coord)
if max_shift > 0:
# Shift by some precalculated amount and turn into a target
shift_constraint = c_space_center[dim] - c_half[dim] - s_sizes[dim] - 2
shift_by = random.randint(0, min(max(shift_constraint, 0), max_shift))
target_coord[dim] += dr * shift_by
target = su.coord_to_index(target_coord.tolist(), c_sl)
return torch.tensor(target, dtype=torch.int)
# Returns a 32x32x32 context, 8x8x8 segment, 3-dim viewer_pos, 5-dim dir_vec, 1 target
class SegmentContextShapeDirData(torch.utils.data.Dataset):
def __init__(
self,
nexamples=100000,
context_side_length=32,
seg_side_length=8,
useid=False,
max_shift=0,
ground_type=None,
):
self.c_sl = context_side_length
self.s_sl = seg_side_length
self.num_examples = nexamples
self.useid = useid
self.examples = []
self.max_shift = max_shift
self.ground_type = ground_type
def _get_example(self):
# note that seg_sparse is not in target location
context_sparse, c_sizes, seg_sparse, s_sizes = get_two_shape_sparse(self.c_sl, self.s_sl)
viewer_pos, viewer_look = du.get_random_viewer_info(self.c_sl)
dir_vec = du.random_dir_vec_tensor()
target = get_shape_dir_target(
viewer_pos, dir_vec, c_sizes, s_sizes, self.c_sl, self.max_shift
)
if self.ground_type is not None:
target_coord = su.index_to_coord(target, self.c_sl)
su.add_ground_to_context(
context_sparse, target_coord, flat=(self.ground_type == "flat")
)
context = su.get_dense_array_from_sl(context_sparse, self.c_sl, self.useid)
seg = su.get_dense_array_from_sl(seg_sparse, self.s_sl, self.useid)
return {
"context": torch.from_numpy(context),
"seg": torch.from_numpy(seg),
"target": torch.tensor([target]),
"viewer_pos": viewer_pos,
"dir_vec": dir_vec,
}
def __getitem__(self, index):
return self._get_example()
def __len__(self):
return self.num_examples
if __name__ == "__main__":
from visualization_utils import GeoscorerDatasetVisualizer
dataset = SegmentContextShapeDirData(nexamples=3, ground_type="hilly")
vis = GeoscorerDatasetVisualizer(dataset)
for n in range(len(dataset)):
vis.visualize()
| craftassist-master | python/craftassist/voxel_models/geoscorer/shape_dataset.py |
"""
Copyright (c) Facebook, Inc. and its affiliates.
"""
import numpy as np
import torch
import random
def get_viewer_look(c_sl):
return torch.tensor([c_sl // 2 for _ in range(3)])
def get_random_viewer_info(sl):
viewer_pos = torch.tensor(np.random.randint(0, sl, 3))
viewer_look = get_viewer_look(sl)
if viewer_pos.eq(viewer_look).sum() == viewer_pos.size(0):
        if viewer_pos[0] < sl - 1:
viewer_pos[0] += 1
else:
viewer_pos[0] -= 1
return viewer_pos, viewer_look
def get_vector(start, end):
return end - start
def dim_to_vec(dim):
return [(1 if i == dim else 0) for i in range(3)]
def dir_vec_to_dim(dir_vec):
for i in range(3):
if dir_vec[i] == 1:
return i
raise Exception("dir vec has no dimension")
def dr_to_vec(dr):
return [1, 0] if dr == 1 else [0, 1]
def dir_vec_to_dr(dir_vec):
if dir_vec[3] == 1:
return 1
elif dir_vec[4] == 1:
return -1
else:
raise Exception("dir vec has no direction")
def dim_dir_to_dir_tensor(dim, dr):
dim_l = dim_to_vec(dim)
dir_l = dr_to_vec(dr)
return torch.tensor(dim_l + dir_l, dtype=torch.long)
def dir_vec_to_dim_dir(dir_vec):
dim = dir_vec_to_dim(dir_vec)
dr = dir_vec_to_dr(dir_vec)
return dim, dr
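# Example for the direction encoding (illustrative): dim_dir_to_dir_tensor(2, -1)
# gives tensor([0, 0, 1, 0, 1]) (the first three entries one-hot the dimension, the
# last two one-hot the sign), and dir_vec_to_dim_dir on that tensor recovers (2, -1).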
def random_dir_vec_tensor():
dim = random.choice([0, 1, 2])
dr = random.choice([-1, 1])
return dim_dir_to_dir_tensor(dim, dr)
def normalize(batched_vector):
vec = batched_vector.double()
norm = torch.norm(vec, dim=1)
# Set norm to 1 if it's 0
norm = norm + norm.eq(0).double()
expanded_norm = norm.unsqueeze(1).expand(-1, vec.size()[1])
return torch.div(vec, expanded_norm)
def get_rotation_matrix(viewer_pos, viewer_look):
# VP, VL: N x 3, VP_to_VL: N x 3
vp_to_vl = get_vector(viewer_pos, viewer_look)[:, :2]
nlook_vec = normalize(vp_to_vl)
nly = nlook_vec[:, 1]
    # Nlx necessary to correct for the range of arccos
nlx = nlook_vec[:, 0]
nlx = nlx.gt(0).double() - nlx.lt(0).double() - nlx.eq(0).double()
# Take care of nans created by raising 0 to a power
# and then masking the sin theta to 0 as intended
base = 1 - nly * nly
nan_mask = torch.isnan(torch.pow(base, 0.5)).double()
base = base + nan_mask
sin_theta = nlx * nan_mask.eq(0).double() * torch.pow(base, 0.5)
nly = nly.unsqueeze(1)
sin_theta = sin_theta.unsqueeze(1)
rm_pt1 = torch.cat([nly, sin_theta], 1).unsqueeze(1)
rm_pt2 = torch.cat([-sin_theta, nly], 1).unsqueeze(1)
rm = torch.cat([rm_pt1, rm_pt2], 1)
return rm
def rotate_x_y(coord, rotation_matrix):
return torch.mm(coord.unsqueeze(0), rotation_matrix).squeeze(0)
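# Illustrative example (values follow from the code above): for viewer_pos (0, 0, 0)
# and viewer_look (0, 1, 0) the rotation matrix is the identity; for viewer_look
# (1, 0, 0) it is [[0, 1], [-1, 0]], so rotate_x_y maps the look direction (1, 0) to
# (0, 1), i.e. the rotation aligns the viewer's look vector with +y.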
def float_equals(a, b, epsilon):
return True if abs(a - b) < epsilon else False
def get_argmax_list(vals, epsilon=0.0001, minlist=False, maxlen=None):
mult = -1 if minlist else 1
max_ind = []
for i, v in enumerate(vals):
if not max_ind or float_equals(max_ind[0][1], v, epsilon):
if maxlen and len(max_ind) == maxlen:
continue
max_ind.append((i, v))
elif mult * (v - max_ind[0][1]) > 0:
max_ind = [(i, v)]
return max_ind
def get_firstmax(vals, epsilon=0.0001, minlist=False):
return get_argmax_list(vals, epsilon, minlist, 1)[0]
# N -> batch size in training
# D -> num target coord per element
# Viewer pos, viewer_look are N x 3 tensors
# Batched target coords is a N x D x 3 tensor
# Output is a N x D x 3 tensor
def get_xyz_viewer_look_coords_batched(viewer_pos, viewer_look, batched_target_coords):
# First verify the sizing and unsqueeze if necessary
btc_sizes = batched_target_coords.size()
vp_sizes = viewer_pos.size()
vl_sizes = viewer_look.size()
if len(btc_sizes) > 3 or len(vp_sizes) > 2 or len(vl_sizes) > 2:
raise Exception("One input has too many dimensions")
if btc_sizes[-1] != 3 or vp_sizes[-1] != 3 or vl_sizes[-1] != 3:
raise Exception("The last dimension of all inputs should be size 3")
if len(btc_sizes) < 3:
for i in range(3 - len(btc_sizes)):
batched_target_coords = batched_target_coords.unsqueeze(0)
if len(vp_sizes) == 1:
viewer_pos = viewer_pos.unsqueeze(0)
if len(vl_sizes) == 1:
viewer_look = viewer_look.unsqueeze(0)
n = batched_target_coords.size()[0]
d = batched_target_coords.size()[1]
# Handle xy and z separately
# XY = N X D x 2
xy = batched_target_coords[:, :, 0:2].double()
# Z = N x D x 1
z = batched_target_coords[:, :, 2].unsqueeze(2).double()
## XY
# Shift such that viewer pos is the origin
# VPXY, VLXY: N x 2
vpxy = viewer_pos.double()[:, 0:2]
vlxy = viewer_look.double()[:, 0:2]
vpxy_to_vlxy = vlxy - vpxy
# VPXY to XY: N x D x 2
vpxy_to_xy = xy - vpxy.unsqueeze(1).expand(n, d, -1)
# Rotate them around the viewer position such that a normalized
# viewer look vector would be (0, 1)
# Rotation_matrix: N x 2 x 2
rotation_matrix = get_rotation_matrix(viewer_pos, viewer_look)
# N x 1 x 2 mm N x 2 x 2 ==> N x 1 x 2 ==> N x 2
r_vpxy_to_vlxy = torch.bmm(vpxy_to_vlxy.unsqueeze(1), rotation_matrix).unsqueeze(1)
# RM: N x 2 x 2 ==> N x D x 2 x 2
expanded_rm = rotation_matrix.unsqueeze(1).expand(n, d, 2, 2).contiguous().view(-1, 2, 2)
# N x D x 2 ==> N*D x 1 x 2 mm N*D x 2 x 2 ==> N*D x 1 x 2 ==> N x D x 2
reshape_vpxy_to_xy = vpxy_to_xy.contiguous().view(-1, 1, 2)
r_vpxy_to_xy = torch.bmm(reshape_vpxy_to_xy, expanded_rm).contiguous().view(n, d, 2)
# N x D x 2
# Get the xy position in this rotated coord system with rvl as the origin
rvl_to_rxy = r_vpxy_to_xy - r_vpxy_to_vlxy.squeeze(1).expand(n, d, 2)
## Z
# VLZ = N x 1
vlz = viewer_look.double()[:, 2]
# Z = N x D x 1
diffz = z - vlz.view(-1, 1, 1).expand(n, d, -1)
## Combine
# rvl_to_rxy: N x D x 2, diffz: N x D x 1
new_xyz = torch.cat([rvl_to_rxy, diffz], 2)
return new_xyz
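# Worked example (illustrative): with viewer_pos (0, 0, 0), viewer_look (0, 2, 0) and
# target (1, 3, 2), the rotation is the identity, so the target in the
# viewer_look-centred frame is (1, 3, 2) - (0, 2, 0) = (1, 1, 2): x is the sideways
# offset, y the offset along the look direction, and z the height offset.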
def get_dir_dist(viewer_pos, viewer_look, batched_target_coords):
if len(batched_target_coords.size()) == 1:
batched_target_coords = batched_target_coords.unsqueeze(0)
xyz = get_xyz_viewer_look_coords_batched(viewer_pos, viewer_look, batched_target_coords)
dist = xyz.abs()
direction = xyz.gt(0).double() - xyz.lt(0).double()
return direction, dist
def get_sampled_direction_vec(viewer_pos, viewer_look, target_coord):
directions, dists = get_dir_dist(viewer_pos, viewer_look, target_coord)
dists = dists.squeeze()
directions = directions.squeeze()
ndists = dists / sum(dists)
dim = np.random.choice(3, p=ndists)
direction = directions[dim].item()
return dim_dir_to_dir_tensor(dim, direction)
def get_max_direction_vec(viewer_pos, viewer_look, target_coord):
directions, dists = get_dir_dist(viewer_pos, viewer_look, target_coord)
dists = dists.squeeze()
directions = directions.squeeze()
ndists = dists / sum(dists)
dim = np.argmax(ndists)
direction = directions[dim].item()
return dim_dir_to_dir_tensor(dim, direction)
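# Illustrative example: continuing the case above ((1, 1, 2) in the look frame),
# get_dir_dist returns direction (1, 1, 1) and dist (1, 1, 2), so get_max_direction_vec
# picks dim 2 with positive sign, i.e. tensor([0, 0, 1, 1, 0]), while
# get_sampled_direction_vec samples the dim with probability proportional to dist.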
def convert_origin_to_center(origin, sizes):
half = [s // 2 for s in sizes]
return [origin[i] + half[i] for i in range(3)]
def convert_center_to_origin(center, sizes):
half = [s // 2 for s in sizes]
return [center[i] - half[i] for i in range(3)]
def get_rotated_context_to_seg_origin(viewer_pos, dir_vec, c_pos, c_sizes, s_sizes):
c_half = [sl // 2 for sl in c_sizes]
s_half = [sl // 2 for sl in s_sizes]
c_center = [c_pos[i] + c_half[i] for i in range(3)]
dim, dr = dir_vec_to_dim_dir(dir_vec)
# These account for the sl // 2 discretization, apply in positive direction
# TODO: there must be a way to only use one of these
c_offset_even = [1 if c % 2 == 0 else 0 for c in c_sizes]
c_offset_odd = [c % 2 for c in c_sizes]
# For above below, directly attach in that dir
if dim == 2:
touch_p = [i for i in c_center]
if dr == -1:
touch_p[2] -= c_half[2]
target_coord = [
touch_p[0] - s_half[0],
touch_p[1] - s_half[1],
touch_p[2] - s_sizes[2],
]
else:
touch_p[2] += c_half[2] + c_offset_odd[2]
target_coord = [touch_p[0] - s_half[0], touch_p[1] - s_half[1], touch_p[2]]
return torch.tensor(target_coord, dtype=torch.int), dim, dr
# Find the 4 possible positions
c_shift = [c_half[i] + 1 for i in range(2)]
possible_touch_points = []
possible_targets = []
shift_dims = []
shift_dirs = []
for sdim in [0, 1]:
for sdr in [1, -1]:
shift_dims.append(sdim)
shift_dirs.append(sdr)
tp = [p for p in c_center]
tp[sdim] += sdr * c_shift[sdim]
if sdr > 0:
tp[sdim] -= c_offset_even[sdim]
possible_touch_points.append(torch.tensor(tp, dtype=torch.float))
t = [p for p in tp]
for d in range(3):
if d == sdim:
if sdr < 0:
t[d] -= s_sizes[d] - 1
else:
t[d] -= s_half[d]
possible_targets.append(torch.tensor(t, dtype=torch.float))
    # Choose the best touch point based on rotation
c_center_t = torch.tensor(c_center, dtype=torch.float)
c_to_ts = [get_vector(c_center_t[:2], t[:2]) for t in possible_touch_points]
rotation_matrix = get_rotation_matrix(
viewer_pos.unsqueeze(0), c_center_t.unsqueeze(0)
).squeeze(0)
c_to_t_rotated = [rotate_x_y(c_to_t.double(), rotation_matrix) for c_to_t in c_to_ts]
vals = [v[dim] for v in c_to_t_rotated]
if dr == 1:
max_ind, dist = get_firstmax(vals)
else:
max_ind, dist = get_firstmax(vals, minlist=True)
target_coord = possible_targets[max_ind]
return target_coord.int(), shift_dims[max_ind], shift_dirs[max_ind]
| craftassist-master | python/craftassist/voxel_models/geoscorer/directional_utils.py |
"""
Copyright (c) Facebook, Inc. and its affiliates.
"""
import numpy as np
import random
import torch
import torch.utils.data
import spatial_utils as su
import directional_utils as du
def get_glue_cubes_direction_target_coord(viewer_pos, dir_vec, cube_size, origin_cont, c_sl):
# Note: c_sizes and s_sizes are the same for this dataset
c_sizes = [cube_size for _ in range(3)]
target, _, _ = du.get_rotated_context_to_seg_origin(
viewer_pos, dir_vec, origin_cont, c_sizes, c_sizes
)
return target
def get_glue_cubes_cont_size_loc(c_sl, s_sl, fixed_size=None, center=False):
cube_size = fixed_size
if not cube_size:
cube_size = np.random.randint(1, s_sl + 1) # +1 for inclusive
if center:
origin_cont = [c_sl // 2 - cube_size // 2 for i in range(3)]
else:
possible_range = [[cube_size, c_sl - 2 * cube_size] for i in range(3)]
origin_cont = [np.random.randint(*possible_range[i]) for i in range(3)]
return cube_size, origin_cont
def get_sparse_cube_context_seg(cube_size, origin_cont, origin_seg, block_type=None):
context_sparse = []
seg_sparse = []
cube_type = block_type if block_type else np.random.randint(1, 256)
for i in range(cube_size):
for j in range(cube_size):
for k in range(cube_size):
offset = (i, j, k)
context_sparse.append(
(tuple([sum(x) for x in zip(offset, origin_cont)]), (cube_type, 0))
)
seg_sparse.append(
(tuple([sum(x) for x in zip(offset, origin_seg)]), (cube_type, 0))
)
return context_sparse, seg_sparse
def glue_cubes(c_sl, s_sl, dim, direction):
if dim < 0 or dim > 2 or direction not in [-1, 1]:
raise Exception("Invalid dimension {} or direction {}".format(dim, direction))
cube_size, origin_cont = get_glue_cubes_cont_size_loc(c_sl, s_sl)
origin_seg = [i for i in origin_cont]
origin_seg[dim] = origin_seg[dim] + direction * cube_size
context_sparse, seg_sparse = get_sparse_cube_context_seg(cube_size, origin_cont, origin_seg)
return context_sparse, seg_sparse
def directional_glue_cubes(c_sl, s_sl, fixed_cube_size=None, fixed_center=False):
dir_vec = du.random_dir_vec_tensor()
viewer_pos, viewer_look = du.get_random_viewer_info(c_sl)
cube_size, origin_cont = get_glue_cubes_cont_size_loc(
c_sl, s_sl, fixed_size=fixed_cube_size, center=fixed_center
)
target_coord = get_glue_cubes_direction_target_coord(
viewer_pos, dir_vec, cube_size, origin_cont, c_sl
)
context_sparse, seg_sparse = get_sparse_cube_context_seg(
cube_size, origin_cont, target_coord.tolist()
)
return {
"context_sparse": context_sparse,
"seg_sparse": seg_sparse,
"target_coord": target_coord,
"viewer_pos": viewer_pos,
"dir_vec": dir_vec,
}
# Returns three tensors: 32x32x32 context, 8x8x8 segment, 1 target
class SegmentContextGlueCubesData(torch.utils.data.Dataset):
def __init__(
self,
nexamples=100000,
context_side_length=32,
seg_side_length=8,
useid=False,
type_name="random",
use_direction=False,
fixed_cube_size=None,
fixed_center=False,
ground_type=None,
):
self.c_sl = context_side_length
self.s_sl = seg_side_length
self.num_examples = nexamples
self.useid = useid
self.examples = []
self.use_direction = use_direction
self.fixed_cube_size = fixed_cube_size
self.fixed_center = fixed_center
self.ground_type = ground_type
def _get_example(self):
if not self.use_direction:
dim = random.choice([0, 1, 2])
dr = random.choice([-1, 1])
context_sparse, seg_sparse = glue_cubes(self.c_sl, self.s_sl, dim, dr)
return su.convert_sparse_context_seg_to_example(
context_sparse, seg_sparse, self.c_sl, self.s_sl, self.useid
)
else:
dgc = directional_glue_cubes(
self.c_sl, self.s_sl, self.fixed_cube_size, self.fixed_center
)
if self.ground_type is not None:
flat = self.ground_type == "flat"
su.add_ground_to_context(dgc["context_sparse"], dgc["target_coord"], flat=flat)
example = su.convert_sparse_context_seg_to_example(
dgc["context_sparse"], dgc["seg_sparse"], self.c_sl, self.s_sl, self.useid
)
example["target"] = su.coord_to_index(dgc["target_coord"], self.c_sl)
example["target"] = example["target"].unsqueeze(0)
example["viewer_pos"] = dgc["viewer_pos"]
example["dir_vec"] = dgc["dir_vec"]
return example
def __getitem__(self, index):
return self._get_example()
def __len__(self):
return self.num_examples
if __name__ == "__main__":
import argparse
from visualization_utils import GeoscorerDatasetVisualizer
parser = argparse.ArgumentParser()
parser.add_argument(
"--use_direction", action="store_true", help="use direction in example creation"
)
parser.add_argument(
"--fixed_center", action="store_true", help="fix the center of the context cube"
)
parser.add_argument(
"--fixed_cube_size", type=int, default=None, help="fix the size of the cubes"
)
parser.add_argument(
"--ground_type", type=str, default=None, help="ground type to use (None|flat|hilly)"
)
opts = parser.parse_args()
dataset = SegmentContextGlueCubesData(
nexamples=3,
use_direction=opts.use_direction,
fixed_center=opts.fixed_center,
fixed_cube_size=opts.fixed_cube_size,
useid=True,
ground_type=opts.ground_type,
)
vis = GeoscorerDatasetVisualizer(dataset)
for n in range(len(dataset)):
vis.visualize()
| craftassist-master | python/craftassist/voxel_models/geoscorer/autogen_dataset.py |
"""
Copyright (c) Facebook, Inc. and its affiliates.
"""
import numpy as np
import torch
import pickle
import torch.nn as nn
from data_loaders import make_example_from_raw
class SemSegNet(nn.Module):
def __init__(self, opts, classes=None):
super(SemSegNet, self).__init__()
if opts.load:
if opts.load_model != "":
self.load(opts.load_model)
else:
raise ("loading from file specified but no load_filepath specified")
if opts.vocab_path != "":
self.load_vocab(opts.vocab_path)
else:
raise ("loading from file specified but no vocab_path specified")
else:
self.opts = opts
self._build()
self.classes = classes
def _build(self):
opts = self.opts
try:
embedding_dim = opts.embedding_dim
except:
embedding_dim = 8
try:
num_words = opts.num_words
except:
num_words = 3
try:
num_layers = opts.num_layers
except:
num_layers = 4 # 32x32x32 input
try:
hidden_dim = opts.hidden_dim
except:
hidden_dim = 64
self.embedding_dim = embedding_dim
self.embedding = nn.Embedding(num_words, embedding_dim)
self.layers = nn.ModuleList()
self.num_layers = num_layers
self.layers.append(
nn.Sequential(
nn.Conv3d(embedding_dim, hidden_dim, kernel_size=5, padding=2),
nn.BatchNorm3d(hidden_dim),
nn.ReLU(inplace=True),
)
)
for i in range(num_layers - 1):
self.layers.append(
nn.Sequential(
nn.Conv3d(hidden_dim, hidden_dim, kernel_size=5, padding=2),
nn.BatchNorm3d(hidden_dim),
nn.ReLU(inplace=True),
)
)
self.out = nn.Conv3d(hidden_dim, opts.num_classes, kernel_size=1)
self.lsm = nn.LogSoftmax(dim=1)
def forward(self, x):
# FIXME when pytorch is ready for this, embedding
# backwards is soooooo slow
# z = self.embedding(x)
szs = list(x.size())
x = x.view(-1)
z = self.embedding.weight.index_select(0, x)
szs.append(self.embedding_dim)
z = z.view(torch.Size(szs))
z = z.permute(0, 4, 1, 2, 3).contiguous()
for i in range(self.num_layers):
z = self.layers[i](z)
return self.lsm(self.out(z))
def save(self, filepath):
self.cpu()
sds = {}
sds["opts"] = self.opts
sds["classes"] = self.classes
sds["state_dict"] = self.state_dict()
torch.save(sds, filepath)
if self.opts.cuda:
self.cuda()
def load_vocab(self, vocab_path):
with open(vocab_path, "rb") as file:
self.vocab = pickle.load(file)
print("Loaded vocab")
def load(self, filepath):
sds = torch.load(filepath)
self.opts = sds["opts"]
print("loading from file, using opts")
print(self.opts)
self._build()
self.load_state_dict(sds["state_dict"])
self.zero_grad()
self.classes = sds["classes"]
class Opt:
pass
class SemSegWrapper:
def __init__(self, model, vocab_path, threshold=-1.0, blocks_only=True, cuda=False):
if type(model) is str:
opts = Opt()
opts.load = True
opts.load_model = model
opts.vocab_path = vocab_path
model = SemSegNet(opts)
self.model = model
self.cuda = cuda
if self.cuda:
model.cuda()
else:
model.cpu()
self.classes = model.classes
# threshold for relevance; unused rn
self.threshold = threshold
# if true only label non-air blocks
self.blocks_only = blocks_only
# this is used by the semseg_process
i2n = self.classes["idx2name"]
self.tags = [(c, self.classes["name2count"][c]) for c in i2n]
assert self.classes["name2idx"]["none"] == 0
@torch.no_grad()
def segment_object(self, blocks):
self.model.eval()
        if getattr(self.model, "vocab", None):
vocab = self.model.vocab
vocab_blocks = np.zeros(blocks.shape[:-1])
for x in range(blocks.shape[0]):
for y in range(blocks.shape[1]):
for z in range(blocks.shape[2]):
block_id = blocks[x,y,z,0]
meta_id = blocks[x,y,z,1]
id_tuple = (block_id, meta_id)
# First see if that specific block-meta pair is in the vocab.
if id_tuple in vocab:
id_ = vocab[id_tuple]
# Else, check if the same general material (block-id) exists.
elif (block_id, 0) in vocab:
id_ = vocab[(block_id, 0)]
# If not, the network has no clue what it is, ignore it (treat as air).
else:
id_ = vocab[(0,0)]
vocab_blocks[x,y,z] = id_
else:
vocab_blocks = blocks[:, :, :, 0]
blocks = torch.from_numpy(vocab_blocks)
blocks, _, o = make_example_from_raw(blocks)
blocks = blocks.unsqueeze(0)
if self.cuda:
blocks = blocks.cuda()
y = self.model(blocks)
_, mids = y.squeeze().max(0)
locs = mids.nonzero()
locs = locs.tolist()
if self.blocks_only:
return {
tuple(np.subtract(l, o)): mids[l[0], l[1], l[2]].item()
for l in locs
if blocks[0, l[0], l[1], l[2]] > 0
}
else:
return {tuple(ll for ll in l): mids[l[0], l[1], l[2]].item() for l in locs}
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument(
"--hidden_dim", type=int, default=128, help="size of hidden dim in fc layer"
)
parser.add_argument("--embedding_dim", type=int, default=16, help="size of blockid embedding")
parser.add_argument("--num_words", type=int, default=256, help="number of blocks")
parser.add_argument("--num_classes", type=int, default=20, help="number of blocks")
args = parser.parse_args()
N = SemSegNet(args)
| craftassist-master | python/craftassist/voxel_models/semantic_segmentation/semseg_models.py |
"""
Copyright (c) Facebook, Inc. and its affiliates.
"""
import pickle
import numpy as np
import torch
from torch.utils import data as tds
from copy import deepcopy
def underdirt(schematic, labels=None, max_shift=0, nothing_id=0):
# todo fancier dirt!
# FIXME!!!! label as ground where appropriate
shift = torch.randint(max_shift + 1, (1,)).item()
if shift > 0:
new_schematic = torch.LongTensor(schematic.size())
new_schematic[:, shift:, :] = schematic[:, :-shift, :]
new_schematic[:, :shift, :] = 3
new_labels = None
if labels is not None:
new_labels = torch.LongTensor(labels.size())
new_labels[:, shift:, :] = labels[:, :-shift, :]
new_labels[:, :shift, :] = nothing_id
return new_schematic, new_labels
else:
return schematic, labels
def flip_rotate(c, l=None, idx=None):
"""
Randomly transform the cube for more data.
The transformation is chosen from:
0. original
1. x-z plane rotation 90
2. x-z plane rotation 180
3. x-z plane rotation 270
4. x-axis flip
5. z-axis flip
"""
idx = np.random.choice(range(6)) if (idx is None) else idx
l_ = l
if idx == 0:
c_ = c
l_ = l
elif idx >= 1 and idx <= 3: # rotate
npc = c.numpy()
npc = np.rot90(npc, idx, axes=(0, 2)) # rotate on the x-z plane
c_ = torch.from_numpy(npc.copy())
if l is not None:
npl = l.numpy()
npl = np.rot90(npl, idx, axes=(0, 2)) # rotate on the x-z plane
l_ = torch.from_numpy(npl.copy())
else: # flip
npc = c.numpy()
npc = np.flip(npc, axis=(idx - 4) * 2) # 0 or 2
c_ = torch.from_numpy(npc.copy())
if l is not None:
npl = l.numpy()
npl = np.flip(npl, axis=(idx - 4) * 2) # 0 or 2
l_ = torch.from_numpy(npl.copy())
return c_, l_, idx
def pad_to_sidelength(schematic, labels=None, nothing_id=0, sidelength=32):
szs = list(schematic.size())
szs = np.add(szs, -sidelength)
pad = []
# this is all backwards bc pytorch pad semantics :(
for s in szs:
if s >= 0:
pad.append(0)
else:
pad.append(-s)
pad.append(0)
schematic = torch.nn.functional.pad(schematic, pad[::-1])
if labels is not None:
labels = torch.nn.functional.pad(labels, pad[::-1], value=nothing_id)
return schematic, labels
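# Worked example (illustrative): for a 30 x 35 x 32 schematic and sidelength 32,
# szs == [-2, 3, 0] and pad == [2, 0, 0, 0, 0, 0]; reversed for F.pad (which pads the
# last dimension first), this pads only the first dimension, on the right, by 2.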
# TODO cut outliers
# TODO simplify
def fit_in_sidelength(schematic, labels=None, nothing_id=0, sl=32, max_shift=0):
schematic, labels = pad_to_sidelength(
schematic, labels=labels, nothing_id=nothing_id, sidelength=sl
)
nz = schematic.nonzero()
m, _ = nz.median(0)
min_y, _ = nz.min(0)
min_y = min_y[1]
xshift = max(torch.randint(-max_shift, max_shift + 1, (1,)).item() - m[0].item() + sl // 2, 0)
zshift = max(torch.randint(-max_shift, max_shift + 1, (1,)).item() - m[2].item() + sl // 2, 0)
new_schematic = torch.LongTensor(sl, sl, sl).fill_(1)
new_schematic[xshift:, : sl - min_y, zshift:] = schematic[
: sl - xshift, min_y:sl, : sl - zshift
]
new_labels = None
if labels is not None:
new_labels = torch.LongTensor(sl, sl, sl).fill_(nothing_id)
new_labels[xshift:, : sl - min_y, zshift:] = labels[: sl - xshift, min_y:sl, : sl - zshift]
return new_schematic, new_labels, (xshift, -min_y, zshift)
def make_example_from_raw(schematic, labels=None, augment={}, nothing_id=0, sl=32):
max_shift = augment.get("max_shift", 0)
s, l, o = fit_in_sidelength(
schematic, labels=labels, nothing_id=nothing_id, max_shift=max_shift
)
if len(augment) > 0:
if augment.get("flip_rotate", False):
s, l, _ = flip_rotate(s, l=l)
m = augment.get("underdirt")
if m is not None:
# really should fix offset here.....TODO
s, l = underdirt(s, labels=l, max_shift=m, nothing_id=nothing_id)
s[s == 0] = 1
s -= 1
return s, l, o
def swallow_classes(classes, predator, prey_classes, class_map):
new_classes = deepcopy(classes)
apex = class_map.get(predator, predator)
for prey in prey_classes:
class_map[prey] = apex
new_classes["name2count"][predator] += new_classes["name2count"][prey]
del new_classes["name2count"][prey]
# if prey in new_classes["name2count"]:
# new_classes["name2count"][predator] += new_classes["name2count"][prey]
# del new_classes["name2count"][prey]
for prey in prey_classes:
for s, t in class_map.items():
if t == prey:
class_map[s] = apex
return new_classes, class_map
def organize_classes(classes, min_occurence):
class_map = {}
new_classes = deepcopy(classes)
for cname in classes["name2count"]:
# hacky, should stem this properly
if cname[-1] == "s" and classes["name2count"].get(cname[:-1]) is not None:
new_classes, class_map = swallow_classes(new_classes, cname[:-1], [cname], class_map)
small_classes = []
for cname, count in new_classes["name2count"].items():
if count < min_occurence:
small_classes.append(cname)
new_classes, class_map = swallow_classes(new_classes, "none", small_classes, class_map)
new_classes, class_map = swallow_classes(new_classes, "none", ["nothing"], class_map)
counts = sorted(list(new_classes["name2count"].items()), key=lambda x: x[1], reverse=True)
new_classes["name2idx"]["none"] = 0
new_classes["idx2name"].append("none")
for i in range(len(counts)):
cname = counts[i][0]
if cname != "none":
new_classes["name2idx"][cname] = i
new_classes["idx2name"].append(cname)
return new_classes, class_map
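# Illustrative summary: organize_classes folds plural class names into their singular
# form when both occur, folds classes with fewer than min_occurence examples (and the
# "nothing" label) into "none", and returns the merged class dict plus a map from each
# original class name to the class that absorbed it.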
def reconcile_classes(classes, to_match):
new_to_match = deepcopy(to_match)
new_classes = deepcopy(classes)
class_map = {}
existing_class_names = set(new_to_match["name2idx"].keys())
new_class_names = set(new_classes["name2count"].keys())
to_merge = [n for n in new_class_names if n not in existing_class_names]
new_classes, class_map = swallow_classes(new_classes, "none", to_merge, class_map)
for key in ["name2idx", "idx2name"]:
new_classes[key] = new_to_match[key]
return new_classes, class_map
class SemSegData(tds.Dataset):
def __init__(
self,
data_path,
nexamples=-1,
sidelength=32,
classes_to_match=None,
augment={},
min_class_occurence=250,
useid=True,
):
self.sidelength = sidelength
self.useid = useid
self.examples = []
self.inst_data = pickle.load(open(data_path, "rb"))
self.nexamples = nexamples
self.augment = augment
if self.nexamples < 0:
self.nexamples = len(self.inst_data)
else:
self.nexamples = min(len(self.inst_data), self.nexamples)
classes = {"name2idx": {}, "idx2name": [], "name2count": {}}
for i in range(len(self.inst_data)):
for cname in self.inst_data[i][2]:
if classes["name2count"].get(cname) is None:
classes["name2count"][cname] = 1
else:
classes["name2count"][cname] += 1
if classes["name2count"].get("none") is None:
classes["name2count"]["none"] = 1
if classes_to_match is None:
merged_classes, class_map = organize_classes(classes, min_class_occurence)
else:
merged_classes, class_map = reconcile_classes(classes, classes_to_match)
for cname in merged_classes["name2idx"]:
class_map[cname] = cname
self.classes = merged_classes
# this should be 0...
self.nothing_id = self.classes["name2idx"]["none"]
c = self.classes["name2idx"]
for i in range(len(self.inst_data)):
self.inst_data[i] = list(self.inst_data[i])
x = self.inst_data[i]
x[0] = torch.from_numpy(x[0]).long()
x[1] = torch.from_numpy(x[1]).long()
x[1].apply_(lambda z: c[class_map[x[2][z]]] if z > 0 else self.nothing_id)
def get_classes(self):
return self.classes
def set_classes(self, classes):
self.classes = classes
def __getitem__(self, index):
x = self.inst_data[index]
s, l, _ = make_example_from_raw(
x[0], labels=x[1], nothing_id=self.nothing_id, sl=self.sidelength, augment=self.augment
)
return s, l
def __len__(self):
return self.nexamples
| craftassist-master | python/craftassist/voxel_models/semantic_segmentation/data_loaders.py |
"""
Copyright (c) Facebook, Inc. and its affiliates.
"""
import os
import argparse
import sys
from tqdm import tqdm
from data_loaders import SemSegData
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
import torch.optim as optim
import semseg_models as models
from pathlib import Path
##################################################
# for debugging
##################################################
def print_slices(model, H, r, c, n, data):
x, y = data[n]
x = x.unsqueeze(0).cuda()
yhat = model(x).squeeze()
print(x[0, c - r : c + r, H, c - r : c + r].cpu())
print(y[c - r : c + r, H, c - r : c + r])
_, mm = yhat.max(0)
print(mm[c - r : c + r, H, c - r : c + r].cpu())
def blocks_from_data(data, n):
x, y = data[n]
ids = x.nonzero()
idl = ids.tolist()
blocks = [((b[0], b[1], b[2]), (x[b[0], b[1], b[2]].item() + 1, 0)) for b in idl]
return x, y, blocks
def semseg_output(S, n, data):
x, y, blocks = blocks_from_data(data, n)
class_stats = {}
for i in range(29):
class_stats[train_data.classes["idx2name"][i]] = len((y == i).nonzero())
# print(train_data.classes['idx2name'][i], len((y==i).nonzero()))
a = S._watch_single_object(blocks)
return class_stats, a
##################################################
# training loop
##################################################
def get_loss(x, y, yhat, loss, args):
# loss is expected to not reduce
preloss = loss(yhat, y)
mask = torch.zeros_like(y).float()
u = x.float() + x.float().uniform_(0, 1)
idx = u.view(-1).gt((1 - args.sample_empty_prob)).nonzero().squeeze()
mask.view(-1)[idx] = 1
M = float(idx.size(0))
# FIXME: eventually need to intersect with "none" tags; want to push loss on labeled empty voxels
preloss *= mask
l = preloss.sum() / M
return l
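# The sketch below is illustrative only (it is not called by the training loop):
# it shows the tensor shapes get_loss expects and that the loss passed in must be
# constructed with reduction="none", as done in __main__ below.
def _example_get_loss():
    demo_args = argparse.Namespace(sample_empty_prob=0.01)
    demo_loss = nn.NLLLoss(reduction="none")
    x = torch.randint(1, 5, (2, 8, 8, 8))  # voxel block ids (nonzero voxels always enter the mask)
    y = torch.randint(0, 4, (2, 8, 8, 8))  # per-voxel class labels
    yhat = torch.log_softmax(torch.randn(2, 4, 8, 8, 8), dim=1)  # per-voxel log-probabilities
    return get_loss(x, y, yhat, demo_loss, demo_args)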
def get_accuracy(y, yhat):
vals, pred = torch.max(yhat, 1)
correct_num = torch.sum(pred == y)
total_num = float(torch.numel(y))
acc = correct_num / total_num
return acc
def validate(model: nn.Module, validation_data: DataLoader, loss, args):
losses = []
accs = []
model.eval()
with torch.no_grad():
for x, y in tqdm(validation_data):
if args.cuda:
x = x.cuda()
y = y.cuda()
yhat = model(x)
            l = get_loss(x, y, yhat, loss, args)
a = get_accuracy(y, yhat)
accs.append(a.item())
losses.append(l.item())
return losses, accs
def train_epoch(model, DL, loss, optimizer, args):
model.train()
losses = []
accs = []
for b in tqdm(DL):
x = b[0]
y = b[1]
if args.cuda:
x = x.cuda()
y = y.cuda()
model.train()
yhat = model(x)
        l = get_loss(x, y, yhat, loss, args)
a = get_accuracy(y, yhat)
losses.append(l.item())
accs.append(a.item())
        optimizer.zero_grad()
        l.backward()
        optimizer.step()
return losses, accs
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--debug", type=int, default=-1, help="no shuffle, keep only debug num examples"
)
parser.add_argument("--num_labels", type=int, default=50, help="How many top labels to use")
parser.add_argument("--num_epochs", type=int, default=50, help="training epochs")
parser.add_argument("--augment", default="none", help="none or maxshift:K_underdirt:J")
parser.add_argument("--cuda", action="store_true", help="use cuda")
parser.add_argument("--gpu_id", type=int, default=0, help="which gpu to use")
parser.add_argument("--batchsize", type=int, default=32, help="batch size")
parser.add_argument("--data_dir", default="")
parser.add_argument("--save_model", default="", help="where to save model (nowhere if blank)")
parser.add_argument(
"--load_model", default="", help="from where to load model (nowhere if blank)"
)
parser.add_argument("--save_logs", default="/dev/null", help="where to save logs")
parser.add_argument(
"--hidden_dim", type=int, default=128, help="size of hidden dim in fc layer"
)
parser.add_argument("--embedding_dim", type=int, default=4, help="size of blockid embedding")
parser.add_argument("--lr", type=float, default=0.01, help="step size for net")
parser.add_argument(
"--sample_empty_prob",
type=float,
default=0.01,
help="prob of taking gradients on empty locations",
)
parser.add_argument("--num_words", default=1024, type=int, help="number of rows in embedding table")
parser.add_argument("--ndonkeys", type=int, default=4, help="workers in dataloader")
args = parser.parse_args()
if args.save_model == "":
print("WARNING: No save path specified, model will not be saved.")
this_dir = os.path.dirname(os.path.realpath(__file__))
parent_dir = os.path.join(this_dir, "../")
sys.path.append(parent_dir)
print("loading train data")
aug = {}
if args.augment != "none":
a = args.augment.split("_")
aug = {t.split(":")[0]: int(t.split(":")[1]) for t in a}
aug["flip_rotate"] = True
if args.debug > 0 and len(aug) > 0:
print("warning debug and augmentation together?")
data_dir = Path(args.data_dir)
train_data = SemSegData(data_dir / "training_data.pkl", nexamples=args.debug, augment=aug)
print("loaded train")
valid_data = SemSegData(
data_dir / "validation_data.pkl",
classes_to_match=train_data.classes,
)
print("loaded valid")
shuffle = True
if args.debug > 0:
shuffle = False
print("making dataloader")
def make_dataloader(ds):
return torch.utils.data.DataLoader(
ds,
batch_size=args.batchsize,
shuffle=shuffle,
pin_memory=True,
drop_last=True,
num_workers=args.ndonkeys,
)
rDL = make_dataloader(train_data)
valid_dl = make_dataloader(valid_data)
args.num_classes = len(train_data.classes["idx2name"])
print("making model")
args.load = False
if args.load_model != "":
args.load = True
model = models.SemSegNet(args, classes=train_data.classes)
nll = nn.NLLLoss(reduction="none")
if args.cuda:
model.cuda()
nll.cuda()
optimizer = optim.Adagrad(model.parameters(), lr=args.lr)
print("training")
for m in tqdm(range(args.num_epochs)):
train_losses, train_accs = train_epoch(model, rDL, nll, optimizer, args)
valid_losses, valid_accs = validate(model, valid_dl, nll, args)
print(f"\nEpoch {m}:")
print(f"Train loss: {sum(train_losses) / len(train_losses)}")
print(f"Valid loss: {sum(valid_losses) / len(valid_losses)}")
print(f"Train acc: {sum(train_accs) / len(train_accs)}")
print(f"Valid acc: {sum(valid_accs) / len(valid_accs)}")
if args.save_model != "":
model.save(args.save_model)
| craftassist-master | python/craftassist/voxel_models/semantic_segmentation/train_semantic_segmentation.py |
"""
Copyright (c) Facebook, Inc. and its affiliates.
"""
import struct
class Decoder:
def __init__(self, fp):
self.fp = fp
self.count = 0
def readByte(self):
return self.readStructFmt(">b")
def readUByte(self):
return self.readStructFmt(">B")
def readShort(self):
return self.readStructFmt(">h")
def readUShort(self):
return self.readStructFmt(">H")
def readInt(self):
return self.readStructFmt(">i")
def readUInt(self):
return self.readStructFmt(">I")
def readLong(self):
return self.readStructFmt(">q")
def readULong(self):
return self.readStructFmt(">Q")
def readFloat(self):
return self.readStructFmt(">f")
def readDouble(self):
return self.readStructFmt(">d")
def readRaw(self, n):
buf = self.fp.read(n)
assert n == len(buf)
self.count += n
return buf
def readStructFmt(self, fmt):
size = struct.calcsize(fmt)
buf = self.fp.read(size)
if len(buf) != size:
raise EOFError
self.count += size
return struct.unpack(fmt, buf)[0]
def readString(self):
length = self.readShort()
x = self.readRaw(length).decode("utf-8")
assert self.readByte() == 0, "String not null-terminated: {}".format(x)
return x
def readIntPos(self):
return (self.readLong(), self.readLong(), self.readLong())
def readFloatPos(self):
return (self.readDouble(), self.readDouble(), self.readDouble())
def readLook(self):
return (self.readFloat(), self.readFloat())
def readItem(self):
return (self.readShort(), self.readShort(), self.readShort())
def readBlock(self):
return (self.readByte(), self.readByte())
| craftassist-master | python/logging_plugin/decoder.py |
"""
Copyright (c) Facebook, Inc. and its affiliates.
"""
BLOCK_SPREAD = 1
# BLOCK_TO_PICKUPS = 2
# BREWING_COMPLETED = 3
# BREWING_COMPLETING = 4
CHAT = 5
CHUNK_AVAILABLE = 6
# CHUNK_GENERATED = 7
# CHUNK_GENERATING = 8
# CHUNK_UNLOADED = 9
# CHUNK_UNLOADING = 10
COLLECTING_PICKUP = 11
# CRAFTING_NO_RECIPE = 12
# DISCONNECT = 13
# ENTITY_ADD_EFFECT = 14
# ENTITY_CHANGED_WORLD = 15
# ENTITY_CHANGING_WORLD = 16
# ENTITY_TELEPORT = 17
# EXECUTE_COMMAND = 18
EXPLODED = 19
# EXPLODING = 20
# HANDSHAKE = 21
# HOPPER_PULLING_ITEM = 22
# HOPPER_PUSHING_ITEM = 23
KILLED = 24
# KILLING = 25
# LOGIN = 26
# LOGIN_FORGE = 27
# PLAYER_ANIMATION = 28
# PLAYER_BREAKING_BLOCK = 29
PLAYER_BROKEN_BLOCK = 30
PLAYER_DESTROYED = 31
PLAYER_EATING = 32
# PLAYER_FISHED = 33
# PLAYER_FISHING = 34
PLAYER_FOOD_LEVEL_CHANGE = 35
# PLAYER_JOINED = 36
# PLAYER_LEFT_CLICK = 37
PLAYER_MOVING = 38
# PLAYER_OPENING_WINDOW = 39
PLAYER_PLACED_BLOCK = 40
# PLAYER_PLACING_BLOCK = 41
# PLAYER_RIGHT_CLICK = 42
# PLAYER_RIGHT_CLICKING_ENTITY = 43
PLAYER_SHOOTING = 44
PLAYER_SPAWNED = 45
# PLAYER_TOSSING_ITEM = 46
PLAYER_USED_BLOCK = 47
PLAYER_USED_ITEM = 48
# PLAYER_USING_BLOCK = 49
# PLAYER_USING_ITEM = 50
# PLUGINS_LOADED = 51
# PLUGIN_MESSAGE = 52
POST_CRAFTING = 53
PRE_CRAFTING = 54
PROJECTILE_HIT_BLOCK = 55
PROJECTILE_HIT_ENTITY = 56
# SERVER_PING = 57
SPAWNED_ENTITY = 58
SPAWNED_MONSTER = 59
# SPAWNING_ENTITY = 60
# SPAWNING_MONSTER = 61
TAKE_DAMAGE = 62
# TICK = 63
UPDATED_SIGN = 64
# UPDATING_SIGN = 65
WEATHER_CHANGED = 66
# WEATHER_CHANGING = 67
WORLD_STARTED = 68
WORLD_TICK = 69
MONSTER_MOVED = 70
PLAYER_LOOK = 71
| craftassist-master | python/logging_plugin/hooks.py |
"""
Copyright (c) Facebook, Inc. and its affiliates.
"""
import numpy as np
from scipy.misc import imread
INF_DEPTH = 100
def plot(blockpath, plt, imgpath=None, depthpath=None, vis=None, out_path=None, size=None):
block = np.fromfile(blockpath, np.uint8)
if size is None:
width = height = int((len(block) / 2) ** 0.5)
else:
width, height = size
try:
block = block.reshape((height, width, 2))
except ValueError:
print('\nReshape failed. Try using "--size width height"')
import sys
sys.exit(1)
if depthpath is not None:
depth = np.fromfile(depthpath, np.float32)
depth = depth.reshape((height, width))
depth[depth > INF_DEPTH] = INF_DEPTH
else:
        depth = np.zeros((height, width), "float")
if imgpath is not None:
img = imread(imgpath)
else:
img = np.zeros((height, width), "float")
plt.close()
plt.subplot(2, 2, 1)
plt.imshow(img)
plt.title(imgpath)
plt.subplot(2, 2, 3)
plt.imshow(block[:, :, 0], cmap="prism")
center = block[30:-30, 30:-30, 0]
max_, min_ = center.max(), center.min()
plt.title("block_id range: %d, %d" % (min_, max_))
plt.subplot(2, 2, 4)
plt.imshow(depth, cmap="Blues_r")
center = depth[50:-50, 50:-50]
max_, min_ = center.max(), center.min()
plt.title("depth range: %f, %f" % (min_, max_))
if vis is None:
if out_path:
plt.savefig(out_path)
else:
plt.show()
else:
vis.matplot(plt)
return block, depth
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--blocks", required=True, help="e.g. path/to/block.135.bin")
parser.add_argument("--img", help="e.g. path/to/img.png")
parser.add_argument("--depth", help="e.g. path/to/depth.135.bin")
parser.add_argument(
"--visdom", action="store_true", help="visdom if specified, else matplotlib"
)
parser.add_argument("--out_path", help="Output path for image")
parser.add_argument(
"--size", type=int, nargs="+", help="width and height, e.g. --size 800 600"
)
args = parser.parse_args()
import matplotlib
if args.visdom:
import visdom
matplotlib.use("Agg")
vis = visdom.Visdom(server="http://localhost")
else:
matplotlib.use("TkAgg")
vis = None
import matplotlib.pyplot as plt
block, depth = plot(
args.blocks,
plt,
imgpath=args.img,
depthpath=args.depth,
vis=vis,
out_path=args.out_path,
size=args.size,
)
| craftassist-master | python/logging_plugin/plot_vision.py |
"""
Copyright (c) Facebook, Inc. and its affiliates.
"""
import binascii
from decoder import Decoder
import hooks
import util
# https://api.cuberite.org/Globals.html
dtAttack = 0
etMob = 4
class BaseLogReader:
def __init__(self, logdir):
self.logdir = logdir
fp = open(logdir + "/logging.bin", "rb")
self.decoder = Decoder(fp)
self.last_tick = -1
self.player_eids = set()
def start(self):
version_major = self.decoder.readShort()
version_minor = self.decoder.readShort()
print("Version: {}.{}".format(version_major, version_minor))
while True:
try:
buf_start = self.decoder.count
hook_id = self.decoder.readByte()
world_tick = self.decoder.readLong()
if world_tick < self.last_tick:
raise RuntimeError(
"Error: {} < {}\n".format(world_tick, self.last_tick)
+ "buf_start={} hook_id={}".format(buf_start, hook_id)
)
self.last_tick = world_tick
self.decode_and_handle_hook(hook_id, world_tick, buf_start)
except EOFError:
return
def decode_and_handle_hook(self, hook_id, world_tick, buf_start):
args = [world_tick, buf_start]
if hook_id == hooks.WORLD_STARTED:
# Check settings.ini hash
expected_settings_hash = binascii.hexlify(self.decoder.readRaw(20)).decode("ascii")
settings_hashes = util.get_hashes(self.logdir + "/settings.ini")
assert (
expected_settings_hash in settings_hashes
), "Bad hash for settings.ini: {} not in {}".format(
expected_settings_hash, settings_hashes
)
# Check world.ini hash
expected_world_hash = binascii.hexlify(self.decoder.readRaw(20)).decode("ascii")
world_hashes = util.get_hashes(self.logdir + "/world/world.ini")
assert (
expected_world_hash in world_hashes
), "Bad hash for world/world.ini: {} not in {}".format(
expected_world_hash, world_hashes
)
elif hook_id == hooks.PLAYER_SPAWNED:
eid = self.decoder.readLong()
name = self.decoder.readString()
pos = self.decoder.readFloatPos()
look = self.decoder.readLook()
args += [eid, name, pos, look]
# FIXME: remove when v0.2 patch no longer needed
self.player_eids.add(eid)
elif hook_id == hooks.PLAYER_DESTROYED:
eid = self.decoder.readLong()
args += [eid]
elif hook_id == hooks.PLAYER_MOVING:
eid = self.decoder.readLong()
oldpos = self.decoder.readFloatPos()
newpos = self.decoder.readFloatPos()
args += [eid, oldpos, newpos]
elif hook_id == hooks.CHUNK_AVAILABLE:
cx, cz = self.decoder.readLong(), self.decoder.readLong()
args += [cx, cz]
elif hook_id == hooks.BLOCK_SPREAD:
pos = self.decoder.readIntPos()
source = self.decoder.readByte()
args += [pos, source]
elif hook_id == hooks.CHAT:
eid = self.decoder.readLong()
chat = self.decoder.readString()
args += [eid, chat]
elif hook_id == hooks.COLLECTING_PICKUP:
eid = self.decoder.readLong()
item = self.decoder.readItem()
args += [eid, item]
elif hook_id == hooks.KILLED:
eid = self.decoder.readLong()
args += [eid]
elif hook_id == hooks.PLAYER_BROKEN_BLOCK:
eid = self.decoder.readLong()
pos = self.decoder.readIntPos()
face = self.decoder.readByte()
block = self.decoder.readBlock()
args += [eid, pos, face, block]
elif hook_id == hooks.PLAYER_PLACED_BLOCK:
eid = self.decoder.readLong()
pos = self.decoder.readIntPos()
block = self.decoder.readBlock()
args += [eid, pos, block]
elif hook_id == hooks.PLAYER_USED_BLOCK:
eid = self.decoder.readLong()
pos = self.decoder.readIntPos()
face = self.decoder.readByte()
cursor = [self.decoder.readFloat() for _ in range(3)]
block = self.decoder.readBlock()
args += [eid, pos, face, cursor, block]
elif hook_id == hooks.PLAYER_USED_ITEM:
eid = self.decoder.readLong()
pos = self.decoder.readIntPos()
face = self.decoder.readByte()
cursor = [self.decoder.readFloat() for _ in range(3)]
item = self.decoder.readShort()
args += [eid, pos, face, cursor, item]
elif hook_id == hooks.POST_CRAFTING:
eid = self.decoder.readLong()
grid_h, grid_w = self.decoder.readByte(), self.decoder.readByte()
grid = [self.decoder.readItem() for _ in range(grid_h * grid_w)]
recipe_h, recipe_w = self.decoder.readByte(), self.decoder.readByte()
recipe = [self.decoder.readItem() for _ in range(recipe_h * recipe_w)]
result = self.decoder.readItem()
args += [eid, (grid_h, grid_w, grid), (recipe_h, recipe_w, recipe), result]
elif hook_id == hooks.SPAWNED_ENTITY:
eid = self.decoder.readLong()
etype = self.decoder.readByte()
pos = self.decoder.readFloatPos()
look = self.decoder.readLook()
args += [eid, etype, pos, look]
if etype == etMob:
mtype = self.decoder.readByte()
args += [mtype]
elif hook_id == hooks.SPAWNED_MONSTER:
eid = self.decoder.readLong()
etype = self.decoder.readByte()
mobtype = self.decoder.readByte()
pos = self.decoder.readFloatPos()
look = self.decoder.readLook()
args += [eid, etype, mobtype, pos, look]
elif hook_id == hooks.TAKE_DAMAGE:
eid = self.decoder.readLong()
dmgType = self.decoder.readByte()
finalDmg = self.decoder.readDouble()
rawDmg = self.decoder.readDouble()
knockback = self.decoder.readFloatPos()
args += [eid, dmgType, finalDmg, rawDmg, knockback]
if dmgType == dtAttack:
attackerId = self.decoder.readLong()
args += [attackerId]
elif hook_id == hooks.WEATHER_CHANGED:
weather = self.decoder.readByte()
args += [weather]
elif hook_id == hooks.MONSTER_MOVED:
eid = self.decoder.readLong()
# patch for broken v0.2, where MONSTER_MOVED and PLAYER_LOOK have
# the same hook_id
if eid in self.player_eids:
hook_id = hooks.PLAYER_LOOK
look = self.decoder.readLook()
args += [eid, look]
else:
pos = self.decoder.readFloatPos()
look = self.decoder.readLook()
args += [eid, pos, look]
elif hook_id == hooks.PLAYER_LOOK:
eid = self.decoder.readLong()
look = self.decoder.readLook()
args += [eid, look]
else:
print("Debug:", args)
raise NotImplementedError("Not implemented: hook id {}".format(hook_id))
# Call subclass handler method
# e.g. for PLAYER_SPAWNED, call self.on_player_spawned
func_name = "on_" + util.get_hook_name(hook_id).lower()
func = getattr(self, func_name, lambda *args: None)
func(*args)
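# Minimal subclass sketch (illustrative only; print_log_reader.py and
# discrete_move_log_reader.py are fuller examples). Handlers always receive the
# world tick and buffer offset first, followed by the hook-specific fields.
class _ExampleChatReader(BaseLogReader):
    def on_chat(self, tick, buf_start, eid, chat):
        print("[{}] entity {} said: {}".format(tick, eid, chat))
# Usage (hypothetical logdir): _ExampleChatReader("path/to/logdir").start()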
| craftassist-master | python/logging_plugin/base_log_reader.py |
"""
Copyright (c) Facebook, Inc. and its affiliates.
"""
import hashlib
import hooks
HOOK_MAP = {getattr(hooks, h): h for h in dir(hooks) if not h.startswith("__")}
def get_hook_name(hook_id):
return HOOK_MAP[hook_id]
def get_hashes(path):
with open(path, "rb") as f:
contents = f.read()
raw_hash = hashlib.sha1(contents).hexdigest()
# Cuberite sometimes (?) rewrites .ini files with CRLF
nocr_hash = hashlib.sha1(contents.replace(b"\x0d\x0a", b"\x0a")).hexdigest()
return (raw_hash, nocr_hash)
| craftassist-master | python/logging_plugin/util.py |
import os.path
import sys
sys.path.insert(0, os.path.dirname(__file__))
| craftassist-master | python/logging_plugin/__init__.py |
"""
Copyright (c) Facebook, Inc. and its affiliates.
"""
from base_log_reader import BaseLogReader
import hooks
import util
class PrintLogReader(BaseLogReader):
def __init__(self, *args, ignore_hooks=[], only_hooks=[], **kwargs):
super().__init__(*args, **kwargs)
assert (
len(only_hooks) == 0 or len(ignore_hooks) == 0
), "Can't specify both only_hooks and ignore_hooks"
for hid, hook_name in util.HOOK_MAP.items():
if (len(ignore_hooks) > 0 and hid in ignore_hooks) or (
len(only_hooks) > 0 and hid not in only_hooks
):
continue
func_name = "on_" + hook_name.lower()
func = lambda *x, name=hook_name: print(*x[:2], name, *x[2:])
setattr(self, func_name, func)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("logdir", help="Cuberite workdir; should contain settings.ini")
parser.add_argument("--only", nargs="+", default=[])
parser.add_argument("--ignore", nargs="+", default=[])
args = parser.parse_args()
only_hooks = [getattr(hooks, h.upper()) for h in args.only]
ignore_hooks = [getattr(hooks, h.upper()) for h in args.ignore]
PrintLogReader(args.logdir, only_hooks=only_hooks, ignore_hooks=ignore_hooks).start()
| craftassist-master | python/logging_plugin/print_log_reader.py |
"""
Copyright (c) Facebook, Inc. and its affiliates.
"""
from matplotlib import pyplot as plt
import glob
import matplotlib.animation as animation
import numpy as np
import os.path
import time
FFMpegWriter = animation.writers["ffmpeg"]
def render_video(ob_dir, outfile, dpi=100, max_depth=48):
writer = FFMpegWriter(fps=20)
fig = plt.figure()
with writer.saving(fig, outfile, dpi):
t_start = time.time()
i = 0
while True:
fig.clear()
blockfile = os.path.join(ob_dir, "block.{:08}.bin".format(i))
if not os.path.isfile(blockfile):
return
block = np.fromfile(blockfile, np.uint8).reshape(128, 128)
plt.subplot(1, 2, 1)
plt.imshow(block, cmap="prism", animated=True)
depthfile = os.path.join(ob_dir, "depth.{:08}.bin".format(i))
depth = np.fromfile(depthfile, np.float32).reshape(128, 128)
depth[depth > max_depth] = max_depth
plt.subplot(1, 2, 2)
plt.imshow(depth, cmap="Blues_r", animated=True)
plt.title("tick={}".format(i))
writer.grab_frame()
i += 1
avg_fps = i / (time.time() - t_start)
print("Wrote tick={}, avg_fps={}".format(i, avg_fps))
if __name__ == "__main__":
import argparse
import tempfile
import subprocess
from recover_initial_blockmap import recover_initial_blockmap
from repo import repo_home
parser = argparse.ArgumentParser()
parser.add_argument("--logdir", required=True, help="Directory containing logging.bin")
parser.add_argument("--outfile", required=True, help="Path to video file to create")
parser.add_argument(
"--player-name", required=True, help='Name of player whose eyes to "see" through'
)
args = parser.parse_args()
# Using the seed/config, recover the block map as it was at the start of
# the world, including all chunks ever loaded by any player
initial_blockmap_dir = recover_initial_blockmap(args.logdir)
# Step through each tick, rendering the player's observations to a tempdir
ob_dir = tempfile.mkdtemp()
print("Writing observations to:", ob_dir)
subprocess.check_call(
[
os.path.join(repo_home, "bin/log_render"),
"--out-dir",
ob_dir,
"--log-file",
os.path.join(args.logdir, "logging.bin"),
"--name",
args.player_name,
"--mca-files",
*glob.glob(os.path.join(initial_blockmap_dir, "*.mca")),
]
)
print("Wrote observations to:", ob_dir)
# Render the video from the raw observations
render_video(ob_dir, args.outfile)
print("Wrote video to:", args.outfile)
| craftassist-master | python/logging_plugin/video.py |
"""
Copyright (c) Facebook, Inc. and its affiliates.
"""
import math
from base_log_reader import BaseLogReader
def to_discrete(pos):
return tuple(math.floor(p) for p in pos)
class PrintLogReader(BaseLogReader):
def on_player_spawned(self, tick, buf_start, eid, name, pos, look):
print("[{}] Player {} spawned at {}".format(tick, (eid, name), to_discrete(pos)))
def on_player_destroyed(self, tick, buf_start, eid):
print("[{}] Player {} destroyed".format(tick, eid))
def on_player_moving(self, tick, buf_start, eid, oldpos, newpos):
oldpos = to_discrete(oldpos)
newpos = to_discrete(newpos)
if oldpos == newpos:
return
dx, dy, dz = tuple(n - o for n, o in zip(newpos, oldpos))
assert all(p in (-1, 0, 1) for p in (dx, dy, dz)), "Bad (dx, dy, dz) == {}".format(
(dx, dy, dz)
)
if dx == -1:
print("[{}] Player {} STEP_NEG_X".format(tick, eid))
elif dx == 1:
print("[{}] Player {} STEP_POS_X".format(tick, eid))
if dy == -1:
print("[{}] Player {} STEP_NEG_Y".format(tick, eid))
elif dy == 1:
print("[{}] Player {} STEP_POS_Y".format(tick, eid))
if dz == -1:
print("[{}] Player {} STEP_NEG_Z".format(tick, eid))
elif dz == 1:
print("[{}] Player {} STEP_POS_Z".format(tick, eid))
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("logdir", help="Cuberite workdir; should contain settings.ini")
args = parser.parse_args()
PrintLogReader(args.logdir).start()
| craftassist-master | python/logging_plugin/discrete_move_log_reader.py |
"""
Copyright (c) Facebook, Inc. and its affiliates.
"""
import os
import shutil
import subprocess
import tempfile
import sys
python_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.insert(0, python_dir)
import edit_cuberite_config
from repo import repo_home
from base_log_reader import BaseLogReader
PLUGIN_NAME = "recover_initial"
def recover_initial_blockmap(old_workdir):
"""Given a logdir containing a logging.bin, regenerate the initial blockmap
and return the directory with the region (.mca) files.
"""
workdir = tempfile.mkdtemp()
print("Workdir:", workdir, flush=True)
# Copy files from old workdir
paths = ["Plugins", "settings.ini", "blocks.json", "world/world.ini"]
for p in paths:
src = os.path.join(old_workdir, p)
dst = os.path.join(workdir, p)
if os.path.isfile(src):
os.makedirs(os.path.dirname(dst), exist_ok=True)
shutil.copy(src, dst)
elif os.path.isdir(src):
shutil.copytree(src, dst)
# Remove logging plugin, add recovery plugin
settings_ini = os.path.join(workdir, "settings.ini")
plugins_dir = os.path.join(workdir, "Plugins")
recovery_plugin_dir = os.path.join(plugins_dir, PLUGIN_NAME)
edit_cuberite_config.remove_plugin(settings_ini, "logging")
edit_cuberite_config.add_plugin(settings_ini, PLUGIN_NAME)
if not os.path.isdir(recovery_plugin_dir):
shutil.copytree(
os.path.join(repo_home, "cuberite_plugins", PLUGIN_NAME), recovery_plugin_dir
)
# Read logging.bin to get chunks available, and rewrite recovery plugin
chunks = get_chunks_avail(old_workdir)
chunks_lua = tuple_list_to_lua(chunks)
with open(os.path.join(recovery_plugin_dir, "recover_initial.lua"), "r") as f:
recovery_lua = f.read()
recovery_lua = recovery_lua.replace("__CHUNKS_TO_LOAD__", chunks_lua)
with open(os.path.join(recovery_plugin_dir, "recover_initial.lua"), "w") as f:
f.write(recovery_lua)
# Start cuberite and wait until the plugin kills it
p = subprocess.Popen([repo_home + "/cuberite/Server/Cuberite"], cwd=workdir)
p.wait()
# Return folder containing region files
return os.path.join(workdir, "world/region")
def get_chunks_avail(logdir):
chunks = []
class ChunkAvailLogReader(BaseLogReader):
        def on_chunk_available(self, tick, buf_start, cx, cz):
chunks.append((cx, cz))
ChunkAvailLogReader(logdir).start()
return chunks
def tuple_list_to_lua(tuple_list):
"""Given a list of tuples, return a lua table of tables"""
def table(it):
return "{" + ",".join(map(str, it)) + "}"
return table(table(t) for t in tuple_list)
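# For example (a sketch of the output format):
#   tuple_list_to_lua([(1, 2), (3, 4)]) == "{{1,2},{3,4}}"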
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("workdir")
args = parser.parse_args()
recover_initial_blockmap(args.workdir)
| craftassist-master | python/logging_plugin/recover_initial_blockmap.py |
"""
Copyright (c) Facebook, Inc. and its affiliates.
"""
import argparse
import logging
import os
import subprocess
import numpy as np
import sys
python_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.insert(0, python_dir)
from cuberite_process import CuberiteProcess
from repo import repo_home
logging.basicConfig(format="%(asctime)s [%(levelname)s]: %(message)s")
logging.getLogger().setLevel(logging.DEBUG)
def to_unit_vec(yaw, pitch):
pitch *= 3.14159 / 180
yaw *= 3.14159 / 180
return np.array(
[-1 * np.cos(pitch) * np.sin(yaw), -1 * np.sin(pitch), np.cos(pitch) * np.cos(yaw)]
)
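# A quick sketch of the convention this helper assumes (yaw and pitch in degrees):
# to_unit_vec(0, 0) is approximately [0, 0, 1] (looking toward +z), and
# to_unit_vec(90, 0) is approximately [-1, 0, 0] (looking toward -x).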
def ground_height(blocks):
dirt_pct = np.mean(np.mean(blocks[:, :, :, 0] == 2, axis=1), axis=1)
if (dirt_pct > 0.25).any():
return np.argmax(dirt_pct)
return None
def render(npy_p2b, out_dir, port, spp, img_size):
npy_file = (
os.path.expanduser("~")
+ "/minecraft_houses/"
+ ".".join(npy_p2b.split(".")[1:-2])
+ "/schematic.npy"
)
schematic = np.load(npy_file)
house_name = os.path.basename(os.path.dirname(npy_file))
# remove blocks below ground-level
g = ground_height(schematic)
schematic = schematic[(g or 0) :, :, :, :]
ys, zs, xs = np.nonzero(schematic[:, :, :, 0] > 0)
xmid, ymid, zmid = np.mean(xs), np.mean(ys), np.mean(zs)
    focus = np.array([xmid, ymid + 63, zmid])  # TODO: +63 only works for flat_world seed=0
yaw, distance = list(map(int, npy_p2b.split(".")[-2].split("_")))
look = [yaw, 0]
look_xyz = to_unit_vec(*look)
camera = focus - (look_xyz * distance)
logging.info("Launching cuberite at port {}".format(port))
p = CuberiteProcess(
"flat_world", seed=0, game_mode="creative", place_blocks_yzx=schematic, port=port
)
logging.info("Destroying cuberite at port {}".format(port))
p.destroy()
world_dir = os.path.join(p.workdir, "world")
render_view_bin = os.path.join(repo_home, "bin/render_view")
assert os.path.isfile(
render_view_bin
), "{} not found.\n\nTry running: make render_view".format(render_view_bin)
procs = []
chunky_id = "{}_{}".format(yaw, distance)
out_file = "{}/chunky.{}.{}.png".format(out_dir, house_name, chunky_id)
call = [
str(a)
for a in [
"python3",
"{}/python/minecraft_render/render.py".format(repo_home),
"--world",
world_dir,
"--out",
out_file,
"--camera",
*camera,
"--look",
yaw,
0,
"--size",
*img_size,
"--spp",
spp,
]
]
logging.info("CALL: " + " ".join(call))
procs.append(subprocess.Popen(call))
for p in procs:
p.wait()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("npy_p2b")
parser.add_argument(
"--out-dir", "-o", required=True, help="Directory in which to write vision files"
)
parser.add_argument("--spp", type=int, default=25, help="samples per pixel")
parser.add_argument("--port", type=int, default=25565)
parser.add_argument("--size", type=int, nargs=2, default=[900, 675])
args = parser.parse_args()
render(args.npy_p2b, args.out_dir, args.port, args.spp, args.size)
| craftassist-master | python/render_vision_dataset/render_3x.py |
"""
Copyright (c) Facebook, Inc. and its affiliates.
"""
import argparse
import glob
import logging
import os
import subprocess
import random
import numpy as np
import sys
python_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.insert(0, python_dir)
from cuberite_process import CuberiteProcess
from repo import repo_home
logging.basicConfig(format="%(asctime)s [%(levelname)s]: %(message)s")
logging.getLogger().setLevel(logging.DEBUG)
def to_unit_vec(yaw, pitch):
pitch *= 3.14159 / 180
yaw *= 3.14159 / 180
return np.array(
[-1 * np.cos(pitch) * np.sin(yaw), -1 * np.sin(pitch), np.cos(pitch) * np.cos(yaw)]
)
def ground_height(blocks):
dirt_pct = np.mean(np.mean(blocks[:, :, :, 0] == 2, axis=1), axis=1)
if (dirt_pct > 0.25).any():
return np.argmax(dirt_pct)
return None
def ray_intersect_triangle(p0, p1, triangle):
## Code taken from:
# https://www.erikrotteveel.com/python/three-dimensional-ray-tracing-in-python/
#
# Tests if a ray starting at point p0, in the direction
# p1 - p0, will intersect with the triangle.
#
# arguments:
# p0, p1: numpy.ndarray, both with shape (3,) for x, y, z.
# triangle: numpy.ndarray, shaped (3,3), with each row
# representing a vertex and three columns for x, y, z.
#
# returns:
# 0 if ray does not intersect triangle,
# 1 if it will intersect the triangle,
# 2 if starting point lies in the triangle.
v0, v1, v2 = triangle
u = v1 - v0
v = v2 - v0
normal = np.cross(u, v)
b = np.inner(normal, p1 - p0)
a = np.inner(normal, v0 - p0)
# Here is the main difference with the code in the link.
# Instead of returning if the ray is in the plane of the
# triangle, we set rI, the parameter at which the ray
# intersects the plane of the triangle, to zero so that
# we can later check if the starting point of the ray
# lies on the triangle. This is important for checking
# if a point is inside a polygon or not.
if b == 0.0:
# ray is parallel to the plane
if a != 0.0:
# ray is outside but parallel to the plane
return 0
else:
# ray is parallel and lies in the plane
rI = 0.0
else:
rI = a / b
if rI < 0.0:
return 0
w = p0 + rI * (p1 - p0) - v0
denom = np.inner(u, v) * np.inner(u, v) - np.inner(u, u) * np.inner(v, v)
si = (np.inner(u, v) * np.inner(w, v) - np.inner(v, v) * np.inner(w, u)) / denom
if (si < 0.0) | (si > 1.0):
return 0
ti = (np.inner(u, v) * np.inner(w, u) - np.inner(u, u) * np.inner(w, v)) / denom
if (ti < 0.0) | (si + ti > 1.0):
return 0
if rI == 0.0:
# point 0 lies ON the triangle. If checking for
# point inside polygon, return 2 so that the loop
# over triangles can stop, because it is on the
# polygon, thus inside.
return 2
return 1
def intersect_cube(xyz, camera, focus):
"""
Test if ray 'focus - camera' intersects with the cube
'(x, y, z) - (x + 1, y + 1, z + 1)'
To do this, we check if at least one triangle intersects with
the ray
"""
x, y, z = xyz
triangles = [
[[x, y, z], [x + 1, y, z], [x + 1, y + 1, z]],
[[x, y, z], [x, y + 1, z], [x + 1, y + 1, z]],
[[x, y, z + 1], [x + 1, y, z + 1], [x + 1, y + 1, z + 1]],
[[x, y, z + 1], [x, y + 1, z + 1], [x + 1, y + 1, z + 1]],
[[x, y, z], [x + 1, y, z], [x + 1, y, z + 1]],
[[x, y, z], [x, y, z + 1], [x + 1, y, z + 1]],
[[x, y + 1, z], [x + 1, y + 1, z], [x + 1, y + 1, z + 1]],
[[x, y + 1, z], [x, y + 1, z + 1], [x + 1, y + 1, z + 1]],
[[x, y, z], [x, y + 1, z], [x, y + 1, z + 1]],
[[x, y, z], [x, y, z + 1], [x, y + 1, z + 1]],
[[x + 1, y, z], [x + 1, y + 1, z], [x + 1, y + 1, z + 1]],
[[x + 1, y, z], [x + 1, y, z + 1], [x + 1, y + 1, z + 1]],
]
for t in triangles:
if (
ray_intersect_triangle(
np.array(camera).astype("float32"),
np.array(focus).astype("float32"),
np.array(t).astype("float32"),
)
== 1
):
return True
return False
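# Illustrative sanity check (not used by the rendering pipeline): a ray from the
# camera through the center of the unit cube at the origin intersects it, while a
# ray that passes well off to the side does not.
def _example_intersect_cube():
    assert intersect_cube((0, 0, 0), np.array([0.5, 0.5, -5.0]), np.array([0.5, 0.5, 0.5]))
    assert not intersect_cube((0, 0, 0), np.array([5.0, 5.0, -5.0]), np.array([5.0, 5.0, 5.0]))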
def change_one_block(schematic, yaw):
## remove 'air' blocks whose ids are 0s
ymax, zmax, xmax, _ = schematic.shape
ys, zs, xs, _ = np.nonzero(schematic[:, :, :, :1] > 0)
xyzs = list(zip(*[xs, ys, zs]))
print("xmax={} ymax={} zmax={}".format(xmax, ymax, zmax))
max_dist = int((xmax ** 2 + zmax ** 2) ** 0.5 / 2)
distance_range = (5, max(5, max_dist) + 1)
min_camera_height = 1 ## the camera shouldn't be underground
pitch_range = (-60, 10)
if not xyzs:
print("all blocks are air!")
return None, None, None
while True:
focus = random.choice(xyzs) ## randomly select a block as the focus
pitch = random.randint(*pitch_range)
distance = random.randint(*distance_range)
look_xyz = to_unit_vec(*[yaw, pitch])
camera = focus - (look_xyz * distance)
if camera[1] <= min_camera_height:
continue
intersected = [
(np.linalg.norm(np.array(xyz) - camera), xyz)
for xyz in xyzs
if intersect_cube(xyz, camera, focus)
]
## sort the blocks according to their distances to the camera
## pick the nearest block that intersects with the ray starting at 'camera'
## and looking at 'focus'
intersected = sorted(intersected, key=lambda p: p[0])
if len(intersected) > 0 and intersected[0][0] >= distance_range[0]:
## the nearest block should have a distance > 10
break
x, y, z = intersected[0][1]
## change a non-zero block to red wool (id: 35, meta: 14)
schematic[y][z][x] = [35, 14]
return pitch, camera, [x, y, z]
def render(
npy_file,
out_dir,
no_chunky,
no_vision,
port,
yaw,
pitch,
camera,
pos,
spp,
img_size,
block_change,
):
schematic = np.load(npy_file)
house_name = os.path.basename(os.path.dirname(npy_file))
# remove blocks below ground-level
g = ground_height(schematic)
schematic = schematic[(g or 0) :, :, :, :]
if yaw is None:
yaw = random.randint(0, 360 - 1)
if block_change:
pitch, camera, pos = change_one_block(schematic, yaw)
# TODO: +63 only works for flat_world seed=0
camera[1] += 63 ## why??
logging.info("Launching cuberite at port {}".format(port))
p = CuberiteProcess(
"flat_world", seed=0, game_mode="creative", place_blocks_yzx=schematic, port=port
)
logging.info("Destroying cuberite at port {}".format(port))
p.destroy()
world_dir = os.path.join(p.workdir, "world")
region_dir = os.path.join(world_dir, "region")
mca_files = glob.glob(os.path.join(region_dir, "*.mca"))
assert len(mca_files) > 0, "No region files at {}".format(region_dir)
render_view_bin = os.path.join(repo_home, "bin/render_view")
assert os.path.isfile(
render_view_bin
), "{} not found.\n\nTry running: make render_view".format(render_view_bin)
procs = []
if not no_vision:
call = [
str(a)
for a in [
render_view_bin,
"--out-dir",
out_dir,
"--mca-files",
*mca_files,
"--camera",
*camera,
"--look",
yaw,
pitch,
]
]
logging.info("CALL: " + " ".join(call))
procs.append(subprocess.Popen(call))
if not no_chunky:
chunky_id = "_".join(map(str, list(map(int, camera)) + [pitch, yaw] + pos))
call = [
str(a)
for a in [
"python3",
"{}/python/minecraft_render/render.py".format(repo_home),
"--world",
world_dir,
"--out",
"{}/chunky.{}.{}.{}.png".format(out_dir, house_name, chunky_id, int(block_change)),
"--camera",
*camera,
"--look",
yaw,
pitch,
"--size",
*img_size,
"--spp",
spp,
]
]
logging.info("CALL: " + " ".join(call))
procs.append(subprocess.Popen(call))
for p in procs:
p.wait()
return yaw, pitch, camera, pos
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("npy_schematic")
parser.add_argument(
"--out-dir", "-o", required=True, help="Directory in which to write vision files"
)
parser.add_argument(
"--no-chunky", action="store_true", help="Skip generation of chunky (human-view) images"
)
parser.add_argument("--no-vision", action="store_true", help="Skip generation of agent vision")
parser.add_argument("--spp", type=int, default=25, help="samples per pixel")
parser.add_argument("--port", type=int, default=25565)
parser.add_argument("--size", type=int, nargs=2, default=[300, 225])
args = parser.parse_args()
yaw, pitch, camera, pos = render(
args.npy_schematic,
args.out_dir,
args.no_chunky,
args.no_vision,
args.port,
None,
None,
None,
None,
args.spp,
args.size,
True,
)
# yaw, pitch, camera, pos = None, None, None, None
render(
args.npy_schematic,
args.out_dir,
args.no_chunky,
args.no_vision,
args.port,
yaw,
pitch,
camera,
pos,
args.spp,
args.size,
False,
)
| craftassist-master | python/render_vision_dataset/render_one_block_change.py |
"""
Copyright (c) Facebook, Inc. and its affiliates.
"""
import argparse
import logging
import os
import subprocess
import random
import cv2
import numpy as np
import sys
python_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.insert(0, python_dir)
from cuberite_process import CuberiteProcess
from repo import repo_home
logging.basicConfig(format="%(asctime)s [%(levelname)s]: %(message)s")
logging.getLogger().setLevel(logging.DEBUG)
def to_unit_vec(yaw, pitch):
pitch *= 3.14159 / 180
yaw *= 3.14159 / 180
return np.array(
[-1 * np.cos(pitch) * np.sin(yaw), -1 * np.sin(pitch), np.cos(pitch) * np.cos(yaw)]
)
def ground_height(blocks):
dirt_pct = np.mean(np.mean(blocks[:, :, :, 0] == 2, axis=1), axis=1)
if (dirt_pct > 0.25).any():
return np.argmax(dirt_pct)
return None
def change_block(schematic, b):
x, y, z = b
## change to red wool
schematic[y][z][x][0] = 35
schematic[y][z][x][1] = 14
def render(npy_p2b, out_dir, port, spp, img_size, mn=None):
npy_file = (
os.path.expanduser("~")
+ "/minecraft_houses/"
+ ".".join(npy_p2b.split(".")[1:-2])
+ "/schematic.npy"
)
schematic = np.load(npy_file)
print(schematic.shape)
house_name = os.path.basename(os.path.dirname(npy_file))
p2b = np.load(npy_p2b)
# remove blocks below ground-level
g = ground_height(schematic)
schematic = schematic[(g or 0) :, :, :, :]
ys, zs, xs = np.nonzero(schematic[:, :, :, 0] > 0)
xmid, ymid, zmid = np.mean(xs), np.mean(ys), np.mean(zs)
    focus = np.array([xmid, ymid + 63, zmid])  # TODO: +63 only works for flat_world seed=0
yaw, distance = list(map(int, npy_p2b.split(".")[-2].split("_")))
look = [yaw, 0]
look_xyz = to_unit_vec(*look)
camera = focus - (look_xyz * distance)
if mn == [0, 0]:
M, N = p2b.shape[:2]
while True:
m = random.randint(0, M - 1)
n = random.randint(0, N - 1)
if p2b[m][n][0] != -1:
break
else:
m, n = mn
print("Select pixel at {}".format((m, n)))
print("Mapped block {}".format(p2b[m][n]))
change_block(schematic, p2b[m][n])
logging.info("Launching cuberite at port {}".format(port))
p = CuberiteProcess(
"flat_world", seed=0, game_mode="creative", place_blocks_yzx=schematic, port=port
)
logging.info("Destroying cuberite at port {}".format(port))
p.destroy()
world_dir = os.path.join(p.workdir, "world")
render_view_bin = os.path.join(repo_home, "bin/render_view")
assert os.path.isfile(
render_view_bin
), "{} not found.\n\nTry running: make render_view".format(render_view_bin)
procs = []
chunky_id = "{}_{}".format(yaw, distance)
out_file = "{}/chunky_verify.{}.{}.png".format(out_dir, house_name, chunky_id)
call = [
str(a)
for a in [
"python3",
"{}/python/minecraft_render/render.py".format(repo_home),
"--world",
world_dir,
"--out",
out_file,
"--camera",
*camera,
"--look",
yaw,
0,
"--size",
*img_size,
"--spp",
spp,
]
]
logging.info("CALL: " + " ".join(call))
procs.append(subprocess.Popen(call))
for p in procs:
p.wait()
## draw the sampled pixel for a better view
img = cv2.imread(out_file)
cv2.circle(img, (n, m), 2, (255, 0, 0))
cv2.imwrite(out_file, img)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("npy_p2b")
parser.add_argument(
"--out-dir", "-o", required=True, help="Directory in which to write vision files"
)
parser.add_argument("--spp", type=int, default=25, help="samples per pixel")
parser.add_argument("--port", type=int, default=25565)
parser.add_argument("--size", type=int, nargs=2, default=[300, 225])
parser.add_argument("--mn", type=int, nargs=2, default=[0, 0])
args = parser.parse_args()
render(args.npy_p2b, args.out_dir, args.port, args.spp, args.size, args.mn)
| craftassist-master | python/render_vision_dataset/render_verify_pixel2block.py |
"""
Copyright (c) Facebook, Inc. and its affiliates.
"""
import argparse
import logging
import os
import subprocess
import random
from sklearn.neighbors import KDTree
import cv2
import pickle
import numpy as np
import sys
python_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.insert(0, python_dir)
from cuberite_process import CuberiteProcess
from repo import repo_home
logging.basicConfig(format="%(asctime)s [%(levelname)s]: %(message)s")
logging.getLogger().setLevel(logging.DEBUG)
def rgb2hsv(r, g, b):
r, g, b = r / 255.0, g / 255.0, b / 255.0
mx = max(r, g, b)
mn = min(r, g, b)
df = mx - mn
if mx == mn:
h = 0
elif mx == r:
h = (60 * ((g - b) / df) + 360) % 360
elif mx == g:
h = (60 * ((b - r) / df) + 120) % 360
elif mx == b:
h = (60 * ((r - g) / df) + 240) % 360
if mx == 0:
s = 0
else:
s = df / mx
v = mx
return h, s, v
with open(os.path.expanduser("~") + "/minecraft/minecraft_specs/block_images/rgbs.pkl", "rb") as f:
block_colors = pickle.load(f)
wool_blocks = [(35, 1), (35, 2), (35, 4), (35, 5), (35, 11)]
metaid_to_hue = {b[1]: rgb2hsv(*(block_colors[b]))[0] for b in wool_blocks}
def to_unit_vec(yaw, pitch):
pitch *= 3.14159 / 180
yaw *= 3.14159 / 180
return np.array(
[-1 * np.cos(pitch) * np.sin(yaw), -1 * np.sin(pitch), np.cos(pitch) * np.cos(yaw)]
)
def ground_height(blocks):
dirt_pct = np.mean(np.mean(blocks[:, :, :, 0] == 2, axis=1), axis=1)
if (dirt_pct > 0.25).any():
return np.argmax(dirt_pct)
return None
def randomly_change_blocks(schematic):
new_schematic = np.copy(schematic)
ymax, zmax, xmax, _ = new_schematic.shape
for y in range(ymax):
for z in range(zmax):
for x in range(xmax):
if new_schematic[y][z][x][0] > 0:
## change all non-air blocks to a random wool block
new_schematic[y][z][x][0] = 35
new_schematic[y][z][x][1] = random.choice(list(metaid_to_hue.keys()))
return new_schematic
def add_new_schematic_hue(schematic_hue, new_schematic, i):
ymax, zmax, xmax = new_schematic.shape
new_schematic_hue = np.zeros((ymax, zmax, xmax), dtype=np.int32)
for y in range(ymax):
for z in range(zmax):
for x in range(xmax):
if new_schematic[y][z][x] in metaid_to_hue:
new_schematic_hue[y][z][x] = metaid_to_hue[new_schematic[y][z][x]]
else:
new_schematic_hue[y][z][x] = random.randint(-20000, -10000)
schematic_hue[:, :, :, i] = new_schematic_hue
def render(npy_file, out_dir, port, spp, img_size):
if "p2b" in npy_file: ## we're going to re-compute the correspondence
npy_file = os.path.basename(npy_file)
tokens = npy_file.split(".")
yaw, distance = list(map(int, tokens[-2].split("_")))
npy_file = (
os.path.expanduser("~")
+ "/minecraft_houses/"
+ ".".join(tokens[1:-2])
+ "/schematic.npy"
)
else:
yaw, distance = None, None
schematic = np.load(npy_file)
house_name = os.path.basename(os.path.dirname(npy_file))
# remove blocks below ground-level
g = ground_height(schematic)
schematic = schematic[(g or 0) :, :, :, :]
ys, zs, xs = np.nonzero(schematic[:, :, :, 0] > 0)
if len(ys) < 5:
print("too few non-air blocks; will not render")
return
xmid, ymid, zmid = np.mean(xs), np.mean(ys), np.mean(zs)
    focus = np.array([xmid, ymid + 63, zmid])  # TODO: +63 only works for flat_world seed=0
if yaw is None:
yaw = random.randint(0, 360 - 1)
sorted_xs = sorted(xs)
sorted_zs = sorted(zs)
N = len(xs)
## remove head and tail 2%
X = sorted_xs[-N // 100] - sorted_xs[N // 100]
Z = sorted_zs[-N // 100] - sorted_zs[N // 100]
distance = max(X, Z)
look = [yaw, 0]
look_xyz = to_unit_vec(*look)
camera = focus - (look_xyz * distance)
repeat = 10
schematic_hue = np.zeros(schematic.shape[:3] + (repeat - 1,), dtype=np.int32)
tmp_images = []
for i in range(repeat):
if i < repeat - 1:
new_schematic = randomly_change_blocks(schematic)
add_new_schematic_hue(schematic_hue, new_schematic[:, :, :, 1], i)
else:
break # do not render the full image again
new_schematic = schematic
img_size = [s * 3 for s in img_size]
logging.info("Launching cuberite at port {}".format(port))
p = CuberiteProcess(
"flat_world", seed=0, game_mode="creative", place_blocks_yzx=new_schematic, port=port
)
logging.info("Destroying cuberite at port {}".format(port))
p.destroy()
world_dir = os.path.join(p.workdir, "world")
render_view_bin = os.path.join(repo_home, "bin/render_view")
assert os.path.isfile(
render_view_bin
), "{} not found.\n\nTry running: make render_view".format(render_view_bin)
procs = []
chunky_id = "{}_{}".format(yaw, distance)
out_file = "{}/chunky.{}.{}.png".format(out_dir, house_name, chunky_id)
if i < repeat - 1: # tmp out file
out_file += ".tmp.png"
call = [
str(a)
for a in [
"python3",
"{}/python/minecraft_render/render.py".format(repo_home),
"--world",
world_dir,
"--out",
out_file,
"--camera",
*camera,
"--look",
yaw,
0,
"--size",
*img_size,
"--spp",
spp,
]
]
logging.info("CALL: " + " ".join(call))
procs.append(subprocess.Popen(call))
for p in procs:
p.wait()
if i < repeat - 1:
tmp_images.append(cv2.imread(out_file))
os.system("rm -f " + out_file) ## delete the tmp image
## now we need to compute the pixel-to-block correspondence
p2b = pixel2block(tmp_images, schematic_hue)
## write the correspondence to disk
## x-y-z is after applying ground_height
p2b_file = "{}/p2b.{}.{}.npy".format(out_dir, house_name, chunky_id)
np.save(p2b_file, p2b)
def pixel2block(random_images, schematic_hue):
"""
This function returns a numpy array (M,N,3) that indicates which pixel corresponds to
which block.
If a pixel has [-1, -1, -1], then it means this pixel does not map to any block
"""
for i in range(len(random_images)):
random_images[i] = cv2.cvtColor(random_images[i], cv2.COLOR_BGR2HSV)
## init the ret to all -1s
ret = np.ones(random_images[0].shape[:2] + (3,), dtype=np.int32) * -1
ymax, zmax, xmax, _ = schematic_hue.shape
schematic_hue = np.reshape(schematic_hue, (-1, schematic_hue.shape[-1]))
kdt = KDTree(schematic_hue, leaf_size=2)
hue_vecs = []
for m in range(ret.shape[0]):
for n in range(ret.shape[1]):
## the original range is [0,179]
hue_vecs.append([img[m, n][0] * 2 for img in random_images])
hue_vecs = np.reshape(np.array(hue_vecs), (-1, len(random_images)))
query = kdt.query(hue_vecs, k=1, return_distance=False)
assert len(query) == ret.shape[0] * ret.shape[1]
for i in range(len(query)):
m = i // ret.shape[1]
n = i % ret.shape[1]
y = query[i][0] // (zmax * xmax)
z = (query[i][0] % (zmax * xmax)) // xmax
x = (query[i][0] % (zmax * xmax)) % xmax
ret[m][n] = [x, y, z]
return ret
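# Illustrative consumer of the saved correspondence (a hypothetical helper, not used
# elsewhere; render_verify_pixel2block.py performs the same lookup):
def _example_lookup_block_for_pixel(p2b_file, m, n):
    p2b = np.load(p2b_file)
    x, y, z = p2b[m][n]
    if x < 0:
        return None  # this pixel does not map to any block of the house
    return (x, y, z)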
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("npy")
parser.add_argument(
"--out-dir", "-o", required=True, help="Directory in which to write vision files"
)
parser.add_argument("--spp", type=int, default=25, help="samples per pixel")
parser.add_argument("--port", type=int, default=25565)
parser.add_argument("--size", type=int, nargs=2, default=[300, 225])
args = parser.parse_args()
render(args.npy, args.out_dir, args.port, args.spp, args.size)
| craftassist-master | python/render_vision_dataset/render_schematic_with_pixel2block-color.py |
"""
Copyright (c) Facebook, Inc. and its affiliates.
"""
import argparse
import glob
import logging
import os
import subprocess
import random
import struct
import numpy as np
import sys
python_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.insert(0, python_dir)
from cuberite_process import CuberiteProcess
from repo import repo_home
logging.basicConfig(format="%(asctime)s [%(levelname)s]: %(message)s")
logging.getLogger().setLevel(logging.DEBUG)
y_offset = 63
def to_unit_vec(yaw, pitch):
pitch *= 3.14159 / 180
yaw *= 3.14159 / 180
return np.array(
[-1 * np.cos(pitch) * np.sin(yaw), -1 * np.sin(pitch), np.cos(pitch) * np.cos(yaw)]
)
def ground_height(blocks):
dirt_pct = np.mean(np.mean(blocks[:, :, :, 0] == 2, axis=1), axis=1)
if (dirt_pct > 0.25).any():
return np.argmax(dirt_pct)
return None
def render(npy_file, out_dir, port, spp, img_size):
no_chunky = "p2b" in npy_file
if no_chunky: ## we're going to re-compute the correspondence
npy_file = os.path.basename(npy_file)
tokens = npy_file.split(".")
yaw, distance = list(map(int, tokens[-2].split("_")))
npy_file = (
os.path.expanduser("~")
+ "/minecraft_houses/"
+ ".".join(tokens[1:-2])
+ "/schematic.npy"
)
else:
yaw, distance = None, None
schematic = np.load(npy_file)
house_name = os.path.basename(os.path.dirname(npy_file))
# remove blocks below ground-level
g = ground_height(schematic)
schematic = schematic[(g or 0) :, :, :, :]
Y, Z, X, _ = schematic.shape
ys, zs, xs = np.nonzero(schematic[:, :, :, 0] > 0)
if len(ys) < 5:
print("too few non-air blocks; will not render")
return
xmid, ymid, zmid = np.mean(xs), np.mean(ys), np.mean(zs)
    focus = np.array([xmid, ymid + y_offset, zmid])  # TODO: +63 only works for flat_world seed=0
if yaw is None:
yaw = random.randint(0, 360 - 1)
sorted_xs = sorted(xs)
sorted_zs = sorted(zs)
N = len(xs)
## remove head and tail 2%
    x_spread = sorted_xs[-N // 100] - sorted_xs[N // 100]
    z_spread = sorted_zs[-N // 100] - sorted_zs[N // 100]
    distance = max(x_spread, z_spread)  # keep X, Y, Z as schematic dims for read_pixel2block below
look = [yaw, 0]
look_xyz = to_unit_vec(*look)
camera = focus - (look_xyz * distance)
logging.info("Launching cuberite at port {}".format(port))
p = CuberiteProcess(
"flat_world", seed=0, game_mode="creative", place_blocks_yzx=schematic, port=port
)
logging.info("Destroying cuberite at port {}".format(port))
p.destroy()
world_dir = os.path.join(p.workdir, "world")
region_dir = os.path.join(world_dir, "region")
mca_files = glob.glob(os.path.join(region_dir, "*.mca"))
assert len(mca_files) > 0, "No region files at {}".format(region_dir)
render_view_bin = os.path.join(repo_home, "bin/render_view")
assert os.path.isfile(
render_view_bin
), "{} not found.\n\nTry running: make render_view".format(render_view_bin)
procs = []
chunky_id = "{}_{}".format(yaw, distance)
out_file = "{}/chunky.{}.{}.png".format(out_dir, house_name, chunky_id)
out_bin_prefix = "{}/{}.{}".format(out_dir, house_name, chunky_id)
call = [
str(a)
for a in [
render_view_bin,
"--out-prefix",
out_bin_prefix,
"--mca-files",
*mca_files,
"--camera",
*camera,
"--sizes",
*img_size,
"--look",
yaw,
0,
"--block",
0,
"--depth",
0,
"--blockpos",
1,
]
]
logging.info("CALL: " + " ".join(call))
procs.append(subprocess.Popen(call))
if not no_chunky:
## when re-computing the
call = [
str(a)
for a in [
"python3",
"{}/python/minecraft_render/render.py".format(repo_home),
"--world",
world_dir,
"--out",
out_file,
"--camera",
*camera,
"--look",
yaw,
0,
"--size",
*img_size,
"--spp",
spp,
]
]
logging.info("CALL: " + " ".join(call))
procs.append(subprocess.Popen(call))
for p in procs:
p.wait()
## read the output blockpos bin and convert it to a npy file
p2b = read_pixel2block(out_bin_prefix + ".blockpos.bin", X, Y, Z, img_size[0], img_size[1])
os.system("rm -f {}".format(out_bin_prefix + ".blockpos.bin"))
## write the correspondence to disk
## x-y-z is after applying ground_height
p2b_file = "{}/p2b.{}.{}.npy".format(out_dir, house_name, chunky_id)
np.save(p2b_file, p2b)
def read_pixel2block(blockpos_bin, X, Y, Z, width, height):
with open(blockpos_bin, "rb") as f:
content = f.read()
xyz = struct.unpack(width * height * 3 * "i", content)
xyz = np.array(xyz, dtype=np.int32)
p2b = xyz.reshape(height, width, 3)
for h in range(height):
for w in range(width):
x, y, z = p2b[h][w]
y -= y_offset
## check if the block is not on the house
if x < 0 or x >= X or y < 0 or y >= Y or z < 0 or z >= Z:
p2b[h][w] = [-1, -1, -1]
else:
p2b[h][w] = [x, y, z]
return p2b
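# Illustrative round-trip for the blockpos binary format (a sketch; the sizes and the
# temporary path are made up): one pixel mapping to block (2, 5, 7) of a 10x10x10 schematic.
def _example_blockpos_roundtrip():
    import tempfile
    buf = struct.pack("3i", 2, 5 + y_offset, 7)
    with tempfile.NamedTemporaryFile(suffix=".blockpos.bin", delete=False) as f:
        f.write(buf)
        path = f.name
    p2b = read_pixel2block(path, 10, 10, 10, 1, 1)
    os.remove(path)
    return p2b  # array of shape (1, 1, 3) containing [2, 5, 7]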
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("npy")
parser.add_argument(
"--out-dir", "-o", required=True, help="Directory in which to write vision files"
)
parser.add_argument("--spp", type=int, default=25, help="samples per pixel")
parser.add_argument("--port", type=int, default=25565)
parser.add_argument("--size", type=int, nargs=2, default=[300, 225])
args = parser.parse_args()
render(args.npy, args.out_dir, args.port, args.spp, args.size)
| craftassist-master | python/render_vision_dataset/render_schematic_with_pixel2block.py |
"""
Copyright (c) Facebook, Inc. and its affiliates.
"""
#!/usr/bin/python
import os
import sys
if __name__ == "__main__":
npy_files = sys.argv[1]
port = int(sys.argv[2])
home = os.path.expanduser("~")
## for each house, we render four different angles
with open(npy_files, "r") as f:
lines = f.read().splitlines()
for l in lines:
os.system(
"python render_schematic_with_pixel2block.py %s --out-dir=%s/minecraft/python/stack_agent_this/vision_training/render_results1/new_npys --port=%d"
% (l, home, port + 25565)
)
## clean up the bin files
# os.system("rm -f %s/render_results/*bin" % home)
| craftassist-master | python/render_vision_dataset/render_script.py |
import os
import threading
from flask import Flask
import socketio
from flask_cors import cross_origin, CORS
import mcevent
app = None
def _dashboard_thread(web_root, ip, port):
global app
root_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), "../../"))
static_folder = os.path.join(root_dir, web_root, "build")
print("static_folder:", static_folder)
app = Flask(__name__, static_folder=static_folder, static_url_path="")
sio = socketio.Server(async_mode="threading", cors_allowed_origins="*")
app.wsgi_app = socketio.WSGIApp(sio, app.wsgi_app)
mcevent.sio = sio
CORS(app, resources={r"*": {"origins": "*"}})
@app.route("/")
@cross_origin(origin="*")
def index():
return app.send_static_file("index.html")
if os.getenv("MCDASHBOARD_PORT"):
port = os.getenv("MCDASHBOARD_PORT")
print("setting MC dashboard port from env variable MCDASHBOARD_PORT={}".format(port))
if os.getenv("MCDASHBOARD_IP"):
ip = os.getenv("MCDASHBOARD_IP")
print("setting MC dashboard ip from env variable MCDASHBOARD_IP={}".format(ip))
app.run(ip, threaded=True, port=port, debug=False)
def start(web_root="tools/dashboard_web", ip="0.0.0.0", port=8000):
t = threading.Thread(target=_dashboard_thread, args=(web_root, ip, port))
    t.start()
| craftassist-master | python/dashboard/__init__.py
import threading
import weakref
def _make_id(target):
if hasattr(target, "__func__"):
return (id(target.__self__), id(target.__func__))
return id(target)
NONE_ID = _make_id(None)
# A marker for caching
NO_RECEIVERS = object()
class Signal:
"""
Base class for all signals
Internal attributes:
receivers
{ receiverkey (id) : weakref(receiver) }
"""
def __init__(self, providing_args=None, use_caching=False):
"""
Create a new signal.
"""
self.receivers = []
self.lock = threading.Lock()
self.use_caching = use_caching
# For convenience we create empty caches even if they are not used.
# A note about caching: if use_caching is defined, then for each
# distinct sender we cache the receivers that sender has in
# 'sender_receivers_cache'. The cache is cleaned when .connect() or
# .disconnect() is called and populated on send().
self.sender_receivers_cache = weakref.WeakKeyDictionary() if use_caching else {}
self._dead_receivers = False
def connect(self, receiver, sender=None, weak=True, dispatch_uid=None):
"""
Connect receiver to sender for signal.
Arguments:
receiver
A function or an instance method which is to receive signals.
Receivers must be hashable objects.
If weak is True, then receiver must be weak referenceable.
Receivers must be able to accept keyword arguments.
If a receiver is connected with a dispatch_uid argument, it
will not be added if another receiver was already connected
with that dispatch_uid.
sender
The sender to which the receiver should respond. Must either be
a Python object, or None to receive events from any sender.
weak
Whether to use weak references to the receiver. By default, the
module will attempt to use weak references to the receiver
objects. If this parameter is false, then strong references will
be used.
dispatch_uid
An identifier used to uniquely identify a particular instance of
a receiver. This will usually be a string, though it may be
anything hashable.
"""
if dispatch_uid:
lookup_key = (dispatch_uid, _make_id(sender))
else:
lookup_key = (_make_id(receiver), _make_id(sender))
if weak:
ref = weakref.ref
receiver_object = receiver
# Check for bound methods
if hasattr(receiver, "__self__") and hasattr(receiver, "__func__"):
ref = weakref.WeakMethod
receiver_object = receiver.__self__
receiver = ref(receiver)
weakref.finalize(receiver_object, self._remove_receiver)
with self.lock:
self._clear_dead_receivers()
if not any(r_key == lookup_key for r_key, _ in self.receivers):
self.receivers.append((lookup_key, receiver))
self.sender_receivers_cache.clear()
def disconnect(self, receiver=None, sender=None, dispatch_uid=None):
"""
Disconnect receiver from sender for signal.
If weak references are used, disconnect need not be called. The receiver
will be removed from dispatch automatically.
Arguments:
receiver
The registered receiver to disconnect. May be none if
dispatch_uid is specified.
sender
The registered sender to disconnect
dispatch_uid
the unique identifier of the receiver to disconnect
"""
if dispatch_uid:
lookup_key = (dispatch_uid, _make_id(sender))
else:
lookup_key = (_make_id(receiver), _make_id(sender))
disconnected = False
with self.lock:
self._clear_dead_receivers()
for index in range(len(self.receivers)):
(r_key, _) = self.receivers[index]
if r_key == lookup_key:
disconnected = True
del self.receivers[index]
break
self.sender_receivers_cache.clear()
return disconnected
def has_listeners(self, sender=None):
return bool(self._live_receivers(sender))
def send(self, sender, **named):
"""
Send signal from sender to all connected receivers.
If any receiver raises an error, the error propagates back through send,
terminating the dispatch loop. So it's possible that all receivers
won't be called if an error is raised.
Arguments:
sender
The sender of the signal. Either a specific object or None.
named
Named arguments which will be passed to receivers.
Return a list of tuple pairs [(receiver, response), ... ].
"""
if not self.receivers or self.sender_receivers_cache.get(sender) is NO_RECEIVERS:
return []
return [
(receiver, receiver(signal=self, sender=sender, **named))
for receiver in self._live_receivers(sender)
]
def send_robust(self, sender, **named):
"""
Send signal from sender to all connected receivers catching errors.
Arguments:
sender
The sender of the signal. Can be any Python object (normally one
registered with a connect if you actually want something to
occur).
named
Named arguments which will be passed to receivers.
Return a list of tuple pairs [(receiver, response), ... ].
If any receiver raises an error (specifically any subclass of
Exception), return the error instance as the result for that receiver.
"""
if not self.receivers or self.sender_receivers_cache.get(sender) is NO_RECEIVERS:
return []
# Call each receiver with whatever arguments it can accept.
# Return a list of tuple pairs [(receiver, response), ... ].
responses = []
for receiver in self._live_receivers(sender):
try:
response = receiver(signal=self, sender=sender, **named)
except Exception as err:
responses.append((receiver, err))
else:
responses.append((receiver, response))
return responses
def _clear_dead_receivers(self):
# Note: caller is assumed to hold self.lock.
if self._dead_receivers:
self._dead_receivers = False
self.receivers = [
r
for r in self.receivers
if not (isinstance(r[1], weakref.ReferenceType) and r[1]() is None)
]
def _live_receivers(self, sender):
"""
Filter sequence of receivers to get resolved, live receivers.
This checks for weak references and resolves them, then returns only
live receivers.
"""
receivers = None
if self.use_caching and not self._dead_receivers:
receivers = self.sender_receivers_cache.get(sender)
# We could end up here with NO_RECEIVERS even if we do check this case in
# .send() prior to calling _live_receivers() due to concurrent .send() call.
if receivers is NO_RECEIVERS:
return []
if receivers is None:
with self.lock:
self._clear_dead_receivers()
senderkey = _make_id(sender)
receivers = []
for (receiverkey, r_senderkey), receiver in self.receivers:
if r_senderkey == NONE_ID or r_senderkey == senderkey:
receivers.append(receiver)
if self.use_caching:
if not receivers:
self.sender_receivers_cache[sender] = NO_RECEIVERS
else:
# Note, we must cache the weakref versions.
self.sender_receivers_cache[sender] = receivers
non_weak_receivers = []
for receiver in receivers:
if isinstance(receiver, weakref.ReferenceType):
# Dereference the weak reference.
receiver = receiver()
if receiver is not None:
non_weak_receivers.append(receiver)
else:
non_weak_receivers.append(receiver)
return non_weak_receivers
def _remove_receiver(self, receiver=None):
# Mark that the self.receivers list has dead weakrefs. If so, we will
# clean those up in connect, disconnect and _live_receivers while
# holding self.lock. Note that doing the cleanup here isn't a good
# idea, _remove_receiver() will be called as side effect of garbage
# collection, and so the call can happen while we are already holding
# self.lock.
self._dead_receivers = True
def receiver(signal, **kwargs):
"""
A decorator for connecting receivers to signals. Used by passing in the
signal (or list of signals) and keyword arguments to connect::
@receiver(post_save, sender=MyModel)
def signal_receiver(sender, **kwargs):
...
@receiver([post_save, post_delete], sender=MyModel)
def signals_receiver(sender, **kwargs):
...
"""
def _decorator(func):
if isinstance(signal, (list, tuple)):
for s in signal:
s.connect(func, **kwargs)
else:
signal.connect(func, **kwargs)
return func
return _decorator
on = receiver
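# Illustrative usage sketch: wiring a plain function to a Signal and sending it.
# The signal name and payload are invented for illustration only.
if __name__ == "__main__":
    block_placed = Signal(providing_args=["pos"])

    @receiver(block_placed)
    def on_block_placed(sender, pos=None, **kwargs):
        print("block placed at", pos)

    # send() returns [(receiver, response), ...]; here the response is None
    print(block_placed.send(sender=None, pos=(1, 2, 3)))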
| craftassist-master | python/mcevent/dispatcher.py |
"""Multi-consumer multi-producer dispatching mechanism
Originally based on pydispatch (BSD) https://pypi.org/project/PyDispatcher/2.0.1/
See license.txt for original license.
Heavily modified for Django's purposes.
"""
from .dispatcher import Signal, receiver # NOQA
dispatch = Signal() # NOQA
class SocketIOMock:
mock = True
def on(self, event, **kwargs):
def _decorator(func):
return func
return _decorator
def emit(*args, **kwargs):
pass
sio = SocketIOMock() # NOQA
| craftassist-master | python/mcevent/__init__.py |
import argparse
import glob
import json
import os
import shutil
import subprocess
import uuid
# LOOK ANGLES
# -----------------
# - chunky definitions of yaw/pitch differ from minecraft's:
# -> chunky_pitch = minecraft_pitch - 90
# -> chunky_yaw = 90 - minecraft_yaw
# - chunky uses radians, minecraft uses degrees
parser = argparse.ArgumentParser()
parser.add_argument('--world', required=True, help='path to world files')
parser.add_argument('--out', '-o', required=True, help='path to write image')
parser.add_argument('--camera', type=float, nargs=3, default=[16, 70, 48])
parser.add_argument('--look', type=float, nargs=2, default=[-90, -90])
parser.add_argument('--focal-offset', type=float, default=30)
parser.add_argument('--chunk-min', type=int, default=-1)
parser.add_argument('--chunk-max', type=int, default=3)
parser.add_argument('--size', type=int, nargs=2, default=[800, 600])
parser.add_argument('--spp', type=int, default=100, help='samples per pixel')
REPO_DIR = os.path.dirname(__file__)
CHUNKY_DIR = os.path.join(REPO_DIR, 'chunky')
SCENES_DIR = os.path.join(REPO_DIR, 'chunky/scenes')
def gen_scene_json(args, name):
with open(os.path.join(REPO_DIR, 'world.json'), 'r') as f:
j = json.load(f)
j['name'] = name
j['world']['path'] = args.world
j['camera']['position']['x'] = args.camera[0]
j['camera']['position']['y'] = args.camera[1]
j['camera']['position']['z'] = args.camera[2]
j['camera']['orientation']['yaw'] = (90 - args.look[0]) * 3.14159 / 180
j['camera']['orientation']['pitch'] = (args.look[1] - 90) * 3.14159 / 180
j['camera']['focalOffset'] = args.focal_offset
j['chunkList'] = [[a, b] for a in range(args.chunk_min, args.chunk_max+1) \
for b in range(args.chunk_min, args.chunk_max+1)]
j['width'] = args.size[0]
j['height'] = args.size[1]
return json.dumps(j)
if __name__ == '__main__':
args = parser.parse_args()
name = str(uuid.uuid4())
base_call = 'java -jar -Dchunk.home={0} {0}/ChunkyLauncher.jar'.format(CHUNKY_DIR)
# Create scene
scene_json = gen_scene_json(args, name)
os.makedirs(SCENES_DIR, exist_ok=True)
scene_json_path = os.path.join(SCENES_DIR, name + '.json')
with open(scene_json_path, 'w') as f:
f.write(scene_json)
print('Wrote scene to', scene_json_path)
# Download minecraft if necessary
if not os.path.isfile(os.path.join(CHUNKY_DIR, 'resources/minecraft.jar')):
call = '{} -download-mc 1.12'.format(base_call)
subprocess.check_call(call.split(' '))
# Run chunky
call = '{} -render {}'.format(base_call, name)
subprocess.check_call(call.split(' '))
# Move output image
pngs = glob.glob(os.path.join(SCENES_DIR, '{}*.png'.format(name)))
assert len(pngs) == 1, pngs
shutil.move(pngs[0], args.out)
print('Wrote image to', args.out)
# Clean up
for f in glob.glob(os.path.join(SCENES_DIR, '*{}*'.format(name))):
os.remove(f)
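# Example invocation (a sketch; the world path and output name are placeholders):
#   python render.py --world /path/to/world --out render.png \
#       --camera 16 70 48 --look -90 -90 --size 800 600 --spp 100
# Chunky scene files are written under chunky/scenes/ next to this script and
# cleaned up after the image is moved to --out.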
| craftassist-master | python/minecraft_render/render.py |
"""
Copyright (c) Facebook, Inc. and its affiliates.
"""
class StopCondition:
def __init__(self, agent):
self.agent = agent
def check(self) -> bool:
raise NotImplementedError("Implemented by subclass")
class NeverStopCondition(StopCondition):
def __init__(self, agent):
super().__init__(agent)
self.name = "never"
def check(self):
return False
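# Minimal sanity check of the base behavior (no agent state is needed):
if __name__ == "__main__":
    assert NeverStopCondition(agent=None).check() is False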
| craftassist-master | python/base_agent/stop_condition.py |
from condition import NeverCondition
DEFAULT_THROTTLING_TICK = 16
THROTTLING_TICK_UPPER_LIMIT = 64
THROTTLING_TICK_LOWER_LIMIT = 4
# put a counter and a max_count so can't get stuck?
class Task(object):
def __init__(self):
self.memid = None
self.interrupted = False
self.finished = False
self.name = None
self.undone = False
self.last_stepped_time = None
self.throttling_tick = DEFAULT_THROTTLING_TICK
self.stop_condition = NeverCondition(None)
def step(self, agent):
# todo? make it so something stopped by condition can be resumed?
if self.stop_condition.check():
self.finished = True
return
return
def add_child_task(self, t, agent, pass_stop_condition=True):
# FIXME, this is ugly and dangerous; some conditions might keep state etc?
if pass_stop_condition:
t.stop_condition = self.stop_condition
agent.memory.task_stack_push(t, parent_memid=self.memid)
def interrupt(self):
self.interrupted = True
def check_finished(self):
if self.finished:
return self.finished
def hurry_up(self):
self.throttling_tick /= 4
if self.throttling_tick < THROTTLING_TICK_LOWER_LIMIT:
self.throttling_tick = THROTTLING_TICK_LOWER_LIMIT
def slow_down(self):
self.throttling_tick *= 4
if self.throttling_tick > THROTTLING_TICK_UPPER_LIMIT:
self.throttling_tick = THROTTLING_TICK_UPPER_LIMIT
def __repr__(self):
return str(type(self))
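# Illustrative sketch of a concrete Task built on the base class above. The
# send_chat call is an assumption for illustration (any agent object exposing
# send_chat would do); it is not part of the base Task API.
class SayTask(Task):
    def __init__(self, text="hi"):
        super().__init__()
        self.text = text

    def step(self, agent):
        # base step() marks the task finished if its stop_condition fires
        super().step(agent)
        if self.finished:
            return
        agent.send_chat(self.text)
        self.finished = True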
| craftassist-master | python/base_agent/task.py |
"""
Copyright (c) Facebook, Inc. and its affiliates.
"""
from collections import defaultdict, namedtuple
import binascii
import hashlib
import logging
import numpy as np
import time
import traceback
from word2number.w2n import word_to_num
from typing import Tuple, List, TypeVar
import uuid
##FFS FIXME!!!! arrange utils properly, put things in one place
XYZ = Tuple[int, int, int]
# two points p0(x0, y0, z0), p1(x1, y1, z1) determine a 3d cube(point_at_target)
POINT_AT_TARGET = Tuple[int, int, int, int, int, int]
IDM = Tuple[int, int]
Block = Tuple[XYZ, IDM]
Hole = Tuple[List[XYZ], IDM]
T = TypeVar("T") # generic type
#####FIXME!!!!!! make all these dicts all through code
Pos = namedtuple("pos", ["x", "y", "z"])
Look = namedtuple("look", "yaw, pitch")
Player = namedtuple("Player", "entityId, name, pos, look")
TICKS_PER_SEC = 100
TICKS_PER_MINUTE = 60 * TICKS_PER_SEC
TICKS_PER_HOUR = 60 * TICKS_PER_MINUTE
TICKS_PER_DAY = 24 * TICKS_PER_HOUR
class Time:
def __init__(self):
self.init_time_raw = time.time()
# converts from seconds to internal tick
def round_time(self, t):
return int(TICKS_PER_SEC * t)
def get_time(self):
return self.round_time(time.time() - self.init_time_raw)
def get_world_hour(self):
# returns a fraction of a day. 0 is sunrise, .5 is sunset, 1.0 is next day
return (time.localtime()[3] - 8 + time.localtime()[4] / 60) / 24
def add_tick(self, ticks=1):
time.sleep(ticks / TICKS_PER_SEC)
class ErrorWithResponse(Exception):
def __init__(self, chat):
self.chat = chat
class NextDialogueStep(Exception):
pass
class TimingWarn(object):
"""Context manager which logs a warning if elapsed time exceeds some threshold"""
def __init__(self, max_time: float):
self.max_time = max_time
def __enter__(self):
self.start_time = time.time()
def __exit__(self, exc_type, exc_value, exc_traceback):
self.elapsed_time = time.time() - self.start_time
if self.elapsed_time >= self.max_time:
logging.warn(
"Timing exceeded threshold: {}".format(self.elapsed_time)
+ "\n"
+ "".join(traceback.format_stack(limit=2))
)
def number_from_span(s):
try:
n = float(s)
except:
try:
n = float(word_to_num(s))
except:
return
return n
def check_username(hashed_username, username):
"""Compare the username with the hash to check if they
are the same"""
user, salt = hashed_username.split(":")
return user == hashlib.sha256(salt.encode() + username.encode()).hexdigest()
def get_bounds(locs):
M = np.max(locs, axis=0)
m = np.min(locs, axis=0)
return m[0], M[0], m[1], M[1], m[2], M[2]
def group_by(items, key_fn):
"""Return a dict of {k: list[x]}, where key_fn(x) == k"""
d = defaultdict(list)
for x in items:
d[key_fn(x)].append(x)
return d
def hash_user(username):
"""Encrypt username"""
# uuid is used to generate a random number
salt = uuid.uuid4().hex
return hashlib.sha256(salt.encode() + username.encode()).hexdigest() + ":" + salt
def manhat_dist(a, b):
"""Return mahattan ditance between a and b"""
return abs(a[0] - b[0]) + abs(a[1] - b[1]) + abs(a[2] - b[2])
def pos_to_np(pos):
"""Convert pos to numpy array"""
if pos is None:
return None
return np.array((pos.x, pos.y, pos.z))
def shasum_file(path):
"""Retrn shasum of the file at path"""
sha = hashlib.sha1()
with open(path, "rb") as f:
block = f.read(2 ** 16)
while len(block) != 0:
sha.update(block)
block = f.read(2 ** 16)
return binascii.hexlify(sha.digest())
# TODO make this just a dict, and change in memory and agent
# eg in object_looked_at and PlayerNode
def to_player_struct(pos, yaw, pitch, eid, name):
if len(pos) == 2:
pos = Pos(pos[0], 0.0, pos[1])
else:
pos = Pos(pos[0], pos[1], pos[2])
look = Look(yaw, pitch)
return Player(eid, name, pos, look)
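# Illustrative usage sketch of a few helpers above; the values are invented for
# illustration only.
if __name__ == "__main__":
    # hash_user / check_username round-trip
    hashed = hash_user("alice")
    assert check_username(hashed, "alice")
    assert not check_username(hashed, "bob")

    # manhattan distance between two (x, y, z) points
    assert manhat_dist((0, 0, 0), (1, 2, 3)) == 6

    # parse numbers written as digits or as words
    assert number_from_span("3") == 3.0
    assert number_from_span("three") == 3.0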
| craftassist-master | python/base_agent/base_util.py |
"""
Copyright (c) Facebook, Inc. and its affiliates.
"""
MAP_YES = [
"yes",
"true",
"i agree",
"tru dat",
"yep",
"ya",
"yah",
"yeah",
"definitely",
"def",
"sure",
"ok",
"o k",
]
MAP_NO = ["no", "nope", "false", "definitely not"]
MAP_MAYBE = ["maybe", "unknown", "i don ' t know", "i do not know"]
ACTION_ING_MAPPING = {
"build": "building",
"dance": "dancing",
"destroy": "destroying",
"dig": "digging",
"fill": "filling",
"move": "moving",
}
| craftassist-master | python/base_agent/string_lists.py |
"""
Copyright (c) Facebook, Inc. and its affiliates.
"""
"""This file has functions to preprocess the chat from user before
querying the dialogue manager"""
import string
from spacy.lang.en import English
from typing import List
tokenizer = English().Defaults.create_tokenizer()
def word_tokenize(st) -> str:
chat_with_spaces = insert_spaces(st)
return " ".join([str(x) for x in tokenizer(chat_with_spaces)])
def sentence_split(st):
st = st.replace(" ?", " .")
st = st.replace(" !", " .")
st = st.replace(" ...", " .")
res = [
" ".join([x for x in sen.lower().split() if x not in string.punctuation])
for sen in st.split(" .")
]
return [x for x in res if x != ""]
def insert_spaces(chat):
updated_chat = ""
for i, c in enumerate(chat):
# [num , (num , {num , ,num , :num
if (
(c in ["[", "(", "{", ",", ":", "x"])
and (i != len(chat) - 1)
and (chat[i + 1].isdigit())
):
updated_chat += c + " "
# num, , num] , num) , num}, num:
# 4x -> 4 x
elif (
(c.isdigit())
and (i != len(chat) - 1)
and (chat[i + 1] in [",", "]", ")", "}", ":", "x"])
):
updated_chat += c + " "
else:
updated_chat += c
return updated_chat
def preprocess_chat(chat: str) -> List[str]:
# For debug mode, return as is.
if chat == "_debug_" or chat.startswith("_ttad_"):
return [chat]
# Tokenize
tokenized_line = word_tokenize(chat)
tokenized_sentences = [sen for sen in sentence_split(tokenized_line)]
return tokenized_sentences
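# Example behavior (a sketch of expected output, not a test): insert_spaces
# separates adjacent numbers and brackets, and sentence_split lowercases and
# strips punctuation, so roughly
#   preprocess_chat("build a 3x4 wall") -> ["build a 3 x 4 wall"]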
if __name__ == "__main__":
import fileinput
for line in fileinput.input():
try:
print(preprocess_chat(line)[0])
except IndexError:
pass
| craftassist-master | python/base_agent/preprocess.py |
from typing import List
SELFID = "0" * 32
def maybe_and(sql, a):
if a:
return sql + " AND "
else:
return sql
def maybe_or(sql, a):
if a:
return sql + " OR "
else:
return sql
# TODO counts
def get_property_value(agent_memory, mem, prop):
# order of precedence:
# 1: main memory table
# 2: table corresponding to the nodes .TABLE
# 3: triple with the nodes memid as subject and prop as predicate
# is it in the main memory table?
cols = [c[1] for c in agent_memory._db_read("PRAGMA table_info(Memories)")]
if prop in cols:
cmd = "SELECT " + prop + " FROM Memories WHERE uuid=?"
r = agent_memory._db_read(cmd, mem.memid)
return r[0][0]
# is it in the mem.TABLE?
T = mem.TABLE
cols = [c[1] for c in agent_memory._db_read("PRAGMA table_info({})".format(T))]
if prop in cols:
cmd = "SELECT " + prop + " FROM " + T + " WHERE uuid=?"
r = agent_memory._db_read(cmd, mem.memid)
return r[0][0]
# is it a triple?
triples = agent_memory.get_triples(subj=mem.memid, pred_text=prop, return_obj_text="always")
if len(triples) > 0:
return triples[0][2]
return None
class MemorySearcher:
def __init__(self, self_memid=SELFID, search_data=None):
self.self_memid = self_memid
self.search_data = search_data
def search(self, memory, search_data=None) -> List["ReferenceObjectNode"]: # noqa T484
raise NotImplementedError
class ReferenceObjectSearcher(MemorySearcher):
def __init__(self, self_memid=SELFID, search_data=None):
super().__init__(self_memid=self_memid, search_data=search_data)
def is_filter_empty(self, filter_dict):
r = filter_dict.get("special")
if r and len(r) > 0:
return False
r = filter_dict.get("ref_obj_range")
if r and len(r) > 0:
return False
r = filter_dict.get("ref_obj_exact")
if r and len(r) > 0:
return False
r = filter_dict.get("memories_range")
if r and len(r) > 0:
return False
r = filter_dict.get("memories_exact")
if r and len(r) > 0:
return False
t = filter_dict.get("triples")
if t and len(t) > 0:
return False
return True
def range_queries(self, r, table, a=False):
""" this does x, y, z, pitch, yaw, etc.
input format for generates is
{"xmin": float, xmax: float, ... , yawmin: float, yawmax: float}
"""
sql = ""
vals = []
for k, v in r.items():
if "min" in k:
sql = maybe_and(sql, len(vals) > 0)
sql += table + "." + k.replace("min", "") + ">? "
vals.append(v)
if "max" in k:
sql = maybe_and(sql, len(vals) > 0)
sql += table + "." + k.replace("max", "") + "<? "
vals.append(v)
return sql, vals
def exact_matches(self, m, table, a=False):
sql = ""
vals = []
for k, v in m.items():
sql = maybe_and(sql, len(vals) > 0)
sql += table + "." + k + "=? "
vals.append(v)
return sql, vals
def triples(self, triples, a=False):
# currently does an "and": the memory needs to satisfy all triples
vals = []
if not triples:
return "", vals
sql = "ReferenceObjects.uuid IN (SELECT subj FROM Triples WHERE "
for t in triples:
sql = maybe_or(sql, len(vals) > 0)
vals.append(t["pred_text"])
if t.get("obj_text"):
sql += "(pred_text, obj_text)=(?, ?)"
vals.append(t["obj_text"])
else:
sql += "(pred_text, obj)=(?, ?)"
vals.append(t["obj"])
sql += " GROUP BY subj HAVING COUNT(subj)=? )"
vals.append(len(triples))
return sql, vals
def get_query(self, filter_dict, ignore_self=True):
if self.is_filter_empty(filter_dict):
query = "SELECT uuid FROM ReferenceObjects"
if ignore_self:
query += " WHERE uuid !=?"
return query, [self.self_memid]
else:
return query, []
query = (
"SELECT ReferenceObjects.uuid FROM ReferenceObjects"
" INNER JOIN Memories as M on M.uuid=ReferenceObjects.uuid"
" WHERE "
)
args = []
fragment, vals = self.range_queries(
filter_dict.get("ref_obj_range", {}), "ReferenceObjects"
)
query = maybe_and(query, len(args) > 0)
args.extend(vals)
query += fragment
fragment, vals = self.exact_matches(
filter_dict.get("ref_obj_exact", {}), "ReferenceObjects"
)
query = maybe_and(query, len(args) > 0 and len(vals) > 0)
args.extend(vals)
query += fragment
fragment, vals = self.range_queries(filter_dict.get("memories_range", {}), "M")
query = maybe_and(query, len(args) > 0 and len(vals) > 0)
args.extend(vals)
query += fragment
fragment, vals = self.exact_matches(filter_dict.get("memories_exact", {}), "M")
query = maybe_and(query, len(args) > 0 and len(vals) > 0)
args.extend(vals)
query += fragment
fragment, vals = self.triples(filter_dict.get("triples", []))
query = maybe_and(query, len(args) > 0 and len(vals) > 0)
args.extend(vals)
query += fragment
if ignore_self:
query += " AND ReferenceObjects.uuid !=?"
args.append(self.self_memid)
return query, args
# TODO add a flag (on by default) to copy speaker_look etc. at search time, so that if the searcher
# is called again later it doesn't return the new position of the agent/speaker/speaker_look
# how to parse this distinction?
def handle_special(self, memory, search_data):
d = search_data.get("special")
if not d:
return []
if d.get("SPEAKER"):
return [memory.get_player_by_eid(d["SPEAKER"])]
if d.get("SPEAKER_LOOK"):
memids = memory._db_read_one(
'SELECT uuid FROM ReferenceObjects WHERE ref_type="attention" AND type_name=?',
d["SPEAKER_LOOK"],
)
if memids:
memid = memids[0]
mem = memory.get_location_by_id(memid)
return [mem]
if d.get("AGENT"):
return [memory.get_player_by_eid(d["AGENT"])]
if d.get("DUMMY"):
return [d["DUMMY"]]
return []
def search(self, memory, search_data=None) -> List["ReferenceObjectNode"]: # noqa T484
"""Find ref_objs matching the given filters
filter_dict has children:
"ref_obj_range", dict, with keys "min<column_name>" or "max<column_name>",
(that is the string "min" prepended to the column name)
and float values vmin and vmax respectively.
<column_name> is any column in the ReferenceObjects table that
is a numerical value. filters on rows satisfying the inequality
<column_entry> > vmin or <column_entry> < vmax
"ref_obj_exact", dict, with keys "<column_name>"
<column_name> is any column in the ReferenceObjects table
checks exact matches to the value
"memories_range" and "memories_exact" are the same, but columns in the Memories table
"triples" list [t0, t1, ...,, tm]. each t in the list is a dict
with form t = {"pred_text": <pred>, "obj_text": <obj>}
or t = {"pred_text": <pred>, "obj": <obj_memid>}
currently returns memories with all triples matched
"""
if not search_data:
search_data = self.search_data
assert search_data
if search_data.get("special"):
return self.handle_special(memory, search_data)
query, args = self.get_query(search_data)
self.search_data = search_data
memids = [m[0] for m in memory._db_read(query, *args)]
return [memory.get_mem_by_id(memid) for memid in memids]
if __name__ == "__main__":
filter_dict = {
"ref_obj_range": {"minx": 3},
"memories_exact": {"create_time": 1},
"triples": [
{"pred_text": "has_tag", "obj_text": "cow"},
{"pred_text": "has_name", "obj_text": "eddie"},
],
}
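# Illustrative sketch: get_query turns the filter spec above into the
# (query, args) pair that search() would execute against a memory; no agent
# memory is needed just to build the SQL.
searcher = ReferenceObjectSearcher()
query, args = searcher.get_query(filter_dict)
print(query)
print(args)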
| craftassist-master | python/base_agent/memory_filters.py |
"""
Copyright (c) Facebook, Inc. and its affiliates.
"""
"""This file defines the DialogueStack class and helper functions
to support it."""
import logging
from base_agent.base_util import NextDialogueStep, ErrorWithResponse
class DialogueStack(object):
"""This class represents a dialogue stack that holds DialogueObjects on it."""
def __init__(self, agent, memory):
self.agent = agent
self.memory = memory
self.stack = []
def __getitem__(self, i):
"""Get the ith item on the stack """
return self.stack[i]
def peek(self):
"""Get the item on top of the DialogueStack"""
if self.stack:
return self.stack[-1]
else:
return None
def clear(self):
"""clear current stack"""
self.old_stack = self.stack
self.stack = []
def append(self, dialogue_object):
"""Append a dialogue_object to stack"""
self.stack.append(dialogue_object)
def append_new(self, cls, *args, **kwargs):
"""Construct a new DialogueObject and append to stack"""
self.stack.append(
cls(agent=self.agent, memory=self.memory, dialogue_stack=self, *args, **kwargs)
)
def step(self):
"""Process and step through the top-of-stack dialogue object."""
if len(self.stack) > 0:
# WARNING: check_finished increments the DialogueObject's current_step counter
while len(self.stack) > 0 and self.stack[-1].check_finished():
del self.stack[-1]
if len(self.stack) == 0:
return
try:
output_chat, step_data = self.stack[-1].step()
if output_chat:
self.agent.send_chat(output_chat)
# Update progeny_data of the current DialogueObject
if len(self.stack) > 1 and step_data is not None:
logging.info("Update progeny_data={} stack={}".format(step_data, self.stack))
self.stack[-2].update_progeny_data(step_data)
except NextDialogueStep:
return
except ErrorWithResponse as err:
self.stack[-1].finished = True
self.agent.send_chat(err.chat)
return
def __len__(self):
"""Length of stack"""
return len(self.stack)
| craftassist-master | python/base_agent/dialogue_stack.py |
"""
Copyright (c) Facebook, Inc. and its affiliates.
"""
###TODO put dances back
import gzip
import logging
import numpy as np
import os
import pickle
import sqlite3
import uuid
from itertools import zip_longest
from typing import cast, Optional, List, Tuple, Sequence, Union
from base_agent.base_util import XYZ, Time
from base_agent.task import Task
from base_agent.memory_filters import ReferenceObjectSearcher
from base_agent.memory_nodes import ( # noqa
TaskNode,
PlayerNode,
MemoryNode,
ChatNode,
TimeNode,
LocationNode,
ReferenceObjectNode,
NamedAbstractionNode,
NODELIST,
)
SCHEMAS = [os.path.join(os.path.dirname(__file__), "memory_schema.sql")]
# TODO when a memory is removed, its last state should be snapshotted to prevent tag weirdness
class AgentMemory:
def __init__(
self,
db_file=":memory:",
schema_paths=SCHEMAS,
db_log_path=None,
nodelist=NODELIST,
agent_time=None,
):
if db_log_path:
self._db_log_file = gzip.open(db_log_path + ".gz", "w")
self._db_log_idx = 0
self.sql_queries = []
if os.path.isfile(db_file):
os.remove(db_file)
self.db = sqlite3.connect(db_file)
self.task_db = {}
self._safe_pickle_saved_attrs = {}
self.init_time_interface(agent_time)
for schema_path in schema_paths:
with open(schema_path, "r") as f:
self._db_script(f.read())
self.all_tables = [
c[0] for c in self._db_read("SELECT name FROM sqlite_master WHERE type='table';")
]
self.nodes = {}
for node in nodelist:
self.nodes[node.NODE_TYPE] = node
# create a "self" memory to reference in Triples
self.self_memid = "0" * len(uuid.uuid4().hex)
self._db_write(
"INSERT INTO Memories VALUES (?,?,?,?,?,?)", self.self_memid, "Self", 0, 0, -1, False
)
self.tag(self.self_memid, "_agent")
self.tag(self.self_memid, "_self")
self.ref_searcher = ReferenceObjectSearcher(self_memid=self.self_memid)
def __del__(self):
if getattr(self, "_db_log_file", None):
self._db_log_file.close()
def init_time_interface(self, agent_time=None):
self.time = agent_time or Time()
def get_time(self):
return self.time.get_time()
def get_world_time(self):
return self.time.get_world_time()
def add_tick(self, ticks=1):
self.time.add_tick(ticks)
# TODO list of all "updatable" mems, do a mem.update() ?
def update(self, agent):
pass
########################
### Workspace memory ###
########################
def set_memory_updated_time(self, memid):
self._db_write("UPDATE Memories SET updated_time=? WHERE uuid=?", self.get_time(), memid)
def set_memory_attended_time(self, memid):
self._db_write("UPDATE Memories SET attended_time=? WHERE uuid=?", self.get_time(), memid)
def update_recent_entities(self, mems=[]):
logging.info("update_recent_entities {}".format(mems))
for mem in mems:
mem.update_recently_attended()
# for now, no archives in recent entities
def get_recent_entities(self, memtype, time_window=12000) -> List["MemoryNode"]:
r = self._db_read(
"""SELECT uuid
FROM Memories
WHERE node_type=? AND attended_time >= ? and is_snapshot=0
ORDER BY attended_time DESC""",
memtype,
self.get_time() - time_window,
)
return [self.get_mem_by_id(memid, memtype) for memid, in r]
###############
### General ###
###############
def get_node_from_memid(self, memid: str) -> str:
(r,) = self._db_read_one("SELECT node_type FROM Memories WHERE uuid=?", memid)
return r
def get_mem_by_id(self, memid: str, node_type: str = None) -> "MemoryNode":
if node_type is None:
node_type = self.get_node_from_memid(memid)
if node_type is None:
return MemoryNode(self, memid)
return self.nodes.get(node_type, MemoryNode)(self, memid)
# does not search archived mems for now
def get_all_tagged_mems(self, tag: str) -> List["MemoryNode"]:
memids = self.get_memids_by_tag(tag)
return [self.get_mem_by_id(memid) for memid in memids]
def check_memid_exists(self, memid: str, table: str) -> bool:
return bool(self._db_read_one("SELECT * FROM {} WHERE uuid=?".format(table), memid))
# TODO forget should be a method of the memory object
def forget(self, memid: str, hard=True):
if not hard:
self.add_triple(subj=memid, pred_text="has_tag", obj_text="_forgotten")
else:
self._db_write("DELETE FROM Memories WHERE uuid=?", memid)
# TODO this less brutally. might want to remember some
# triples where the subject or object has been removed
# eventually we might have second-order relations etc, this could set
# off a chain reaction
self.remove_memid_triple(memid, role="both")
##########################
### ReferenceObjects ###
##########################
def get_reference_objects(self, filter_dict):
return self.ref_searcher.search(self, search_data=filter_dict)
#################
### Triples ###
#################
# TODO should add a MemoryNode and a .create()
def add_triple(
self,
subj: str = "", # this is a memid if given
obj: str = "", # this is a memid if given
subj_text: str = "",
pred_text: str = "has_tag",
obj_text: str = "",
confidence: float = 1.0,
):
""" adds (subj, pred, obj) triple to the triplestore.
*_text is the name field of a NamedAbstraction; if
such a NamedAbstraction does not exist, this builds it as a side effect.
subj and obj can be memids or text, but pred_text is required """
assert subj or subj_text
assert obj or obj_text
assert not (subj and subj_text)
assert not (obj and obj_text)
memid = uuid.uuid4().hex
pred = NamedAbstractionNode.create(self, pred_text)
if not obj:
obj = NamedAbstractionNode.create(self, obj_text)
if not subj:
subj = NamedAbstractionNode.create(self, subj_text)
if not subj_text:
subj_text = None # noqa T484
if not obj_text:
obj_text = None # noqa T484
self._db_write(
"INSERT INTO Triples VALUES (?, ?, ?, ?, ?, ?, ?, ?)",
memid,
subj,
subj_text,
pred,
pred_text,
obj,
obj_text,
confidence,
)
def tag(self, subj_memid: str, tag_text: str):
self.add_triple(subj=subj_memid, pred_text="has_tag", obj_text=tag_text)
def untag(self, subj_memid: str, tag_text: str):
self._db_write(
'DELETE FROM Triples WHERE subj=? AND pred_text="has_tag" AND obj_text=?',
subj_memid,
tag_text,
)
# does not search archived mems for now
# assumes tag is tag text
def get_memids_by_tag(self, tag: str) -> List[str]:
r = self._db_read(
'SELECT DISTINCT(Memories.uuid) FROM Memories INNER JOIN Triples as T ON T.subj=Memories.uuid WHERE T.pred_text="has_tag" AND T.obj_text=? AND Memories.is_snapshot=0',
tag,
)
return [x for (x,) in r]
def get_tags_by_memid(self, subj_memid: str, return_text: bool = True) -> List[str]:
if return_text:
return_clause = "obj_text"
else:
return_clause = "obj"
q = (
"SELECT DISTINCT("
+ return_clause
+ ') FROM Triples WHERE pred_text="has_tag" AND subj=?'
)
r = self._db_read(q, subj_memid)
return [x for (x,) in r]
# does not search archived mems for now
# TODO clean up input?
def get_triples(
self,
subj: str = None,
obj: str = None,
subj_text: str = None,
pred_text: str = None,
obj_text: str = None,
return_obj_text: str = "if_exists",
) -> List[Tuple[str, str, str]]:
""" gets triples from the triplestore.
if return_obj_text == "if_exists", will return the obj_text
if it exists, and the memid otherwise
if return_obj_text == "always", returns the obj_text even if it is None
if return_obj_text == "never", returns the obj memid
subj is always returned as a memid even when searched as text.
need at least one non-None part of the triple, and
text should not be input for a part of a triple where a memid is set
"""
assert any([subj or subj_text, pred_text, obj or obj_text])
# search by memid or by text, but not both
assert not (subj and subj_text)
assert not (obj and obj_text)
pairs = [
("subj", subj),
("subj_text", subj_text),
("pred_text", pred_text),
("obj", obj),
("obj_text", obj_text),
]
args = [x[1] for x in pairs if x[1] is not None]
where = [x[0] + "=?" for x in pairs if x[1] is not None]
if len(where) == 1:
where_clause = where[0]
else:
where_clause = " AND ".join(where)
return_clause = "subj, pred_text, obj, obj_text "
sql = (
"SELECT "
+ return_clause
+ "FROM Triples INNER JOIN Memories as M ON Triples.subj=M.uuid WHERE M.is_snapshot=0 AND "
+ where_clause
)
r = self._db_read(sql, *args)
# subj is always returned as memid, even if pred and obj are returned as text
# pred is always returned as text
if return_obj_text == "if_exists":
l = [(s, pt, ot) if ot else (s, pt, o) for (s, pt, o, ot) in r]
elif return_obj_text == "always":
l = [(s, pt, ot) for (s, pt, o, ot) in r]
else:
l = [(s, pt, o) for (s, pt, o, ot) in r]
return cast(List[Tuple[str, str, str]], l)
def remove_memid_triple(self, memid: str, role="subj"):
if role == "subj" or role == "both":
self._db_write("DELETE FROM Triples WHERE subj=?", memid)
if role == "obj" or role == "both":
self._db_write("DELETE FROM Triples WHERE obj=?", memid)
###############
### Chats ###
###############
def add_chat(self, speaker_memid: str, chat: str) -> str:
return ChatNode.create(self, speaker_memid, chat)
def get_chat_by_id(self, memid: str) -> "ChatNode":
return ChatNode(self, memid)
def get_recent_chats(self, n=1) -> List["ChatNode"]:
"""Return a list of at most n chats"""
r = self._db_read("SELECT uuid FROM Chats ORDER BY time DESC LIMIT ?", n)
return [ChatNode(self, m) for m, in reversed(r)]
def get_most_recent_incoming_chat(self, after=-1) -> Optional["ChatNode"]:
r = self._db_read_one(
"""
SELECT uuid
FROM Chats
WHERE speaker != ? AND time >= ?
ORDER BY time DESC
LIMIT 1
""",
self.self_memid,
after,
)
if r:
return ChatNode(self, r[0])
else:
return None
#################
### Players ###
#################
# TODO consolidate anything using eid
def get_player_by_eid(self, eid) -> Optional["PlayerNode"]:
r = self._db_read_one("SELECT uuid FROM ReferenceObjects WHERE eid=?", eid)
if r:
return PlayerNode(self, r[0])
else:
return None
def get_player_by_name(self, name) -> Optional["PlayerNode"]:
r = self._db_read_one(
'SELECT uuid FROM ReferenceObjects WHERE ref_type="player" AND name=?', name
)
# r = self._db_read_one("SELECT uuid FROM Players WHERE name=?", name)
if r:
return PlayerNode(self, r[0])
else:
return None
def get_players_tagged(self, *tags) -> List["PlayerNode"]:
tags += ("_player",)
memids = set.intersection(*[set(self.get_memids_by_tag(t)) for t in tags])
return [self.get_player_by_id(memid) for memid in memids]
def get_player_by_id(self, memid) -> "PlayerNode":
return PlayerNode(self, memid)
###################
### Locations ###
###################
def add_location(self, xyz: XYZ) -> str:
return LocationNode.create(self, xyz)
def get_location_by_id(self, memid: str) -> "LocationNode":
return LocationNode(self, memid)
###############
### Times ###
###############
def add_time(self, t: int) -> str:
return TimeNode.create(self, t)
def get_time_by_id(self, memid: str) -> "TimeNode":
return TimeNode(self, memid)
# ###############
# ### Sets ###
# ###############
#
# def add_set(self, memid_list):
# set_memid = SetNode.create(self)
# self.add_objs_to_set(set_memid, memid_list)
# return SetNode(self, set_memid)
#
# def add_objs_to_set(self, set_memid, memid_list):
# for mid in memid_list:
# self.add_triple(mid, "set_member_", set_memid)
###############
### Tasks ###
###############
def task_stack_push(
self, task: Task, parent_memid: str = None, chat_effect: bool = False
) -> "TaskNode":
memid = TaskNode.create(self, task)
# Relations
if parent_memid:
self.add_triple(subj=memid, pred_text="_has_parent_task", obj=parent_memid)
if chat_effect:
chat = self.get_most_recent_incoming_chat()
assert chat is not None, "chat_effect=True with no incoming chats"
self.add_triple(subj=chat.memid, pred_text="chat_effect_", obj=memid)
# Return newly created object
return TaskNode(self, memid)
def task_stack_update_task(self, memid: str, task: Task):
self._db_write("UPDATE Tasks SET pickled=? WHERE uuid=?", self.safe_pickle(task), memid)
def task_stack_peek(self) -> Optional["TaskNode"]:
r = self._db_read_one(
"""
SELECT uuid
FROM Tasks
WHERE finished_at < 0 AND paused = 0
ORDER BY created_at DESC
LIMIT 1
"""
)
if r:
return TaskNode(self, r[0])
else:
return None
def task_stack_pop(self) -> Optional["TaskNode"]:
"""Return the 'TaskNode' of the stack head and mark finished"""
mem = self.task_stack_peek()
if mem is None:
raise ValueError("Called task_stack_pop with empty stack")
self._db_write("UPDATE Tasks SET finished_at=? WHERE uuid=?", self.get_time(), mem.memid)
return mem
def task_stack_pause(self) -> bool:
"""Pause the stack and return True iff anything was stopped"""
return self._db_write("UPDATE Tasks SET paused=1 WHERE finished_at < 0") > 0
def task_stack_clear(self):
self._db_write("DELETE FROM Tasks WHERE finished_at < 0")
def task_stack_resume(self) -> bool:
"""Resume stopped tasks. Return True if there was something to resume."""
return self._db_write("UPDATE Tasks SET paused=0") > 0
def task_stack_find_lowest_instance(
self, cls_names: Union[str, Sequence[str]]
) -> Optional["TaskNode"]:
"""Find and return the lowest item in the stack of the given class(es)"""
names = [cls_names] if type(cls_names) == str else cls_names
(memid,) = self._db_read_one(
"SELECT uuid FROM Tasks WHERE {} ORDER BY created_at LIMIT 1".format(
" OR ".join(["action_name=?" for _ in names])
),
*names,
)
if memid is not None:
return TaskNode(self, memid)
else:
return None
def task_stack_get_all(self) -> List["TaskNode"]:
r = self._db_read(
"""
SELECT uuid
FROM Tasks
WHERE paused=0 AND finished_at<0
ORDER BY created_at
"""
)
return [TaskNode(self, memid) for memid, in r]
def get_last_finished_root_task(self, action_name: str = None, recency: int = None):
q = """
SELECT uuid
FROM Tasks
WHERE finished_at >= ? {}
ORDER BY created_at DESC
""".format(
" AND action_name=?" if action_name else ""
)
if recency is None:
recency = self.time.round_time(300)
args: List = [self.get_time() - recency]
if action_name:
args.append(action_name)
memids = [r[0] for r in self._db_read(q, *args)]
for memid in memids:
if self._db_read_one(
"SELECT uuid FROM Triples WHERE pred_text='_has_parent_task' AND subj=?", memid
):
# not a root task
continue
return TaskNode(self, memid)
# raise ValueError("Called get_last_finished_root_task with no finished root tasks")
def get_task_by_id(self, memid: str) -> "TaskNode":
return TaskNode(self, memid)
#################
### Time ###
#################
def hurry_up(self):
if self.task_stack_peek() is None:
return # send chat?
task_mem = self.task_stack_peek()
task_mem.task.hurry_up()
self.task_stack_update_task(task_mem.memid, task_mem.task)
def slow_down(self):
if self.task_stack_peek() is None:
return # send chat?
task_mem = self.task_stack_peek()
task_mem.task.slow_down()
self.task_stack_update_task(task_mem.memid, task_mem.task)
#########################
### Database Access ###
#########################
def _db_read(self, query, *args) -> List[Tuple]:
args = tuple(a.item() if isinstance(a, np.number) else a for a in args)
try:
c = self.db.cursor()
c.execute(query, args)
query = query.replace("?", "{}").format(*args)
if query not in self.sql_queries:
self.sql_queries.append(query)
r = c.fetchall()
c.close()
return r
except:
logging.error("Bad read: {} : {}".format(query, args))
raise
def _db_read_one(self, query, *args) -> Tuple:
args = tuple(a.item() if isinstance(a, np.number) else a for a in args)
try:
c = self.db.cursor()
c.execute(query, args)
query = query.replace("?", "{}").format(*args)
if query not in self.sql_queries:
self.sql_queries.append(query)
r = c.fetchone()
c.close()
return r
except:
logging.error("Bad read: {} : {}".format(query, args))
raise
def _db_write(self, query: str, *args) -> int:
"""Return the number of rows affected"""
args = tuple(a.item() if isinstance(a, np.number) else a for a in args)
try:
c = self.db.cursor()
c.execute(query, args)
query = query.replace("?", "{}").format(*args)
if query not in self.sql_queries:
self.sql_queries.append(query)
self.db.commit()
c.close()
self._write_to_db_log(query, *args)
return c.rowcount
except:
logging.error("Bad write: {} : {}".format(query, args))
raise
def _db_script(self, script: str):
c = self.db.cursor()
c.executescript(script)
self.db.commit()
c.close()
self._write_to_db_log(script, no_format=True)
####################
### DB LOGGING ###
####################
def get_db_log_idx(self):
return self._db_log_idx
def _write_to_db_log(self, s: str, *args, no_format=False):
if not getattr(self, "_db_log_file", None):
return
# sub args in for ?
split = s.split("?")
final = b""
for sub, arg in zip_longest(split, args, fillvalue=""):
final += str(sub).encode("utf-8")
if type(arg) == str and arg != "":
# put quotes around string args
final += '"{}"'.format(arg).encode("utf-8")
else:
final += str(arg).encode("utf-8")
# remove newlines, add semicolon
if not no_format:
final = final.strip().replace(b"\n", b" ") + b";\n"
# write to file
self._db_log_file.write(final)
self._db_log_file.flush()
self._db_log_idx += 1
######################
### MISC HELPERS ###
######################
def dump(self, sql_file, dict_memory_file=None):
sql_file.write("\n".join(self.db.iterdump()))
if dict_memory_file is not None:
import io
import pickle
assert type(dict_memory_file) == io.BufferedWriter
dict_memory = {"task_db": self.task_db}
pickle.dump(dict_memory, dict_memory_file)
def safe_pickle(self, obj):
# little bit scary...
if not hasattr(obj, "pickled_attrs_id"):
if hasattr(obj, "memid"):
obj.pickled_attrs_id = obj.memid
else:
try:
obj.pickled_attrs_id = uuid.uuid4().hex
except:
pass
for attr in ["memory", "agent_memory", "new_tasks_fn", "stop_condition", "movement"]:
if hasattr(obj, attr):
if self._safe_pickle_saved_attrs.get(obj.pickled_attrs_id) is None:
self._safe_pickle_saved_attrs[obj.pickled_attrs_id] = {}
val = getattr(obj, attr)
delattr(obj, attr)
setattr(obj, "__had_attr_" + attr, True)
self._safe_pickle_saved_attrs[obj.pickled_attrs_id][attr] = val
return pickle.dumps(obj)
def safe_unpickle(self, bs):
obj = pickle.loads(bs)
if hasattr(obj, "pickled_attrs_id"):
for attr in ["memory", "agent_memory", "new_tasks_fn", "stop_condition", "movement"]:
if hasattr(obj, "__had_attr_" + attr):
delattr(obj, "__had_attr_" + attr)
setattr(obj, attr, self._safe_pickle_saved_attrs[obj.pickled_attrs_id][attr])
return obj
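# Illustrative usage sketch of the memory API above (assumes the default
# memory_schema.sql next to this module); the tag name is invented for
# illustration only.
if __name__ == "__main__":
    memory = AgentMemory()
    # tag the agent's own memory node and read the tag back
    memory.tag(memory.self_memid, "illustrative_tag")
    assert memory.self_memid in memory.get_memids_by_tag("illustrative_tag")
    # triples can also be queried directly
    print(memory.get_triples(subj=memory.self_memid, pred_text="has_tag"))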
| craftassist-master | python/base_agent/sql_memory.py |
import os
import sys
sys.path.append(os.path.dirname(__file__))
| craftassist-master | python/base_agent/__init__.py |
"""
Copyright (c) Facebook, Inc. and its affiliates.
"""
class BaseAgent:
def __init__(self, opts, name=None):
self.opts = opts
self.name = name or "bot"
self.count = 0
self.init_memory()
self.init_controller()
self.init_perception()
def start(self):
while True: # count forever
try:
self.step()
except Exception as e:
self.handle_exception(e)
def step(self):
self.perceive()
self.memory.update(self)
# maybe place tasks on the stack, based on memory/perception
self.controller_step()
# step topmost task on stack
self.task_step()
self.count += 1
def perceive(self):
"""
Get information from the world and store it in memory.
"""
raise NotImplementedError
def controller_step(self):
"""
interpret commands, carry out dialogues, etc. place tasks on the task stack.
"""
raise NotImplementedError
def task_step(self):
"""
run the current task on the stack. interact with the world
"""
raise NotImplementedError
def handle_exception(self, e):
"""
handle/log exceptions
"""
raise NotImplementedError
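# Illustrative sketch of the smallest possible concrete agent; the no-op
# perception/controller/task methods and NullMemory are invented for
# illustration only.
class NullAgent(BaseAgent):
    def init_memory(self):
        class NullMemory:
            def update(self, agent):
                pass

        self.memory = NullMemory()

    def init_controller(self):
        pass

    def init_perception(self):
        pass

    def perceive(self):
        pass

    def controller_step(self):
        pass

    def task_step(self):
        pass

    def handle_exception(self, e):
        raise e


if __name__ == "__main__":
    agent = NullAgent(opts=None)
    agent.step()  # one perceive -> memory.update -> controller -> task cycle
    assert agent.count == 1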
| craftassist-master | python/base_agent/core.py |
"""
Copyright (c) Facebook, Inc. and its affiliates.
"""
import numpy as np
from memory_filters import ReferenceObjectSearcher, get_property_value
from base_util import TICKS_PER_SEC, TICKS_PER_MINUTE, TICKS_PER_HOUR
# attribute has function signature list(mems) --> list(float)
class Attribute:
def __init__(self, agent):
self.agent = agent
def __call__(self, mems):
raise NotImplementedError("Implemented by subclass")
class TableColumn(Attribute):
def __init__(self, agent, attribute):
super().__init__(agent)
self.attribute = attribute
def __call__(self, mems):
return [get_property_value(self.agent.memory, mem, self.attribute) for mem in mems]
class LinearExtentAttribute(Attribute):
"""
computes the (perhaps signed) length between two points in space.
if "relative_direction"=="AWAY", unsigned length
if "relative_direction" in ["LEFT", "RIGHT" ...] projected onto a special direction
and signed. the "arrow" goes from "source" to "destination",
e.g. if destination is more LEFT than source, "LEFT" will be positive
if "relative_direction" in ["INSIDE", "OUTSIDE"], signed length is shifted towards zero
so that 0 is at the boundary of the source.
This is not implemented yet FIXME!!
One of the two points in space is given by the position of a reference object,
either given directly as a memory or given as FILTERs to search;
the other is the list element input into the call
"""
def __init__(self, agent, location_data, mem=None, fixed_role="source"):
super().__init__(agent)
self.coordinate_transforms = agent.coordinate_transforms
self.location_data = location_data
self.fixed_role = fixed_role
self.frame = location_data.get("frame") or "AGENT"
# TODO generalize/formalize this
# TODO: currently stores look vecs/orientations at creation,
# build mechanism to update orientations, e.g. if giving directions
# "first you turn left, then go 7 steps forward, turn right, go 7 steps forward"
# need this in grammar too
# TODO store fixed pitch/yaw etc. with arxiv memories, not raw
try:
if self.frame == "AGENT":
# TODO handle this appropriately!
yaw, pitch = agent.memory._db_read(
"SELECT yaw, pitch FROM ReferenceObjects WHERE uuid=?", agent.memory.self_memid
)[0]
elif self.frame == "ABSOLUTE":
yaw, pitch = self.coordinate_transforms.yaw_pitch(
self.coordinate_transforms.DIRECTIONS["FRONT"]
)
# this is another player/agent; it is assumed that the frame has been replaced
# with the eid of the player/agent
else:
# TODO error if eid not found; but then parent/helper should have caught it?
# TODO error properly if eid is a ref object, but pitch or yaw are null
yaw, pitch = agent.memory._db_read(
"SELECT yaw, pitch FROM ReferenceObjects WHERE eid=?", self.frame
)[0]
except:
# TODO handle this better
raise Exception(
"Unable to find the yaw, pitch in the given frame; maybe can't find the eid?"
)
self.yaw = yaw
self.pitch = pitch
self.mem = mem
self.searcher = "mem"
# put a "NULL" mem in input to not build a searcher
if not self.mem:
d = self.location_data.get(fixed_role)
if d:
self.searcher = ReferenceObjectSearcher(search_data=d)
def extent(self, source, destination):
# source and destination are arrays in this function
# arrow goes from source to destination:
diff = np.subtract(source, destination)
if self.location_data["relative_direction"] in ["INSIDE", "OUTSIDE"]:
raise Exception("inside and outside not yet implemented in linear extent")
if self.location_data["relative_direction"] in [
"LEFT",
"RIGHT",
"UP",
"DOWN",
"FRONT",
"BACK",
]:
reldir_vec = self.coordinate_transforms.DIRECTIONS[
self.location_data["relative_direction"]
]
# this should be an inverse transform so we set inverted=True
dir_vec = self.coordinate_transforms.transform(
reldir_vec, self.yaw, self.pitch, inverted=True
)
return diff @ dir_vec
else: # AWAY
return np.linalg.norm(diff)
def __call__(self, mems):
if not self.mem:
fixed_mem = self.searcher.search(self.agent.memory)
# FIXME!!! handle mem not found
else:
fixed_mem = self.mem
# FIXME TODO store and use an arxiv if we don't want position to track!
if self.fixed_role == "source":
return [self.extent(fixed_mem.get_pos(), mem.get_pos()) for mem in mems]
else:
return [self.extent(mem.get_pos(), fixed_mem.get_pos()) for mem in mems]
# a value has a get_value() method; and get_value should not have
# any inputs
class ComparisonValue:
def __init__(self, agent):
self.agent = agent
def get_value(self):
raise NotImplementedError("Implemented by subclass")
# TODO more composable less ugly, more ML friendly
class ScaledValue(ComparisonValue):
def __init__(self, value, scale):
self.value = value
self.scale = scale
def get_value(self):
return self.scale * self.value.get_value()
# TODO feet, meters, inches, centimeters, degrees, etc.
# each of these converts a measure into the agents internal units,
# e.g. seconds/minutes/hours to ticks
# inches/centimeters/feet to meters or blocks (assume 1 block in mc equals 1 meter in real world)
conversion_factors = {
"seconds": TICKS_PER_SEC,
"minutes": TICKS_PER_MINUTE,
"hours": TICKS_PER_HOUR,
}
def convert_comparison_value(comparison_value, unit):
if not unit:
return comparison_value
assert conversion_factors.get(unit)
return ScaledValue(comparison_value, conversion_factors[unit])
class FixedValue(ComparisonValue):
def __init__(self, agent, value):
super().__init__(agent)
self.value = value
def get_value(self):
return self.value
# TODO store more in memory,
# or at least
# make some TimeNodes as side effects
# WARNING: elapsed mode uses get_time at construction as 0
class TimeValue(ComparisonValue):
"""
modes are elapsed, time, and world_time.
if "elapsed" or "time" uses memory.get_time as timer
if "elapsed", value is offset by time at creation
if "world_time" uses memory.get_world_time
"""
def __init__(self, agent, mode="elapsed"):
self.mode = mode
self.offset = 0.0
if self.mode == "elapsed":
self.offset = agent.memory.get_time()
self.get_time = agent.memory.get_time
elif self.mode == "time":
self.get_time = agent.memory.get_time
else: # world_time
self.get_time = agent.memory.get_world_time
def get_value(self):
return self.get_time() - self.offset
# TODO unit conversions?
class MemoryColumnValue(ComparisonValue):
def __init__(self, agent, search_data, mem=None):
super().__init__(agent)
self.search_data = search_data
# TODO expand beyond ref objects
self.mem = mem
if not self.mem:
self.searcher = ReferenceObjectSearcher(search_data=search_data)
def get_value(self):
if self.mem:
return self.search_data["attribute"]([self.mem])[0]
mems = self.searcher.search(self.agent.memory)
if len(mems) > 0:
# TODO/FIXME! deal with more than 1 better
return self.search_data["attribute"](mems)[0]
else:
return
class LinearExtentValue(ComparisonValue):
# this is a linear extent with both source and destination filled.
# e.g. "when you are as far from the house as the cow is from the house"
# but NOT for "when the cow is 3 steps from the house"
# in the latter case, one of the two entities will be given by the filters
def __init__(self, agent, linear_exent_attribute, mem=None, search_data=None):
super().__init__(agent)
self.linear_extent_attribute = linear_exent_attribute
assert mem or search_data
self.searcher = None
self.mem = mem
if not self.mem:
self.searcher = ReferenceObjectSearcher(search_data=search_data)
def get_value(self):
if self.mem:
mems = [self.mem]
else:
mems = self.searcher.search(self.agent.memory)
if len(mems) > 0:
# TODO/FIXME! deal with more than 1 better
return self.linear_extent_attribute(mems)[0]
else:
return
class Condition:
def __init__(self, agent):
self.agent = agent
def check(self) -> bool:
raise NotImplementedError("Implemented by subclass")
class NeverCondition(Condition):
def __init__(self, agent):
super().__init__(agent)
self.name = "never"
def check(self):
return False
class AndCondition(Condition):
""" conditions should be an iterable"""
def __init__(self, agent, conditions):
super().__init__(agent)
self.name = "and"
self.conditions = conditions
def check(self):
for c in self.conditions:
if not c.check():
return False
return True
class OrCondition(Condition):
""" conditions should be an iterable"""
def __init__(self, agent, conditions):
super().__init__(agent)
self.name = "or"
self.conditions = conditions
def check(self):
for c in self.conditions:
if c.check():
return True
return False
# start_time and end_time are in (0, 1)
# 0 is sunrise, .5 is sunset
def build_special_time_condition(agent, start_time, end_time, epsilon=0.01):
value_left = TimeValue(agent, mode="world_time")
if end_time > 0:
start = Comparator(
comparison_type="GREATER_THAN_EQUAL", value_left=value_left, value_right=start_time
)
end = Comparator(
comparison_type="LESS_THAN_EQUAL", value_left=value_left, value_right=end_time
)
return AndCondition(agent, [start, end])
else:
return Comparator(
comparison_type="CLOSE_TO",
value_left=value_left,
value_right=start_time,
epsilon=epsilon,
)
# TODO make this more ML friendly?
# eventually do "x minutes before condition"? how?
# KEEPS state (did the event occur- starts timer then)
class TimeCondition(Condition):
"""
if event is None, the timer starts now
if event is not None, it should be a condition, timer starts on the condition being true
This time condition is true when the comparator between
timer (as value_left) and the comparator's value_right is true
if comparator is a string, it should be "SUNSET" / "SUNRISE" / "DAY" / "NIGHT" / "AFTERNOON" / "MORNING"
else it should be built in the parent, and the value_right should be commensurable (properly scaled)
"""
def __init__(self, agent, comparator, event=None):
super().__init__(agent)
self.special = None
self.event = event
if type(comparator) is str:
if comparator == "SUNSET":
self.special = build_special_time_condition(agent, 0.5, -1)
elif comparator == "SUNRISE":
self.special = build_special_time_condition(agent, 0.0, -1)
elif comparator == "MORNING":
self.special = build_special_time_condition(agent, 0, 0.25)
elif comparator == "AFTERNOON":
self.special = build_special_time_condition(agent, 0.25, 0.5)
elif comparator == "DAY":
self.special = build_special_time_condition(agent, 0.0, 0.5)
elif comparator == "NIGHT":
self.special = build_special_time_condition(agent, 0.5, 1.0)
else:
raise NotImplementedError("unknown special time condition type: " + comparator)
else:
if not event:
comparator.value_left = TimeValue(agent, mode="elapsed")
self.comparator = comparator
def check(self):
if not self.event:
return self.comparator.check()
else:
if self.event.check():
self.comparator.value_left = TimeValue(self.agent, mode="elapsed")
self.event = None
return self.comparator.check()
class Comparator(Condition):
def __init__(
self, agent, comparison_type="EQUAL", value_left=None, value_right=None, epsilon=0
):
super().__init__(agent)
self.comparison_type = comparison_type
self.value_left = value_left
self.value_right = value_right
self.epsilon = epsilon
# raise errors if no value left or right?
# raise errors if strings compared with > < etc.?
# FIXME handle type mismatches
# TODO less types, use NotCondition
# TODO MOD_EQUAL, MOD_CLOSE
def check(self):
# guard against missing values before dereferencing them, and compare against
# None rather than falsiness so that 0 remains a legal comparison value
if self.value_left is None or self.value_right is None:
return False
value_left = self.value_left.get_value()
value_right = self.value_right.get_value()
if value_left is None or value_right is None:
return False
if self.comparison_type == "GREATER_THAN_EQUAL":
return value_left >= value_right
elif self.comparison_type == "GREATER_THAN":
return value_left > value_right
elif self.comparison_type == "EQUAL":
return value_left == value_right
elif self.comparison_type == "NOT_EQUAL":
return value_left != value_right
elif self.comparison_type == "LESS_THAN":
return value_left < value_right
elif self.comparison_type == "CLOSE_TO":
return abs(value_left - value_right) <= self.epsilon
else:
# self.comparison_type == "LESS_THAN_EQUAL":
return value_left <= value_right
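# Illustrative sketch of composing the pieces above without an agent: FixedValue
# carries no agent state, so a Comparator over two fixed values can be checked
# directly. The numbers are invented for illustration only.
if __name__ == "__main__":
    three = FixedValue(None, 3)
    five = FixedValue(None, 5)
    less = Comparator(None, comparison_type="LESS_THAN", value_left=three, value_right=five)
    assert less.check()
    # the AndCondition is False because NeverCondition never holds
    assert not AndCondition(None, [less, NeverCondition(None)]).check()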
| craftassist-master | python/base_agent/condition.py |
"""
Copyright (c) Facebook, Inc. and its affiliates.
"""
import copy
import json
import logging
import os
import re
import spacy
from typing import Tuple, Dict, Optional
from glob import glob
import sentry_sdk
import preprocess
from base_agent.memory_nodes import ProgramNode
from base_agent.dialogue_manager import DialogueManager
from base_agent.dialogue_objects import (
BotCapabilities,
BotGreet,
DialogueObject,
Say,
coref_resolve,
process_spans,
)
from mcevent import sio
from post_process_logical_form import post_process_logical_form
###TODO wrap these, clean up
# For QA model
dirname = os.path.dirname(__file__)
web_app_filename = os.path.join(dirname, "../craftassist/webapp_data.json")
from base_util import hash_user
sp = spacy.load("en_core_web_sm")
class NSPDialogueManager(DialogueManager):
def __init__(self, agent, dialogue_object_classes, opts):
super(NSPDialogueManager, self).__init__(agent, None)
# "dialogue_object_classes" should be a dict with keys
# interpreter, get_memory, and put_memory;
# the values are the corresponding classes
self.dialogue_objects = dialogue_object_classes
self.QA_model = None
# the following are still scripted and are handled directly from here
self.botCapabilityQuery = [
"what can you do",
"what else can you do",
"what do you know",
"tell me what you can do",
"what things can you do",
"what are your capabilities",
"show me what you can do",
"what are you capable of",
"help me",
"help",
"do something",
]
safety_words_path = opts.ground_truth_data_dir + "safety.txt"
if os.path.isfile(safety_words_path):
self.safety_words = self.get_safety_words(safety_words_path)
else:
self.safety_words = []
# Load bot greetings
greetings_path = opts.ground_truth_data_dir + "greetings.json"
if os.path.isfile(greetings_path):
self.botGreetings = json.load(open(greetings_path))
else:
self.botGreetings = {"hello": ["hi", "hello", "hey"], "goodbye": ["bye"]}
logging.info("using QA_model_path={}".format(opts.QA_nsp_model_path))
logging.info("using model_dir={}".format(opts.nsp_model_dir))
# Instantiate the QA model
if opts.QA_nsp_model_path:
from ttad.ttad_model.ttad_model_wrapper import ActionDictBuilder
self.QA_model = ActionDictBuilder(
opts.QA_nsp_model_path,
embeddings_path=opts.nsp_embeddings_path,
action_tree_path=opts.nsp_grammar_path,
)
# Instantiate the main model
if opts.nsp_data_dir is not None:
from ttad.ttad_transformer_model.query_model import TTADBertModel as Model
self.model = Model(model_dir=opts.nsp_model_dir, data_dir=opts.nsp_data_dir)
self.debug_mode = False
# if web_app option is enabled
self.webapp_dict = {}
self.web_app = opts.web_app
if self.web_app:
logging.info("web_app flag has been enabled")
logging.info("writing to file: %r " % (web_app_filename))
# os.system("python ./python/craftassist/web_app_socket.py &")
# ground_truth_data is the ground truth action dict from templated
# generations and will be queried first if checked in.
self.ground_truth_actions = {}
if not opts.no_ground_truth:
if os.path.isdir(opts.ground_truth_data_dir):
files = glob(opts.ground_truth_data_dir + "datasets/*.txt")
for dataset in files:
with open(dataset) as f:
for line in f.readlines():
text, logical_form = line.strip().split("|")
clean_text = text.strip('"')
self.ground_truth_actions[clean_text] = json.loads(logical_form)
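        # Editorial note: each line of the ground-truth dataset files is assumed
        # to have the form  <chat text>|<logical form as JSON>  with a single "|"
        # separator; surrounding double quotes on the text side are stripped
        # before the text is used as the lookup key in self.ground_truth_actions.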
self.dialogue_object_parameters = {
"agent": self.agent,
"memory": self.agent.memory,
"dialogue_stack": self.dialogue_stack,
}
@sio.on("queryParser")
def query_parser(sid, data):
logging.info("inside query parser.....")
logging.info(data)
x = self.get_logical_form(s=data["chat"], model=self.model)
logging.info(x)
payload = {"action_dict": x}
sio.emit("render_parser_output", payload)
def add_to_dict(self, chat_message, action_dict): # , text):
print("adding %r dict for message : %r" % (action_dict, chat_message))
self.webapp_dict[chat_message] = {"action_dict": action_dict} # , "text": text}
with open(web_app_filename, "w") as f:
json.dump(self.webapp_dict, f)
def maybe_get_dialogue_obj(self, chat: Tuple[str, str]) -> Optional[DialogueObject]:
"""Process a chat and maybe modify the dialogue stack"""
if len(self.dialogue_stack) > 0 and self.dialogue_stack[-1].awaiting_response:
return None
# chat is a single line command
speaker, chatstr = chat
preprocessed_chatstrs = preprocess.preprocess_chat(chatstr)
        # Push appropriate DialogueObjects to stack if incoming chat
# is one of the scripted ones
if any([chat in self.botCapabilityQuery for chat in preprocessed_chatstrs]):
return BotCapabilities(**self.dialogue_object_parameters)
for greeting_type in self.botGreetings:
if any([chat in self.botGreetings[greeting_type] for chat in preprocessed_chatstrs]):
return BotGreet(greeting_type, **self.dialogue_object_parameters)
# NOTE: preprocessing in model code is different, this shouldn't break anything
logical_form = self.get_logical_form(s=preprocessed_chatstrs[0], model=self.model)
return self.handle_logical_form(speaker, logical_form, preprocessed_chatstrs[0])
def handle_logical_form(self, speaker: str, d: Dict, chatstr: str) -> Optional[DialogueObject]:
"""Return the appropriate DialogueObject to handle an action dict "d"
"d" should have spans resolved by corefs not yet resolved to a specific
MemoryObject
"""
coref_resolve(self.agent.memory, d, chatstr)
logging.info('logical form post-coref "{}" -> {}'.format(hash_user(speaker), d))
ProgramNode.create(self.agent.memory, d)
if d["dialogue_type"] == "NOOP":
return Say("I don't know how to answer that.", **self.dialogue_object_parameters)
elif d["dialogue_type"] == "HUMAN_GIVE_COMMAND":
return self.dialogue_objects["interpreter"](
speaker, d, **self.dialogue_object_parameters
)
elif d["dialogue_type"] == "PUT_MEMORY":
return self.dialogue_objects["put_memory"](
speaker, d, **self.dialogue_object_parameters
)
elif d["dialogue_type"] == "GET_MEMORY":
logging.info("this model out: %r" % (d))
logging.info("querying QA model now")
if self.QA_model:
QA_model_d = self.get_logical_form(
s=chatstr, model=self.QA_model, chat_as_list=True
)
logging.info("QA model out: %r" % (QA_model_d))
if (
QA_model_d["dialogue_type"] != "GET_MEMORY"
                ):  # this happens sometimes when the new model says it's an Answer action but the previous one says noop
return Say(
"I don't know how to answer that.", **self.dialogue_object_parameters
)
return self.dialogue_objects["get_memory"](
speaker, QA_model_d, **self.dialogue_object_parameters
)
else:
return self.dialogue_objects["get_memory"](
speaker, d, **self.dialogue_object_parameters
)
else:
raise ValueError("Bad dialogue_type={}".format(d["dialogue_type"]))
def get_logical_form(self, s: str, model, chat_as_list=False) -> Dict:
"""Query model to get the logical form"""
if s in self.ground_truth_actions:
d = self.ground_truth_actions[s]
logging.info('Found gt action for "{}"'.format(s))
else:
logging.info("Querying the semantic parsing model")
if chat_as_list:
d = model.parse([s])
else:
d = model.parse(chat=s) # self.ttad_model.parse(chat=s)
# perform lemmatization on the chat
logging.info('chat before lemmatization "{}"'.format(s))
lemmatized_chat = sp(s)
chat = " ".join(str(word.lemma_) for word in lemmatized_chat)
logging.info('chat after lemmatization "{}"'.format(chat))
# Get the words from indices in spans
process_spans(d, re.split(r" +", s), re.split(r" +", chat))
logging.info('ttad pre-coref "{}" -> {}'.format(chat, d))
# web app
if self.web_app:
# get adtt output
# t = ""
# try:
# t = adtt.adtt(d)
# except:
# t = ""
self.add_to_dict(chat_message=s, action_dict=d)
# log to sentry
sentry_sdk.capture_message(
json.dumps({"type": "ttad_pre_coref", "in_original": s, "out": d})
)
sentry_sdk.capture_message(
json.dumps({"type": "ttad_pre_coref", "in_lemmatized": chat, "out": d})
)
        logging.info('logical form before grammar update "{}"'.format(d))
d = post_process_logical_form(copy.deepcopy(d))
logging.info('logical form after grammar fix "{}"'.format(d))
return d
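# Editorial note: the dict returned by get_logical_form is a logical form whose
# "dialogue_type" is one of the values dispatched on in handle_logical_form above
# (NOOP, HUMAN_GIVE_COMMAND, PUT_MEMORY or GET_MEMORY); a minimal illustrative
# parse might look like {"dialogue_type": "NOOP"}.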
| craftassist-master | python/base_agent/nsp_dialogue_manager.py |
import uuid
import ast
from typing import Optional, List, Dict, cast
from base_util import XYZ, POINT_AT_TARGET, to_player_struct
from task import Task
class MemoryNode:
TABLE_COLUMNS = ["uuid"]
PROPERTIES_BLACKLIST = ["agent_memory", "forgetme"]
NODE_TYPE: Optional[str] = None
@classmethod
def new(cls, agent_memory, snapshot=False) -> str:
memid = uuid.uuid4().hex
t = agent_memory.get_time()
agent_memory._db_write(
"INSERT INTO Memories VALUES (?,?,?,?,?,?)", memid, cls.NODE_TYPE, t, t, t, snapshot
)
return memid
def __init__(self, agent_memory, memid: str):
self.agent_memory = agent_memory
self.memid = memid
def get_tags(self) -> List[str]:
return self.agent_memory.get_tags_by_memid(self.memid)
def get_properties(self) -> Dict[str, str]:
blacklist = self.PROPERTIES_BLACKLIST + self._more_properties_blacklist()
return {k: v for k, v in self.__dict__.items() if k not in blacklist}
def update_recently_attended(self) -> None:
self.agent_memory.set_memory_attended_time(self.memid)
self.snapshot(self.agent_memory)
def _more_properties_blacklist(self) -> List[str]:
"""Override in subclasses to add additional keys to the properties blacklist"""
return []
def snapshot(self, agent_memory):
"""Override in subclasses if necessary to properly snapshot."""
read_cmd = "SELECT "
for r in self.TABLE_COLUMNS:
read_cmd += r + ", "
read_cmd = read_cmd.strip(", ")
read_cmd += " FROM " + self.TABLE + " WHERE uuid=?"
data = agent_memory._db_read_one(read_cmd, self.memid)
if not data:
raise ("tried to snapshot nonexistent memory")
archive_memid = self.new(agent_memory, snapshot=True)
new_data = list(data)
new_data[0] = archive_memid
if hasattr(self, "ARCHIVE_TABLE"):
archive_table = self.ARCHIVE_TABLE
else:
archive_table = self.TABLE
write_cmd = "INSERT INTO " + archive_table + "("
qs = ""
for r in self.TABLE_COLUMNS:
write_cmd += r + ", "
qs += "?, "
write_cmd = write_cmd.strip(", ")
write_cmd += ") VALUES (" + qs.strip(", ") + ")"
agent_memory._db_write(write_cmd, *new_data)
link_archive_to_mem(agent_memory, self.memid, archive_memid)
def link_archive_to_mem(agent_memory, memid, archive_memid):
agent_memory.add_triple(subj=archive_memid, pred_text="_archive_of", obj=memid)
agent_memory.add_triple(subj=memid, pred_text="_has_archive", obj=archive_memid)
class ProgramNode(MemoryNode):
"""represents logical forms (outputs from the semantic parser)"""
TABLE_COLUMNS = ["uuid", "logical_form"]
TABLE = "Programs"
NODE_TYPE = "Program"
def __init__(self, agent_memory, memid: str):
super().__init__(agent_memory, memid)
text = self.agent_memory._db_read_one(
"SELECT logical_form FROM Programs WHERE uuid=?", self.memid
)
self.logical_form = ast.literal_eval(text)
@classmethod
def create(cls, memory, logical_form, snapshot=False) -> str:
memid = cls.new(memory, snapshot=snapshot)
memory._db_write(
"INSERT INTO Programs(uuid, logical_form) VALUES (?,?)", memid, format(logical_form)
)
return memid
class NamedAbstractionNode(MemoryNode):
"""a abstract concept with a name, to be used in triples"""
TABLE_COLUMNS = ["uuid", "name"]
TABLE = "NamedAbstractions"
NODE_TYPE = "NamedAbstraction"
def __init__(self, agent_memory, memid: str):
super().__init__(agent_memory, memid)
name = self.agent_memory._db_read_one(
"SELECT name FROM NamedAbstractions WHERE uuid=?", self.memid
)
self.name = name
@classmethod
def create(cls, memory, name, snapshot=False) -> str:
memid = memory._db_read_one("SELECT uuid FROM NamedAbstractions WHERE name=?", name)
if memid:
return memid[0]
memid = cls.new(memory, snapshot=snapshot)
memory._db_write("INSERT INTO NamedAbstractions(uuid, name) VALUES (?,?)", memid, name)
return memid
# the table entry just has the memid and a modification time,
# actual set elements are handled as triples
class SetNode(MemoryNode):
""" for representing sets of objects, so that it is easier to build complex relations
using RDF/triplestore format. is currently fetal- not used in main codebase yet """
TABLE_COLUMNS = ["uuid"]
TABLE = "SetMems"
NODE_TYPE = "Set"
def __init__(self, agent_memory, memid: str):
super().__init__(agent_memory, memid)
# FIXME put the member triples
@classmethod
def create(cls, memory, snapshot=False) -> str:
memid = cls.new(memory, snapshot=snapshot)
memory._db_write("INSERT INTO SetMems(uuid) VALUES (?)", memid, memory.get_time())
return memid
def get_members(self):
return self.agent_memory.get_triples(pred_text="set_member_", obj=self.memid)
def snapshot(self, agent_memory):
return SetNode.create(agent_memory, snapshot=True)
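# Editorial sketch (illustrative only; assumes an AgentMemory instance `memory`
# and two existing member memids m1 and m2; the pred_text mirrors get_members above):
#   set_memid = SetNode.create(memory)
#   memory.add_triple(subj=m1, pred_text="set_member_", obj=set_memid)
#   memory.add_triple(subj=m2, pred_text="set_member_", obj=set_memid)
#   members = SetNode(memory, set_memid).get_members()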
class ReferenceObjectNode(MemoryNode):
""" generic memory node for anything that has a spatial location and can be
used a spatial reference (e.g. to the left of the x)."""
TABLE = "ReferenceObjects"
NODE_TYPE = "ReferenceObject"
ARCHIVE_TABLE = "ArchivedReferenceObjects"
def get_pos(self) -> XYZ:
raise NotImplementedError("must be implemented in subclass")
def get_point_at_target(self) -> POINT_AT_TARGET:
raise NotImplementedError("must be implemented in subclass")
def get_bounds(self):
raise NotImplementedError("must be implemented in subclass")
class PlayerNode(ReferenceObjectNode):
""" represents humans and other agents that can affect the world """
TABLE_COLUMNS = ["uuid", "eid", "name", "x", "y", "z", "pitch", "yaw", "ref_type"]
NODE_TYPE = "Player"
def __init__(self, agent_memory, memid: str):
super().__init__(agent_memory, memid)
eid, name, x, y, z, pitch, yaw = self.agent_memory._db_read_one(
"SELECT eid, name, x, y, z, pitch, yaw FROM ReferenceObjects WHERE uuid=?", self.memid
)
self.eid = eid
self.name = name
self.pos = (x, y, z)
self.pitch = pitch
self.yaw = yaw
@classmethod
def create(cls, memory, player_struct) -> str:
memid = cls.new(memory)
memory._db_write(
"INSERT INTO ReferenceObjects(uuid, eid, name, x, y, z, pitch, yaw, ref_type) VALUES (?,?,?,?,?,?,?,?,?)",
memid,
player_struct.entityId,
player_struct.name,
player_struct.pos.x,
player_struct.pos.y,
player_struct.pos.z,
player_struct.look.pitch,
player_struct.look.yaw,
"player",
)
memory.tag(memid, "_player")
memory.tag(memid, "_physical_object")
memory.tag(memid, "_animate")
# this is a hack until memory_filters does "not"
memory.tag(memid, "_not_location")
return memid
@classmethod
def update(cls, memory, p, memid) -> str:
cmd = "UPDATE ReferenceObjects SET eid=?, name=?, x=?, y=?, z=?, pitch=?, yaw=? WHERE "
cmd = cmd + "uuid=?"
memory._db_write(
cmd, p.entityId, p.name, p.pos.x, p.pos.y, p.pos.z, p.look.pitch, p.look.yaw, memid
)
return memid
def get_pos(self) -> XYZ:
x, y, z = self.agent_memory._db_read_one(
"SELECT x, y, z FROM ReferenceObjects WHERE uuid=?", self.memid
)
self.pos = (x, y, z)
return self.pos
# TODO: use a smarter way to get point_at_target
def get_point_at_target(self) -> POINT_AT_TARGET:
x, y, z = self.pos
# use the block above the player as point_at_target
return cast(POINT_AT_TARGET, (x, y + 1, z, x, y + 1, z))
def get_bounds(self):
x, y, z = self.pos
return x, x, y, y, z, z
def get_struct(self):
return to_player_struct(self.pos, self.yaw, self.pitch, self.eid, self.name)
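# Editorial sketch (illustrative only): creating and updating a PlayerNode from a
# player struct; the to_player_struct argument order mirrors get_struct above
# (pos, yaw, pitch, eid, name):
#   p = to_player_struct((0.0, 64.0, 0.0), 0.0, 0.0, 12345, "alice")
#   memid = PlayerNode.create(memory, p)
#   PlayerNode.update(memory, p, memid)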
class SelfNode(PlayerNode):
"""special PLayerNode for representing the agent's self"""
TABLE_COLUMNS = ["uuid", "eid", "name", "x", "y", "z", "pitch", "yaw", "ref_type"]
NODE_TYPE = "Self"
# locations should always be archives?
class LocationNode(ReferenceObjectNode):
"""ReferenceObjectNode representing a raw location (a point in space) """
TABLE_COLUMNS = ["uuid", "x", "y", "z", "ref_type"]
NODE_TYPE = "Location"
def __init__(self, agent_memory, memid: str):
super().__init__(agent_memory, memid)
x, y, z = self.agent_memory._db_read_one(
"SELECT x, y, z FROM ReferenceObjects WHERE uuid=?", self.memid
)
self.location = (x, y, z)
self.pos = (x, y, z)
@classmethod
def create(cls, memory, xyz: XYZ) -> str:
memid = cls.new(memory)
memory._db_write(
"INSERT INTO ReferenceObjects(uuid, x, y, z, ref_type) VALUES (?, ?, ?, ?, ?)",
memid,
xyz[0],
xyz[1],
xyz[2],
"location",
)
return memid
def get_bounds(self):
x, y, z = self.pos
return x, x, y, y, z, z
def get_pos(self) -> XYZ:
return self.pos
def get_point_at_target(self) -> POINT_AT_TARGET:
x, y, z = self.pos
return cast(POINT_AT_TARGET, (x, y, z, x, y, z))
# locations should always be archives?
class AttentionNode(LocationNode):
"""ReferenceObjectNode representing spatial attention"""
TABLE_COLUMNS = ["uuid", "x", "y", "z", "type_name", "ref_type"]
NODE_TYPE = "Attention"
def __init__(self, agent_memory, memid: str):
super().__init__(agent_memory, memid)
# we use the type_name field to store whose attention it is
attender = self.agent_memory._db_read_one(
"SELECT type_name FROM ReferenceObjects WHERE uuid=?", self.memid
)
self.attender = attender
@classmethod
def create(cls, memory, xyz: XYZ, attender=None) -> str:
memid = cls.new(memory)
memory._db_write(
"INSERT INTO ReferenceObjects(uuid, x, y, z, type_name, ref_type) VALUES (?, ?, ?, ?, ?, ?)",
memid,
xyz[0],
xyz[1],
xyz[2],
attender,
"attention",
)
return memid
class TimeNode(MemoryNode):
"""represents a temporal 'location' """
TABLE_COLUMNS = ["uuid", "time"]
TABLE = "Times"
NODE_TYPE = "Time"
def __init__(self, agent_memory, memid: str):
super().__init__(agent_memory, memid)
t = self.agent_memory._db_read_one("SELECT time FROM Times WHERE uuid=?", self.memid)
self.time = t
@classmethod
def create(cls, memory, time: int) -> str:
memid = cls.new(memory)
memory._db_write("INSERT INTO Times(uuid, time) VALUES (?, ?)", memid, time)
return memid
class ChatNode(MemoryNode):
"""represents a chat/utterance from another agent/human """
TABLE_COLUMNS = ["uuid", "speaker", "chat", "time"]
TABLE = "Chats"
NODE_TYPE = "Time"
def __init__(self, agent_memory, memid: str):
super().__init__(agent_memory, memid)
speaker, chat_text, time = self.agent_memory._db_read_one(
"SELECT speaker, chat, time FROM Chats WHERE uuid=?", self.memid
)
self.speaker_id = speaker
self.chat_text = chat_text
self.time = time
@classmethod
def create(cls, memory, speaker: str, chat: str) -> str:
memid = cls.new(memory)
memory._db_write(
"INSERT INTO Chats(uuid, speaker, chat, time) VALUES (?, ?, ?, ?)",
memid,
speaker,
chat,
memory.get_time(),
)
return memid
class TaskNode(MemoryNode):
""" represents a task object that was placed on the agent's task_stack """
TABLE_COLUMNS = ["uuid", "action_name", "pickled", "paused", "created_at", "finished_at"]
TABLE = "Tasks"
NODE_TYPE = "Task"
def __init__(self, agent_memory, memid: str):
super().__init__(agent_memory, memid)
pickled, created_at, finished_at, action_name = self.agent_memory._db_read_one(
"SELECT pickled, created_at, finished_at, action_name FROM Tasks WHERE uuid=?", memid
)
self.task = self.agent_memory.safe_unpickle(pickled)
self.created_at = created_at
self.finished_at = finished_at
self.action_name = action_name
@classmethod
def create(cls, memory, task: Task) -> str:
memid = cls.new(memory)
task.memid = memid # FIXME: this shouldn't be necessary, merge Task and TaskNode?
memory._db_write(
"INSERT INTO Tasks (uuid, action_name, pickled, created_at) VALUES (?,?,?,?)",
memid,
task.__class__.__name__,
memory.safe_pickle(task),
memory.get_time(),
)
return memid
def get_chat(self) -> Optional[ChatNode]:
"""Return the memory of the chat that caused this task's creation, or None"""
triples = self.agent_memory.get_triples(pred_text="chat_effect_", obj=self.memid)
if triples:
chat_id, _, _ = triples[0]
return ChatNode(self.agent_memory, chat_id)
else:
return None
def get_parent_task(self) -> Optional["TaskNode"]:
"""Return the 'TaskNode' of the parent task, or None"""
triples = self.agent_memory.get_triples(subj=self.memid, pred_text="_has_parent_task")
if len(triples) == 0:
return None
elif len(triples) == 1:
_, _, parent_memid = triples[0]
return TaskNode(self.agent_memory, parent_memid)
else:
raise AssertionError("Task {} has multiple parents: {}".format(self.memid, triples))
def get_root_task(self) -> Optional["TaskNode"]:
mem = self
parent = self.get_parent_task()
while parent is not None:
mem = parent
parent = mem.get_parent_task()
return mem
def get_child_tasks(self) -> List["TaskNode"]:
"""Return tasks that were spawned beause of this task"""
r = self.agent_memory.get_triples(pred_text="_has_parent_task", obj=self.memid)
memids = [m for m, _, _ in r]
return [TaskNode(self.agent_memory, m) for m in memids]
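    # Editorial note: the parent/child relation is stored as a triple of the form
    # (child_memid, "_has_parent_task", parent_memid), so get_parent_task queries
    # with subj=self.memid while get_child_tasks queries with obj=self.memid.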
def all_descendent_tasks(self, include_root=False) -> List["TaskNode"]:
"""Return a list of 'TaskNode' objects whose _has_parent_task root is this task
If include_root is True, include this node in the list.
Tasks are returned in the order they were finished.
"""
descendents = []
q = [self]
while q:
task = q.pop()
children = task.get_child_tasks()
descendents.extend(children)
q.extend(children)
if include_root:
descendents.append(self)
return sorted(descendents, key=lambda t: t.finished_at)
def __repr__(self):
return "<TaskNode: {}>".format(self.task)
# list of nodes to register in memory
NODELIST = [
TaskNode,
ChatNode,
LocationNode,
AttentionNode,
SetNode,
TimeNode,
PlayerNode,
SelfNode,
ProgramNode,
NamedAbstractionNode,
]
| craftassist-master | python/base_agent/memory_nodes.py |
"""
Copyright (c) Facebook, Inc. and its affiliates.
The current control flow of dialogue is:
1. A chat comes in and Dialogue manager reads it or the bot triggers a
dialogue because of memory/perception/task state
2. The dialogue manager puts a DialogueObject on the DialogueStack.
3. The DialogueStack calls .step(), which in turn calls DialogueObject.step(),
which performs some action as implemented in its step method. The step may
also interact with the agent's memory. Finally, step() decides whether the
DialogueObject is finished.
- The step() returns a string: maybe_chat, a dict: maybe_data.
- The step()'s outputs are read by the manager which can decide to put another
DialogueObject on the stack.
The maybe_data from the output of the dialogue object's step() can
contain a 'push' key; this overrides the manager's decision on what to push to
the stack.
Control flow for interpreter and clarification:
The interpreter is also a kind of DialogueObject, and a clarification step is
the interpreter returning control to the DialogueManager, which pushes a
ConfirmTask or ConfirmReferenceObject as a DialogueObject onto the DialogueStack.
The manager takes as input the agent and the model used by the manager, and
creates a DialogueStack. DialogueObjects are constructed from the agent memory,
the dialogue stack, and dialogue_object_data, where dialogue_object_data holds
explicit commands that force the manager to return a specific DialogueObject
to put on the stack.
"""
import logging
from typing import Tuple, Optional
from dialogue_stack import DialogueStack
from .dialogue_objects import DialogueObject, Say
class DialogueManager(object):
def __init__(self, agent, model):
self.agent = agent
self.dialogue_stack = DialogueStack(agent, agent.memory)
self.model = model
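    # Editorial note: the safety-words file is assumed to hold one lowercase word
    # per line; empty lines and lines starting with "<" or "#" are skipped below.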
def get_safety_words(self, safety_words_path):
"""Read a list of safety words to prevent abuse."""
with open(safety_words_path) as f:
safety_lines = f.readlines()
safety_words = []
for l in safety_lines:
w = l.strip("\n").lower()
if w != "" and w[0] != "<" and w[0] != "#":
safety_words.append(w)
return safety_words
def is_safe(self, string):
safety_set = set(self.safety_words)
cmd_set = set(string.lower().split())
notsafe = len(cmd_set & safety_set) > 0
return not notsafe
# the dialogue manager model should access the task stack and chat history
# through the agent's memory, adding the most recent chat here as convenience
# maybe add a get_new_chat to memory and don't pass?
# chat is a (speaker, str) tuple
def step(self, chat: Tuple[str, str]):
# check safety
if not self.is_safe(chat[1]):
self.dialogue_stack.append_new(Say, "Please don't be rude.")
return
if chat[1]:
logging.info("Dialogue stack pre-run_model: {}".format(self.dialogue_stack.stack))
# NOTE: the model is responsible for not putting a new
# object on the stack if it sees that whatever is on
# the stack should continue.
# TODO: Maybe we need a HoldOn dialogue object?
obj = self.maybe_get_dialogue_obj(chat)
if obj is not None:
self.dialogue_stack.append(obj)
# Always call dialogue_stack.step(), even if chat is empty
if len(self.dialogue_stack) > 0:
self.dialogue_stack.step()
def maybe_get_dialogue_obj(self, chat: Tuple[str, str]) -> Optional[DialogueObject]:
raise NotImplementedError("Must implement maybe_get_dialogue_object in subclass")
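# Editorial sketch (not part of the original module): a minimal DialogueManager
# subclass illustrating the maybe_get_dialogue_obj contract. It simply echoes the
# chat back via a Say object; NSPDialogueManager is the real, model-backed version.
class EchoDialogueManager(DialogueManager):
    def __init__(self, agent):
        super().__init__(agent, model=None)
        self.safety_words = []  # step() consults this via is_safe()
    def maybe_get_dialogue_obj(self, chat: Tuple[str, str]) -> Optional[DialogueObject]:
        speaker, chatstr = chat
        return Say(
            "you said: " + chatstr,
            agent=self.agent,
            memory=self.agent.memory,
            dialogue_stack=self.dialogue_stack,
        )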
| craftassist-master | python/base_agent/dialogue_manager.py |