python_code
stringlengths 0
780k
| repo_name
stringlengths 7
38
| file_path
stringlengths 5
103
|
---|---|---|
# Copyright 2021 DeepMind Technologies Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| pysc2-master | pysc2/env/converter/cc/game_data/python/__init__.py |
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This replaces google's gfile used for network storage.
A more complete public version of gfile:
https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/platform/gfile.py
"""
import os
# pylint: disable=invalid-name
# Thin aliases that map the gfile-style CamelCase API directly onto the
# local-filesystem functions in `os`, so code written against Google's
# internal gfile keeps working without network storage.
Exists = os.path.exists
IsDirectory = os.path.isdir
ListDir = os.listdir
MakeDirs = os.makedirs
Open = open
| pysc2-master | pysc2/lib/gfile.py |
#!/usr/bin/python
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the point library."""
from absl.testing import absltest
from pysc2.lib import point
class FakePoint(object):
  """A minimal object exposing `x` and `y` attributes, for Point tests."""

  def __init__(self):
    self.x, self.y = 5, 8
class PointTest(absltest.TestCase):
  """Unit tests for point.Point."""

  def testBuild(self):
    self.assertEqual(point.Point(5, 8), point.Point.build(FakePoint()))

  def testAssignTo(self):
    fake = FakePoint()
    self.assertEqual(5, fake.x)
    self.assertEqual(8, fake.y)
    point.Point(1, 2).assign_to(fake)
    self.assertEqual(1, fake.x)
    self.assertEqual(2, fake.y)

  def testDist(self):
    p1 = point.Point(1, 1)
    p2 = point.Point(4, 5)
    self.assertEqual(5, p1.dist(p2))

  def testDistSq(self):
    p1 = point.Point(1, 1)
    p2 = point.Point(4, 5)
    self.assertEqual(25, p1.dist_sq(p2))

  def testLen(self):
    self.assertEqual(5, point.Point(3, 4).len())

  def testScale(self):
    self.assertAlmostEqual(2, point.Point(3, 4).scale(2).len())

  def testScaleMaxSize(self):
    pt = point.Point(3, 4)
    self.assertEqual(pt, pt.scale_max_size(pt))
    scaled = point.Point(6, 8)
    self.assertEqual(scaled, pt.scale_max_size(point.Point(8, 8)))
    self.assertEqual(scaled, pt.scale_max_size(point.Point(100, 8)))
    self.assertEqual(scaled, pt.scale_max_size(point.Point(6, 100)))

  def testScaleMinSize(self):
    pt = point.Point(3, 4)
    self.assertEqual(pt, pt.scale_min_size(pt))
    scaled = point.Point(6, 8)
    self.assertEqual(scaled, pt.scale_min_size(point.Point(6, 6)))
    self.assertEqual(scaled, pt.scale_min_size(point.Point(2, 8)))
    self.assertEqual(scaled, pt.scale_min_size(point.Point(6, 2)))

  def testMinDim(self):
    self.assertEqual(5, point.Point(5, 10).min_dim())

  def testMaxDim(self):
    self.assertEqual(10, point.Point(5, 10).max_dim())

  def testTranspose(self):
    self.assertEqual(point.Point(4, 3), point.Point(3, 4).transpose())

  def testRound(self):
    rounded = point.Point(1.3, 2.6).round()
    self.assertEqual(point.Point(1, 3), rounded)
    self.assertIsInstance(rounded.x, int)
    self.assertIsInstance(rounded.y, int)

  def testCeil(self):
    ceiled = point.Point(1.3, 2.6).ceil()
    self.assertEqual(point.Point(2, 3), ceiled)
    self.assertIsInstance(ceiled.x, int)
    self.assertIsInstance(ceiled.y, int)

  def testFloor(self):
    floored = point.Point(1.3, 2.6).floor()
    self.assertEqual(point.Point(1, 2), floored)
    self.assertIsInstance(floored.x, int)
    self.assertIsInstance(floored.y, int)

  def testRotate(self):
    pt = point.Point(0, 100)
    self.assertEqual(point.Point(-100, 0), pt.rotate_deg(90).round())
    self.assertEqual(point.Point(100, 0), pt.rotate_deg(-90).round())
    self.assertEqual(point.Point(0, -100), pt.rotate_deg(180).round())

  def testContainedCircle(self):
    pt = point.Point(2, 2)
    self.assertTrue(pt.contained_circle(point.Point(1, 1), 2))
    self.assertFalse(pt.contained_circle(point.Point(1, 1), 0.5))

  def testBound(self):
    top_left = point.Point(1, 2)
    bottom_right = point.Point(3, 4)
    self.assertEqual(top_left, point.Point(0, 0).bound(top_left, bottom_right))
    self.assertEqual(
        bottom_right, point.Point(10, 10).bound(top_left, bottom_right))
    self.assertEqual(point.Point(1.5, 2),
                     point.Point(1.5, 0).bound(top_left, bottom_right))
class RectTest(absltest.TestCase):
  """Unit tests for point.Rect."""

  def testInit(self):
    rect = point.Rect(1, 2, 3, 4)
    self.assertEqual(rect.t, 1)
    self.assertEqual(rect.l, 2)
    self.assertEqual(rect.b, 3)
    self.assertEqual(rect.r, 4)
    self.assertEqual(rect.tl, point.Point(2, 1))
    self.assertEqual(rect.tr, point.Point(4, 1))
    self.assertEqual(rect.bl, point.Point(2, 3))
    self.assertEqual(rect.br, point.Point(4, 3))

  def testInitBad(self):
    with self.assertRaises(TypeError):
      point.Rect(4, 3, 2, 1)  # require t <= b, l <= r
    with self.assertRaises(TypeError):
      point.Rect(1)
    with self.assertRaises(TypeError):
      point.Rect(1, 2, 3)
    with self.assertRaises(TypeError):
      point.Rect()

  def testInitOnePoint(self):
    rect = point.Rect(point.Point(1, 2))
    self.assertEqual(rect.t, 0)
    self.assertEqual(rect.l, 0)
    self.assertEqual(rect.b, 2)
    self.assertEqual(rect.r, 1)
    self.assertEqual(rect.tl, point.Point(0, 0))
    self.assertEqual(rect.tr, point.Point(1, 0))
    self.assertEqual(rect.bl, point.Point(0, 2))
    self.assertEqual(rect.br, point.Point(1, 2))
    self.assertEqual(rect.size, point.Point(1, 2))
    self.assertEqual(rect.center, point.Point(1, 2) / 2)
    self.assertEqual(rect.area, 2)

  def _verifyRect1324(self, rect):
    """Asserts every property of a rect spanning (1, 2) to (3, 4)."""
    self.assertEqual(rect.t, 2)
    self.assertEqual(rect.l, 1)
    self.assertEqual(rect.b, 4)
    self.assertEqual(rect.r, 3)
    self.assertEqual(rect.tl, point.Point(1, 2))
    self.assertEqual(rect.tr, point.Point(3, 2))
    self.assertEqual(rect.bl, point.Point(1, 4))
    self.assertEqual(rect.br, point.Point(3, 4))
    self.assertEqual(rect.size, point.Point(2, 2))
    self.assertEqual(rect.center, point.Point(2, 3))
    self.assertEqual(rect.area, 4)

  def testInitTwoPoints(self):
    self._verifyRect1324(point.Rect(point.Point(1, 2), point.Point(3, 4)))

  def testInitTwoPointsReversed(self):
    # Corner order shouldn't matter.
    self._verifyRect1324(point.Rect(point.Point(3, 4), point.Point(1, 2)))

  def testArea(self):
    rect = point.Rect(point.Point(1, 1), point.Point(3, 4))
    self.assertEqual(rect.area, 6)

  def testContains(self):
    rect = point.Rect(point.Point(1, 1), point.Point(3, 3))
    self.assertTrue(rect.contains_point(point.Point(2, 2)))
    self.assertFalse(rect.contains_circle(point.Point(2, 2), 5))
    self.assertFalse(rect.contains_point(point.Point(4, 4)))
    self.assertFalse(rect.contains_circle(point.Point(4, 4), 5))

  def testIntersectsCircle(self):
    rect = point.Rect(point.Point(1, 1), point.Point(3, 3))
    self.assertFalse(rect.intersects_circle(point.Point(0, 0), 0.5))
    self.assertFalse(rect.intersects_circle(point.Point(0, 0), 1))
    self.assertTrue(rect.intersects_circle(point.Point(0, 0), 1.5))
    self.assertTrue(rect.intersects_circle(point.Point(0, 0), 2))
if __name__ == '__main__':
  absltest.main()  # Runs all test cases defined in this module.
| pysc2-master | pysc2/lib/point_test.py |
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Interface for tracking the number and/or latency of episodes and steps."""
class _EventTimer(object):
"""Example event timer to measure step and observation times."""
def __enter__(self):
pass
def __exit__(self, unused_exception_type, unused_exc_value, unused_traceback):
pass
class Metrics(object):
  """Interface for tracking the number and/or latency of episodes and steps.

  Every method here is a no-op; presumably real implementations override
  these hooks to export metrics — TODO confirm against callers.
  """

  def __init__(self, map_name):
    # map_name is accepted but unused by this no-op implementation.
    pass

  def increment_instance(self):
    pass

  def increment_episode(self):
    pass

  def measure_step_time(self, num_steps=1):
    """Return a context manager to measure the time to perform N game steps."""
    del num_steps  # Unused by the no-op timer.
    return _EventTimer()

  def measure_observation_time(self):
    """Return a context manager to measure the time to get an observation."""
    return _EventTimer()

  def close(self):
    pass

  def __del__(self):
    # Make sure close() runs even if the caller forgets to call it.
    self.close()
| pysc2-master | pysc2/lib/metrics.py |
# Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A memoization decorator."""
def memoize(func):
  """Memoization decorator."""

  class Memodict(dict):
    """A memoization decorator dict."""
    # No per-instance __dict__; the dict itself is the cache.
    __slots__ = ()
    # Mirror the wrapped function's identity so the result looks like it.
    __name__ = func.__name__
    __doc__ = func.__doc__

    def __call__(self, *args):
      # Dict lookup; __missing__ fills in values on the first call.
      return self[args]

    def __missing__(self, key):
      value = self[key] = func(*key)
      return value

  return Memodict()
| pysc2-master | pysc2/lib/memoize.py |
#!/usr/bin/python
# Copyright 2019 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for image_differencer.py."""
from absl.testing import absltest
import numpy as np
from pysc2.lib import image_differencer
from pysc2.lib import proto_diff
from s2clientprotocol import common_pb2
from s2clientprotocol import sc2api_pb2 as sc_pb
from s2clientprotocol import spatial_pb2
class ImageDifferencerTest(absltest.TestCase):
  """Tests for image_differencer.image_differencer."""

  @staticmethod
  def _observation_with_height_map(cells):
    """Builds a ResponseObservation whose 4x4 height_map contains `cells`."""
    return sc_pb.ResponseObservation(
        observation=sc_pb.Observation(
            feature_layer_data=spatial_pb2.ObservationFeatureLayer(
                renders=spatial_pb2.FeatureLayers(
                    height_map=common_pb2.ImageData(
                        bits_per_pixel=32,
                        size=common_pb2.Size2DI(x=4, y=4),
                        data=np.array(cells, dtype=np.int32).tobytes())))))

  def testFilteredOut(self):
    # Paths outside the image layers are not handled by the differencer.
    diff = image_differencer.image_differencer(
        path=proto_diff.ProtoPath(("observation", "actions", 1)),
        proto_a=None,
        proto_b=None)
    self.assertIsNone(diff)

  def testFilteredIn(self):
    first = self._observation_with_height_map(
        [[0, 0, 0, 0],
         [1, 0, 1, 0],
         [0, 0, 0, 1],
         [1, 1, 1, 1]])
    second = self._observation_with_height_map(
        [[0, 0, 0, 0],
         [0, 1, 1, 0],
         [0, 0, 0, 1],
         [1, 1, 1, 0]])
    diff = image_differencer.image_differencer(
        path=proto_diff.ProtoPath((
            "observation",
            "feature_layer_data",
            "renders",
            "height_map",
            "data")),
        proto_a=first,
        proto_b=second)
    self.assertEqual(
        diff,
        "3 element(s) changed - [1][0]: 1 -> 0; [1][1]: 0 -> 1; [3][3]: 1 -> 0")
if __name__ == "__main__":
  absltest.main()  # Runs all test cases defined in this module.
| pysc2-master | pysc2/lib/image_differencer_test.py |
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Define the static list of units for SC2. Generated by bin/gen_data.py."""
import enum
# pylint: disable=invalid-name
class Neutral(enum.IntEnum):
  """Neutral units."""
  # Values are unit type ids, generated by bin/gen_data.py.
  BattleStationMineralField = 886
  BattleStationMineralField750 = 887
  CarrionBird = 322
  CleaningBot = 612
  CollapsibleRockTower = 609
  CollapsibleRockTowerDebris = 490
  CollapsibleRockTowerDebrisRampLeft = 518
  CollapsibleRockTowerDebrisRampRight = 517
  CollapsibleRockTowerDiagonal = 588
  CollapsibleRockTowerPushUnit = 561
  CollapsibleRockTowerPushUnitRampLeft = 564
  CollapsibleRockTowerPushUnitRampRight = 563
  CollapsibleRockTowerRampLeft = 664
  CollapsibleRockTowerRampRight = 663
  CollapsibleTerranTower = 610
  CollapsibleTerranTowerDebris = 485
  CollapsibleTerranTowerDiagonal = 589
  CollapsibleTerranTowerPushUnit = 562
  CollapsibleTerranTowerPushUnitRampLeft = 559
  CollapsibleTerranTowerPushUnitRampRight = 560
  CollapsibleTerranTowerRampLeft = 590
  CollapsibleTerranTowerRampRight = 591
  Crabeetle = 662
  Debris2x2NonConjoined = 475
  DebrisRampLeft = 486
  DebrisRampRight = 487
  DestructibleBillboardTall = 350
  DestructibleCityDebris4x4 = 628
  DestructibleCityDebris6x6 = 629
  DestructibleCityDebrisHugeDiagonalBLUR = 630
  DestructibleDebris4x4 = 364
  DestructibleDebris6x6 = 365
  DestructibleDebrisRampDiagonalHugeBLUR = 377
  DestructibleDebrisRampDiagonalHugeULBR = 376
  DestructibleIce4x4 = 648
  DestructibleIce6x6 = 649
  DestructibleIceDiagonalHugeBLUR = 651
  DestructibleRampDiagonalHugeBLUR = 373
  DestructibleRampDiagonalHugeULBR = 372
  DestructibleRock6x6 = 371
  DestructibleRockEx14x4 = 638
  DestructibleRockEx16x6 = 639
  DestructibleRockEx1DiagonalHugeBLUR = 641
  DestructibleRockEx1DiagonalHugeULBR = 640
  DestructibleRockEx1HorizontalHuge = 643
  DestructibleRockEx1VerticalHuge = 642
  Dog = 336
  InhibitorZoneMedium = 1958
  InhibitorZoneSmall = 1957
  KarakFemale = 324
  LabBot = 661
  LabMineralField = 665
  LabMineralField750 = 666
  Lyote = 321
  MineralField = 341
  MineralField450 = 1961
  MineralField750 = 483
  ProtossVespeneGeyser = 608
  PurifierMineralField = 884
  PurifierMineralField750 = 885
  PurifierRichMineralField = 796
  PurifierRichMineralField750 = 797
  PurifierVespeneGeyser = 880
  ReptileCrate = 877
  RichMineralField = 146
  RichMineralField750 = 147
  RichVespeneGeyser = 344
  Scantipede = 335
  ShakurasVespeneGeyser = 881
  SpacePlatformGeyser = 343
  UnbuildableBricksDestructible = 473
  UnbuildablePlatesDestructible = 474
  UnbuildableRocksDestructible = 472
  UtilityBot = 330
  VespeneGeyser = 342
  XelNagaDestructibleBlocker8NE = 1904
  XelNagaDestructibleBlocker8SW = 1908
  XelNagaTower = 149
class Protoss(enum.IntEnum):
  """Protoss units."""
  # Values are unit type ids, generated by bin/gen_data.py.
  Adept = 311
  AdeptPhaseShift = 801
  Archon = 141
  Assimilator = 61
  AssimilatorRich = 1955
  Carrier = 79
  Colossus = 4
  CyberneticsCore = 72
  DarkShrine = 69
  DarkTemplar = 76
  Disruptor = 694
  DisruptorPhased = 733
  FleetBeacon = 64
  ForceField = 135
  Forge = 63
  Gateway = 62
  HighTemplar = 75
  Immortal = 83
  Interceptor = 85
  Mothership = 10
  MothershipCore = 488
  Nexus = 59
  Observer = 82
  ObserverSurveillanceMode = 1911
  Oracle = 495
  Phoenix = 78
  PhotonCannon = 66
  Probe = 84
  Pylon = 60
  PylonOvercharged = 894
  RoboticsBay = 70
  RoboticsFacility = 71
  Sentry = 77
  ShieldBattery = 1910
  Stalker = 74
  Stargate = 67
  StasisTrap = 732
  Tempest = 496
  TemplarArchive = 68
  TwilightCouncil = 65
  VoidRay = 80
  WarpGate = 133
  WarpPrism = 81
  WarpPrismPhasing = 136
  Zealot = 73
class Terran(enum.IntEnum):
  """Terran units."""
  # Values are unit type ids, generated by bin/gen_data.py.
  Armory = 29
  AutoTurret = 31
  Banshee = 55
  Barracks = 21
  BarracksFlying = 46
  BarracksReactor = 38
  BarracksTechLab = 37
  Battlecruiser = 57
  Bunker = 24
  CommandCenter = 18
  CommandCenterFlying = 36
  Cyclone = 692
  EngineeringBay = 22
  Factory = 27
  FactoryFlying = 43
  FactoryReactor = 40
  FactoryTechLab = 39
  FusionCore = 30
  Ghost = 50
  GhostAcademy = 26
  GhostAlternate = 144
  GhostNova = 145
  Hellion = 53
  Hellbat = 484
  KD8Charge = 830
  Liberator = 689
  LiberatorAG = 734
  MULE = 268
  Marauder = 51
  Marine = 48
  Medivac = 54
  MissileTurret = 23
  Nuke = 58
  OrbitalCommand = 132
  OrbitalCommandFlying = 134
  PlanetaryFortress = 130
  PointDefenseDrone = 11
  Raven = 56
  Reactor = 6
  Reaper = 49
  Refinery = 20
  RefineryRich = 1960
  RepairDrone = 1913
  SCV = 45
  SensorTower = 25
  SiegeTank = 33
  SiegeTankSieged = 32
  Starport = 28
  StarportFlying = 44
  StarportReactor = 42
  StarportTechLab = 41
  SupplyDepot = 19
  SupplyDepotLowered = 47
  TechLab = 5
  Thor = 52
  ThorHighImpactMode = 691
  VikingAssault = 34
  VikingFighter = 35
  WidowMine = 498
  WidowMineBurrowed = 500
class Zerg(enum.IntEnum):
  """Zerg units."""
  # Values are unit type ids, generated by bin/gen_data.py.
  Baneling = 9
  BanelingBurrowed = 115
  BanelingCocoon = 8
  BanelingNest = 96
  BroodLord = 114
  BroodLordCocoon = 113
  Broodling = 289
  BroodlingEscort = 143
  Changeling = 12
  ChangelingMarine = 15
  ChangelingMarineShield = 14
  ChangelingZealot = 13
  ChangelingZergling = 17
  ChangelingZerglingWings = 16
  Cocoon = 103
  Corruptor = 112
  CreepTumor = 87
  CreepTumorBurrowed = 137
  CreepTumorQueen = 138
  Drone = 104
  DroneBurrowed = 116
  EvolutionChamber = 90
  Extractor = 88
  ExtractorRich = 1956
  GreaterSpire = 102
  Hatchery = 86
  Hive = 101
  Hydralisk = 107
  HydraliskBurrowed = 117
  HydraliskDen = 91
  InfestationPit = 94
  InfestedTerran = 7
  InfestedTerranBurrowed = 120
  InfestedTerranCocoon = 150
  Infestor = 111
  InfestorBurrowed = 127
  Lair = 100
  Larva = 151
  Locust = 489
  LocustFlying = 693
  Lurker = 502
  LurkerBurrowed = 503
  LurkerDen = 504
  LurkerCocoon = 501
  Mutalisk = 108
  NydusCanal = 142
  NydusNetwork = 95
  Overlord = 106
  OverlordTransport = 893
  OverlordTransportCocoon = 892
  Overseer = 129
  OverseerCocoon = 128
  OverseerOversightMode = 1912
  ParasiticBombDummy = 824
  Queen = 126
  QueenBurrowed = 125
  Ravager = 688
  RavagerBurrowed = 690
  RavagerCocoon = 687
  Roach = 110
  RoachBurrowed = 118
  RoachWarren = 97
  SpawningPool = 89
  SpineCrawler = 98
  SpineCrawlerUprooted = 139
  Spire = 92
  SporeCrawler = 99
  SporeCrawlerUprooted = 140
  SwarmHost = 494
  SwarmHostBurrowed = 493
  Ultralisk = 109
  UltraliskBurrowed = 131
  UltraliskCavern = 93
  Viper = 499
  Zergling = 105
  ZerglingBurrowed = 119
def get_unit_type(unit_id):
  """Returns the enum member for `unit_id`, or None if no race defines it."""
  for race_enum in (Neutral, Protoss, Terran, Zerg):
    try:
      return race_enum(unit_id)
    except ValueError:
      pass  # Not a member of this race; keep looking.
  return None  # Unknown unit id.
| pysc2-master | pysc2/lib/units.py |
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Write a video based on a numpy array."""
from skvideo import io
class VideoWriter(io.FFmpegWriter):
  """Write a video based on a numpy array.

  Subclasses/wraps FFmpegWriter so that switching to a different video
  library only requires changing this class.
  """

  def __init__(self, filename, frame_rate):
    output_args = {"-r": str(frame_rate)}
    super(VideoWriter, self).__init__(filename, outputdict=output_args)

  def add(self, frame):
    """Appends one frame (a numpy array) to the video."""
    self.writeFrame(frame)

  def __del__(self):
    # Flush/close the underlying writer when this object is collected.
    self.close()
| pysc2-master | pysc2/lib/video_writer.py |
#!/usr/bin/python
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for stopwatch."""
import os
from absl.testing import absltest
import mock
from pysc2.lib import stopwatch
def ham_dist(str1, str2):
  """Hamming distance: how many positions of str1 and str2 differ."""
  assert len(str1) == len(str2)
  return sum(1 for a, b in zip(str1, str2) if a != b)
class StatTest(absltest.TestCase):
  """Tests for stopwatch.Stat."""

  def testRange(self):
    stat = stopwatch.Stat()
    for value in (1, 5, 3):
      stat.add(value)
    self.assertEqual(stat.num, 3)
    self.assertEqual(stat.sum, 9)
    self.assertEqual(stat.min, 1)
    self.assertEqual(stat.max, 5)
    self.assertEqual(stat.avg, 3)

  def testParse(self):
    stat = stopwatch.Stat()
    stat.add(1)
    stat.add(3)
    out = str(stat)
    self.assertEqual(out, "sum: 4.0000, avg: 2.0000, dev: 1.0000, "
                     "min: 1.0000, max: 3.0000, num: 2")
    # Allow a few small rounding errors in the parse round trip.
    self.assertLess(ham_dist(out, str(stopwatch.Stat.parse(out))), 5)
class StopwatchTest(absltest.TestCase):
  """Tests for stopwatch.StopWatch timing, parsing and enable/disable."""

  @mock.patch("time.time")
  def testStopwatch(self, mock_time):
    # Drive time.time by hand so the measured durations are deterministic.
    mock_time.return_value = 0
    sw = stopwatch.StopWatch()
    with sw("one"):
      mock_time.return_value += 0.002
    with sw("one"):
      mock_time.return_value += 0.004
    with sw("two"):
      with sw("three"):  # Nested scope, reported as "two.three" below.
        mock_time.return_value += 0.006

    @sw.decorate
    def four():
      mock_time.return_value += 0.004
    four()

    @sw.decorate("five")
    def foo():
      mock_time.return_value += 0.005
    foo()

    out = str(sw)
    # The names should be in sorted order.
    names = [l.split(None)[0] for l in out.splitlines()[1:]]
    self.assertEqual(names, ["five", "four", "one", "two", "two.three"])
    one_line = out.splitlines()[3].split(None)  # The "one" row.
    self.assertLess(one_line[5], one_line[6])  # min < max
    self.assertEqual(one_line[7], "2")  # num
    # Can't test the rest since they'll be flaky.
    # Allow a few small rounding errors for the round trip.
    round_trip = str(stopwatch.StopWatch.parse(out))
    self.assertLess(ham_dist(out, round_trip), 15,
                    "%s != %s" % (out, round_trip))

  def testDivideZero(self):
    sw = stopwatch.StopWatch()
    with sw("zero"):
      pass
    # Just make sure this doesn't have a divide by 0 for when the total is 0.
    self.assertIn("zero", str(sw))

  @mock.patch.dict(os.environ, {"SC2_NO_STOPWATCH": "1"})
  def testDecoratorDisabled(self):
    # With SC2_NO_STOPWATCH set, decorate must return the function unchanged.
    sw = stopwatch.StopWatch()
    self.assertEqual(round, sw.decorate(round))
    self.assertEqual(round, sw.decorate("name")(round))

  @mock.patch.dict(os.environ, {"SC2_NO_STOPWATCH": ""})
  def testDecoratorEnabled(self):
    # Without SC2_NO_STOPWATCH, decorate wraps the function in timing code.
    sw = stopwatch.StopWatch()
    self.assertNotEqual(round, sw.decorate(round))
    self.assertNotEqual(round, sw.decorate("name")(round))

  def testSpeed(self):
    count = 100

    def run():
      for _ in range(count):
        with sw("name"):
          pass

    sw = stopwatch.StopWatch()
    for _ in range(10):
      sw.enable()
      with sw("enabled"):
        run()
      sw.trace()
      with sw("trace"):
        run()
      sw.enable()  # To catch "disabled".
      with sw("disabled"):
        sw.disable()
        run()
    # No asserts. Succeed but print the timings.
    print(sw)
if __name__ == "__main__":
  absltest.main()  # Runs all test cases defined in this module.
| pysc2-master | pysc2/lib/stopwatch_test.py |
# Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Named numpy arrays for easier access to the observation data.
https://docs.scipy.org/doc/numpy/user/basics.rec.html are not enough since they
actually change the type and don't interoperate well with tensorflow.
"""
import enum
import numbers
import re
import numpy as np
class NamedDict(dict):
  """A dict whose entries are also reachable as attributes: d["k"] or d.k."""

  def __init__(self, *args, **kwargs):
    super(NamedDict, self).__init__(*args, **kwargs)
    # Alias the attribute storage to the dict itself so item access and
    # attribute access share exactly the same data.
    self.__dict__ = self
# A slice equivalent to `[:]`; used to recognize no-op slices when reindexing.
_NULL_SLICE = slice(None, None, None)
# pylint: disable=protected-access
class NamedNumpyArray(np.ndarray):
"""A subclass of ndarray that lets you give names to indices.
This is a normal ndarray in the sense that you can always index by numbers and
slices, though elipses don't work. Also, all elements have the same type,
unlike a record array.
Names should be a list of names per dimension in the ndarray shape. The names
should be a list or tuple of strings, a namedtuple class (with names taken
from _fields), or an IntEnum. Alternatively if you don't want to give a name
to a particular dimension, use None. If your array only has one dimension, the
second level of list can be skipped.
Example usage:
a = named_array.NamedNumpyArray([1, 3, 6], ["a", "b", "c"])
a.a, a[1], a["c"] => 1, 3, 6
b = named_array.NamedNumpyArray([[1, 3], [6, 8]], [["a", "b"], None])
b.a, b[1], b["a", 1] => [1, 3], [6, 8], 3
c = named_array.NamedNumpyArray([[1, 3], [6, 8]], [None, ["a", "b"]])
c[0].a, b[1, 0], b[1, "b"] => 1, 6, 8
Look at the tests for more examples including using enums and named tuples.
"""
# Details of how to subclass an ndarray are at:
# https://docs.scipy.org/doc/numpy-1.13.0/user/basics.subclassing.html
def __new__(cls, values, names, *args, **kwargs):
obj = np.array(values, *args, **kwargs)
if len(obj.shape) == 0: # pylint: disable=g-explicit-length-test
raise ValueError("Scalar arrays are unsupported.")
if len(obj.shape) == 1:
if obj.shape[0] == 0 and names and names[0] is None:
# Support arrays of length 0.
names = [None]
else:
# Allow just a single dimension if the array is also single dimension.
try:
if len(names) > 1:
names = [names]
except TypeError: # len of a namedtuple is a TypeError
names = [names]
# Validate names!
if not isinstance(names, (list, tuple)) or len(names) != len(obj.shape):
raise ValueError(
"Names must be a list of length equal to the array shape: %s != %s." %
(len(names), len(obj.shape)))
index_names = []
only_none = obj.shape[0] > 0
for i, o in enumerate(names):
if o is None:
index_names.append(o)
else:
only_none = False
if isinstance(o, enum.EnumMeta):
for j, n in enumerate(o._member_names_):
if j != o[n]:
raise ValueError("Enum has holes or doesn't start from 0.")
o = o._member_names_
elif isinstance(o, type): # Assume namedtuple
try:
o = o._fields
except AttributeError:
raise ValueError("Bad names. Must be None, a list of strings, "
"a namedtuple, or IntEnum.")
elif isinstance(o, (list, tuple)):
for n in o:
if not isinstance(n, str):
raise ValueError(
"Bad name, must be a list of strings, not %s" % type(n))
else:
raise ValueError("Bad names. Must be None, a list of strings, "
"a namedtuple, or IntEnum.")
if obj.shape[i] != len(o):
raise ValueError(
"Wrong number of names in dimension %s. Got %s, expected %s." % (
i, len(o), obj.shape[i]))
index_names.append({n: j for j, n in enumerate(o)})
if only_none:
raise ValueError("No names given. Use a normal numpy.ndarray instead.")
# Finally convert to a NamedNumpyArray.
obj = obj.view(cls)
obj._index_names = index_names # [{name: index}, ...], dict per dimension.
return obj
def __array_finalize__(self, obj):
if obj is None:
return
self._index_names = getattr(obj, "_index_names", None)
def __getattr__(self, name):
try:
return self[name]
except KeyError:
raise AttributeError("Bad attribute name: %s" % name)
def __setattr__(self, name, value):
if name == "_index_names": # Need special handling to avoid recursion.
super(NamedNumpyArray, self).__setattr__(name, value)
else:
self.__setitem__(name, value)
def __getitem__(self, indices):
"""Get by indexing lookup."""
indices = self._indices(indices)
obj = super(NamedNumpyArray, self).__getitem__(indices)
if (isinstance(indices, np.ndarray) and len(indices.shape) > 1 and
indices.dtype == bool):
# Is this a multi-dimensional mask, eg: obj[obj == 5] ?
# Multi-dimensional masks return a single dimensional array, and it's
# unclear what it means for the result to have names, so return a normal
# numpy array.
return np.array(obj)
if isinstance(obj, np.ndarray): # If this is a view, index the names too.
if not isinstance(indices, tuple):
indices = (indices,)
new_names = []
dim = 0
for i, index in enumerate(indices):
if isinstance(index, numbers.Integral):
dim += 1 # Drop this dimension's names.
elif index is Ellipsis:
# Copy all the dimensions' names through.
end = len(self.shape) - len(indices) + i + 1
for j in range(dim, end):
new_names.append(self._index_names[j])
dim = end
elif index is np.newaxis: # Add an unnamed dimension.
new_names.append(None)
# Don't modify dim, as we're still working on the same one.
elif (self._index_names[dim] is None or
(isinstance(index, slice) and index == _NULL_SLICE)):
# Keep unnamed dimensions or ones where the slice is a no-op.
new_names.append(self._index_names[dim])
dim += 1
elif isinstance(index, (slice, list, np.ndarray)):
if isinstance(index, np.ndarray) and len(index.shape) > 1:
raise TypeError("What does it mean to index into a named array by "
"a multidimensional array? %s" % index)
# Rebuild the index of names for the various forms of slicing.
names = sorted(self._index_names[dim].items(),
key=lambda item: item[1])
names = np.array(names, dtype=object) # Support full numpy slicing.
sliced = names[index] # Actually slice it.
indexed = {n: j for j, (n, _) in enumerate(sliced)} # Reindex.
if len(sliced) != len(indexed):
# Names aren't unique, so drop the names for this dimension.
indexed = None
new_names.append(indexed)
dim += 1
else:
raise TypeError("Unknown index: %s; %s" % (type(index), index))
obj._index_names = new_names + self._index_names[dim:]
if len(obj._index_names) != len(obj.shape):
raise IndexError("Names don't match object shape: %s != %s" % (
len(obj.shape), len(obj._index_names)))
return obj
def __setitem__(self, indices, value):
super(NamedNumpyArray, self).__setitem__(self._indices(indices), value)
def __getslice__(self, i, j): # deprecated, but still needed...
# https://docs.python.org/2.0/ref/sequence-methods.html
return self[max(0, i):max(0, j):]
def __setslice__(self, i, j, seq): # deprecated, but still needed...
self[max(0, i):max(0, j):] = seq
def __repr__(self):
"""A repr, parsing the original and adding the names param."""
names = []
for dim_names in self._index_names:
if dim_names:
dim_names = [n for n, _ in sorted(dim_names.items(),
key=lambda item: item[1])]
if len(dim_names) > 11:
dim_names = dim_names[:5] + ["..."] + dim_names[-5:]
names.append(dim_names)
if len(names) == 1:
names = names[0]
# "NamedNumpyArray([1, 3, 6], dtype=int32)" ->
# ["NamedNumpyArray", "[1, 3, 6]", ", dtype=int32"]
matches = re.findall(r"^(\w+)\(([\d\., \n\[\]]*)(,\s+\w+=.+)?\)$",
np.array_repr(self))[0]
space = "\n " if matches[2] and matches[2][1] == "\n" else ""
return "%s(%s,%s %s%s)" % (
matches[0], matches[1], space, names, matches[2])
def __reduce__(self):
# Support pickling: https://stackoverflow.com/a/26599346
state = super(NamedNumpyArray, self).__reduce__() # pytype: disable=attribute-error
assert len(state) == 3 # Verify numpy hasn't changed their protocol.
return (state[0], state[1], state[2] + (self._index_names,))
  def __setstate__(self, state):
    # Support pickling: https://stackoverflow.com/a/26599346
    # The last element of `state` is the name mapping appended by __reduce__;
    # restore it, then hand the rest back to numpy.
    self._index_names = state[-1]
    super(NamedNumpyArray, self).__setstate__(state[0:-1])  # pytype: disable=attribute-error
def _indices(self, indices):
"""Turn all string indices into int indices, preserving ellipsis."""
if isinstance(indices, tuple):
out = []
dim = 0
for i, index in enumerate(indices):
if index is Ellipsis:
out.append(index)
dim = len(self.shape) - len(indices) + i + 1
elif index is np.newaxis:
out.append(None)
else:
out.append(self._get_index(dim, index))
dim += 1
return tuple(out)
else:
return self._get_index(0, indices)
def _get_index(self, dim, index):
"""Turn a string into a real index, otherwise return the index."""
if isinstance(index, str):
try:
return self._index_names[dim][index]
except KeyError:
raise KeyError("Name '%s' is invalid for axis %s." % (index, dim))
except TypeError:
raise TypeError(
"Trying to access an unnamed axis %s by name: '%s'" % (dim, index))
else:
return index
| pysc2-master | pysc2/lib/named_array.py |
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Protocol library to make communication easy."""
import contextlib
import enum
import itertools
import os
import socket
import sys
import time
from absl import flags
from absl import logging
from pysc2.lib import stopwatch
import websocket
from s2clientprotocol import sc2api_pb2 as sc_pb
# Verbosity control for dumping the raw SC2 websocket traffic to stderr.
flags.DEFINE_integer("sc2_verbose_protocol", 0,
                     ("Print the communication packets with SC2. 0 disables. "
                      "-1 means all. >0 will print that many lines per "
                      "packet. 20 is a good starting value."))
FLAGS = flags.FLAGS

# Shared stopwatch used to profile the read/write calls below.
sw = stopwatch.sw

# Create a python version of the Status enum in the proto.
Status = enum.Enum("Status", sc_pb.Status.items())  # pylint: disable=invalid-name

MAX_WIDTH = int(os.getenv("COLUMNS", "200"))  # Get your TTY width.
class ConnectionError(Exception):  # pylint: disable=redefined-builtin
  """Failed to read/write a message, details in the error string.

  Deliberately shadows the builtin ConnectionError (see the pylint disable);
  this module raises it for websocket/socket failures.
  """
  pass
class ProtocolError(Exception):  # pylint: disable=g-bad-exception-name
  """SC2 responded with an error message likely due to a bad request or bug.

  Raised by read()/send() when a response is empty, incomplete, or carries
  an error payload.
  """
  pass
@contextlib.contextmanager
def catch_websocket_connection_errors():
  """A context manager that translates websocket errors into ConnectionError.

  Wraps the body so that the three transport-level failure modes (closed
  connection, timeout, raw socket error) all surface as this module's
  ConnectionError, with the original exception chained as the cause.
  """
  try:
    yield
  except websocket.WebSocketConnectionClosedException as e:
    raise ConnectionError("Connection already closed. SC2 probably crashed. "
                          "Check the error log.") from e
  except websocket.WebSocketTimeoutException as e:
    raise ConnectionError("Websocket timed out.") from e
  except socket.error as e:
    raise ConnectionError("Socket error: %s" % e) from e
class StarcraftProtocol(object):
  """Defines the protocol for chatting with starcraft."""

  def __init__(self, sock):
    """Wrap an already-connected websocket.

    Args:
      sock: A connected `websocket.WebSocket` to the SC2 process.
    """
    self._status = Status.launched
    self._sock = sock
    # The peer port is only used to label log output from this instance.
    self._port = sock.sock.getpeername()[1]
    # Monotonically increasing request ids; matched against response ids
    # in send().
    self._count = itertools.count(1)

  @property
  def status(self):
    return self._status

  def close(self):
    # Close the socket (if still open) and mark the protocol as quit.
    if self._sock:
      self._sock.close()
      self._sock = None
    self._status = Status.quit

  @sw.decorate
  def read(self):
    """Read a Response, do some validation, and return it."""
    if FLAGS.sc2_verbose_protocol:
      self._log("-------------- [%s] Reading response --------------",
                self._port)
      start = time.time()
    response = self._read()
    if FLAGS.sc2_verbose_protocol:
      self._log("-------------- [%s] Read %s in %0.1f msec --------------\n%s",
                self._port, response.WhichOneof("response"),
                1000 * (time.time() - start), self._packet_str(response))
    if not response.HasField("status"):
      raise ProtocolError("Got an incomplete response without a status.")
    prev_status = self._status
    # Track the game's state machine from the status reported with each
    # response.
    self._status = Status(response.status)  # pytype: disable=not-callable
    if response.error:
      err_str = ("Error in RPC response (likely a bug). "
                 "Prev status: %s, new status: %s, error:\n%s" % (
                     prev_status, self._status, "\n".join(response.error)))
      logging.error(err_str)
      raise ProtocolError(err_str)
    return response

  @sw.decorate
  def write(self, request):
    """Write a Request."""
    if FLAGS.sc2_verbose_protocol:
      self._log("-------------- [%s] Writing request: %s --------------\n%s",
                self._port, request.WhichOneof("request"),
                self._packet_str(request))
    self._write(request)

  def send_req(self, request):
    """Write a pre-filled Request and return the Response."""
    self.write(request)
    return self.read()

  def send(self, **kwargs):
    """Create and send a specific request, and return the response.

    For example: send(ping=sc_pb.RequestPing()) => sc_pb.ResponsePing

    Args:
      **kwargs: A single kwarg with the name and value to fill in to Request.

    Returns:
      The Response corresponding to your request.
    Raises:
      ConnectionError: if it gets a different response.
    """
    assert len(kwargs) == 1, "Must make a single request."
    name = list(kwargs.keys())[0]
    req = sc_pb.Request(**kwargs)
    # Tag the request with a fresh id so mismatched responses are detectable.
    req.id = next(self._count)
    try:
      res = self.send_req(req)
    except ConnectionError as e:
      raise ConnectionError("Error during %s: %s" % (name, e)) from e
    if res.HasField("id") and res.id != req.id:
      raise ConnectionError(
          "Error during %s: Got a response with a different id" % name)
    return getattr(res, name)

  def _packet_str(self, packet):
    """Return a string form of this packet, truncated for verbose logging."""
    max_lines = FLAGS.sc2_verbose_protocol
    packet_str = str(packet).strip()
    if max_lines <= 0:  # -1 (or 0) means don't truncate.
      return packet_str
    lines = packet_str.split("\n")
    line_count = len(lines)
    # Clip each line to the terminal width and keep at most max_lines+1 lines.
    lines = [line[:MAX_WIDTH] for line in lines[:max_lines + 1]]
    if line_count > max_lines + 1:  # +1 to prefer the last line to skipped msg.
      lines[-1] = "***** %s lines skipped *****" % (line_count - max_lines)
    return "\n".join(lines)

  def _log(self, s, *args):
    r"""Log a string. It flushes but doesn't append \n, so do that yourself."""
    # TODO(tewalds): Should this be using logging.info instead? How to see them
    # outside of google infrastructure?
    sys.stderr.write((s + "\n") % args)
    sys.stderr.flush()

  def _read(self):
    """Actually read the response and parse it, returning a Response."""
    with sw("read_response"):
      with catch_websocket_connection_errors():
        response_str = self._sock.recv()
    if not response_str:
      raise ProtocolError("Got an empty response from SC2.")
    with sw("parse_response"):
      response = sc_pb.Response.FromString(response_str)
    return response

  def _write(self, request):
    """Actually serialize and write the request."""
    with sw("serialize_request"):
      request_str = request.SerializeToString()
    with sw("write_request"):
      with catch_websocket_connection_errors():
        self._sock.send(request_str)
| pysc2-master | pysc2/lib/protocol.py |
# Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Define the static list of buffs for SC2."""
import enum
# pylint: disable=invalid-name
class Buffs(enum.IntEnum):
  """The list of buffs, as generated by bin/gen_data.py."""
  # IntEnum so members compare equal to the raw buff ids from the game protos.
  BansheeCloak = 7
  BlindingCloud = 83
  BlindingCloudStructure = 38
  CarryHarvestableVespeneGeyserGas = 273
  CarryHarvestableVespeneGeyserGasProtoss = 274
  CarryHarvestableVespeneGeyserGasZerg = 275
  CarryHighYieldMineralFieldMinerals = 272
  CarryMineralFieldMinerals = 271
  ChannelSnipeCombat = 145
  Charging = 30
  ChronoBoostEnergyCost = 281
  CloakFieldEffect = 29
  Contaminated = 36
  EMPDecloak = 16
  FungalGrowth = 17
  GhostCloak = 6
  GhostHoldFire = 12
  GhostHoldFireB = 13
  GravitonBeam = 5
  GuardianShield = 18
  ImmortalOverload = 102
  InhibitorZoneTemporalField = 289
  LockOn = 116
  LurkerHoldFire = 136
  LurkerHoldFireB = 137
  MedivacSpeedBoost = 89
  NeuralParasite = 22
  OracleRevelation = 49
  OracleStasisTrapTarget = 129
  OracleWeapon = 99
  ParasiticBomb = 132
  ParasiticBombSecondaryUnitSearch = 134
  ParasiticBombUnitKU = 133
  PowerUserWarpable = 8
  PsiStorm = 28
  QueenSpawnLarvaTimer = 11
  RavenScramblerMissile = 277
  RavenShredderMissileArmorReduction = 280
  RavenShredderMissileTint = 279
  Slow = 33
  Stimpack = 27
  StimpackMarauder = 24
  SupplyDrop = 25
  TemporalField = 121
  ViperConsumeStructure = 59
  VoidRaySpeedUpgrade = 288
  VoidRaySwarmDamageBoost = 122
| pysc2-master | pysc2/lib/buffs.py |
# Copyright 2017-2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Launch the game and set up communication."""
import os
import platform
import shutil
import subprocess
import tempfile
import time
from absl import flags
from absl import logging
import portpicker
from pysc2.lib import remote_controller
from pysc2.lib import stopwatch
# Command-line flags controlling how the SC2 binary is launched and debugged.
flags.DEFINE_bool(
    "sc2_verbose", False, "Enable SC2 verbose logging.", allow_hide_cpp=True)
flags.DEFINE_bool(
    "sc2_verbose_mp", False, "Enable SC2 verbose multiplayer logging.")
flags.DEFINE_bool("sc2_gdb", False, "Run SC2 in gdb.")
flags.DEFINE_bool("sc2_strace", False, "Run SC2 in strace.")
flags.DEFINE_integer("sc2_port", None,
                     "If set, connect to the instance on "
                     "localhost:sc2_port instead of launching one.")
FLAGS = flags.FLAGS

# Shared stopwatch used to profile startup below.
sw = stopwatch.sw
class SC2LaunchError(Exception):
  """Raised when the SC2 process fails to launch."""
  pass
class StarcraftProcess(object):
  """Launch a starcraft server, initialize a controller, and later, clean up.

  This is best used from run_configs, which decides which version to run, and
  where to find it.

  It is important to call `close` or use it as a context manager, otherwise
  you'll likely leak temp files and SC2 processes.
  """

  def __init__(self, run_config, exec_path, version, full_screen=False,
               extra_args=None, verbose=False, host=None, port=None,
               connect=True, timeout_seconds=None, window_size=(640, 480),
               window_loc=(50, 50), **kwargs):
    """Launch the SC2 process.

    Args:
      run_config: `run_configs.lib.RunConfig` object.
      exec_path: Path to the binary to run.
      version: `run_configs.lib.Version` object.
      full_screen: Whether to launch the game window full_screen on win/mac.
      extra_args: List of additional args for the SC2 process.
      verbose: Whether to have the SC2 process do verbose logging.
      host: IP for the game to listen on for its websocket. This is
          usually "127.0.0.1", or "::1", but could be others as well.
      port: Port SC2 should listen on for the websocket.
      connect: Whether to create a RemoteController to connect.
      timeout_seconds: Timeout for the remote controller.
      window_size: Screen size if not full screen.
      window_loc: Screen location if not full screen.
      **kwargs: Extra arguments for _launch (useful for subclasses).
    """
    self._proc = None
    self._controller = None
    self._check_exists(exec_path)
    # Per-instance temp dir, cleaned up in close().
    self._tmp_dir = tempfile.mkdtemp(prefix="sc-", dir=run_config.tmp_dir)
    self._host = host or "127.0.0.1"
    # --sc2_port overrides everything; otherwise use the requested port or
    # pick a free one.
    self._port = FLAGS.sc2_port or port or portpicker.pick_unused_port()
    self._version = version

    args = [
        exec_path,
        "-listen", self._host,
        "-port", str(self._port),
        "-dataDir", os.path.join(run_config.data_dir, ""),
        "-tempDir", os.path.join(self._tmp_dir, ""),
    ]
    if ":" in self._host:
      args += ["-ipv6"]  # A colon means the host is an IPv6 address.
    if platform.system() != "Linux":
      # Only the win/mac binaries render a window; headless Linux doesn't
      # take display args.
      if full_screen:
        args += ["-displayMode", "1"]
      else:
        args += [
            "-displayMode", "0",
            "-windowwidth", str(window_size[0]),
            "-windowheight", str(window_size[1]),
            "-windowx", str(window_loc[0]),
            "-windowy", str(window_loc[1]),
        ]

    if verbose or FLAGS.sc2_verbose:
      args += ["-verbose"]
    if FLAGS.sc2_verbose_mp:
      args += ["-verboseMP"]
    if self._version and self._version.data_version:
      args += ["-dataVersion", self._version.data_version.upper()]

    if extra_args:
      args += extra_args

    if FLAGS.sc2_gdb:
      print("Launching: gdb", args[0])
      print("GDB run command:")
      print("  run %s" % " ".join(args[1:]))
      print("\n")
      args = ["gdb", args[0]]
      timeout_seconds = 3600 * 6  # Give the user plenty of time to debug.
    elif FLAGS.sc2_strace:
      strace_out = "/tmp/sc2-strace.txt"
      print("Launching in strace. Redirecting output to", strace_out)
      args = ["strace", "-f", "-o", strace_out] + args
    else:
      logging.info("Launching SC2: %s", " ".join(args))

    try:
      with sw("startup"):
        # With --sc2_port we attach to an externally launched instance.
        if not FLAGS.sc2_port:
          self._proc = self._launch(run_config, args, **kwargs)
        if connect:
          self._controller = remote_controller.RemoteController(
              self._host, self._port, self, timeout_seconds=timeout_seconds)
    except:
      # Clean up the process/temp dir on any launch failure, then re-raise.
      self.close()
      raise

  @sw.decorate
  def close(self):
    """Shut down the game and clean up."""
    # Order matters: politely quit via the controller before killing the
    # process, then release the port and temp dir.
    if hasattr(self, "_controller") and self._controller:
      self._controller.quit()
      self._controller.close()
      self._controller = None
    self._shutdown()
    if hasattr(self, "_port") and self._port:
      if not FLAGS.sc2_port:
        portpicker.return_port(self._port)
      self._port = None
    if hasattr(self, "_tmp_dir") and os.path.exists(self._tmp_dir):
      shutil.rmtree(self._tmp_dir)

  @property
  def controller(self):
    return self._controller

  @property
  def host(self):
    return self._host

  @property
  def port(self):
    return self._port

  @property
  def version(self):
    return self._version

  def __enter__(self):
    return self.controller

  def __exit__(self, unused_exception_type, unused_exc_value, unused_traceback):
    self.close()

  def __del__(self):
    # Prefer using a context manager, but this cleans most other cases.
    self.close()

  def _check_exists(self, exec_path):
    """Raise if `exec_path` isn't an executable file."""
    if not os.path.isfile(exec_path):
      raise RuntimeError("Trying to run '%s', but it doesn't exist" % exec_path)
    if not os.access(exec_path, os.X_OK):
      raise RuntimeError(
          "Trying to run '%s', but it isn't executable." % exec_path)

  def _launch(self, run_config, args, **kwargs):
    """Launch the process and return the process object."""
    try:
      with sw("popen"):
        return subprocess.Popen(
            args, cwd=run_config.cwd, env=run_config.env, **kwargs)
    except OSError as e:
      logging.exception("Failed to launch")
      raise SC2LaunchError("Failed to launch: %s" % args) from e

  def _shutdown(self):
    """Terminate the sub-process."""
    if self._proc:
      ret = _shutdown_proc(self._proc, 3)
      logging.info("Shutdown with return code: %s", ret)
      self._proc = None

  @property
  def running(self):
    if FLAGS.sc2_port:
      # We didn't launch it, so assume the external instance is still alive.
      return True
    # poll returns None if it's running, otherwise the exit code.
    return self._proc and (self._proc.poll() is None)

  @property
  def pid(self):
    return self._proc.pid if self.running else None
def _shutdown_proc(p, timeout):
"""Wait for a proc to shut down, then terminate or kill it after `timeout`."""
freq = 10 # how often to check per second
for _ in range(1 + timeout * freq):
p.terminate()
ret = p.poll()
if ret is not None:
logging.info("Shutdown gracefully.")
return ret
time.sleep(1 / freq)
logging.warning("Killing the process.")
p.kill()
return p.wait()
| pysc2-master | pysc2/lib/sc_process.py |
# Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Define a flag type for points."""
from absl import flags
from pysc2.lib import point
# Let absl.flags know that DEFINE_point should show up in the caller's module.
flags.disclaim_key_flags()
class PointParser(flags.ArgumentParser):
  """Parse a flag into a pysc2.lib.point.Point."""

  def parse(self, argument):
    """Convert a flag value into a Point; empty or "0" yields None."""
    if not argument or argument == "0":
      return None

    if isinstance(argument, int):
      parts = [argument]
    elif isinstance(argument, (list, tuple)):
      parts = argument
    elif isinstance(argument, str):
      parts = argument.split(",")
    else:
      raise ValueError(
          "Invalid point: '%r'. Valid: '<int>' or '<int>,<int>'." % argument)

    coords = [int(p) for p in parts]
    if len(coords) == 1:
      coords = coords * 2  # A single value means a square: x == y.
    if len(coords) == 2:
      return point.Point(coords[0], coords[1])
    raise ValueError(
        "Invalid point: '%s'. Valid: '<int>' or '<int>,<int>'." % argument)

  def flag_type(self):
    return "pysc2.lib.point.Point"
class PointSerializer(flags.ArgumentSerializer):
  """Custom serializer for pysc2.lib.point.Point."""

  def serialize(self, value):
    # Relies on Point's __str__ producing a flag-parsable representation.
    return str(value)
def DEFINE_point(name, default, help_string, flag_values=flags.FLAGS, **args):  # pylint: disable=invalid-name,redefined-builtin
  """Registers a flag whose value parses as a point.

  Accepts "<int>" (square) or "<int>,<int>"; "0" or empty parses to None.
  """
  flags.DEFINE(PointParser(), name, default, help_string, flag_values,
               PointSerializer(), **args)
| pysc2-master | pysc2/lib/point_flag.py |
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Define the static list of types and actions for SC2."""
import collections
import enum
import numbers
import numpy
from pysc2.lib import point
from s2clientprotocol import spatial_pb2 as sc_spatial
from s2clientprotocol import ui_pb2 as sc_ui
class ActionSpace(enum.Enum):
  """Which coordinate space the agent's actions are expressed in."""
  FEATURES = 1  # Act in feature layer pixel space with FUNCTIONS below.
  RGB = 2  # Act in RGB pixel space with FUNCTIONS below.
  RAW = 3  # Act with unit tags with RAW_FUNCTIONS below.
def spatial(action, action_space):
  """Choose the action space for the action proto."""
  if action_space == ActionSpace.FEATURES:
    return action.action_feature_layer
  if action_space == ActionSpace.RGB:
    return action.action_render
  raise ValueError("Unexpected value for action_space: %s" % action_space)
def no_op(action, action_space):
  """Do nothing; leaves the action proto untouched."""
  del action, action_space
def move_camera(action, action_space, minimap):
  """Move the camera."""
  target = spatial(action, action_space).camera_move.center_minimap
  minimap.assign_to(target)
def select_point(action, action_space, select_point_act, screen):
  """Select a unit at a point."""
  selection = spatial(action, action_space).unit_selection_point
  screen.assign_to(selection.selection_screen_coord)
  selection.type = select_point_act
def select_rect(action, action_space, select_add, screen, screen2):
  """Select units within a rectangle."""
  selection = spatial(action, action_space).unit_selection_rect
  proto_rect = selection.selection_screen_coord.add()
  # Normalize the two corners into a top-left/bottom-right rectangle.
  rect = point.Rect(screen, screen2)
  rect.tl.assign_to(proto_rect.p0)
  rect.br.assign_to(proto_rect.p1)
  selection.selection_add = bool(select_add)
def select_idle_worker(action, action_space, select_worker):
  """Select an idle worker."""
  del action_space
  selection = action.action_ui.select_idle_worker
  selection.type = select_worker
def select_army(action, action_space, select_add):
  """Select the entire army."""
  del action_space
  selection = action.action_ui.select_army
  selection.selection_add = select_add
def select_warp_gates(action, action_space, select_add):
  """Select all warp gates."""
  del action_space
  selection = action.action_ui.select_warp_gates
  selection.selection_add = select_add
def select_larva(action, action_space):
  """Select all larva."""
  del action_space
  # SetInParent marks the otherwise-empty proto field as present.
  action.action_ui.select_larva.SetInParent()
def select_unit(action, action_space, select_unit_act, select_unit_id):
  """Select a specific unit from the multi-unit selection."""
  del action_space
  panel = action.action_ui.multi_panel
  panel.type = select_unit_act
  panel.unit_index = select_unit_id
def control_group(action, action_space, control_group_act, control_group_id):
  """Act on a control group, selecting, setting, etc."""
  del action_space
  group = action.action_ui.control_group
  group.action = control_group_act
  group.control_group_index = control_group_id
def unload(action, action_space, unload_id):
  """Unload a unit from a transport/bunker/nydus/etc."""
  del action_space
  cargo = action.action_ui.cargo_panel
  cargo.unit_index = unload_id
def build_queue(action, action_space, build_queue_id):
  """Cancel a unit in the build queue."""
  del action_space
  production = action.action_ui.production_panel
  production.unit_index = build_queue_id
def cmd_quick(action, action_space, ability_id, queued):
  """Do a quick command like 'Stop' or 'Stim'."""
  cmd = spatial(action, action_space).unit_command
  cmd.ability_id = ability_id
  cmd.queue_command = queued
def cmd_screen(action, action_space, ability_id, queued, screen):
  """Do a command that needs a point on the screen."""
  cmd = spatial(action, action_space).unit_command
  cmd.ability_id = ability_id
  cmd.queue_command = queued
  screen.assign_to(cmd.target_screen_coord)
def cmd_minimap(action, action_space, ability_id, queued, minimap):
  """Do a command that needs a point on the minimap."""
  cmd = spatial(action, action_space).unit_command
  cmd.ability_id = ability_id
  cmd.queue_command = queued
  minimap.assign_to(cmd.target_minimap_coord)
def autocast(action, action_space, ability_id):
  """Toggle autocast."""
  del action_space
  toggle = action.action_ui.toggle_autocast
  toggle.ability_id = ability_id
def raw_no_op(action):
  """Do nothing; leaves the action proto untouched."""
  del action
def raw_move_camera(action, world):
  """Move the camera."""
  camera = action.action_raw.camera_move
  world.assign_to(camera.center_world_space)
def raw_cmd(action, ability_id, queued, unit_tags):
  """Do a raw command to another unit."""
  cmd = action.action_raw.unit_command
  cmd.ability_id = ability_id
  cmd.queue_command = queued
  # A single tag may be passed bare; normalize it to a sequence.
  tags = unit_tags if isinstance(unit_tags, (tuple, list)) else [unit_tags]
  cmd.unit_tags.extend(tags)
def raw_cmd_pt(action, ability_id, queued, unit_tags, world):
  """Do a raw command to another unit towards a point."""
  cmd = action.action_raw.unit_command
  cmd.ability_id = ability_id
  cmd.queue_command = queued
  tags = unit_tags if isinstance(unit_tags, (tuple, list)) else [unit_tags]
  cmd.unit_tags.extend(tags)
  world.assign_to(cmd.target_world_space_pos)
def raw_cmd_unit(action, ability_id, queued, unit_tags,
                 target_unit_tag):
  """Do a raw command to another unit towards a unit."""
  cmd = action.action_raw.unit_command
  cmd.ability_id = ability_id
  cmd.queue_command = queued
  tags = unit_tags if isinstance(unit_tags, (tuple, list)) else [unit_tags]
  cmd.unit_tags.extend(tags)
  cmd.target_unit_tag = target_unit_tag
def raw_autocast(action, ability_id, unit_tags):
  """Toggle autocast."""
  toggle = action.action_raw.toggle_autocast
  toggle.ability_id = ability_id
  tags = unit_tags if isinstance(unit_tags, (tuple, list)) else [unit_tags]
  toggle.unit_tags.extend(tags)
def numpy_to_python(val):
  """Convert numpy types to their corresponding python types.

  Args:
    val: A python or numpy scalar, string, or (possibly nested) sequence.

  Returns:
    The same value with all numpy scalars/arrays converted to python ints,
    floats, and (nested) lists.

  Raises:
    ValueError: If `val` is of an unsupported type (e.g. dict or None).
  """
  # Check numpy scalar types before the python scalars: np.float64 is a
  # subclass of python float (and 0-d arrays are neither), so checking
  # isinstance(val, (int, float)) first would return them unconverted.
  if (isinstance(val, numpy.number) or
      isinstance(val, numpy.ndarray) and not val.shape):  # numpy.array(1)
    return val.item()
  if isinstance(val, (int, float)):
    return val
  if isinstance(val, str):
    return val
  if isinstance(val, (list, tuple, numpy.ndarray)):
    return [numpy_to_python(v) for v in val]
  raise ValueError("Unknown value. Type: %s, repr: %s" % (type(val), repr(val)))
class ArgumentType(collections.namedtuple(
    "ArgumentType", ["id", "name", "sizes", "fn", "values", "count"])):
  """Represents a single argument type.

  Attributes:
    id: The argument id. This is unique.
    name: The name of the argument, also unique.
    sizes: The max+1 of each of the dimensions this argument takes.
    fn: The function to convert the list of integers into something more
        meaningful to be set in the protos to send to the game.
    values: An enum representing the values this argument type could hold. None
        if this isn't an enum argument type.
    count: Number of valid values. Only useful for unit_tags.
  """
  __slots__ = ()

  def __str__(self):
    return "%s/%s %s" % (self.id, self.name, list(self.sizes))

  def __reduce__(self):
    # Support pickling of this namedtuple subclass.
    return self.__class__, tuple(self)

  # The classmethods below return factories taking (id, name); they are
  # invoked by Arguments.types()/RawArguments.types() with the field's
  # position and name.

  @classmethod
  def enum(cls, options, values):
    """Create an ArgumentType where you choose one of a set of known values."""
    names, real = zip(*options)
    del names  # unused

    def factory(i, name):
      # fn maps the chosen position to the underlying proto value.
      return cls(i, name, (len(real),), lambda a: real[a[0]], values, None)
    return factory

  @classmethod
  def scalar(cls, value):
    """Create an ArgumentType with a single scalar in range(value)."""
    return lambda i, name: cls(i, name, (value,), lambda a: a[0], None, None)

  @classmethod
  def point(cls):  # No range because it's unknown at this time.
    """Create an ArgumentType that is represented by a point.Point."""
    def factory(i, name):
      return cls(i, name, (0, 0), lambda a: point.Point(*a).floor(), None, None)
    return factory

  @classmethod
  def spec(cls, id_, name, sizes):
    """Create an ArgumentType to be used in ValidActions."""
    return cls(id_, name, sizes, None, None, None)

  @classmethod
  def unit_tags(cls, count, size):
    """Create an ArgumentType with a list of unbounded ints."""
    def clean(arg):
      arg = numpy_to_python(arg)
      if isinstance(arg, list) and len(arg) == 1 and isinstance(arg[0], list):
        arg = arg[0]  # Support [[list, of, tags]].
      return arg[:count]  # Clip to the maximum number of valid tags.
    return lambda i, name: cls(i, name, (size,), clean, None, count)
class Arguments(collections.namedtuple("Arguments", [
    "screen", "minimap", "screen2", "queued", "control_group_act",
    "control_group_id", "select_point_act", "select_add", "select_unit_act",
    "select_unit_id", "select_worker", "build_queue_id", "unload_id"])):
  """The full list of argument types.

  Take a look at TYPES and FUNCTION_TYPES for more details.

  Attributes:
    screen: A point on the screen.
    minimap: A point on the minimap.
    screen2: The second point for a rectangle. This is needed so that no
        function takes the same type twice.
    queued: Whether the action should be done immediately or after all other
        actions queued for this unit.
    control_group_act: What to do with the control group.
    control_group_id: Which control group to do it with.
    select_point_act: What to do with the unit at the point.
    select_add: Whether to add the unit to the selection or replace it.
    select_unit_act: What to do when selecting a unit by id.
    select_unit_id: Which unit to select by id.
    select_worker: What to do when selecting a worker.
    build_queue_id: Which build queue index to target.
    unload_id: Which unit to target in a transport/nydus/command center.
  """
  __slots__ = ()

  @classmethod
  def types(cls, **kwargs):
    """Create an Arguments of the possible Types."""
    # Each kwarg value is an ArgumentType factory; call it with the field's
    # position (used as the argument id) and name.
    named = {name: factory(Arguments._fields.index(name), name)
             for name, factory in kwargs.items()}
    return cls(**named)

  def __reduce__(self):
    # Support pickling of this namedtuple subclass.
    return self.__class__, tuple(self)
class RawArguments(collections.namedtuple("RawArguments", [
    "world", "queued", "unit_tags", "target_unit_tag"])):
  """The full list of argument types.

  Take a look at TYPES and FUNCTION_TYPES for more details.

  Attributes:
    world: A point in world coordinates
    queued: Whether the action should be done immediately or after all other
        actions queued for this unit.
    unit_tags: Which units should execute this action.
    target_unit_tag: The target unit of this action.
  """
  __slots__ = ()

  @classmethod
  def types(cls, **kwargs):
    """Create an Arguments of the possible Types."""
    # Each kwarg value is an ArgumentType factory; call it with the field's
    # position (used as the argument id) and name.
    named = {name: factory(RawArguments._fields.index(name), name)
             for name, factory in kwargs.items()}
    return cls(**named)

  def __reduce__(self):
    # Support pickling of this namedtuple subclass.
    return self.__class__, tuple(self)
def _define_position_based_enum(name, options):
return enum.IntEnum(
name, {opt_name: i for i, (opt_name, _) in enumerate(options)})
# Each *_OPTIONS list pairs a human-readable name with the proto value it
# maps to; the matching enum exposes the list *positions* as values.
QUEUED_OPTIONS = [
    ("now", False),
    ("queued", True),
]
Queued = _define_position_based_enum(  # pylint: disable=invalid-name
    "Queued", QUEUED_OPTIONS)

CONTROL_GROUP_ACT_OPTIONS = [
    ("recall", sc_ui.ActionControlGroup.Recall),
    ("set", sc_ui.ActionControlGroup.Set),
    ("append", sc_ui.ActionControlGroup.Append),
    ("set_and_steal", sc_ui.ActionControlGroup.SetAndSteal),
    ("append_and_steal", sc_ui.ActionControlGroup.AppendAndSteal),
]
ControlGroupAct = _define_position_based_enum(  # pylint: disable=invalid-name
    "ControlGroupAct", CONTROL_GROUP_ACT_OPTIONS)

SELECT_POINT_ACT_OPTIONS = [
    ("select", sc_spatial.ActionSpatialUnitSelectionPoint.Select),
    ("toggle", sc_spatial.ActionSpatialUnitSelectionPoint.Toggle),
    ("select_all_type", sc_spatial.ActionSpatialUnitSelectionPoint.AllType),
    ("add_all_type", sc_spatial.ActionSpatialUnitSelectionPoint.AddAllType),
]
SelectPointAct = _define_position_based_enum(  # pylint: disable=invalid-name
    "SelectPointAct", SELECT_POINT_ACT_OPTIONS)

SELECT_ADD_OPTIONS = [
    ("select", False),
    ("add", True),
]
SelectAdd = _define_position_based_enum(  # pylint: disable=invalid-name
    "SelectAdd", SELECT_ADD_OPTIONS)

SELECT_UNIT_ACT_OPTIONS = [
    ("select", sc_ui.ActionMultiPanel.SingleSelect),
    ("deselect", sc_ui.ActionMultiPanel.DeselectUnit),
    ("select_all_type", sc_ui.ActionMultiPanel.SelectAllOfType),
    ("deselect_all_type", sc_ui.ActionMultiPanel.DeselectAllOfType),
]
SelectUnitAct = _define_position_based_enum(  # pylint: disable=invalid-name
    "SelectUnitAct", SELECT_UNIT_ACT_OPTIONS)

SELECT_WORKER_OPTIONS = [
    ("select", sc_ui.ActionSelectIdleWorker.Set),
    ("add", sc_ui.ActionSelectIdleWorker.Add),
    ("select_all", sc_ui.ActionSelectIdleWorker.All),
    ("add_all", sc_ui.ActionSelectIdleWorker.AddAll),
]
SelectWorker = _define_position_based_enum(  # pylint: disable=invalid-name
    "SelectWorker", SELECT_WORKER_OPTIONS)
# The list of known types.
TYPES = Arguments.types(
    screen=ArgumentType.point(),
    minimap=ArgumentType.point(),
    screen2=ArgumentType.point(),
    queued=ArgumentType.enum(QUEUED_OPTIONS, Queued),
    control_group_act=ArgumentType.enum(
        CONTROL_GROUP_ACT_OPTIONS, ControlGroupAct),
    control_group_id=ArgumentType.scalar(10),
    select_point_act=ArgumentType.enum(
        SELECT_POINT_ACT_OPTIONS, SelectPointAct),
    select_add=ArgumentType.enum(SELECT_ADD_OPTIONS, SelectAdd),
    select_unit_act=ArgumentType.enum(SELECT_UNIT_ACT_OPTIONS, SelectUnitAct),
    select_unit_id=ArgumentType.scalar(500),  # Depends on current selection.
    select_worker=ArgumentType.enum(SELECT_WORKER_OPTIONS, SelectWorker),
    build_queue_id=ArgumentType.scalar(10),  # Depends on current build queue.
    unload_id=ArgumentType.scalar(500),  # Depends on the current loaded units.
)

# The argument types used by the raw (unit-tag based) action interface.
RAW_TYPES = RawArguments.types(
    world=ArgumentType.point(),
    queued=ArgumentType.enum(QUEUED_OPTIONS, Queued),
    unit_tags=ArgumentType.unit_tags(512, 512),
    target_unit_tag=ArgumentType.unit_tags(1, 512),
)
# Which argument types do each function need?
FUNCTION_TYPES = {
    no_op: [],
    move_camera: [TYPES.minimap],
    select_point: [TYPES.select_point_act, TYPES.screen],
    select_rect: [TYPES.select_add, TYPES.screen, TYPES.screen2],
    select_unit: [TYPES.select_unit_act, TYPES.select_unit_id],
    control_group: [TYPES.control_group_act, TYPES.control_group_id],
    select_idle_worker: [TYPES.select_worker],
    select_army: [TYPES.select_add],
    select_warp_gates: [TYPES.select_add],
    select_larva: [],
    unload: [TYPES.unload_id],
    build_queue: [TYPES.build_queue_id],
    cmd_quick: [TYPES.queued],
    cmd_screen: [TYPES.queued, TYPES.screen],
    cmd_minimap: [TYPES.queued, TYPES.minimap],
    autocast: [],
    raw_no_op: [],
    raw_cmd: [RAW_TYPES.queued, RAW_TYPES.unit_tags],
    raw_cmd_pt: [RAW_TYPES.queued, RAW_TYPES.unit_tags, RAW_TYPES.world],
    raw_cmd_unit: [RAW_TYPES.queued, RAW_TYPES.unit_tags,
                   RAW_TYPES.target_unit_tag],
    raw_move_camera: [RAW_TYPES.world],
    raw_autocast: [RAW_TYPES.unit_tags],
}

# Which ones need an ability?
ABILITY_FUNCTIONS = {cmd_quick, cmd_screen, cmd_minimap, autocast}
RAW_ABILITY_FUNCTIONS = {raw_cmd, raw_cmd_pt, raw_cmd_unit, raw_autocast}

# Which ones require a point? Note autocast appears under both keys.
POINT_REQUIRED_FUNCS = {
    False: {cmd_quick, autocast},
    True: {cmd_screen, cmd_minimap, autocast}}

# Default availability predicate: the function is always valid.
always = lambda _: True
class Function(collections.namedtuple(
"Function", ["id", "name", "ability_id", "general_id", "function_type",
"args", "avail_fn", "raw"])):
"""Represents a function action.
Attributes:
id: The function id, which is what the agent will use.
name: The name of the function. Should be unique.
ability_id: The ability id to pass to sc2.
general_id: 0 for normal abilities, and the ability_id of another ability if
it can be represented by a more general action.
function_type: One of the functions in FUNCTION_TYPES for how to construct
the sc2 action proto out of python types.
args: A list of the types of args passed to function_type.
avail_fn: For non-abilities, this function returns whether the function is
valid.
raw: Whether the function is raw or not.
"""
__slots__ = ()
@classmethod
def ui_func(cls, id_, name, function_type, avail_fn=always):
"""Define a function representing a ui action."""
return cls(id_, name, 0, 0, function_type, FUNCTION_TYPES[function_type],
avail_fn, False)
@classmethod
def ability(cls, id_, name, function_type, ability_id, general_id=0):
"""Define a function represented as a game ability."""
assert function_type in ABILITY_FUNCTIONS
return cls(id_, name, ability_id, general_id, function_type,
FUNCTION_TYPES[function_type], None, False)
@classmethod
def raw_ability(cls, id_, name, function_type, ability_id, general_id=0,
avail_fn=always):
"""Define a function represented as a game ability."""
assert function_type in RAW_ABILITY_FUNCTIONS
return cls(id_, name, ability_id, general_id, function_type,
FUNCTION_TYPES[function_type], avail_fn, True)
@classmethod
def raw_ui_func(cls, id_, name, function_type, avail_fn=always):
"""Define a function representing a ui action."""
return cls(id_, name, 0, 0, function_type, FUNCTION_TYPES[function_type],
avail_fn, True)
@classmethod
def spec(cls, id_, name, args):
"""Create a Function to be used in ValidActions."""
return cls(id_, name, None, None, None, args, None, False)
def __hash__(self): # So it can go in a set().
return self.id
def __str__(self):
return self.str()
def __call__(self, *args):
"""A convenient way to create a FunctionCall from this Function."""
return FunctionCall.init_with_validation(self.id, args, raw=self.raw)
def __reduce__(self):
return self.__class__, tuple(self)
def str(self, space=False):
"""String version. Set space=True to line them all up nicely."""
return "%s/%s (%s)" % (str(int(self.id)).rjust(space and 4),
self.name.ljust(space and 50),
"; ".join(str(a) for a in self.args))
class Functions(object):
  """The full, ordered set of Function definitions.

  Can't use namedtuple since python3 has a limit of 255 function arguments, so
  build something similar: index by position (sorted by function id) or by
  unique function name, via [] or attribute access.
  """

  def __init__(self, functions):
    ordered = sorted(functions, key=lambda fn: fn.id)
    self._func_list = ordered
    self._func_dict = {fn.name: fn for fn in ordered}
    if len(self._func_dict) != len(self._func_list):
      raise ValueError("Function names must be unique.")

  def __getattr__(self, name):
    # Allows e.g. functions.no_op; only called for names not found normally.
    return self._func_dict[name]

  def __getitem__(self, key):
    # Integer keys index by position; anything else looks up by name.
    table = (self._func_list if isinstance(key, numbers.Integral)
             else self._func_dict)
    return table[key]

  def __getstate__(self):
    # Support pickling, which otherwise conflicts with __getattr__.
    return self._func_list

  def __setstate__(self, functions):
    # Support pickling, which otherwise conflicts with __getattr__.
    self.__init__(functions)

  def __iter__(self):
    return iter(self._func_list)

  def __len__(self):
    return len(self._func_list)

  def __eq__(self, other):
    return self._func_list == other._func_list  # pylint: disable=protected-access
# The semantic meaning of these actions can mainly be found by searching:
# http://liquipedia.net/starcraft2/ or http://starcraft.wikia.com/ .
# pylint: disable=line-too-long
_FUNCTIONS = [
Function.ui_func(0, "no_op", no_op),
Function.ui_func(1, "move_camera", move_camera),
Function.ui_func(2, "select_point", select_point),
Function.ui_func(3, "select_rect", select_rect),
Function.ui_func(4, "select_control_group", control_group),
Function.ui_func(5, "select_unit", select_unit,
lambda obs: obs.ui_data.HasField("multi")),
Function.ui_func(6, "select_idle_worker", select_idle_worker,
lambda obs: obs.player_common.idle_worker_count > 0),
Function.ui_func(7, "select_army", select_army,
lambda obs: obs.player_common.army_count > 0),
Function.ui_func(8, "select_warp_gates", select_warp_gates,
lambda obs: obs.player_common.warp_gate_count > 0),
Function.ui_func(9, "select_larva", select_larva,
lambda obs: obs.player_common.larva_count > 0),
Function.ui_func(10, "unload", unload,
lambda obs: obs.ui_data.HasField("cargo")),
Function.ui_func(11, "build_queue", build_queue,
lambda obs: obs.ui_data.HasField("production")),
# Everything below here is generated with gen_actions.py
Function.ability(12, "Attack_screen", cmd_screen, 3674),
Function.ability(13, "Attack_minimap", cmd_minimap, 3674),
Function.ability(14, "Attack_Attack_screen", cmd_screen, 23, 3674),
Function.ability(15, "Attack_Attack_minimap", cmd_minimap, 23, 3674),
Function.ability(16, "Attack_AttackBuilding_screen", cmd_screen, 2048, 3674),
Function.ability(17, "Attack_AttackBuilding_minimap", cmd_minimap, 2048, 3674),
Function.ability(555, "Attack_Battlecruiser_screen", cmd_screen, 3771, 3674),
Function.ability(556, "Attack_Battlecruiser_minimap", cmd_minimap, 3771, 3674),
Function.ability(18, "Attack_Redirect_screen", cmd_screen, 1682, 3674),
Function.ability(19, "Scan_Move_screen", cmd_screen, 19, 3674),
Function.ability(20, "Scan_Move_minimap", cmd_minimap, 19, 3674),
Function.ability(21, "Behavior_BuildingAttackOff_quick", cmd_quick, 2082),
Function.ability(22, "Behavior_BuildingAttackOn_quick", cmd_quick, 2081),
Function.ability(23, "Behavior_CloakOff_quick", cmd_quick, 3677),
Function.ability(24, "Behavior_CloakOff_Banshee_quick", cmd_quick, 393, 3677),
Function.ability(25, "Behavior_CloakOff_Ghost_quick", cmd_quick, 383, 3677),
Function.ability(26, "Behavior_CloakOn_quick", cmd_quick, 3676),
Function.ability(27, "Behavior_CloakOn_Banshee_quick", cmd_quick, 392, 3676),
Function.ability(28, "Behavior_CloakOn_Ghost_quick", cmd_quick, 382, 3676),
Function.ability(29, "Behavior_GenerateCreepOff_quick", cmd_quick, 1693),
Function.ability(30, "Behavior_GenerateCreepOn_quick", cmd_quick, 1692),
Function.ability(31, "Behavior_HoldFireOff_quick", cmd_quick, 3689),
Function.ability(32, "Behavior_HoldFireOff_Ghost_quick", cmd_quick, 38, 3689),
Function.ability(33, "Behavior_HoldFireOff_Lurker_quick", cmd_quick, 2552, 3689),
Function.ability(34, "Behavior_HoldFireOn_quick", cmd_quick, 3688),
Function.ability(35, "Behavior_HoldFireOn_Ghost_quick", cmd_quick, 36, 3688),
Function.ability(36, "Behavior_HoldFireOn_Lurker_quick", cmd_quick, 2550, 3688),
Function.ability(37, "Behavior_PulsarBeamOff_quick", cmd_quick, 2376),
Function.ability(38, "Behavior_PulsarBeamOn_quick", cmd_quick, 2375),
Function.ability(39, "Build_Armory_screen", cmd_screen, 331),
Function.ability(40, "Build_Assimilator_screen", cmd_screen, 882),
Function.ability(41, "Build_BanelingNest_screen", cmd_screen, 1162),
Function.ability(42, "Build_Barracks_screen", cmd_screen, 321),
Function.ability(43, "Build_Bunker_screen", cmd_screen, 324),
Function.ability(44, "Build_CommandCenter_screen", cmd_screen, 318),
Function.ability(45, "Build_CreepTumor_screen", cmd_screen, 3691),
Function.ability(46, "Build_CreepTumor_Queen_screen", cmd_screen, 1694, 3691),
Function.ability(47, "Build_CreepTumor_Tumor_screen", cmd_screen, 1733, 3691),
Function.ability(48, "Build_CyberneticsCore_screen", cmd_screen, 894),
Function.ability(49, "Build_DarkShrine_screen", cmd_screen, 891),
Function.ability(50, "Build_EngineeringBay_screen", cmd_screen, 322),
Function.ability(51, "Build_EvolutionChamber_screen", cmd_screen, 1156),
Function.ability(52, "Build_Extractor_screen", cmd_screen, 1154),
Function.ability(53, "Build_Factory_screen", cmd_screen, 328),
Function.ability(54, "Build_FleetBeacon_screen", cmd_screen, 885),
Function.ability(55, "Build_Forge_screen", cmd_screen, 884),
Function.ability(56, "Build_FusionCore_screen", cmd_screen, 333),
Function.ability(57, "Build_Gateway_screen", cmd_screen, 883),
Function.ability(58, "Build_GhostAcademy_screen", cmd_screen, 327),
Function.ability(59, "Build_Hatchery_screen", cmd_screen, 1152),
Function.ability(60, "Build_HydraliskDen_screen", cmd_screen, 1157),
Function.ability(61, "Build_InfestationPit_screen", cmd_screen, 1160),
Function.ability(62, "Build_Interceptors_quick", cmd_quick, 1042),
Function.ability(63, "Build_Interceptors_autocast", autocast, 1042),
Function.ability(524, "Build_LurkerDen_screen", cmd_screen, 1163),
Function.ability(64, "Build_MissileTurret_screen", cmd_screen, 323),
Function.ability(65, "Build_Nexus_screen", cmd_screen, 880),
Function.ability(66, "Build_Nuke_quick", cmd_quick, 710),
Function.ability(67, "Build_NydusNetwork_screen", cmd_screen, 1161),
Function.ability(68, "Build_NydusWorm_screen", cmd_screen, 1768),
Function.ability(69, "Build_PhotonCannon_screen", cmd_screen, 887),
Function.ability(70, "Build_Pylon_screen", cmd_screen, 881),
Function.ability(71, "Build_Reactor_quick", cmd_quick, 3683),
Function.ability(72, "Build_Reactor_screen", cmd_screen, 3683),
Function.ability(73, "Build_Reactor_Barracks_quick", cmd_quick, 422, 3683),
Function.ability(74, "Build_Reactor_Barracks_screen", cmd_screen, 422, 3683),
Function.ability(75, "Build_Reactor_Factory_quick", cmd_quick, 455, 3683),
Function.ability(76, "Build_Reactor_Factory_screen", cmd_screen, 455, 3683),
Function.ability(77, "Build_Reactor_Starport_quick", cmd_quick, 488, 3683),
Function.ability(78, "Build_Reactor_Starport_screen", cmd_screen, 488, 3683),
Function.ability(79, "Build_Refinery_screen", cmd_screen, 320),
Function.ability(80, "Build_RoachWarren_screen", cmd_screen, 1165),
Function.ability(81, "Build_RoboticsBay_screen", cmd_screen, 892),
Function.ability(82, "Build_RoboticsFacility_screen", cmd_screen, 893),
Function.ability(83, "Build_SensorTower_screen", cmd_screen, 326),
Function.ability(525, "Build_ShieldBattery_screen", cmd_screen, 895),
Function.ability(84, "Build_SpawningPool_screen", cmd_screen, 1155),
Function.ability(85, "Build_SpineCrawler_screen", cmd_screen, 1166),
Function.ability(86, "Build_Spire_screen", cmd_screen, 1158),
Function.ability(87, "Build_SporeCrawler_screen", cmd_screen, 1167),
Function.ability(88, "Build_Stargate_screen", cmd_screen, 889),
Function.ability(89, "Build_Starport_screen", cmd_screen, 329),
Function.ability(90, "Build_StasisTrap_screen", cmd_screen, 2505),
Function.ability(91, "Build_SupplyDepot_screen", cmd_screen, 319),
Function.ability(92, "Build_TechLab_quick", cmd_quick, 3682),
Function.ability(93, "Build_TechLab_screen", cmd_screen, 3682),
Function.ability(94, "Build_TechLab_Barracks_quick", cmd_quick, 421, 3682),
Function.ability(95, "Build_TechLab_Barracks_screen", cmd_screen, 421, 3682),
Function.ability(96, "Build_TechLab_Factory_quick", cmd_quick, 454, 3682),
Function.ability(97, "Build_TechLab_Factory_screen", cmd_screen, 454, 3682),
Function.ability(98, "Build_TechLab_Starport_quick", cmd_quick, 487, 3682),
Function.ability(99, "Build_TechLab_Starport_screen", cmd_screen, 487, 3682),
Function.ability(100, "Build_TemplarArchive_screen", cmd_screen, 890),
Function.ability(101, "Build_TwilightCouncil_screen", cmd_screen, 886),
Function.ability(102, "Build_UltraliskCavern_screen", cmd_screen, 1159),
Function.ability(103, "BurrowDown_quick", cmd_quick, 3661),
Function.ability(104, "BurrowDown_Baneling_quick", cmd_quick, 1374, 3661),
Function.ability(105, "BurrowDown_Drone_quick", cmd_quick, 1378, 3661),
Function.ability(106, "BurrowDown_Hydralisk_quick", cmd_quick, 1382, 3661),
Function.ability(107, "BurrowDown_Infestor_quick", cmd_quick, 1444, 3661),
Function.ability(108, "BurrowDown_InfestorTerran_quick", cmd_quick, 1394, 3661),
Function.ability(109, "BurrowDown_Lurker_quick", cmd_quick, 2108, 3661),
Function.ability(110, "BurrowDown_Queen_quick", cmd_quick, 1433, 3661),
Function.ability(111, "BurrowDown_Ravager_quick", cmd_quick, 2340, 3661),
Function.ability(112, "BurrowDown_Roach_quick", cmd_quick, 1386, 3661),
Function.ability(113, "BurrowDown_SwarmHost_quick", cmd_quick, 2014, 3661),
Function.ability(114, "BurrowDown_Ultralisk_quick", cmd_quick, 1512, 3661),
Function.ability(115, "BurrowDown_WidowMine_quick", cmd_quick, 2095, 3661),
Function.ability(116, "BurrowDown_Zergling_quick", cmd_quick, 1390, 3661),
Function.ability(117, "BurrowUp_quick", cmd_quick, 3662),
Function.ability(118, "BurrowUp_autocast", autocast, 3662),
Function.ability(119, "BurrowUp_Baneling_quick", cmd_quick, 1376, 3662),
Function.ability(120, "BurrowUp_Baneling_autocast", autocast, 1376, 3662),
Function.ability(121, "BurrowUp_Drone_quick", cmd_quick, 1380, 3662),
Function.ability(122, "BurrowUp_Hydralisk_quick", cmd_quick, 1384, 3662),
Function.ability(123, "BurrowUp_Hydralisk_autocast", autocast, 1384, 3662),
Function.ability(124, "BurrowUp_Infestor_quick", cmd_quick, 1446, 3662),
Function.ability(125, "BurrowUp_InfestorTerran_quick", cmd_quick, 1396, 3662),
Function.ability(126, "BurrowUp_InfestorTerran_autocast", autocast, 1396, 3662),
Function.ability(127, "BurrowUp_Lurker_quick", cmd_quick, 2110, 3662),
Function.ability(128, "BurrowUp_Queen_quick", cmd_quick, 1435, 3662),
Function.ability(129, "BurrowUp_Queen_autocast", autocast, 1435, 3662),
Function.ability(130, "BurrowUp_Ravager_quick", cmd_quick, 2342, 3662),
Function.ability(131, "BurrowUp_Ravager_autocast", autocast, 2342, 3662),
Function.ability(132, "BurrowUp_Roach_quick", cmd_quick, 1388, 3662),
Function.ability(133, "BurrowUp_Roach_autocast", autocast, 1388, 3662),
Function.ability(134, "BurrowUp_SwarmHost_quick", cmd_quick, 2016, 3662),
Function.ability(135, "BurrowUp_Ultralisk_quick", cmd_quick, 1514, 3662),
Function.ability(136, "BurrowUp_Ultralisk_autocast", autocast, 1514, 3662),
Function.ability(137, "BurrowUp_WidowMine_quick", cmd_quick, 2097, 3662),
Function.ability(138, "BurrowUp_Zergling_quick", cmd_quick, 1392, 3662),
Function.ability(139, "BurrowUp_Zergling_autocast", autocast, 1392, 3662),
Function.ability(140, "Cancel_quick", cmd_quick, 3659),
Function.ability(141, "Cancel_AdeptPhaseShift_quick", cmd_quick, 2594, 3659),
Function.ability(142, "Cancel_AdeptShadePhaseShift_quick", cmd_quick, 2596, 3659),
Function.ability(143, "Cancel_BarracksAddOn_quick", cmd_quick, 451, 3659),
Function.ability(144, "Cancel_BuildInProgress_quick", cmd_quick, 314, 3659),
Function.ability(145, "Cancel_CreepTumor_quick", cmd_quick, 1763, 3659),
Function.ability(146, "Cancel_FactoryAddOn_quick", cmd_quick, 484, 3659),
Function.ability(147, "Cancel_GravitonBeam_quick", cmd_quick, 174, 3659),
Function.ability(148, "Cancel_LockOn_quick", cmd_quick, 2354, 3659),
Function.ability(149, "Cancel_MorphBroodlord_quick", cmd_quick, 1373, 3659),
Function.ability(150, "Cancel_MorphGreaterSpire_quick", cmd_quick, 1221, 3659),
Function.ability(151, "Cancel_MorphHive_quick", cmd_quick, 1219, 3659),
Function.ability(152, "Cancel_MorphLair_quick", cmd_quick, 1217, 3659),
Function.ability(153, "Cancel_MorphLurker_quick", cmd_quick, 2333, 3659),
Function.ability(154, "Cancel_MorphLurkerDen_quick", cmd_quick, 2113, 3659),
Function.ability(155, "Cancel_MorphMothership_quick", cmd_quick, 1848, 3659),
Function.ability(156, "Cancel_MorphOrbital_quick", cmd_quick, 1517, 3659),
Function.ability(157, "Cancel_MorphOverlordTransport_quick", cmd_quick, 2709, 3659),
Function.ability(158, "Cancel_MorphOverseer_quick", cmd_quick, 1449, 3659),
Function.ability(159, "Cancel_MorphPlanetaryFortress_quick", cmd_quick, 1451, 3659),
Function.ability(160, "Cancel_MorphRavager_quick", cmd_quick, 2331, 3659),
Function.ability(161, "Cancel_MorphThorExplosiveMode_quick", cmd_quick, 2365, 3659),
Function.ability(162, "Cancel_NeuralParasite_quick", cmd_quick, 250, 3659),
Function.ability(163, "Cancel_Nuke_quick", cmd_quick, 1623, 3659),
Function.ability(164, "Cancel_SpineCrawlerRoot_quick", cmd_quick, 1730, 3659),
Function.ability(165, "Cancel_SporeCrawlerRoot_quick", cmd_quick, 1732, 3659),
Function.ability(166, "Cancel_StarportAddOn_quick", cmd_quick, 517, 3659),
Function.ability(167, "Cancel_StasisTrap_quick", cmd_quick, 2535, 3659),
Function.ability(546, "Cancel_VoidRayPrismaticAlignment_quick", cmd_quick, 3707, 3659),
Function.ability(168, "Cancel_Last_quick", cmd_quick, 3671),
Function.ability(169, "Cancel_HangarQueue5_quick", cmd_quick, 1038, 3671),
Function.ability(170, "Cancel_Queue1_quick", cmd_quick, 304, 3671),
Function.ability(171, "Cancel_Queue5_quick", cmd_quick, 306, 3671),
Function.ability(172, "Cancel_QueueAddOn_quick", cmd_quick, 312, 3671),
Function.ability(173, "Cancel_QueueCancelToSelection_quick", cmd_quick, 308, 3671),
Function.ability(174, "Cancel_QueuePassive_quick", cmd_quick, 1831, 3671),
Function.ability(175, "Cancel_QueuePassiveCancelToSelection_quick", cmd_quick, 1833, 3671),
Function.ability(176, "Effect_Abduct_screen", cmd_screen, 2067),
Function.ability(177, "Effect_AdeptPhaseShift_screen", cmd_screen, 2544),
Function.ability(547, "Effect_AdeptPhaseShift_minimap", cmd_minimap, 2544),
Function.ability(526, "Effect_AntiArmorMissile_screen", cmd_screen, 3753),
Function.ability(178, "Effect_AutoTurret_screen", cmd_screen, 1764),
Function.ability(179, "Effect_BlindingCloud_screen", cmd_screen, 2063),
Function.ability(180, "Effect_Blink_screen", cmd_screen, 3687),
Function.ability(543, "Effect_Blink_minimap", cmd_minimap, 3687),
Function.ability(181, "Effect_Blink_Stalker_screen", cmd_screen, 1442, 3687),
Function.ability(544, "Effect_Blink_Stalker_minimap", cmd_minimap, 1442, 3687),
Function.ability(182, "Effect_ShadowStride_screen", cmd_screen, 2700, 3687),
Function.ability(545, "Effect_ShadowStride_minimap", cmd_minimap, 2700, 3687),
Function.ability(183, "Effect_CalldownMULE_screen", cmd_screen, 171),
Function.ability(184, "Effect_CausticSpray_screen", cmd_screen, 2324),
Function.ability(185, "Effect_Charge_screen", cmd_screen, 1819),
Function.ability(186, "Effect_Charge_autocast", autocast, 1819),
Function.ability(187, "Effect_ChronoBoost_screen", cmd_screen, 261),
Function.ability(527, "Effect_ChronoBoostEnergyCost_screen", cmd_screen, 3755),
Function.ability(188, "Effect_Contaminate_screen", cmd_screen, 1825),
Function.ability(189, "Effect_CorrosiveBile_screen", cmd_screen, 2338),
Function.ability(190, "Effect_EMP_screen", cmd_screen, 1628),
Function.ability(191, "Effect_Explode_quick", cmd_quick, 42),
Function.ability(192, "Effect_Feedback_screen", cmd_screen, 140),
Function.ability(193, "Effect_ForceField_screen", cmd_screen, 1526),
Function.ability(194, "Effect_FungalGrowth_screen", cmd_screen, 74),
Function.ability(195, "Effect_GhostSnipe_screen", cmd_screen, 2714),
Function.ability(196, "Effect_GravitonBeam_screen", cmd_screen, 173),
Function.ability(197, "Effect_GuardianShield_quick", cmd_quick, 76),
Function.ability(198, "Effect_Heal_screen", cmd_screen, 386),
Function.ability(199, "Effect_Heal_autocast", autocast, 386),
Function.ability(200, "Effect_HunterSeekerMissile_screen", cmd_screen, 169),
Function.ability(201, "Effect_ImmortalBarrier_quick", cmd_quick, 2328),
Function.ability(202, "Effect_ImmortalBarrier_autocast", autocast, 2328),
Function.ability(203, "Effect_InfestedTerrans_screen", cmd_screen, 247),
Function.ability(204, "Effect_InjectLarva_screen", cmd_screen, 251),
Function.ability(528, "Effect_InterferenceMatrix_screen", cmd_screen, 3747),
Function.ability(205, "Effect_KD8Charge_screen", cmd_screen, 2588),
Function.ability(206, "Effect_LockOn_screen", cmd_screen, 2350),
Function.ability(557, "Effect_LockOn_autocast", autocast, 2350),
Function.ability(207, "Effect_LocustSwoop_screen", cmd_screen, 2387),
Function.ability(208, "Effect_MassRecall_screen", cmd_screen, 3686),
Function.ability(209, "Effect_MassRecall_Mothership_screen", cmd_screen, 2368, 3686),
Function.ability(210, "Effect_MassRecall_MothershipCore_screen", cmd_screen, 1974, 3686),
Function.ability(529, "Effect_MassRecall_Nexus_screen", cmd_screen, 3757, 3686),
Function.ability(548, "Effect_MassRecall_StrategicRecall_screen", cmd_screen, 142, 3686),
Function.ability(211, "Effect_MedivacIgniteAfterburners_quick", cmd_quick, 2116),
Function.ability(212, "Effect_NeuralParasite_screen", cmd_screen, 249),
Function.ability(213, "Effect_NukeCalldown_screen", cmd_screen, 1622),
Function.ability(214, "Effect_OracleRevelation_screen", cmd_screen, 2146),
Function.ability(215, "Effect_ParasiticBomb_screen", cmd_screen, 2542),
Function.ability(216, "Effect_PhotonOvercharge_screen", cmd_screen, 2162),
Function.ability(217, "Effect_PointDefenseDrone_screen", cmd_screen, 144),
Function.ability(218, "Effect_PsiStorm_screen", cmd_screen, 1036),
Function.ability(219, "Effect_PurificationNova_screen", cmd_screen, 2346),
Function.ability(220, "Effect_Repair_screen", cmd_screen, 3685),
Function.ability(221, "Effect_Repair_autocast", autocast, 3685),
Function.ability(222, "Effect_Repair_Mule_screen", cmd_screen, 78, 3685),
Function.ability(223, "Effect_Repair_Mule_autocast", autocast, 78, 3685),
Function.ability(530, "Effect_Repair_RepairDrone_screen", cmd_screen, 3751, 3685),
Function.ability(531, "Effect_Repair_RepairDrone_autocast", autocast, 3751, 3685),
Function.ability(224, "Effect_Repair_SCV_screen", cmd_screen, 316, 3685),
Function.ability(225, "Effect_Repair_SCV_autocast", autocast, 316, 3685),
Function.ability(532, "Effect_RepairDrone_screen", cmd_screen, 3749),
Function.ability(533, "Effect_Restore_screen", cmd_screen, 3765),
Function.ability(534, "Effect_Restore_autocast", autocast, 3765),
Function.ability(226, "Effect_Salvage_quick", cmd_quick, 32),
Function.ability(227, "Effect_Scan_screen", cmd_screen, 399),
Function.ability(542, "Effect_Scan_minimap", cmd_minimap, 399),
Function.ability(228, "Effect_SpawnChangeling_quick", cmd_quick, 181),
Function.ability(229, "Effect_SpawnLocusts_screen", cmd_screen, 2704),
Function.ability(230, "Effect_Spray_screen", cmd_screen, 3684),
Function.ability(231, "Effect_Spray_Protoss_screen", cmd_screen, 30, 3684),
Function.ability(232, "Effect_Spray_Terran_screen", cmd_screen, 26, 3684),
Function.ability(233, "Effect_Spray_Zerg_screen", cmd_screen, 28, 3684),
Function.ability(549, "Effect_Spray_minimap", cmd_minimap, 3684),
Function.ability(550, "Effect_Spray_Protoss_minimap", cmd_minimap, 30, 3684),
Function.ability(551, "Effect_Spray_Terran_minimap", cmd_minimap, 26, 3684),
Function.ability(552, "Effect_Spray_Zerg_minimap", cmd_minimap, 28, 3684),
Function.ability(234, "Effect_Stim_quick", cmd_quick, 3675),
Function.ability(235, "Effect_Stim_Marauder_quick", cmd_quick, 253, 3675),
Function.ability(236, "Effect_Stim_Marauder_Redirect_quick", cmd_quick, 1684, 3675),
Function.ability(237, "Effect_Stim_Marine_quick", cmd_quick, 380, 3675),
Function.ability(238, "Effect_Stim_Marine_Redirect_quick", cmd_quick, 1683, 3675),
Function.ability(239, "Effect_SupplyDrop_screen", cmd_screen, 255),
Function.ability(240, "Effect_TacticalJump_screen", cmd_screen, 2358),
Function.ability(553, "Effect_TacticalJump_minimap", cmd_minimap, 2358),
Function.ability(241, "Effect_TimeWarp_screen", cmd_screen, 2244),
Function.ability(242, "Effect_Transfusion_screen", cmd_screen, 1664),
Function.ability(243, "Effect_ViperConsume_screen", cmd_screen, 2073),
Function.ability(244, "Effect_VoidRayPrismaticAlignment_quick", cmd_quick, 2393),
Function.ability(245, "Effect_WidowMineAttack_screen", cmd_screen, 2099),
Function.ability(246, "Effect_WidowMineAttack_autocast", autocast, 2099),
Function.ability(247, "Effect_YamatoGun_screen", cmd_screen, 401),
Function.ability(248, "Hallucination_Adept_quick", cmd_quick, 2391),
Function.ability(249, "Hallucination_Archon_quick", cmd_quick, 146),
Function.ability(250, "Hallucination_Colossus_quick", cmd_quick, 148),
Function.ability(251, "Hallucination_Disruptor_quick", cmd_quick, 2389),
Function.ability(252, "Hallucination_HighTemplar_quick", cmd_quick, 150),
Function.ability(253, "Hallucination_Immortal_quick", cmd_quick, 152),
Function.ability(254, "Hallucination_Oracle_quick", cmd_quick, 2114),
Function.ability(255, "Hallucination_Phoenix_quick", cmd_quick, 154),
Function.ability(256, "Hallucination_Probe_quick", cmd_quick, 156),
Function.ability(257, "Hallucination_Stalker_quick", cmd_quick, 158),
Function.ability(258, "Hallucination_VoidRay_quick", cmd_quick, 160),
Function.ability(259, "Hallucination_WarpPrism_quick", cmd_quick, 162),
Function.ability(260, "Hallucination_Zealot_quick", cmd_quick, 164),
Function.ability(261, "Halt_quick", cmd_quick, 3660),
Function.ability(262, "Halt_Building_quick", cmd_quick, 315, 3660),
Function.ability(263, "Halt_TerranBuild_quick", cmd_quick, 348, 3660),
Function.ability(264, "Harvest_Gather_screen", cmd_screen, 3666),
Function.ability(265, "Harvest_Gather_Drone_screen", cmd_screen, 1183, 3666),
Function.ability(266, "Harvest_Gather_Mule_screen", cmd_screen, 166, 3666),
Function.ability(267, "Harvest_Gather_Probe_screen", cmd_screen, 298, 3666),
Function.ability(268, "Harvest_Gather_SCV_screen", cmd_screen, 295, 3666),
Function.ability(269, "Harvest_Return_quick", cmd_quick, 3667),
Function.ability(270, "Harvest_Return_Drone_quick", cmd_quick, 1184, 3667),
Function.ability(271, "Harvest_Return_Mule_quick", cmd_quick, 167, 3667),
Function.ability(272, "Harvest_Return_Probe_quick", cmd_quick, 299, 3667),
Function.ability(273, "Harvest_Return_SCV_quick", cmd_quick, 296, 3667),
Function.ability(274, "HoldPosition_quick", cmd_quick, 3793),
Function.ability(558, "HoldPosition_Battlecruiser_quick", cmd_quick, 3778, 3793),
Function.ability(559, "HoldPosition_Hold_quick", cmd_quick, 18, 3793),
Function.ability(275, "Land_screen", cmd_screen, 3678),
Function.ability(276, "Land_Barracks_screen", cmd_screen, 554, 3678),
Function.ability(277, "Land_CommandCenter_screen", cmd_screen, 419, 3678),
Function.ability(278, "Land_Factory_screen", cmd_screen, 520, 3678),
Function.ability(279, "Land_OrbitalCommand_screen", cmd_screen, 1524, 3678),
Function.ability(280, "Land_Starport_screen", cmd_screen, 522, 3678),
Function.ability(281, "Lift_quick", cmd_quick, 3679),
Function.ability(282, "Lift_Barracks_quick", cmd_quick, 452, 3679),
Function.ability(283, "Lift_CommandCenter_quick", cmd_quick, 417, 3679),
Function.ability(284, "Lift_Factory_quick", cmd_quick, 485, 3679),
Function.ability(285, "Lift_OrbitalCommand_quick", cmd_quick, 1522, 3679),
Function.ability(286, "Lift_Starport_quick", cmd_quick, 518, 3679),
Function.ability(287, "Load_screen", cmd_screen, 3668),
Function.ability(288, "Load_Bunker_screen", cmd_screen, 407, 3668),
Function.ability(289, "Load_Medivac_screen", cmd_screen, 394, 3668),
Function.ability(290, "Load_NydusNetwork_screen", cmd_screen, 1437, 3668),
Function.ability(291, "Load_NydusWorm_screen", cmd_screen, 2370, 3668),
Function.ability(292, "Load_Overlord_screen", cmd_screen, 1406, 3668),
Function.ability(293, "Load_WarpPrism_screen", cmd_screen, 911, 3668),
Function.ability(294, "LoadAll_quick", cmd_quick, 3663),
Function.ability(295, "LoadAll_CommandCenter_quick", cmd_quick, 416, 3663),
Function.ability(296, "Morph_Archon_quick", cmd_quick, 1766),
Function.ability(297, "Morph_BroodLord_quick", cmd_quick, 1372),
Function.ability(298, "Morph_Gateway_quick", cmd_quick, 1520),
Function.ability(299, "Morph_GreaterSpire_quick", cmd_quick, 1220),
Function.ability(300, "Morph_Hellbat_quick", cmd_quick, 1998),
Function.ability(301, "Morph_Hellion_quick", cmd_quick, 1978),
Function.ability(302, "Morph_Hive_quick", cmd_quick, 1218),
Function.ability(303, "Morph_Lair_quick", cmd_quick, 1216),
Function.ability(304, "Morph_LiberatorAAMode_quick", cmd_quick, 2560),
Function.ability(305, "Morph_LiberatorAGMode_screen", cmd_screen, 2558),
Function.ability(554, "Morph_LiberatorAGMode_minimap", cmd_minimap, 2558),
Function.ability(306, "Morph_Lurker_quick", cmd_quick, 2332),
Function.ability(307, "Morph_LurkerDen_quick", cmd_quick, 2112),
Function.ability(308, "Morph_Mothership_quick", cmd_quick, 1847),
Function.ability(535, "Morph_ObserverMode_quick", cmd_quick, 3739),
Function.ability(309, "Morph_OrbitalCommand_quick", cmd_quick, 1516),
Function.ability(310, "Morph_OverlordTransport_quick", cmd_quick, 2708),
Function.ability(311, "Morph_Overseer_quick", cmd_quick, 1448),
Function.ability(536, "Morph_OverseerMode_quick", cmd_quick, 3745),
Function.ability(537, "Morph_OversightMode_quick", cmd_quick, 3743),
Function.ability(312, "Morph_PlanetaryFortress_quick", cmd_quick, 1450),
Function.ability(313, "Morph_Ravager_quick", cmd_quick, 2330),
Function.ability(314, "Morph_Root_screen", cmd_screen, 3680),
Function.ability(315, "Morph_SpineCrawlerRoot_screen", cmd_screen, 1729, 3680),
Function.ability(316, "Morph_SporeCrawlerRoot_screen", cmd_screen, 1731, 3680),
Function.ability(317, "Morph_SiegeMode_quick", cmd_quick, 388),
Function.ability(318, "Morph_SupplyDepot_Lower_quick", cmd_quick, 556),
Function.ability(319, "Morph_SupplyDepot_Raise_quick", cmd_quick, 558),
Function.ability(538, "Morph_SurveillanceMode_quick", cmd_quick, 3741),
Function.ability(320, "Morph_ThorExplosiveMode_quick", cmd_quick, 2364),
Function.ability(321, "Morph_ThorHighImpactMode_quick", cmd_quick, 2362),
Function.ability(322, "Morph_Unsiege_quick", cmd_quick, 390),
Function.ability(323, "Morph_Uproot_quick", cmd_quick, 3681),
Function.ability(324, "Morph_SpineCrawlerUproot_quick", cmd_quick, 1725, 3681),
Function.ability(325, "Morph_SporeCrawlerUproot_quick", cmd_quick, 1727, 3681),
Function.ability(326, "Morph_VikingAssaultMode_quick", cmd_quick, 403),
Function.ability(327, "Morph_VikingFighterMode_quick", cmd_quick, 405),
Function.ability(328, "Morph_WarpGate_quick", cmd_quick, 1518),
Function.ability(560, "Morph_WarpGate_autocast", autocast, 1518),
Function.ability(329, "Morph_WarpPrismPhasingMode_quick", cmd_quick, 1528),
Function.ability(330, "Morph_WarpPrismTransportMode_quick", cmd_quick, 1530),
Function.ability(331, "Move_screen", cmd_screen, 3794),
Function.ability(332, "Move_minimap", cmd_minimap, 3794),
Function.ability(561, "Move_Battlecruiser_screen", cmd_screen, 3776, 3794),
Function.ability(562, "Move_Battlecruiser_minimap", cmd_minimap, 3776, 3794),
Function.ability(563, "Move_Move_screen", cmd_screen, 16, 3794),
Function.ability(564, "Move_Move_minimap", cmd_minimap, 16, 3794),
Function.ability(333, "Patrol_screen", cmd_screen, 3795),
Function.ability(334, "Patrol_minimap", cmd_minimap, 3795),
Function.ability(565, "Patrol_Battlecruiser_screen", cmd_screen, 3777, 3795),
Function.ability(566, "Patrol_Battlecruiser_minimap", cmd_minimap, 3777, 3795),
Function.ability(567, "Patrol_Patrol_screen", cmd_screen, 17, 3795),
Function.ability(568, "Patrol_Patrol_minimap", cmd_minimap, 17, 3795),
Function.ability(335, "Rally_Units_screen", cmd_screen, 3673),
Function.ability(336, "Rally_Units_minimap", cmd_minimap, 3673),
Function.ability(337, "Rally_Building_screen", cmd_screen, 195, 3673),
Function.ability(338, "Rally_Building_minimap", cmd_minimap, 195, 3673),
Function.ability(339, "Rally_Hatchery_Units_screen", cmd_screen, 211, 3673),
Function.ability(340, "Rally_Hatchery_Units_minimap", cmd_minimap, 211, 3673),
Function.ability(341, "Rally_Morphing_Unit_screen", cmd_screen, 199, 3673),
Function.ability(342, "Rally_Morphing_Unit_minimap", cmd_minimap, 199, 3673),
Function.ability(343, "Rally_Workers_screen", cmd_screen, 3690),
Function.ability(344, "Rally_Workers_minimap", cmd_minimap, 3690),
Function.ability(345, "Rally_CommandCenter_screen", cmd_screen, 203, 3690),
Function.ability(346, "Rally_CommandCenter_minimap", cmd_minimap, 203, 3690),
Function.ability(347, "Rally_Hatchery_Workers_screen", cmd_screen, 212, 3690),
Function.ability(348, "Rally_Hatchery_Workers_minimap", cmd_minimap, 212, 3690),
Function.ability(349, "Rally_Nexus_screen", cmd_screen, 207, 3690),
Function.ability(350, "Rally_Nexus_minimap", cmd_minimap, 207, 3690),
Function.ability(539, "Research_AdaptiveTalons_quick", cmd_quick, 3709),
Function.ability(351, "Research_AdeptResonatingGlaives_quick", cmd_quick, 1594),
Function.ability(352, "Research_AdvancedBallistics_quick", cmd_quick, 805),
Function.ability(569, "Research_AnabolicSynthesis_quick", cmd_quick, 263),
Function.ability(353, "Research_BansheeCloakingField_quick", cmd_quick, 790),
Function.ability(354, "Research_BansheeHyperflightRotors_quick", cmd_quick, 799),
Function.ability(355, "Research_BattlecruiserWeaponRefit_quick", cmd_quick, 1532),
Function.ability(356, "Research_Blink_quick", cmd_quick, 1593),
Function.ability(357, "Research_Burrow_quick", cmd_quick, 1225),
Function.ability(358, "Research_CentrifugalHooks_quick", cmd_quick, 1482),
Function.ability(359, "Research_Charge_quick", cmd_quick, 1592),
Function.ability(360, "Research_ChitinousPlating_quick", cmd_quick, 265),
Function.ability(361, "Research_CombatShield_quick", cmd_quick, 731),
Function.ability(362, "Research_ConcussiveShells_quick", cmd_quick, 732),
Function.ability(570, "Research_CycloneLockOnDamage_quick", cmd_quick, 769),
Function.ability(540, "Research_CycloneRapidFireLaunchers_quick", cmd_quick, 768),
Function.ability(363, "Research_DrillingClaws_quick", cmd_quick, 764),
Function.ability(572, "Research_EnhancedShockwaves_quick", cmd_quick, 822),
Function.ability(364, "Research_ExtendedThermalLance_quick", cmd_quick, 1097),
Function.ability(365, "Research_GlialRegeneration_quick", cmd_quick, 216),
Function.ability(366, "Research_GraviticBooster_quick", cmd_quick, 1093),
Function.ability(367, "Research_GraviticDrive_quick", cmd_quick, 1094),
Function.ability(368, "Research_GroovedSpines_quick", cmd_quick, 1282),
Function.ability(369, "Research_HiSecAutoTracking_quick", cmd_quick, 650),
Function.ability(370, "Research_HighCapacityFuelTanks_quick", cmd_quick, 804),
Function.ability(371, "Research_InfernalPreigniter_quick", cmd_quick, 761),
Function.ability(372, "Research_InterceptorGravitonCatapult_quick", cmd_quick, 44),
Function.ability(374, "Research_MuscularAugments_quick", cmd_quick, 1283),
Function.ability(375, "Research_NeosteelFrame_quick", cmd_quick, 655),
Function.ability(376, "Research_NeuralParasite_quick", cmd_quick, 1455),
Function.ability(377, "Research_PathogenGlands_quick", cmd_quick, 1454),
Function.ability(378, "Research_PersonalCloaking_quick", cmd_quick, 820),
Function.ability(379, "Research_PhoenixAnionPulseCrystals_quick", cmd_quick, 46),
Function.ability(380, "Research_PneumatizedCarapace_quick", cmd_quick, 1223),
Function.ability(381, "Research_ProtossAirArmor_quick", cmd_quick, 3692),
Function.ability(382, "Research_ProtossAirArmorLevel1_quick", cmd_quick, 1565, 3692),
Function.ability(383, "Research_ProtossAirArmorLevel2_quick", cmd_quick, 1566, 3692),
Function.ability(384, "Research_ProtossAirArmorLevel3_quick", cmd_quick, 1567, 3692),
Function.ability(385, "Research_ProtossAirWeapons_quick", cmd_quick, 3693),
Function.ability(386, "Research_ProtossAirWeaponsLevel1_quick", cmd_quick, 1562, 3693),
Function.ability(387, "Research_ProtossAirWeaponsLevel2_quick", cmd_quick, 1563, 3693),
Function.ability(388, "Research_ProtossAirWeaponsLevel3_quick", cmd_quick, 1564, 3693),
Function.ability(389, "Research_ProtossGroundArmor_quick", cmd_quick, 3694),
Function.ability(390, "Research_ProtossGroundArmorLevel1_quick", cmd_quick, 1065, 3694),
Function.ability(391, "Research_ProtossGroundArmorLevel2_quick", cmd_quick, 1066, 3694),
Function.ability(392, "Research_ProtossGroundArmorLevel3_quick", cmd_quick, 1067, 3694),
Function.ability(393, "Research_ProtossGroundWeapons_quick", cmd_quick, 3695),
Function.ability(394, "Research_ProtossGroundWeaponsLevel1_quick", cmd_quick, 1062, 3695),
Function.ability(395, "Research_ProtossGroundWeaponsLevel2_quick", cmd_quick, 1063, 3695),
Function.ability(396, "Research_ProtossGroundWeaponsLevel3_quick", cmd_quick, 1064, 3695),
Function.ability(397, "Research_ProtossShields_quick", cmd_quick, 3696),
Function.ability(398, "Research_ProtossShieldsLevel1_quick", cmd_quick, 1068, 3696),
Function.ability(399, "Research_ProtossShieldsLevel2_quick", cmd_quick, 1069, 3696),
Function.ability(400, "Research_ProtossShieldsLevel3_quick", cmd_quick, 1070, 3696),
Function.ability(401, "Research_PsiStorm_quick", cmd_quick, 1126),
Function.ability(402, "Research_RavenCorvidReactor_quick", cmd_quick, 793),
Function.ability(403, "Research_RavenRecalibratedExplosives_quick", cmd_quick, 803),
Function.ability(404, "Research_ShadowStrike_quick", cmd_quick, 2720),
Function.ability(373, "Research_SmartServos_quick", cmd_quick, 766),
Function.ability(405, "Research_Stimpack_quick", cmd_quick, 730),
Function.ability(406, "Research_TerranInfantryArmor_quick", cmd_quick, 3697),
Function.ability(407, "Research_TerranInfantryArmorLevel1_quick", cmd_quick, 656, 3697),
Function.ability(408, "Research_TerranInfantryArmorLevel2_quick", cmd_quick, 657, 3697),
Function.ability(409, "Research_TerranInfantryArmorLevel3_quick", cmd_quick, 658, 3697),
Function.ability(410, "Research_TerranInfantryWeapons_quick", cmd_quick, 3698),
Function.ability(411, "Research_TerranInfantryWeaponsLevel1_quick", cmd_quick, 652, 3698),
Function.ability(412, "Research_TerranInfantryWeaponsLevel2_quick", cmd_quick, 653, 3698),
Function.ability(413, "Research_TerranInfantryWeaponsLevel3_quick", cmd_quick, 654, 3698),
Function.ability(414, "Research_TerranShipWeapons_quick", cmd_quick, 3699),
Function.ability(415, "Research_TerranShipWeaponsLevel1_quick", cmd_quick, 861, 3699),
Function.ability(416, "Research_TerranShipWeaponsLevel2_quick", cmd_quick, 862, 3699),
Function.ability(417, "Research_TerranShipWeaponsLevel3_quick", cmd_quick, 863, 3699),
Function.ability(418, "Research_TerranStructureArmorUpgrade_quick", cmd_quick, 651),
Function.ability(419, "Research_TerranVehicleAndShipPlating_quick", cmd_quick, 3700),
Function.ability(420, "Research_TerranVehicleAndShipPlatingLevel1_quick", cmd_quick, 864, 3700),
Function.ability(421, "Research_TerranVehicleAndShipPlatingLevel2_quick", cmd_quick, 865, 3700),
Function.ability(422, "Research_TerranVehicleAndShipPlatingLevel3_quick", cmd_quick, 866, 3700),
Function.ability(423, "Research_TerranVehicleWeapons_quick", cmd_quick, 3701),
Function.ability(424, "Research_TerranVehicleWeaponsLevel1_quick", cmd_quick, 855, 3701),
Function.ability(425, "Research_TerranVehicleWeaponsLevel2_quick", cmd_quick, 856, 3701),
Function.ability(426, "Research_TerranVehicleWeaponsLevel3_quick", cmd_quick, 857, 3701),
Function.ability(427, "Research_TunnelingClaws_quick", cmd_quick, 217),
Function.ability(428, "Research_WarpGate_quick", cmd_quick, 1568),
Function.ability(429, "Research_ZergFlyerArmor_quick", cmd_quick, 3702),
Function.ability(430, "Research_ZergFlyerArmorLevel1_quick", cmd_quick, 1315, 3702),
Function.ability(431, "Research_ZergFlyerArmorLevel2_quick", cmd_quick, 1316, 3702),
Function.ability(432, "Research_ZergFlyerArmorLevel3_quick", cmd_quick, 1317, 3702),
Function.ability(433, "Research_ZergFlyerAttack_quick", cmd_quick, 3703),
Function.ability(434, "Research_ZergFlyerAttackLevel1_quick", cmd_quick, 1312, 3703),
Function.ability(435, "Research_ZergFlyerAttackLevel2_quick", cmd_quick, 1313, 3703),
Function.ability(436, "Research_ZergFlyerAttackLevel3_quick", cmd_quick, 1314, 3703),
Function.ability(437, "Research_ZergGroundArmor_quick", cmd_quick, 3704),
Function.ability(438, "Research_ZergGroundArmorLevel1_quick", cmd_quick, 1189, 3704),
Function.ability(439, "Research_ZergGroundArmorLevel2_quick", cmd_quick, 1190, 3704),
Function.ability(440, "Research_ZergGroundArmorLevel3_quick", cmd_quick, 1191, 3704),
Function.ability(441, "Research_ZergMeleeWeapons_quick", cmd_quick, 3705),
Function.ability(442, "Research_ZergMeleeWeaponsLevel1_quick", cmd_quick, 1186, 3705),
Function.ability(443, "Research_ZergMeleeWeaponsLevel2_quick", cmd_quick, 1187, 3705),
Function.ability(444, "Research_ZergMeleeWeaponsLevel3_quick", cmd_quick, 1188, 3705),
Function.ability(445, "Research_ZergMissileWeapons_quick", cmd_quick, 3706),
Function.ability(446, "Research_ZergMissileWeaponsLevel1_quick", cmd_quick, 1192, 3706),
Function.ability(447, "Research_ZergMissileWeaponsLevel2_quick", cmd_quick, 1193, 3706),
Function.ability(448, "Research_ZergMissileWeaponsLevel3_quick", cmd_quick, 1194, 3706),
Function.ability(449, "Research_ZerglingAdrenalGlands_quick", cmd_quick, 1252),
Function.ability(450, "Research_ZerglingMetabolicBoost_quick", cmd_quick, 1253),
Function.ability(451, "Smart_screen", cmd_screen, 1),
Function.ability(452, "Smart_minimap", cmd_minimap, 1),
Function.ability(453, "Stop_quick", cmd_quick, 3665),
Function.ability(571, "Stop_Battlecruiser_quick", cmd_quick, 3783, 3665),
Function.ability(454, "Stop_Building_quick", cmd_quick, 2057, 3665),
Function.ability(455, "Stop_Redirect_quick", cmd_quick, 1691, 3665),
Function.ability(456, "Stop_Stop_quick", cmd_quick, 4, 3665),
Function.ability(457, "Train_Adept_quick", cmd_quick, 922),
Function.ability(458, "Train_Baneling_quick", cmd_quick, 80),
Function.ability(459, "Train_Banshee_quick", cmd_quick, 621),
Function.ability(460, "Train_Battlecruiser_quick", cmd_quick, 623),
Function.ability(461, "Train_Carrier_quick", cmd_quick, 948),
Function.ability(462, "Train_Colossus_quick", cmd_quick, 978),
Function.ability(463, "Train_Corruptor_quick", cmd_quick, 1353),
Function.ability(464, "Train_Cyclone_quick", cmd_quick, 597),
Function.ability(465, "Train_DarkTemplar_quick", cmd_quick, 920),
Function.ability(466, "Train_Disruptor_quick", cmd_quick, 994),
Function.ability(467, "Train_Drone_quick", cmd_quick, 1342),
Function.ability(468, "Train_Ghost_quick", cmd_quick, 562),
Function.ability(469, "Train_Hellbat_quick", cmd_quick, 596),
Function.ability(470, "Train_Hellion_quick", cmd_quick, 595),
Function.ability(471, "Train_HighTemplar_quick", cmd_quick, 919),
Function.ability(472, "Train_Hydralisk_quick", cmd_quick, 1345),
Function.ability(473, "Train_Immortal_quick", cmd_quick, 979),
Function.ability(474, "Train_Infestor_quick", cmd_quick, 1352),
Function.ability(475, "Train_Liberator_quick", cmd_quick, 626),
Function.ability(476, "Train_Marauder_quick", cmd_quick, 563),
Function.ability(477, "Train_Marine_quick", cmd_quick, 560),
Function.ability(478, "Train_Medivac_quick", cmd_quick, 620),
Function.ability(541, "Train_Mothership_quick", cmd_quick, 110),
Function.ability(479, "Train_MothershipCore_quick", cmd_quick, 1853),
Function.ability(480, "Train_Mutalisk_quick", cmd_quick, 1346),
Function.ability(481, "Train_Observer_quick", cmd_quick, 977),
Function.ability(482, "Train_Oracle_quick", cmd_quick, 954),
Function.ability(483, "Train_Overlord_quick", cmd_quick, 1344),
Function.ability(484, "Train_Phoenix_quick", cmd_quick, 946),
Function.ability(485, "Train_Probe_quick", cmd_quick, 1006),
Function.ability(486, "Train_Queen_quick", cmd_quick, 1632),
Function.ability(487, "Train_Raven_quick", cmd_quick, 622),
Function.ability(488, "Train_Reaper_quick", cmd_quick, 561),
Function.ability(489, "Train_Roach_quick", cmd_quick, 1351),
Function.ability(490, "Train_SCV_quick", cmd_quick, 524),
Function.ability(491, "Train_Sentry_quick", cmd_quick, 921),
Function.ability(492, "Train_SiegeTank_quick", cmd_quick, 591),
Function.ability(493, "Train_Stalker_quick", cmd_quick, 917),
Function.ability(494, "Train_SwarmHost_quick", cmd_quick, 1356),
Function.ability(495, "Train_Tempest_quick", cmd_quick, 955),
Function.ability(496, "Train_Thor_quick", cmd_quick, 594),
Function.ability(497, "Train_Ultralisk_quick", cmd_quick, 1348),
Function.ability(498, "Train_VikingFighter_quick", cmd_quick, 624),
Function.ability(499, "Train_Viper_quick", cmd_quick, 1354),
Function.ability(500, "Train_VoidRay_quick", cmd_quick, 950),
Function.ability(501, "Train_WarpPrism_quick", cmd_quick, 976),
Function.ability(502, "Train_WidowMine_quick", cmd_quick, 614),
Function.ability(503, "Train_Zealot_quick", cmd_quick, 916),
Function.ability(504, "Train_Zergling_quick", cmd_quick, 1343),
Function.ability(505, "TrainWarp_Adept_screen", cmd_screen, 1419),
Function.ability(506, "TrainWarp_DarkTemplar_screen", cmd_screen, 1417),
Function.ability(507, "TrainWarp_HighTemplar_screen", cmd_screen, 1416),
Function.ability(508, "TrainWarp_Sentry_screen", cmd_screen, 1418),
Function.ability(509, "TrainWarp_Stalker_screen", cmd_screen, 1414),
Function.ability(510, "TrainWarp_Zealot_screen", cmd_screen, 1413),
Function.ability(511, "UnloadAll_quick", cmd_quick, 3664),
Function.ability(512, "UnloadAll_Bunker_quick", cmd_quick, 408, 3664),
Function.ability(513, "UnloadAll_CommandCenter_quick", cmd_quick, 413, 3664),
Function.ability(514, "UnloadAll_NydusNetwork_quick", cmd_quick, 1438, 3664),
Function.ability(515, "UnloadAll_NydusWorm_quick", cmd_quick, 2371, 3664),
Function.ability(516, "UnloadAllAt_screen", cmd_screen, 3669),
Function.ability(517, "UnloadAllAt_minimap", cmd_minimap, 3669),
Function.ability(518, "UnloadAllAt_Medivac_screen", cmd_screen, 396, 3669),
Function.ability(519, "UnloadAllAt_Medivac_minimap", cmd_minimap, 396, 3669),
Function.ability(520, "UnloadAllAt_Overlord_screen", cmd_screen, 1408, 3669),
Function.ability(521, "UnloadAllAt_Overlord_minimap", cmd_minimap, 1408, 3669),
Function.ability(522, "UnloadAllAt_WarpPrism_screen", cmd_screen, 913, 3669),
Function.ability(523, "UnloadAllAt_WarpPrism_minimap", cmd_minimap, 913, 3669),
]
# pylint: enable=line-too-long

# Wrap the function ids in an IntEnum keyed by function name, so that a
# printed id shows the function's name rather than a bare integer.
_Functions = enum.IntEnum(  # pylint: disable=invalid-name
    "_Functions", {func.name: func.id for func in _FUNCTIONS})
_FUNCTIONS = [func._replace(id=_Functions(func.id)) for func in _FUNCTIONS]
FUNCTIONS = Functions(_FUNCTIONS)

# Lookup tables used by features.py and by action conversion.
# ABILITY_IDS maps each ability_id to the frozenset of functions that use it.
ABILITY_IDS = {}
for _fn in FUNCTIONS:
  if _fn.ability_id >= 0:
    ABILITY_IDS.setdefault(_fn.ability_id, set()).add(_fn)
ABILITY_IDS = {
    ability_id: frozenset(funcs) for ability_id, funcs in ABILITY_IDS.items()}
# FUNCTIONS_AVAILABLE maps function id -> function for those with an avail_fn.
FUNCTIONS_AVAILABLE = {func.id: func for func in FUNCTIONS if func.avail_fn}
# pylint: disable=line-too-long
_RAW_FUNCTIONS = [
Function.raw_ui_func(0, "no_op", raw_no_op),
Function.raw_ui_func(168, "raw_move_camera", raw_move_camera),
Function.raw_ability(2, "Attack_pt", raw_cmd_pt, 3674),
Function.raw_ability(3, "Attack_unit", raw_cmd_unit, 3674),
Function.raw_ability(4, "Attack_Attack_pt", raw_cmd_pt, 23, 3674),
Function.raw_ability(6, "Attack_AttackBuilding_pt", raw_cmd_pt, 2048, 3674),
Function.raw_ability(5, "Attack_Attack_unit", raw_cmd_unit, 23, 3674),
Function.raw_ability(7, "Attack_AttackBuilding_unit", raw_cmd_unit, 2048, 3674),
Function.raw_ability(539, "Attack_Battlecruiser_pt", raw_cmd_pt, 3771, 3674),
Function.raw_ability(540, "Attack_Battlecruiser_unit", raw_cmd_unit, 3771, 3674),
Function.raw_ability(8, "Attack_Redirect_pt", raw_cmd_pt, 1682, 3674),
Function.raw_ability(9, "Attack_Redirect_unit", raw_cmd_unit, 1682, 3674),
Function.raw_ability(88, "Behavior_BuildingAttackOff_quick", raw_cmd, 2082), # wrong / baneling
Function.raw_ability(87, "Behavior_BuildingAttackOn_quick", raw_cmd, 2081), # wrong / baneling
Function.raw_ability(169, "Behavior_CloakOff_quick", raw_cmd, 3677),
Function.raw_ability(170, "Behavior_CloakOff_Banshee_quick", raw_cmd, 393, 3677),
Function.raw_ability(171, "Behavior_CloakOff_Ghost_quick", raw_cmd, 383, 3677),
Function.raw_ability(172, "Behavior_CloakOn_quick", raw_cmd, 3676),
Function.raw_ability(173, "Behavior_CloakOn_Banshee_quick", raw_cmd, 392, 3676),
Function.raw_ability(174, "Behavior_CloakOn_Ghost_quick", raw_cmd, 382, 3676),
Function.raw_ability(175, "Behavior_GenerateCreepOff_quick", raw_cmd, 1693),
Function.raw_ability(176, "Behavior_GenerateCreepOn_quick", raw_cmd, 1692),
Function.raw_ability(178, "Behavior_HoldFireOff_Ghost_quick", raw_cmd, 38, 3689),
Function.raw_ability(179, "Behavior_HoldFireOff_Lurker_quick", raw_cmd, 2552, 3689),
Function.raw_ability(177, "Behavior_HoldFireOff_quick", raw_cmd, 3689),
Function.raw_ability(181, "Behavior_HoldFireOn_Ghost_quick", raw_cmd, 36, 3688),
Function.raw_ability(182, "Behavior_HoldFireOn_Lurker_quick", raw_cmd, 2550, 3688),
Function.raw_ability(180, "Behavior_HoldFireOn_quick", raw_cmd, 3688),
Function.raw_ability(158, "Behavior_PulsarBeamOff_quick", raw_cmd, 2376),
Function.raw_ability(159, "Behavior_PulsarBeamOn_quick", raw_cmd, 2375),
Function.raw_ability(183, "Build_Armory_pt", raw_cmd_pt, 331),
Function.raw_ability(36, "Build_Assimilator_unit", raw_cmd_unit, 882),
Function.raw_ability(184, "Build_BanelingNest_pt", raw_cmd_pt, 1162),
Function.raw_ability(185, "Build_Barracks_pt", raw_cmd_pt, 321),
Function.raw_ability(186, "Build_Bunker_pt", raw_cmd_pt, 324),
Function.raw_ability(187, "Build_CommandCenter_pt", raw_cmd_pt, 318),
Function.raw_ability(188, "Build_CreepTumor_pt", raw_cmd_pt, 3691),
Function.raw_ability(189, "Build_CreepTumor_Queen_pt", raw_cmd_pt, 1694, 3691),
Function.raw_ability(190, "Build_CreepTumor_Tumor_pt", raw_cmd_pt, 1733, 3691),
Function.raw_ability(47, "Build_CyberneticsCore_pt", raw_cmd_pt, 894),
Function.raw_ability(44, "Build_DarkShrine_pt", raw_cmd_pt, 891),
Function.raw_ability(191, "Build_EngineeringBay_pt", raw_cmd_pt, 322),
Function.raw_ability(192, "Build_EvolutionChamber_pt", raw_cmd_pt, 1156),
Function.raw_ability(193, "Build_Extractor_unit", raw_cmd_unit, 1154),
Function.raw_ability(194, "Build_Factory_pt", raw_cmd_pt, 328),
Function.raw_ability(39, "Build_FleetBeacon_pt", raw_cmd_pt, 885),
Function.raw_ability(38, "Build_Forge_pt", raw_cmd_pt, 884),
Function.raw_ability(195, "Build_FusionCore_pt", raw_cmd_pt, 333),
Function.raw_ability(37, "Build_Gateway_pt", raw_cmd_pt, 883),
Function.raw_ability(196, "Build_GhostAcademy_pt", raw_cmd_pt, 327),
Function.raw_ability(197, "Build_Hatchery_pt", raw_cmd_pt, 1152),
Function.raw_ability(198, "Build_HydraliskDen_pt", raw_cmd_pt, 1157),
Function.raw_ability(199, "Build_InfestationPit_pt", raw_cmd_pt, 1160),
Function.raw_ability(200, "Build_Interceptors_autocast", raw_autocast, 1042),
Function.raw_ability(66, "Build_Interceptors_quick", raw_cmd, 1042),
Function.raw_ability(201, "Build_LurkerDen_pt", raw_cmd_pt, 1163),
Function.raw_ability(202, "Build_MissileTurret_pt", raw_cmd_pt, 323),
Function.raw_ability(34, "Build_Nexus_pt", raw_cmd_pt, 880),
Function.raw_ability(203, "Build_Nuke_quick", raw_cmd, 710),
Function.raw_ability(204, "Build_NydusNetwork_pt", raw_cmd_pt, 1161),
Function.raw_ability(205, "Build_NydusWorm_pt", raw_cmd_pt, 1768),
Function.raw_ability(41, "Build_PhotonCannon_pt", raw_cmd_pt, 887),
Function.raw_ability(35, "Build_Pylon_pt", raw_cmd_pt, 881),
Function.raw_ability(207, "Build_Reactor_pt", raw_cmd_pt, 3683),
Function.raw_ability(206, "Build_Reactor_quick", raw_cmd, 3683),
Function.raw_ability(209, "Build_Reactor_Barracks_pt", raw_cmd_pt, 422, 3683),
Function.raw_ability(208, "Build_Reactor_Barracks_quick", raw_cmd, 422, 3683),
Function.raw_ability(211, "Build_Reactor_Factory_pt", raw_cmd_pt, 455, 3683),
Function.raw_ability(210, "Build_Reactor_Factory_quick", raw_cmd, 455, 3683),
Function.raw_ability(213, "Build_Reactor_Starport_pt", raw_cmd_pt, 488, 3683),
Function.raw_ability(212, "Build_Reactor_Starport_quick", raw_cmd, 488, 3683),
Function.raw_ability(214, "Build_Refinery_pt", raw_cmd_unit, 320),
Function.raw_ability(215, "Build_RoachWarren_pt", raw_cmd_pt, 1165),
Function.raw_ability(45, "Build_RoboticsBay_pt", raw_cmd_pt, 892),
Function.raw_ability(46, "Build_RoboticsFacility_pt", raw_cmd_pt, 893),
Function.raw_ability(216, "Build_SensorTower_pt", raw_cmd_pt, 326),
Function.raw_ability(48, "Build_ShieldBattery_pt", raw_cmd_pt, 895),
Function.raw_ability(217, "Build_SpawningPool_pt", raw_cmd_pt, 1155),
Function.raw_ability(218, "Build_SpineCrawler_pt", raw_cmd_pt, 1166),
Function.raw_ability(219, "Build_Spire_pt", raw_cmd_pt, 1158),
Function.raw_ability(220, "Build_SporeCrawler_pt", raw_cmd_pt, 1167),
Function.raw_ability(42, "Build_Stargate_pt", raw_cmd_pt, 889),
Function.raw_ability(221, "Build_Starport_pt", raw_cmd_pt, 329),
Function.raw_ability(95, "Build_StasisTrap_pt", raw_cmd_pt, 2505),
Function.raw_ability(222, "Build_SupplyDepot_pt", raw_cmd_pt, 319),
Function.raw_ability(224, "Build_TechLab_pt", raw_cmd_pt, 3682),
Function.raw_ability(223, "Build_TechLab_quick", raw_cmd, 3682),
Function.raw_ability(226, "Build_TechLab_Barracks_pt", raw_cmd_pt, 421, 3682),
Function.raw_ability(225, "Build_TechLab_Barracks_quick", raw_cmd, 421, 3682),
Function.raw_ability(228, "Build_TechLab_Factory_pt", raw_cmd_pt, 454, 3682),
Function.raw_ability(227, "Build_TechLab_Factory_quick", raw_cmd, 454, 3682),
Function.raw_ability(230, "Build_TechLab_Starport_pt", raw_cmd_pt, 487, 3682),
Function.raw_ability(229, "Build_TechLab_Starport_quick", raw_cmd, 487, 3682),
Function.raw_ability(43, "Build_TemplarArchive_pt", raw_cmd_pt, 890),
Function.raw_ability(40, "Build_TwilightCouncil_pt", raw_cmd_pt, 886),
Function.raw_ability(231, "Build_UltraliskCavern_pt", raw_cmd_pt, 1159),
Function.raw_ability(232, "BurrowDown_quick", raw_cmd, 3661),
Function.raw_ability(233, "BurrowDown_Baneling_quick", raw_cmd, 1374, 3661),
Function.raw_ability(234, "BurrowDown_Drone_quick", raw_cmd, 1378, 3661),
Function.raw_ability(235, "BurrowDown_Hydralisk_quick", raw_cmd, 1382, 3661),
Function.raw_ability(236, "BurrowDown_Infestor_quick", raw_cmd, 1444, 3661),
Function.raw_ability(237, "BurrowDown_InfestorTerran_quick", raw_cmd, 1394, 3661),
Function.raw_ability(238, "BurrowDown_Lurker_quick", raw_cmd, 2108, 3661),
Function.raw_ability(239, "BurrowDown_Queen_quick", raw_cmd, 1433, 3661),
Function.raw_ability(240, "BurrowDown_Ravager_quick", raw_cmd, 2340, 3661),
Function.raw_ability(241, "BurrowDown_Roach_quick", raw_cmd, 1386, 3661),
Function.raw_ability(242, "BurrowDown_SwarmHost_quick", raw_cmd, 2014, 3661),
Function.raw_ability(243, "BurrowDown_Ultralisk_quick", raw_cmd, 1512, 3661),
Function.raw_ability(244, "BurrowDown_WidowMine_quick", raw_cmd, 2095, 3661),
Function.raw_ability(245, "BurrowDown_Zergling_quick", raw_cmd, 1390, 3661),
Function.raw_ability(247, "BurrowUp_autocast", raw_autocast, 3662),
Function.raw_ability(246, "BurrowUp_quick", raw_cmd, 3662),
Function.raw_ability(249, "BurrowUp_Baneling_autocast", raw_autocast, 1376, 3662),
Function.raw_ability(248, "BurrowUp_Baneling_quick", raw_cmd, 1376, 3662),
Function.raw_ability(250, "BurrowUp_Drone_quick", raw_cmd, 1380, 3662),
Function.raw_ability(252, "BurrowUp_Hydralisk_autocast", raw_autocast, 1384, 3662),
Function.raw_ability(251, "BurrowUp_Hydralisk_quick", raw_cmd, 1384, 3662),
Function.raw_ability(253, "BurrowUp_Infestor_quick", raw_cmd, 1446, 3662),
Function.raw_ability(255, "BurrowUp_InfestorTerran_autocast", raw_autocast, 1396, 3662),
Function.raw_ability(254, "BurrowUp_InfestorTerran_quick", raw_cmd, 1396, 3662),
Function.raw_ability(256, "BurrowUp_Lurker_quick", raw_cmd, 2110, 3662),
Function.raw_ability(258, "BurrowUp_Queen_autocast", raw_autocast, 1435, 3662),
Function.raw_ability(257, "BurrowUp_Queen_quick", raw_cmd, 1435, 3662),
Function.raw_ability(260, "BurrowUp_Ravager_autocast", raw_autocast, 2342, 3662),
Function.raw_ability(259, "BurrowUp_Ravager_quick", raw_cmd, 2342, 3662),
Function.raw_ability(262, "BurrowUp_Roach_autocast", raw_autocast, 1388, 3662),
Function.raw_ability(261, "BurrowUp_Roach_quick", raw_cmd, 1388, 3662),
Function.raw_ability(263, "BurrowUp_SwarmHost_quick", raw_cmd, 2016, 3662),
Function.raw_ability(265, "BurrowUp_Ultralisk_autocast", raw_autocast, 1514, 3662),
Function.raw_ability(264, "BurrowUp_Ultralisk_quick", raw_cmd, 1514, 3662),
Function.raw_ability(266, "BurrowUp_WidowMine_quick", raw_cmd, 2097, 3662),
Function.raw_ability(268, "BurrowUp_Zergling_autocast", raw_autocast, 1392, 3662),
Function.raw_ability(267, "BurrowUp_Zergling_quick", raw_cmd, 1392, 3662),
Function.raw_ability(98, "Cancel_quick", raw_cmd, 3659),
Function.raw_ability(123, "Cancel_AdeptPhaseShift_quick", raw_cmd, 2594, 3659),
Function.raw_ability(124, "Cancel_AdeptShadePhaseShift_quick", raw_cmd, 2596, 3659),
Function.raw_ability(269, "Cancel_BarracksAddOn_quick", raw_cmd, 451, 3659),
Function.raw_ability(125, "Cancel_BuildInProgress_quick", raw_cmd, 314, 3659),
Function.raw_ability(270, "Cancel_CreepTumor_quick", raw_cmd, 1763, 3659),
Function.raw_ability(271, "Cancel_FactoryAddOn_quick", raw_cmd, 484, 3659),
Function.raw_ability(126, "Cancel_GravitonBeam_quick", raw_cmd, 174, 3659),
Function.raw_ability(272, "Cancel_HangarQueue5_quick", raw_cmd, 1038, 3671),
Function.raw_ability(129, "Cancel_Last_quick", raw_cmd, 3671),
Function.raw_ability(273, "Cancel_LockOn_quick", raw_cmd, 2354, 3659),
Function.raw_ability(274, "Cancel_MorphBroodlord_quick", raw_cmd, 1373, 3659),
Function.raw_ability(275, "Cancel_MorphGreaterSpire_quick", raw_cmd, 1221, 3659),
Function.raw_ability(276, "Cancel_MorphHive_quick", raw_cmd, 1219, 3659),
Function.raw_ability(277, "Cancel_MorphLair_quick", raw_cmd, 1217, 3659),
Function.raw_ability(279, "Cancel_MorphLurkerDen_quick", raw_cmd, 2113, 3659),
Function.raw_ability(278, "Cancel_MorphLurker_quick", raw_cmd, 2333, 3659),
Function.raw_ability(280, "Cancel_MorphMothership_quick", raw_cmd, 1848, 3659),
Function.raw_ability(281, "Cancel_MorphOrbital_quick", raw_cmd, 1517, 3659),
Function.raw_ability(282, "Cancel_MorphOverlordTransport_quick", raw_cmd, 2709, 3659),
Function.raw_ability(283, "Cancel_MorphOverseer_quick", raw_cmd, 1449, 3659),
Function.raw_ability(284, "Cancel_MorphPlanetaryFortress_quick", raw_cmd, 1451, 3659),
Function.raw_ability(285, "Cancel_MorphRavager_quick", raw_cmd, 2331, 3659),
Function.raw_ability(286, "Cancel_MorphThorExplosiveMode_quick", raw_cmd, 2365, 3659),
Function.raw_ability(287, "Cancel_NeuralParasite_quick", raw_cmd, 250, 3659),
Function.raw_ability(288, "Cancel_Nuke_quick", raw_cmd, 1623, 3659),
Function.raw_ability(130, "Cancel_Queue1_quick", raw_cmd, 304, 3671),
Function.raw_ability(131, "Cancel_Queue5_quick", raw_cmd, 306, 3671),
Function.raw_ability(289, "Cancel_QueueAddOn_quick", raw_cmd, 312, 3671),
Function.raw_ability(132, "Cancel_QueueCancelToSelection_quick", raw_cmd, 308, 3671),
Function.raw_ability(134, "Cancel_QueuePassiveCancelToSelection_quick", raw_cmd, 1833, 3671),
Function.raw_ability(133, "Cancel_QueuePassive_quick", raw_cmd, 1831, 3671),
Function.raw_ability(290, "Cancel_SpineCrawlerRoot_quick", raw_cmd, 1730, 3659),
Function.raw_ability(291, "Cancel_SporeCrawlerRoot_quick", raw_cmd, 1732, 3659),
Function.raw_ability(292, "Cancel_StarportAddOn_quick", raw_cmd, 517, 3659),
Function.raw_ability(127, "Cancel_StasisTrap_quick", raw_cmd, 2535, 3659),
Function.raw_ability(128, "Cancel_VoidRayPrismaticAlignment_quick", raw_cmd, 3707, 3659),
Function.raw_ability(293, "Effect_Abduct_unit", raw_cmd_unit, 2067),
Function.raw_ability(96, "Effect_AdeptPhaseShift_pt", raw_cmd_pt, 2544),
Function.raw_ability(294, "Effect_AntiArmorMissile_unit", raw_cmd_unit, 3753),
Function.raw_ability(295, "Effect_AutoTurret_pt", raw_cmd_pt, 1764),
Function.raw_ability(296, "Effect_BlindingCloud_pt", raw_cmd_pt, 2063),
Function.raw_ability(111, "Effect_Blink_pt", raw_cmd_pt, 3687),
Function.raw_ability(135, "Effect_Blink_Stalker_pt", raw_cmd_pt, 1442, 3687),
Function.raw_ability(112, "Effect_Blink_unit", raw_cmd_unit, 3687), # wrong/unit
Function.raw_ability(297, "Effect_CalldownMULE_pt", raw_cmd_pt, 171),
Function.raw_ability(298, "Effect_CalldownMULE_unit", raw_cmd_unit, 171),
Function.raw_ability(299, "Effect_CausticSpray_unit", raw_cmd_unit, 2324),
Function.raw_ability(302, "Effect_Charge_autocast", raw_autocast, 1819),
Function.raw_ability(300, "Effect_Charge_pt", raw_cmd_pt, 1819),
Function.raw_ability(301, "Effect_Charge_unit", raw_cmd_unit, 1819),
Function.raw_ability(122, "Effect_ChronoBoostEnergyCost_unit", raw_cmd_unit, 3755), # new 4.0?
Function.raw_ability(33, "Effect_ChronoBoost_unit", raw_cmd_unit, 261), # wrong / old?
Function.raw_ability(303, "Effect_Contaminate_unit", raw_cmd_unit, 1825),
Function.raw_ability(304, "Effect_CorrosiveBile_pt", raw_cmd_pt, 2338),
Function.raw_ability(305, "Effect_EMP_pt", raw_cmd_pt, 1628),
Function.raw_ability(306, "Effect_EMP_unit", raw_cmd_unit, 1628),
Function.raw_ability(307, "Effect_Explode_quick", raw_cmd, 42),
Function.raw_ability(157, "Effect_Feedback_unit", raw_cmd_unit, 140),
Function.raw_ability(79, "Effect_ForceField_pt", raw_cmd_pt, 1526),
Function.raw_ability(308, "Effect_FungalGrowth_pt", raw_cmd_pt, 74),
Function.raw_ability(309, "Effect_FungalGrowth_unit", raw_cmd_unit, 74),
Function.raw_ability(310, "Effect_GhostSnipe_unit", raw_cmd_unit, 2714),
Function.raw_ability(32, "Effect_GravitonBeam_unit", raw_cmd_unit, 173),
Function.raw_ability(20, "Effect_GuardianShield_quick", raw_cmd, 76),
Function.raw_ability(312, "Effect_Heal_autocast", raw_autocast, 386),
Function.raw_ability(311, "Effect_Heal_unit", raw_cmd_unit, 386),
Function.raw_ability(313, "Effect_ImmortalBarrier_autocast", raw_autocast, 2328),
Function.raw_ability(91, "Effect_ImmortalBarrier_quick", raw_cmd, 2328),
Function.raw_ability(314, "Effect_InfestedTerrans_pt", raw_cmd_pt, 247),
Function.raw_ability(315, "Effect_InjectLarva_unit", raw_cmd_unit, 251),
Function.raw_ability(316, "Effect_InterferenceMatrix_unit", raw_cmd_unit, 3747),
Function.raw_ability(317, "Effect_KD8Charge_pt", raw_cmd_pt, 2588),
Function.raw_ability(538, "Effect_KD8Charge_unit", raw_cmd_unit, 2588),
Function.raw_ability(318, "Effect_LockOn_unit", raw_cmd_unit, 2350),
Function.raw_ability(541, "Effect_LockOn_autocast", raw_autocast, 2350),
Function.raw_ability(319, "Effect_LocustSwoop_pt", raw_cmd_pt, 2387),
Function.raw_ability(110, "Effect_MassRecall_pt", raw_cmd_pt, 3686),
Function.raw_ability(136, "Effect_MassRecall_Mothership_pt", raw_cmd_pt, 2368, 3686),
Function.raw_ability(162, "Effect_MassRecall_Nexus_pt", raw_cmd_pt, 3757, 3686),
Function.raw_ability(137, "Effect_MassRecall_StrategicRecall_pt", raw_cmd_pt, 142, 3686),
Function.raw_ability(320, "Effect_MedivacIgniteAfterburners_quick", raw_cmd, 2116),
Function.raw_ability(321, "Effect_NeuralParasite_unit", raw_cmd_unit, 249),
Function.raw_ability(322, "Effect_NukeCalldown_pt", raw_cmd_pt, 1622),
Function.raw_ability(90, "Effect_OracleRevelation_pt", raw_cmd_pt, 2146),
Function.raw_ability(323, "Effect_ParasiticBomb_unit", raw_cmd_unit, 2542),
Function.raw_ability(65, "Effect_PsiStorm_pt", raw_cmd_pt, 1036),
Function.raw_ability(167, "Effect_PurificationNova_pt", raw_cmd_pt, 2346),
Function.raw_ability(324, "Effect_Repair_autocast", raw_autocast, 3685),
Function.raw_ability(108, "Effect_Repair_pt", raw_cmd_pt, 3685),
Function.raw_ability(109, "Effect_Repair_unit", raw_cmd_unit, 3685),
Function.raw_ability(326, "Effect_Repair_Mule_autocast", raw_autocast, 78, 3685),
Function.raw_ability(325, "Effect_Repair_Mule_unit", raw_cmd_unit, 78, 3685),
Function.raw_ability(328, "Effect_Repair_RepairDrone_autocast", raw_autocast, 3751, 3685),
Function.raw_ability(327, "Effect_Repair_RepairDrone_unit", raw_cmd_unit, 3751, 3685),
Function.raw_ability(330, "Effect_Repair_SCV_autocast", raw_autocast, 316, 3685),
Function.raw_ability(329, "Effect_Repair_SCV_unit", raw_cmd_unit, 316, 3685),
Function.raw_ability(331, "Effect_Restore_autocast", raw_autocast, 3765),
Function.raw_ability(161, "Effect_Restore_unit", raw_cmd_unit, 3765),
Function.raw_ability(332, "Effect_Salvage_quick", raw_cmd, 32),
Function.raw_ability(333, "Effect_Scan_pt", raw_cmd_pt, 399),
Function.raw_ability(113, "Effect_ShadowStride_pt", raw_cmd_pt, 2700, 3687),
Function.raw_ability(334, "Effect_SpawnChangeling_quick", raw_cmd, 181),
Function.raw_ability(335, "Effect_SpawnLocusts_pt", raw_cmd_pt, 2704),
Function.raw_ability(336, "Effect_SpawnLocusts_unit", raw_cmd_unit, 2704),
Function.raw_ability(337, "Effect_Spray_pt", raw_cmd_pt, 3684),
Function.raw_ability(338, "Effect_Spray_Protoss_pt", raw_cmd_pt, 30, 3684),
Function.raw_ability(339, "Effect_Spray_Terran_pt", raw_cmd_pt, 26, 3684),
Function.raw_ability(340, "Effect_Spray_Zerg_pt", raw_cmd_pt, 28, 3684),
Function.raw_ability(341, "Effect_Stim_quick", raw_cmd, 3675),
Function.raw_ability(342, "Effect_Stim_Marauder_quick", raw_cmd, 253, 3675),
Function.raw_ability(343, "Effect_Stim_Marauder_Redirect_quick", raw_cmd, 1684, 3675),
Function.raw_ability(344, "Effect_Stim_Marine_quick", raw_cmd, 380, 3675),
Function.raw_ability(345, "Effect_Stim_Marine_Redirect_quick", raw_cmd, 1683, 3675),
Function.raw_ability(346, "Effect_SupplyDrop_unit", raw_cmd_unit, 255),
Function.raw_ability(347, "Effect_TacticalJump_pt", raw_cmd_pt, 2358),
Function.raw_ability(348, "Effect_TimeWarp_pt", raw_cmd_pt, 2244),
Function.raw_ability(349, "Effect_Transfusion_unit", raw_cmd_unit, 1664),
Function.raw_ability(350, "Effect_ViperConsume_unit", raw_cmd_unit, 2073),
Function.raw_ability(94, "Effect_VoidRayPrismaticAlignment_quick", raw_cmd, 2393),
Function.raw_ability(353, "Effect_WidowMineAttack_autocast", raw_autocast, 2099),
Function.raw_ability(351, "Effect_WidowMineAttack_pt", raw_cmd_pt, 2099),
Function.raw_ability(352, "Effect_WidowMineAttack_unit", raw_cmd_unit, 2099),
Function.raw_ability(537, "Effect_YamatoGun_unit", raw_cmd_unit, 401),
Function.raw_ability(93, "Hallucination_Adept_quick", raw_cmd, 2391),
Function.raw_ability(22, "Hallucination_Archon_quick", raw_cmd, 146),
Function.raw_ability(23, "Hallucination_Colossus_quick", raw_cmd, 148),
Function.raw_ability(92, "Hallucination_Disruptor_quick", raw_cmd, 2389),
Function.raw_ability(24, "Hallucination_HighTemplar_quick", raw_cmd, 150),
Function.raw_ability(25, "Hallucination_Immortal_quick", raw_cmd, 152),
Function.raw_ability(89, "Hallucination_Oracle_quick", raw_cmd, 2114),
Function.raw_ability(26, "Hallucination_Phoenix_quick", raw_cmd, 154),
Function.raw_ability(27, "Hallucination_Probe_quick", raw_cmd, 156),
Function.raw_ability(28, "Hallucination_Stalker_quick", raw_cmd, 158),
Function.raw_ability(29, "Hallucination_VoidRay_quick", raw_cmd, 160),
Function.raw_ability(30, "Hallucination_WarpPrism_quick", raw_cmd, 162),
Function.raw_ability(31, "Hallucination_Zealot_quick", raw_cmd, 164),
Function.raw_ability(354, "Halt_Building_quick", raw_cmd, 315, 3660),
Function.raw_ability(99, "Halt_quick", raw_cmd, 3660),
Function.raw_ability(355, "Halt_TerranBuild_quick", raw_cmd, 348, 3660),
Function.raw_ability(102, "Harvest_Gather_unit", raw_cmd_unit, 3666),
Function.raw_ability(356, "Harvest_Gather_Drone_unit", raw_cmd_unit, 1183, 3666),
Function.raw_ability(357, "Harvest_Gather_Mule_unit", raw_cmd_unit, 166, 3666),
Function.raw_ability(358, "Harvest_Gather_Probe_unit", raw_cmd_unit, 298, 3666),
Function.raw_ability(359, "Harvest_Gather_SCV_unit", raw_cmd_unit, 295, 3666),
Function.raw_ability(103, "Harvest_Return_quick", raw_cmd, 3667),
Function.raw_ability(360, "Harvest_Return_Drone_quick", raw_cmd, 1184, 3667),
Function.raw_ability(361, "Harvest_Return_Mule_quick", raw_cmd, 167, 3667),
Function.raw_ability(154, "Harvest_Return_Probe_quick", raw_cmd, 299, 3667),
Function.raw_ability(362, "Harvest_Return_SCV_quick", raw_cmd, 296, 3667),
Function.raw_ability(17, "HoldPosition_quick", raw_cmd, 3793),
Function.raw_ability(542, "HoldPosition_Battlecruiser_quick", raw_cmd, 3778, 3793),
Function.raw_ability(543, "HoldPosition_Hold_quick", raw_cmd, 18, 3793),
Function.raw_ability(364, "Land_Barracks_pt", raw_cmd_pt, 554, 3678),
Function.raw_ability(365, "Land_CommandCenter_pt", raw_cmd_pt, 419, 3678),
Function.raw_ability(366, "Land_Factory_pt", raw_cmd_pt, 520, 3678),
Function.raw_ability(367, "Land_OrbitalCommand_pt", raw_cmd_pt, 1524, 3678),
Function.raw_ability(363, "Land_pt", raw_cmd_pt, 3678),
Function.raw_ability(368, "Land_Starport_pt", raw_cmd_pt, 522, 3678),
Function.raw_ability(370, "Lift_Barracks_quick", raw_cmd, 452, 3679),
Function.raw_ability(371, "Lift_CommandCenter_quick", raw_cmd, 417, 3679),
Function.raw_ability(372, "Lift_Factory_quick", raw_cmd, 485, 3679),
Function.raw_ability(373, "Lift_OrbitalCommand_quick", raw_cmd, 1522, 3679),
Function.raw_ability(369, "Lift_quick", raw_cmd, 3679),
Function.raw_ability(374, "Lift_Starport_quick", raw_cmd, 518, 3679),
Function.raw_ability(376, "LoadAll_CommandCenter_quick", raw_cmd, 416, 3663),
Function.raw_ability(375, "LoadAll_quick", raw_cmd, 3663),
Function.raw_ability(377, "Load_Bunker_unit", raw_cmd_unit, 407, 3668),
Function.raw_ability(378, "Load_Medivac_unit", raw_cmd_unit, 394, 3668),
Function.raw_ability(379, "Load_NydusNetwork_unit", raw_cmd_unit, 1437, 3668),
Function.raw_ability(380, "Load_NydusWorm_unit", raw_cmd_unit, 2370, 3668),
Function.raw_ability(381, "Load_Overlord_unit", raw_cmd_unit, 1406, 3668),
Function.raw_ability(104, "Load_unit", raw_cmd_unit, 3668),
Function.raw_ability(382, "Load_WarpPrism_unit", raw_cmd_unit, 911, 3668),
Function.raw_ability(86, "Morph_Archon_quick", raw_cmd, 1766),
Function.raw_ability(383, "Morph_BroodLord_quick", raw_cmd, 1372),
Function.raw_ability(78, "Morph_Gateway_quick", raw_cmd, 1520),
Function.raw_ability(384, "Morph_GreaterSpire_quick", raw_cmd, 1220),
Function.raw_ability(385, "Morph_Hellbat_quick", raw_cmd, 1998),
Function.raw_ability(386, "Morph_Hellion_quick", raw_cmd, 1978),
Function.raw_ability(387, "Morph_Hive_quick", raw_cmd, 1218),
Function.raw_ability(388, "Morph_Lair_quick", raw_cmd, 1216),
Function.raw_ability(389, "Morph_LiberatorAAMode_quick", raw_cmd, 2560),
Function.raw_ability(390, "Morph_LiberatorAGMode_pt", raw_cmd_pt, 2558),
Function.raw_ability(392, "Morph_LurkerDen_quick", raw_cmd, 2112),
Function.raw_ability(391, "Morph_Lurker_quick", raw_cmd, 2332),
Function.raw_ability(393, "Morph_Mothership_quick", raw_cmd, 1847),
Function.raw_ability(121, "Morph_ObserverMode_quick", raw_cmd, 3739),
Function.raw_ability(394, "Morph_OrbitalCommand_quick", raw_cmd, 1516),
Function.raw_ability(395, "Morph_OverlordTransport_quick", raw_cmd, 2708),
Function.raw_ability(397, "Morph_OverseerMode_quick", raw_cmd, 3745),
Function.raw_ability(396, "Morph_Overseer_quick", raw_cmd, 1448),
Function.raw_ability(398, "Morph_OversightMode_quick", raw_cmd, 3743),
Function.raw_ability(399, "Morph_PlanetaryFortress_quick", raw_cmd, 1450),
Function.raw_ability(400, "Morph_Ravager_quick", raw_cmd, 2330),
Function.raw_ability(401, "Morph_Root_pt", raw_cmd_pt, 3680),
Function.raw_ability(402, "Morph_SiegeMode_quick", raw_cmd, 388),
Function.raw_ability(403, "Morph_SpineCrawlerRoot_pt", raw_cmd_pt, 1729, 3680),
Function.raw_ability(404, "Morph_SpineCrawlerUproot_quick", raw_cmd, 1725, 3681),
Function.raw_ability(405, "Morph_SporeCrawlerRoot_pt", raw_cmd_pt, 1731, 3680),
Function.raw_ability(406, "Morph_SporeCrawlerUproot_quick", raw_cmd, 1727, 3681),
Function.raw_ability(407, "Morph_SupplyDepot_Lower_quick", raw_cmd, 556),
Function.raw_ability(408, "Morph_SupplyDepot_Raise_quick", raw_cmd, 558),
Function.raw_ability(160, "Morph_SurveillanceMode_quick", raw_cmd, 3741),
Function.raw_ability(409, "Morph_ThorExplosiveMode_quick", raw_cmd, 2364),
Function.raw_ability(410, "Morph_ThorHighImpactMode_quick", raw_cmd, 2362),
Function.raw_ability(411, "Morph_Unsiege_quick", raw_cmd, 390),
Function.raw_ability(412, "Morph_Uproot_quick", raw_cmd, 3681),
Function.raw_ability(413, "Morph_VikingAssaultMode_quick", raw_cmd, 403),
Function.raw_ability(414, "Morph_VikingFighterMode_quick", raw_cmd, 405),
Function.raw_ability(77, "Morph_WarpGate_quick", raw_cmd, 1518),
Function.raw_ability(544, "Morph_WarpGate_autocast", raw_autocast, 1518),
Function.raw_ability(80, "Morph_WarpPrismPhasingMode_quick", raw_cmd, 1528),
Function.raw_ability(81, "Morph_WarpPrismTransportMode_quick", raw_cmd, 1530),
Function.raw_ability(13, "Move_pt", raw_cmd_pt, 3794),
Function.raw_ability(14, "Move_unit", raw_cmd_unit, 3794),
Function.raw_ability(545, "Move_Battlecruiser_pt", raw_cmd_pt, 3776, 3794),
Function.raw_ability(546, "Move_Battlecruiser_unit", raw_cmd_unit, 3776, 3794),
Function.raw_ability(547, "Move_Move_pt", raw_cmd_pt, 16, 3794),
Function.raw_ability(548, "Move_Move_unit", raw_cmd_unit, 16, 3794),
Function.raw_ability(15, "Patrol_pt", raw_cmd_pt, 3795),
Function.raw_ability(16, "Patrol_unit", raw_cmd_unit, 3795),
Function.raw_ability(549, "Patrol_Battlecruiser_pt", raw_cmd_pt, 3777, 3795),
Function.raw_ability(550, "Patrol_Battlecruiser_unit", raw_cmd_unit, 3777, 3795),
Function.raw_ability(551, "Patrol_Patrol_pt", raw_cmd_pt, 17, 3795),
Function.raw_ability(552, "Patrol_Patrol_unit", raw_cmd_unit, 17, 3795),
Function.raw_ability(415, "Rally_Building_pt", raw_cmd_pt, 195, 3673),
Function.raw_ability(416, "Rally_Building_unit", raw_cmd_unit, 195, 3673),
Function.raw_ability(417, "Rally_CommandCenter_pt", raw_cmd_pt, 203, 3690),
Function.raw_ability(418, "Rally_CommandCenter_unit", raw_cmd_unit, 203, 3690),
Function.raw_ability(419, "Rally_Hatchery_Units_pt", raw_cmd_pt, 211, 3673),
Function.raw_ability(420, "Rally_Hatchery_Units_unit", raw_cmd_unit, 211, 3673),
Function.raw_ability(421, "Rally_Hatchery_Workers_pt", raw_cmd_pt, 212, 3690),
Function.raw_ability(422, "Rally_Hatchery_Workers_unit", raw_cmd_unit, 212, 3690),
Function.raw_ability(423, "Rally_Morphing_Unit_pt", raw_cmd_pt, 199, 3673),
Function.raw_ability(424, "Rally_Morphing_Unit_unit", raw_cmd_unit, 199, 3673),
Function.raw_ability(138, "Rally_Nexus_pt", raw_cmd_pt, 207, 3690),
Function.raw_ability(165, "Rally_Nexus_unit", raw_cmd_unit, 207, 3690),
Function.raw_ability(106, "Rally_Units_pt", raw_cmd_pt, 3673),
Function.raw_ability(107, "Rally_Units_unit", raw_cmd_unit, 3673),
Function.raw_ability(114, "Rally_Workers_pt", raw_cmd_pt, 3690),
Function.raw_ability(115, "Rally_Workers_unit", raw_cmd_unit, 3690),
Function.raw_ability(425, "Research_AdaptiveTalons_quick", raw_cmd, 3709),
Function.raw_ability(85, "Research_AdeptResonatingGlaives_quick", raw_cmd, 1594),
Function.raw_ability(426, "Research_AdvancedBallistics_quick", raw_cmd, 805),
Function.raw_ability(553, "Research_AnabolicSynthesis_quick", raw_cmd, 263),
Function.raw_ability(427, "Research_BansheeCloakingField_quick", raw_cmd, 790),
Function.raw_ability(428, "Research_BansheeHyperflightRotors_quick", raw_cmd, 799),
Function.raw_ability(429, "Research_BattlecruiserWeaponRefit_quick", raw_cmd, 1532),
Function.raw_ability(84, "Research_Blink_quick", raw_cmd, 1593),
Function.raw_ability(430, "Research_Burrow_quick", raw_cmd, 1225),
Function.raw_ability(431, "Research_CentrifugalHooks_quick", raw_cmd, 1482),
Function.raw_ability(83, "Research_Charge_quick", raw_cmd, 1592),
Function.raw_ability(432, "Research_ChitinousPlating_quick", raw_cmd, 265),
Function.raw_ability(433, "Research_CombatShield_quick", raw_cmd, 731),
Function.raw_ability(434, "Research_ConcussiveShells_quick", raw_cmd, 732),
Function.raw_ability(554, "Research_CycloneLockOnDamage_quick", raw_cmd, 769),
Function.raw_ability(435, "Research_CycloneRapidFireLaunchers_quick", raw_cmd, 768),
Function.raw_ability(436, "Research_DrillingClaws_quick", raw_cmd, 764),
Function.raw_ability(563, "Research_EnhancedShockwaves_quick", raw_cmd, 822),
Function.raw_ability(69, "Research_ExtendedThermalLance_quick", raw_cmd, 1097),
Function.raw_ability(437, "Research_GlialRegeneration_quick", raw_cmd, 216),
Function.raw_ability(67, "Research_GraviticBooster_quick", raw_cmd, 1093),
Function.raw_ability(68, "Research_GraviticDrive_quick", raw_cmd, 1094),
Function.raw_ability(438, "Research_GroovedSpines_quick", raw_cmd, 1282),
Function.raw_ability(440, "Research_HighCapacityFuelTanks_quick", raw_cmd, 804),
Function.raw_ability(439, "Research_HiSecAutoTracking_quick", raw_cmd, 650),
Function.raw_ability(441, "Research_InfernalPreigniter_quick", raw_cmd, 761),
Function.raw_ability(18, "Research_InterceptorGravitonCatapult_quick", raw_cmd, 44),
Function.raw_ability(442, "Research_MuscularAugments_quick", raw_cmd, 1283),
Function.raw_ability(443, "Research_NeosteelFrame_quick", raw_cmd, 655),
Function.raw_ability(444, "Research_NeuralParasite_quick", raw_cmd, 1455),
Function.raw_ability(445, "Research_PathogenGlands_quick", raw_cmd, 1454),
Function.raw_ability(446, "Research_PersonalCloaking_quick", raw_cmd, 820),
Function.raw_ability(19, "Research_PhoenixAnionPulseCrystals_quick", raw_cmd, 46),
Function.raw_ability(447, "Research_PneumatizedCarapace_quick", raw_cmd, 1223),
Function.raw_ability(139, "Research_ProtossAirArmorLevel1_quick", raw_cmd, 1565, 3692),
Function.raw_ability(140, "Research_ProtossAirArmorLevel2_quick", raw_cmd, 1566, 3692),
Function.raw_ability(141, "Research_ProtossAirArmorLevel3_quick", raw_cmd, 1567, 3692),
Function.raw_ability(116, "Research_ProtossAirArmor_quick", raw_cmd, 3692),
Function.raw_ability(142, "Research_ProtossAirWeaponsLevel1_quick", raw_cmd, 1562, 3693),
Function.raw_ability(143, "Research_ProtossAirWeaponsLevel2_quick", raw_cmd, 1563, 3693),
Function.raw_ability(144, "Research_ProtossAirWeaponsLevel3_quick", raw_cmd, 1564, 3693),
Function.raw_ability(117, "Research_ProtossAirWeapons_quick", raw_cmd, 3693),
Function.raw_ability(145, "Research_ProtossGroundArmorLevel1_quick", raw_cmd, 1065, 3694),
Function.raw_ability(146, "Research_ProtossGroundArmorLevel2_quick", raw_cmd, 1066, 3694),
Function.raw_ability(147, "Research_ProtossGroundArmorLevel3_quick", raw_cmd, 1067, 3694),
Function.raw_ability(118, "Research_ProtossGroundArmor_quick", raw_cmd, 3694),
Function.raw_ability(148, "Research_ProtossGroundWeaponsLevel1_quick", raw_cmd, 1062, 3695),
Function.raw_ability(149, "Research_ProtossGroundWeaponsLevel2_quick", raw_cmd, 1063, 3695),
Function.raw_ability(150, "Research_ProtossGroundWeaponsLevel3_quick", raw_cmd, 1064, 3695),
Function.raw_ability(119, "Research_ProtossGroundWeapons_quick", raw_cmd, 3695),
Function.raw_ability(151, "Research_ProtossShieldsLevel1_quick", raw_cmd, 1068, 3696),
Function.raw_ability(152, "Research_ProtossShieldsLevel2_quick", raw_cmd, 1069, 3696),
Function.raw_ability(153, "Research_ProtossShieldsLevel3_quick", raw_cmd, 1070, 3696),
Function.raw_ability(120, "Research_ProtossShields_quick", raw_cmd, 3696),
Function.raw_ability(70, "Research_PsiStorm_quick", raw_cmd, 1126),
Function.raw_ability(448, "Research_RavenCorvidReactor_quick", raw_cmd, 793),
Function.raw_ability(449, "Research_RavenRecalibratedExplosives_quick", raw_cmd, 803),
Function.raw_ability(97, "Research_ShadowStrike_quick", raw_cmd, 2720),
Function.raw_ability(450, "Research_SmartServos_quick", raw_cmd, 766),
Function.raw_ability(451, "Research_Stimpack_quick", raw_cmd, 730),
Function.raw_ability(453, "Research_TerranInfantryArmorLevel1_quick", raw_cmd, 656, 3697),
Function.raw_ability(454, "Research_TerranInfantryArmorLevel2_quick", raw_cmd, 657, 3697),
Function.raw_ability(455, "Research_TerranInfantryArmorLevel3_quick", raw_cmd, 658, 3697),
Function.raw_ability(452, "Research_TerranInfantryArmor_quick", raw_cmd, 3697),
Function.raw_ability(457, "Research_TerranInfantryWeaponsLevel1_quick", raw_cmd, 652, 3698),
Function.raw_ability(458, "Research_TerranInfantryWeaponsLevel2_quick", raw_cmd, 653, 3698),
Function.raw_ability(459, "Research_TerranInfantryWeaponsLevel3_quick", raw_cmd, 654, 3698),
Function.raw_ability(456, "Research_TerranInfantryWeapons_quick", raw_cmd, 3698),
Function.raw_ability(461, "Research_TerranShipWeaponsLevel1_quick", raw_cmd, 861, 3699),
Function.raw_ability(462, "Research_TerranShipWeaponsLevel2_quick", raw_cmd, 862, 3699),
Function.raw_ability(463, "Research_TerranShipWeaponsLevel3_quick", raw_cmd, 863, 3699),
Function.raw_ability(460, "Research_TerranShipWeapons_quick", raw_cmd, 3699),
Function.raw_ability(464, "Research_TerranStructureArmorUpgrade_quick", raw_cmd, 651),
Function.raw_ability(466, "Research_TerranVehicleAndShipPlatingLevel1_quick", raw_cmd, 864, 3700),
Function.raw_ability(467, "Research_TerranVehicleAndShipPlatingLevel2_quick", raw_cmd, 865, 3700),
Function.raw_ability(468, "Research_TerranVehicleAndShipPlatingLevel3_quick", raw_cmd, 866, 3700),
Function.raw_ability(465, "Research_TerranVehicleAndShipPlating_quick", raw_cmd, 3700),
Function.raw_ability(470, "Research_TerranVehicleWeaponsLevel1_quick", raw_cmd, 855, 3701),
Function.raw_ability(471, "Research_TerranVehicleWeaponsLevel2_quick", raw_cmd, 856, 3701),
Function.raw_ability(472, "Research_TerranVehicleWeaponsLevel3_quick", raw_cmd, 857, 3701),
Function.raw_ability(469, "Research_TerranVehicleWeapons_quick", raw_cmd, 3701),
Function.raw_ability(473, "Research_TunnelingClaws_quick", raw_cmd, 217),
Function.raw_ability(82, "Research_WarpGate_quick", raw_cmd, 1568),
Function.raw_ability(475, "Research_ZergFlyerArmorLevel1_quick", raw_cmd, 1315, 3702),
Function.raw_ability(476, "Research_ZergFlyerArmorLevel2_quick", raw_cmd, 1316, 3702),
Function.raw_ability(477, "Research_ZergFlyerArmorLevel3_quick", raw_cmd, 1317, 3702),
Function.raw_ability(474, "Research_ZergFlyerArmor_quick", raw_cmd, 3702),
Function.raw_ability(479, "Research_ZergFlyerAttackLevel1_quick", raw_cmd, 1312, 3703),
Function.raw_ability(480, "Research_ZergFlyerAttackLevel2_quick", raw_cmd, 1313, 3703),
Function.raw_ability(481, "Research_ZergFlyerAttackLevel3_quick", raw_cmd, 1314, 3703),
Function.raw_ability(478, "Research_ZergFlyerAttack_quick", raw_cmd, 3703),
Function.raw_ability(483, "Research_ZergGroundArmorLevel1_quick", raw_cmd, 1189, 3704),
Function.raw_ability(484, "Research_ZergGroundArmorLevel2_quick", raw_cmd, 1190, 3704),
Function.raw_ability(485, "Research_ZergGroundArmorLevel3_quick", raw_cmd, 1191, 3704),
Function.raw_ability(482, "Research_ZergGroundArmor_quick", raw_cmd, 3704),
Function.raw_ability(494, "Research_ZerglingAdrenalGlands_quick", raw_cmd, 1252),
Function.raw_ability(495, "Research_ZerglingMetabolicBoost_quick", raw_cmd, 1253),
Function.raw_ability(487, "Research_ZergMeleeWeaponsLevel1_quick", raw_cmd, 1186, 3705),
Function.raw_ability(488, "Research_ZergMeleeWeaponsLevel2_quick", raw_cmd, 1187, 3705),
Function.raw_ability(489, "Research_ZergMeleeWeaponsLevel3_quick", raw_cmd, 1188, 3705),
Function.raw_ability(486, "Research_ZergMeleeWeapons_quick", raw_cmd, 3705),
Function.raw_ability(491, "Research_ZergMissileWeaponsLevel1_quick", raw_cmd, 1192, 3706),
Function.raw_ability(492, "Research_ZergMissileWeaponsLevel2_quick", raw_cmd, 1193, 3706),
Function.raw_ability(493, "Research_ZergMissileWeaponsLevel3_quick", raw_cmd, 1194, 3706),
Function.raw_ability(490, "Research_ZergMissileWeapons_quick", raw_cmd, 3706),
Function.raw_ability(10, "Scan_Move_pt", raw_cmd_pt, 19, 3674),
Function.raw_ability(11, "Scan_Move_unit", raw_cmd_unit, 19, 3674),
Function.raw_ability(1, "Smart_pt", raw_cmd_pt, 1),
Function.raw_ability(12, "Smart_unit", raw_cmd_unit, 1),
Function.raw_ability(101, "Stop_quick", raw_cmd, 3665),
Function.raw_ability(555, "Stop_Battlecruiser_quick", raw_cmd, 3783, 3665),
Function.raw_ability(496, "Stop_Building_quick", raw_cmd, 2057, 3665),
Function.raw_ability(497, "Stop_Redirect_quick", raw_cmd, 1691, 3665),
Function.raw_ability(155, "Stop_Stop_quick", raw_cmd, 4, 3665),
Function.raw_ability(54, "Train_Adept_quick", raw_cmd, 922),
Function.raw_ability(498, "Train_Baneling_quick", raw_cmd, 80),
Function.raw_ability(499, "Train_Banshee_quick", raw_cmd, 621),
Function.raw_ability(500, "Train_Battlecruiser_quick", raw_cmd, 623),
Function.raw_ability(56, "Train_Carrier_quick", raw_cmd, 948),
Function.raw_ability(62, "Train_Colossus_quick", raw_cmd, 978),
Function.raw_ability(501, "Train_Corruptor_quick", raw_cmd, 1353),
Function.raw_ability(502, "Train_Cyclone_quick", raw_cmd, 597),
Function.raw_ability(52, "Train_DarkTemplar_quick", raw_cmd, 920),
Function.raw_ability(166, "Train_Disruptor_quick", raw_cmd, 994),
Function.raw_ability(503, "Train_Drone_quick", raw_cmd, 1342),
Function.raw_ability(504, "Train_Ghost_quick", raw_cmd, 562),
Function.raw_ability(505, "Train_Hellbat_quick", raw_cmd, 596),
Function.raw_ability(506, "Train_Hellion_quick", raw_cmd, 595),
Function.raw_ability(51, "Train_HighTemplar_quick", raw_cmd, 919),
Function.raw_ability(507, "Train_Hydralisk_quick", raw_cmd, 1345),
Function.raw_ability(63, "Train_Immortal_quick", raw_cmd, 979),
Function.raw_ability(508, "Train_Infestor_quick", raw_cmd, 1352),
Function.raw_ability(509, "Train_Liberator_quick", raw_cmd, 626),
Function.raw_ability(510, "Train_Marauder_quick", raw_cmd, 563),
Function.raw_ability(511, "Train_Marine_quick", raw_cmd, 560),
Function.raw_ability(512, "Train_Medivac_quick", raw_cmd, 620),
Function.raw_ability(513, "Train_MothershipCore_quick", raw_cmd, 1853),
Function.raw_ability(21, "Train_Mothership_quick", raw_cmd, 110),
Function.raw_ability(514, "Train_Mutalisk_quick", raw_cmd, 1346),
Function.raw_ability(61, "Train_Observer_quick", raw_cmd, 977),
Function.raw_ability(58, "Train_Oracle_quick", raw_cmd, 954),
Function.raw_ability(515, "Train_Overlord_quick", raw_cmd, 1344),
Function.raw_ability(55, "Train_Phoenix_quick", raw_cmd, 946),
Function.raw_ability(64, "Train_Probe_quick", raw_cmd, 1006),
Function.raw_ability(516, "Train_Queen_quick", raw_cmd, 1632),
Function.raw_ability(517, "Train_Raven_quick", raw_cmd, 622),
Function.raw_ability(518, "Train_Reaper_quick", raw_cmd, 561),
Function.raw_ability(519, "Train_Roach_quick", raw_cmd, 1351),
Function.raw_ability(520, "Train_SCV_quick", raw_cmd, 524),
Function.raw_ability(53, "Train_Sentry_quick", raw_cmd, 921),
Function.raw_ability(521, "Train_SiegeTank_quick", raw_cmd, 591),
Function.raw_ability(50, "Train_Stalker_quick", raw_cmd, 917),
Function.raw_ability(522, "Train_SwarmHost_quick", raw_cmd, 1356),
Function.raw_ability(59, "Train_Tempest_quick", raw_cmd, 955),
Function.raw_ability(523, "Train_Thor_quick", raw_cmd, 594),
Function.raw_ability(524, "Train_Ultralisk_quick", raw_cmd, 1348),
Function.raw_ability(525, "Train_VikingFighter_quick", raw_cmd, 624),
Function.raw_ability(526, "Train_Viper_quick", raw_cmd, 1354),
Function.raw_ability(57, "Train_VoidRay_quick", raw_cmd, 950),
Function.raw_ability(76, "TrainWarp_Adept_pt", raw_cmd_pt, 1419),
Function.raw_ability(74, "TrainWarp_DarkTemplar_pt", raw_cmd_pt, 1417),
Function.raw_ability(73, "TrainWarp_HighTemplar_pt", raw_cmd_pt, 1416),
Function.raw_ability(60, "Train_WarpPrism_quick", raw_cmd, 976),
Function.raw_ability(75, "TrainWarp_Sentry_pt", raw_cmd_pt, 1418),
Function.raw_ability(72, "TrainWarp_Stalker_pt", raw_cmd_pt, 1414),
Function.raw_ability(71, "TrainWarp_Zealot_pt", raw_cmd_pt, 1413),
Function.raw_ability(527, "Train_WidowMine_quick", raw_cmd, 614),
Function.raw_ability(49, "Train_Zealot_quick", raw_cmd, 916),
Function.raw_ability(528, "Train_Zergling_quick", raw_cmd, 1343),
Function.raw_ability(529, "UnloadAllAt_Medivac_pt", raw_cmd_pt, 396, 3669),
Function.raw_ability(530, "UnloadAllAt_Medivac_unit", raw_cmd_unit, 396, 3669),
Function.raw_ability(531, "UnloadAllAt_Overlord_pt", raw_cmd_pt, 1408, 3669),
Function.raw_ability(532, "UnloadAllAt_Overlord_unit", raw_cmd_unit, 1408, 3669),
Function.raw_ability(105, "UnloadAllAt_pt", raw_cmd_pt, 3669),
Function.raw_ability(164, "UnloadAllAt_unit", raw_cmd_unit, 3669),
Function.raw_ability(156, "UnloadAllAt_WarpPrism_pt", raw_cmd_pt, 913, 3669),
Function.raw_ability(163, "UnloadAllAt_WarpPrism_unit", raw_cmd_unit, 913, 3669),
Function.raw_ability(533, "UnloadAll_Bunker_quick", raw_cmd, 408, 3664),
Function.raw_ability(534, "UnloadAll_CommandCenter_quick", raw_cmd, 413, 3664),
Function.raw_ability(535, "UnloadAll_NydusNetwork_quick", raw_cmd, 1438, 3664),
Function.raw_ability(536, "UnloadAll_NydusWorm_quick", raw_cmd, 2371, 3664),
Function.raw_ability(100, "UnloadAll_quick", raw_cmd, 3664),
Function.raw_ability(556, "UnloadUnit_quick", raw_cmd, 3796),
Function.raw_ability(557, "UnloadUnit_Bunker_quick", raw_cmd, 410, 3796),
Function.raw_ability(558, "UnloadUnit_CommandCenter_quick", raw_cmd, 415, 3796),
Function.raw_ability(559, "UnloadUnit_Medivac_quick", raw_cmd, 397, 3796),
Function.raw_ability(560, "UnloadUnit_NydusNetwork_quick", raw_cmd, 1440, 3796),
Function.raw_ability(561, "UnloadUnit_Overlord_quick", raw_cmd, 1409, 3796),
Function.raw_ability(562, "UnloadUnit_WarpPrism_quick", raw_cmd, 914, 3796),
]
# pylint: enable=line-too-long
# Create an IntEnum of the function names/ids so that printing the id will
# show something useful.
_Raw_Functions = enum.IntEnum(  # pylint: disable=invalid-name
    "_Raw_Functions", {f.name: f.id for f in _RAW_FUNCTIONS})
# Rebuild the function list with each plain int id replaced by its enum value,
# so reprs show the function name rather than a bare int.
_RAW_FUNCTIONS = [f._replace(id=_Raw_Functions(f.id)) for f in _RAW_FUNCTIONS]
RAW_FUNCTIONS = Functions(_RAW_FUNCTIONS)
# Some indexes to support features.py and action conversion.
RAW_ABILITY_IDS = collections.defaultdict(set)  # {ability_id: {funcs}}
for _func in RAW_FUNCTIONS:
  if _func.ability_id >= 0:  # Negative means no game ability is associated.
    RAW_ABILITY_IDS[_func.ability_id].add(_func)
# Freeze the per-ability sets so the index can't be mutated accidentally.
RAW_ABILITY_IDS = {k: frozenset(v) for k, v in RAW_ABILITY_IDS.items()}
# Only the functions that carry an avail_fn for computing availability.
RAW_FUNCTIONS_AVAILABLE = {f.id: f for f in RAW_FUNCTIONS if f.avail_fn}
# For each ability, pick the lowest (most general) function id mapped to it.
RAW_ABILITY_ID_TO_FUNC_ID = {k: min(f.id for f in v)  # pylint: disable=g-complex-comprehension
                             for k, v in RAW_ABILITY_IDS.items()}
class FunctionCall(collections.namedtuple(
    "FunctionCall", ["function", "arguments"])):
  """Represents a function call action.

  Attributes:
    function: Store the function id, eg 2 for select_point.
    arguments: The list of arguments for that function, each being a list of
      ints. For select_point this could be: [[0], [23, 38]].
  """
  __slots__ = ()

  @classmethod
  def init_with_validation(cls, function, arguments, raw=False):
    """Return a `FunctionCall` given some validation for the function and args.

    Args:
      function: A function name or id, to be converted into a function id enum.
      arguments: An iterable of function arguments. Arguments that are enum
        types can be passed by name. Arguments that only take one value (ie
        not a point) don't need to be wrapped in a list.
      raw: Whether this is a raw function call.

    Returns:
      A new `FunctionCall` instance.

    Raises:
      KeyError: if the enum name doesn't exist.
      ValueError: if the enum id doesn't exist.
    """
    func = RAW_FUNCTIONS[function] if raw else FUNCTIONS[function]
    args = []
    # Note: zip stops at the shorter sequence, so surplus arguments are
    # silently dropped rather than rejected here.
    for arg, arg_type in zip(arguments, func.args):
      arg = numpy_to_python(arg)
      if arg_type.values:  # Allow enum values by name or int.
        if isinstance(arg, str):
          try:
            args.append([arg_type.values[arg]])
          except KeyError as e:
            # Chain explicitly so the original lookup failure is preserved
            # as the cause of the friendlier error (B904).
            raise KeyError("Unknown argument value: %s, valid values: %s" % (
                arg, [v.name for v in arg_type.values])) from e
        else:
          if isinstance(arg, list):
            arg = arg[0]  # Unwrap single-value arguments passed as [value].
          try:
            args.append([arg_type.values(arg)])
          except ValueError as e:
            raise ValueError("Unknown argument value: %s, valid values: %s" % (
                arg, list(arg_type.values))) from e
      elif isinstance(arg, int):  # Allow bare ints.
        args.append([arg])
      elif isinstance(arg, list):
        args.append(arg)
      else:
        raise ValueError(
            "Unknown argument value type: %s, expected int or list of ints, or "
            "their numpy equivalents. Value: %s" % (type(arg), arg))
    return cls(func.id, args)

  @classmethod
  def all_arguments(cls, function, arguments, raw=False):
    """Helper function for creating `FunctionCall`s with `Arguments`.

    Args:
      function: The value to store for the action function.
      arguments: The values to store for the arguments of the action. Can
        either be an `Arguments` object, a `dict`, or an iterable. If a `dict`
        or an iterable is provided, the values will be unpacked into an
        `Arguments` object.
      raw: Whether this is a raw function call.

    Returns:
      A new `FunctionCall` instance.
    """
    args_type = RawArguments if raw else Arguments
    if isinstance(arguments, dict):
      arguments = args_type(**arguments)
    elif not isinstance(arguments, args_type):
      arguments = args_type(*arguments)
    return cls(function, arguments)

  def __reduce__(self):
    # Support copy/pickle of namedtuple subclasses with __slots__.
    return self.__class__, tuple(self)
class ValidActions(collections.namedtuple(
    "ValidActions", ["types", "functions"])):
  """The set of types and functions that are valid for an agent to use.

  Attributes:
    types: A namedtuple of the types that the functions require. Unlike TYPES
      above, this includes the sizes for screen and minimap.
    functions: A namedtuple of all the functions.
  """
  __slots__ = ()

  def __reduce__(self):
    # Make copy/pickle work despite the namedtuple subclass using __slots__.
    return self.__class__, (self.types, self.functions)
| pysc2-master | pysc2/lib/actions.py |
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| pysc2-master | pysc2/lib/__init__.py |
# Copyright 2019 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Diff proto objects returning paths to changed attributes."""
import deepdiff
from google.protobuf import json_format
# Stand-in written over concrete array indices so that paths differing only
# by index compare equal (see ProtoPath.with_anonymous_array_indices).
_ARRAY_PLACEHOLDER = "*"
class ProtoPath(object):
  """Path to a proto field, from the root of the proto object.

  A path is a sequence of attribute names and (for repeated fields) integer
  array indices, e.g. ("observation", "raw_data", "units", 3, "pos").
  Instances are immutable, hashable and totally ordered.
  """

  def __init__(self, path):
    """Initializer.

    Args:
      path: Tuple of attribute names / array indices on the path to a field.
    """
    self._path = tuple(path)

  def get_field(self, proto):
    """Returns field at this proto path, in the specified proto."""
    value = proto
    for k in self._path:
      if isinstance(k, int):
        value = value[k]  # Integer keys index into repeated fields.
      else:
        value = getattr(value, k)
    return value

  def with_anonymous_array_indices(self):
    """Path with array indices replaced with '*' so that they compare equal."""
    return ProtoPath(
        tuple(_ARRAY_PLACEHOLDER if isinstance(t, int) else t
              for t in self._path))

  @property
  def path(self):
    return self._path

  def __lt__(self, other):
    # Fix: return NotImplemented for foreign types instead of raising
    # AttributeError on the missing `path` attribute.
    if not isinstance(other, ProtoPath):
      return NotImplemented
    # Lexicographic comparison; a strict prefix sorts before its extension.
    for k1, k2 in zip(self._path, other.path):
      if k1 < k2:
        return True
      elif k1 > k2:
        return False
    return len(self._path) < len(other.path)

  def __getitem__(self, item):
    return self._path.__getitem__(item)

  def __len__(self):
    return len(self._path)

  def __eq__(self, o):
    # Fix: comparing against a non-ProtoPath used to raise AttributeError;
    # per the data model, unsupported comparisons should yield NotImplemented
    # (so `p == other` falls back and evaluates to False).
    if not isinstance(o, ProtoPath):
      return NotImplemented
    return self._path == o.path

  def __hash__(self):
    # Consistent with __eq__: equal paths hash equally.
    return hash(self._path)

  def __repr__(self):
    result = ""
    for k in self._path:
      if isinstance(k, int) or k == _ARRAY_PLACEHOLDER:
        result += "[{}]".format(k)
      else:
        result += ("." if result else "") + k
    return result
class ProtoDiffs(object):
  """Summary of diffs between two protos."""

  def __init__(self, proto_a, proto_b, changed, added, removed):
    """Initializer.

    Args:
      proto_a: First proto.
      proto_b: Second proto.
      changed: Paths to attributes which changed between the two.
      added: Paths to attributes added from proto_a -> proto_b.
      removed: Paths to attributes removed from proto_a -> proto_b.
    """
    self._proto_a = proto_a
    self._proto_b = proto_b
    # Store sorted copies so reports come out in a deterministic order.
    self._changed = sorted(changed)
    self._added = sorted(added)
    self._removed = sorted(removed)

  @property
  def proto_a(self):
    return self._proto_a

  @property
  def proto_b(self):
    return self._proto_b

  @property
  def changed(self):
    return self._changed

  @property
  def added(self):
    return self._added

  @property
  def removed(self):
    return self._removed

  def all_diffs(self):
    """Returns every diff path: changed, then added, then removed."""
    return self.changed + self.added + self.removed

  def report(self, differencers=None, truncate_to=0):
    """Returns a string report of diffs.

    Additions and removals are identified by proto path. Changes in value are
    reported as path: old_value -> new_value by default, though this can be
    customized via the differencers argument.

    Args:
      differencers: Iterable of callable(path, proto_a, proto_b) -> str or None
        If a string is returned it is used to represent the diff between
        path.get_field(proto_a) and path.get_field(proto_b), and no further
        differencers are invoked. If None is returned by all differencers, the
        default string diff is used.
      truncate_to: Number of characters to truncate diff output values to.
        Zero, the default, means no truncation.
    """
    lines = []
    lines.extend("Added {}.".format(p) for p in self._added)
    lines.extend("Removed {}.".format(p) for p in self._removed)
    for changed_path in self._changed:
      description = None
      # First differencer to return a truthy string wins.
      for differencer in (differencers or ()):
        description = differencer(changed_path, self._proto_a, self._proto_b)
        if description:
          break
      if description:
        description = _truncate(description, truncate_to)
      else:
        # Default rendering: each side's value, truncated independently.
        description = "{} -> {}".format(
            _truncate(changed_path.get_field(self._proto_a), truncate_to),
            _truncate(changed_path.get_field(self._proto_b), truncate_to))
      lines.append("Changed {}: {}.".format(changed_path, description))
    return "\n".join(lines) if lines else "No diffs."

  def __repr__(self):
    return "changed: {}, added: {}, removed: {}".format(
        self._changed, self._added, self._removed)
def _truncate(val, truncate_to):
  """Return str(val), shortened with a trailing "..." if it is too long.

  Args:
    val: Any value; rendered with str().
    truncate_to: Maximum length; zero (or any falsy value) disables truncation.
  """
  text = str(val)
  if not truncate_to or len(text) <= truncate_to:
    return text
  return text[:max(truncate_to - 3, 0)] + "..."
def _dict_path_to_proto_path(dict_path):
  """Convert a DeepDiff path like "root['a'][0]" into a ProtoPath."""
  tokens = dict_path[5:-1].split("][")  # Drop "root[" and trailing "]".
  # Quoted tokens are field names; unquoted tokens are repeated-field indices.
  return ProtoPath(
      token[1:-1] if token.startswith("'") else int(token)
      for token in tokens)
def compute_diff(proto_a, proto_b):
  """Returns `ProtoDiff` of two protos, else None if no diffs.

  Args:
    proto_a: First of the two protos to compare.
    proto_b: Second of the two protos to compare.
  """
  dict_a = json_format.MessageToDict(proto_a, preserving_proto_field_name=True)
  dict_b = json_format.MessageToDict(proto_b, preserving_proto_field_name=True)
  diff = deepdiff.DeepDiff(dict_a, dict_b, significant_digits=3)
  if not diff:
    return None

  def _pop_paths(*diff_keys):
    # Drain the named DeepDiff result buckets, converting each recorded dict
    # path into a ProtoPath.
    paths = []
    for diff_key in diff_keys:
      paths.extend(
          _dict_path_to_proto_path(p) for p in diff.pop(diff_key, []))
    return paths

  changed_paths = _pop_paths("values_changed")
  added_paths = _pop_paths("dictionary_item_added", "iterable_item_added")
  removed_paths = _pop_paths("dictionary_item_removed",
                             "iterable_item_removed")
  if diff:
    # Anything left is a DeepDiff category this function doesn't understand.
    raise ValueError("Unhandled diffs: {}".format(diff))
  return ProtoDiffs(
      proto_a=proto_a,
      proto_b=proto_b,
      changed=changed_paths,
      added=added_paths,
      removed=removed_paths)
| pysc2-master | pysc2/lib/proto_diff.py |
#!/usr/bin/python
# Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for lib.named_array."""
import collections
import enum
import pickle
from absl.testing import absltest
from absl.testing import parameterized
import numpy as np
from pysc2.lib import named_array
class NamedDictTest(absltest.TestCase):
  """Tests for NamedDict's dual attribute/key access."""

  def test_named_dict(self):
    nd = named_array.NamedDict(a=2, b=(1, 2))
    # Key access and attribute access must resolve to the same objects.
    self.assertEqual(nd["a"], nd.a)
    self.assertEqual(nd["b"], nd.b)
    self.assertIs(nd["b"], nd.b)
    self.assertNotEqual(nd["a"], nd.b)
    # An attribute assignment must be visible through key access too.
    nd.c = 3
    self.assertEqual(nd["c"], 3)
class TestEnum(enum.IntEnum):
  """Valid axis names: values are exactly 0..2, indexing a length-3 axis."""
  a = 0
  b = 1
  c = 2
class BadEnum(enum.IntEnum):
  """Invalid as axis names: values start at 1, not 0."""
  a = 1
  b = 2
  c = 3
class TestNamedTuple(collections.namedtuple("TestNamedTuple", ["a", "b", "c"])):
  """Valid axis names: one field per element of a length-3 axis."""
  pass
class BadNamedTuple(collections.namedtuple("BadNamedTuple", ["a", "b"])):
  """Invalid as names for length-3 data: only two fields."""
  pass
class NamedArrayTest(parameterized.TestCase):
  """Tests for NamedNumpyArray: named lookup, slicing, repr and pickling."""

  def assertArrayEqual(self, a, b):
    # Element-wise comparison with numpy's diagnostics on mismatch.
    np.testing.assert_array_equal(a, b)

  @parameterized.named_parameters(
      ("none", None),
      ("none2", [None]),
      ("short_list", ["a"]),
      ("long_list", ["a", "b", "c", "d"]),
      ("long_list2", [["a", "b", "c", "d"]]),
      ("ints", [[1, "b", 3]]),
      ("bad_enum", [BadEnum]),
      ("bad_namedtuple", [BadNamedTuple]),
      ("dict", [{"a": 0, "b": 1, "c": 2}]),
      ("set", [{"a", "b", "c"}]),
  )
  def test_bad_names(self, names):
    # Name specs that can't label a 3-element dimension must be rejected.
    with self.assertRaises(ValueError):
      named_array.NamedNumpyArray([1, 3, 6], names)

  @parameterized.named_parameters(
      ("list", ["a", "b", "c"]),
      ("tuple", ("a", "b", "c")),
      ("list2", [["a", "b", "c"]]),
      ("tuple2", (("a", "b", "c"))),
      ("list_tuple", [("a", "b", "c")]),
      ("named_tuple", TestNamedTuple),
      ("named_tuple2", [TestNamedTuple]),
      ("int_enum", TestEnum),
      ("int_enum2", [TestEnum]),
  )
  def test_single_dimension(self, names):
    a = named_array.NamedNumpyArray([1, 3, 6], names)
    self.assertEqual(a[0], 1)
    self.assertEqual(a[1], 3)
    self.assertEqual(a[2], 6)
    self.assertEqual(a[-1], 6)
    self.assertEqual(a.a, 1)
    self.assertEqual(a.b, 3)
    self.assertEqual(a.c, 6)
    with self.assertRaises(AttributeError):
      a.d  # pylint: disable=pointless-statement
    self.assertEqual(a["a"], 1)
    self.assertEqual(a["b"], 3)
    self.assertEqual(a["c"], 6)
    with self.assertRaises(KeyError):
      a["d"]  # pylint: disable=pointless-statement

    # New axis = None
    self.assertArrayEqual(a, [1, 3, 6])
    self.assertArrayEqual(a[np.newaxis], [[1, 3, 6]])
    self.assertArrayEqual(a[None], [[1, 3, 6]])
    self.assertArrayEqual(a[None, :], [[1, 3, 6]])
    self.assertArrayEqual(a[:, None], [[1], [3], [6]])
    self.assertArrayEqual(a[None, :, None], [[[1], [3], [6]]])
    self.assertArrayEqual(a[None, a % 3 == 0, None], [[[3], [6]]])
    self.assertArrayEqual(a[None][None], [[[1, 3, 6]]])
    self.assertArrayEqual(a[None][0], [1, 3, 6])
    self.assertEqual(a[None, 0], 1)
    self.assertEqual(a[None, "a"], 1)
    self.assertEqual(a[None][0].a, 1)
    self.assertEqual(a[None][0, "b"], 3)

    # range slicing
    self.assertArrayEqual(a[0:2], [1, 3])
    self.assertArrayEqual(a[1:3], [3, 6])
    self.assertArrayEqual(a[0:2:], [1, 3])
    self.assertArrayEqual(a[0:2:1], [1, 3])
    self.assertArrayEqual(a[::2], [1, 6])
    self.assertArrayEqual(a[::-1], [6, 3, 1])
    self.assertEqual(a[1:3][0], 3)
    self.assertEqual(a[1:3].b, 3)
    self.assertEqual(a[1:3].c, 6)

    # list slicing
    self.assertArrayEqual(a[[0, 0]], [1, 1])
    self.assertArrayEqual(a[[0, 1]], [1, 3])
    self.assertArrayEqual(a[[1, 0]], [3, 1])
    self.assertArrayEqual(a[[1, 2]], [3, 6])
    self.assertArrayEqual(a[np.array([0, 2])], [1, 6])
    self.assertEqual(a[[1, 2]].b, 3)
    self.assertEqual(a[[2, 0]].c, 6)
    with self.assertRaises(TypeError):
      # Duplicates lead to unnamed dimensions.
      a[[0, 0]].a  # pylint: disable=pointless-statement

    # Writes through index, slice, fancy index and attribute must all agree.
    a[1] = 4
    self.assertEqual(a[1], 4)
    self.assertEqual(a.b, 4)
    self.assertEqual(a["b"], 4)

    a[1:2] = 2
    self.assertEqual(a[1], 2)
    self.assertEqual(a.b, 2)
    self.assertEqual(a["b"], 2)

    a[[1]] = 3
    self.assertEqual(a[1], 3)
    self.assertEqual(a.b, 3)
    self.assertEqual(a["b"], 3)

    a.b = 5
    self.assertEqual(a[1], 5)
    self.assertEqual(a.b, 5)
    self.assertEqual(a["b"], 5)

  def test_empty_array(self):
    named_array.NamedNumpyArray([], [None, ["a", "b"]])
    with self.assertRaises(ValueError):
      # Must be the right length.
      named_array.NamedNumpyArray([], [["a", "b"]])
    with self.assertRaises(ValueError):
      # Returning an empty slice is not supported, and it's not clear how or
      # even if it should be supported.
      named_array.NamedNumpyArray([], [["a", "b"], None])
    with self.assertRaises(ValueError):
      # Scalar arrays are unsupported.
      named_array.NamedNumpyArray(1, [])

  def test_named_array_multi_first(self):
    # Names on the first (row) dimension only.
    a = named_array.NamedNumpyArray([[1, 3], [6, 8]], [["a", "b"], None])
    self.assertArrayEqual(a.a, [1, 3])
    self.assertArrayEqual(a[1], [6, 8])
    self.assertArrayEqual(a["b"], [6, 8])
    self.assertArrayEqual(a[::-1], [[6, 8], [1, 3]])
    self.assertArrayEqual(a[::-1][::-1], [[1, 3], [6, 8]])
    self.assertArrayEqual(a[::-1, ::-1], [[8, 6], [3, 1]])
    self.assertArrayEqual(a[::-1][0], [6, 8])
    self.assertArrayEqual(a[::-1, 0], [6, 1])
    self.assertArrayEqual(a[::-1, 1], [8, 3])
    self.assertArrayEqual(a[::-1].a, [1, 3])
    self.assertArrayEqual(a[::-1].a[0], 1)
    self.assertArrayEqual(a[::-1].b, [6, 8])
    self.assertArrayEqual(a[[0, 0]], [[1, 3], [1, 3]])
    with self.assertRaises(TypeError):
      a[[0, 0]].a  # pylint: disable=pointless-statement
    self.assertEqual(a[0, 1], 3)
    self.assertEqual(a[(0, 1)], 3)
    self.assertEqual(a["a", 0], 1)
    self.assertEqual(a["b", 0], 6)
    self.assertEqual(a["b", 1], 8)
    self.assertEqual(a.a[0], 1)
    self.assertArrayEqual(a[a > 2], [3, 6, 8])
    self.assertArrayEqual(a[a % 3 == 0], [3, 6])
    with self.assertRaises(TypeError):
      a[0].a  # pylint: disable=pointless-statement

    # New axis = None
    self.assertArrayEqual(a, [[1, 3], [6, 8]])
    self.assertArrayEqual(a[np.newaxis], [[[1, 3], [6, 8]]])
    self.assertArrayEqual(a[None], [[[1, 3], [6, 8]]])
    self.assertArrayEqual(a[None, :], [[[1, 3], [6, 8]]])
    self.assertArrayEqual(a[None, "a"], [[1, 3]])
    self.assertArrayEqual(a[:, None], [[[1, 3]], [[6, 8]]])
    self.assertArrayEqual(a[None, :, None], [[[[1, 3]], [[6, 8]]]])
    self.assertArrayEqual(a[None, 0, None], [[[1, 3]]])
    self.assertArrayEqual(a[None, "a", None], [[[1, 3]]])
    self.assertArrayEqual(a[None][None], [[[[1, 3], [6, 8]]]])
    self.assertArrayEqual(a[None][0], [[1, 3], [6, 8]])
    self.assertArrayEqual(a[None][0].a, [1, 3])
    self.assertEqual(a[None][0].a[0], 1)
    self.assertEqual(a[None][0, "b", 1], 8)

  def test_named_array_multi_second(self):
    # Names on the second (column) dimension only.
    a = named_array.NamedNumpyArray([[1, 3], [6, 8]], [None, ["a", "b"]])
    self.assertArrayEqual(a[0], [1, 3])
    self.assertEqual(a[0, 1], 3)
    self.assertEqual(a[0, "a"], 1)
    self.assertEqual(a[0, "b"], 3)
    self.assertEqual(a[1, "b"], 8)
    self.assertEqual(a[0].a, 1)
    self.assertArrayEqual(a[a > 2], [3, 6, 8])
    self.assertArrayEqual(a[a % 3 == 0], [3, 6])
    with self.assertRaises(TypeError):
      a.a  # pylint: disable=pointless-statement
    self.assertArrayEqual(a[None, :, "a"], [[1, 6]])

  def test_masking(self):
    a = named_array.NamedNumpyArray([[1, 2, 3, 4], [5, 6, 7, 8]],
                                    [None, list("abcd")])
    self.assertArrayEqual(a[a > 2], [3, 4, 5, 6, 7, 8])
    self.assertArrayEqual(a[a < 4], [1, 2, 3])
    self.assertArrayEqual(a[a % 2 == 0], [2, 4, 6, 8])
    self.assertArrayEqual(a[a % 3 == 0], [3, 6])

  def test_slicing(self):
    a = named_array.NamedNumpyArray([1, 2, 3, 4, 5], list("abcde"))
    self.assertArrayEqual(a[:], [1, 2, 3, 4, 5])
    self.assertArrayEqual(a[::], [1, 2, 3, 4, 5])
    self.assertArrayEqual(a[::2], [1, 3, 5])
    self.assertArrayEqual(a[::-1], [5, 4, 3, 2, 1])
    self.assertEqual(a[:].a, 1)
    self.assertEqual(a[::].b, 2)
    self.assertEqual(a[::2].c, 3)
    with self.assertRaises(AttributeError):
      a[::2].d  # pylint: disable=pointless-statement
    self.assertEqual(a[::-1].e, 5)
    self.assertArrayEqual(a[a % 2 == 0], [2, 4])
    self.assertEqual(a[a % 2 == 0].b, 2)

    a = named_array.NamedNumpyArray([[1, 2, 3, 4], [5, 6, 7, 8]],
                                    [None, list("abcd")])
    self.assertArrayEqual(a[:], [[1, 2, 3, 4], [5, 6, 7, 8]])
    self.assertArrayEqual(a[::], [[1, 2, 3, 4], [5, 6, 7, 8]])
    self.assertArrayEqual(a[:, :], [[1, 2, 3, 4], [5, 6, 7, 8]])
    self.assertArrayEqual(a[:, ...], [[1, 2, 3, 4], [5, 6, 7, 8]])
    self.assertArrayEqual(a[..., ::], [[1, 2, 3, 4], [5, 6, 7, 8]])
    self.assertArrayEqual(a[:, ::2], [[1, 3], [5, 7]])
    self.assertArrayEqual(a[::-1], [[5, 6, 7, 8], [1, 2, 3, 4]])
    self.assertArrayEqual(a[..., ::-1], [[4, 3, 2, 1], [8, 7, 6, 5]])
    self.assertArrayEqual(a[:, ::-1], [[4, 3, 2, 1], [8, 7, 6, 5]])
    self.assertArrayEqual(a[:, ::-2], [[4, 2], [8, 6]])
    self.assertArrayEqual(a[:, -2::-2], [[3, 1], [7, 5]])
    self.assertArrayEqual(a[::-1, -2::-2], [[7, 5], [3, 1]])
    self.assertArrayEqual(a[..., 0, 0], 1)  # weird scalar arrays...

    a = named_array.NamedNumpyArray(
        [[[[0, 1], [2, 3]], [[4, 5], [6, 7]]],
         [[[8, 9], [10, 11]], [[12, 13], [14, 15]]]],
        [["a", "b"], ["c", "d"], ["e", "f"], ["g", "h"]])
    self.assertEqual(a.a.c.e.g, 0)
    self.assertEqual(a.b.c.f.g, 10)
    self.assertEqual(a.b.d.f.h, 15)
    self.assertArrayEqual(a[0, ..., 0], [[0, 2], [4, 6]])
    self.assertArrayEqual(a[0, ..., 1], [[1, 3], [5, 7]])
    self.assertArrayEqual(a[0, 0, ..., 1], [1, 3])
    self.assertArrayEqual(a[0, ..., 1, 1], [3, 7])
    self.assertArrayEqual(a[..., 1, 1], [[3, 7], [11, 15]])
    self.assertArrayEqual(a[1, 0, ...], [[8, 9], [10, 11]])
    self.assertArrayEqual(a["a", ..., "g"], [[0, 2], [4, 6]])
    self.assertArrayEqual(a["a", ...], [[[0, 1], [2, 3]], [[4, 5], [6, 7]]])
    self.assertArrayEqual(a[..., "g"], [[[0, 2], [4, 6]], [[8, 10], [12, 14]]])
    self.assertArrayEqual(a["a", "c"], [[0, 1], [2, 3]])
    self.assertArrayEqual(a["a", ...].c, [[0, 1], [2, 3]])
    self.assertArrayEqual(a["a", ..., "g"].c, [0, 2])
    with self.assertRaises(TypeError):
      a[np.array([[0, 1], [0, 1]])]  # pylint: disable=pointless-statement, expression-not-assigned
    with self.assertRaises(IndexError):
      a[..., 0, ...]  # pylint: disable=pointless-statement

  def test_string(self):
    a = named_array.NamedNumpyArray([1, 3, 6], ["a", "b", "c"], dtype=np.int32)
    self.assertEqual(str(a), "[1 3 6]")
    self.assertEqual(repr(a), ("NamedNumpyArray([1, 3, 6], ['a', 'b', 'c'], "
                               "dtype=int32)"))

    a = named_array.NamedNumpyArray([[1, 3], [6, 8]], [None, ["a", "b"]])
    self.assertEqual(str(a), "[[1 3]\n [6 8]]")
    self.assertEqual(repr(a), ("NamedNumpyArray([[1, 3],\n"
                               "                 [6, 8]], [None, ['a', 'b']])"))

    a = named_array.NamedNumpyArray([[1, 3], [6, 8]], [["a", "b"], None])
    self.assertEqual(str(a), "[[1 3]\n [6 8]]")
    self.assertEqual(repr(a), ("NamedNumpyArray([[1, 3],\n"
                               "                 [6, 8]], [['a', 'b'], None])"))

    a = named_array.NamedNumpyArray([0, 0, 0, 50, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                                    [str(i) for i in range(13)], dtype=np.int32)
    numpy_repr = np.array_repr(a)
    if "\n" in numpy_repr:  # ie numpy > 1.14
      self.assertEqual(repr(a), """
NamedNumpyArray([ 0,  0,  0, 50,  0,  0,  0,  0,  0,  0,  0,  0,  0],
                ['0', '1', '2', '3', '4', '...', '8', '9', '10', '11', '12'],
                dtype=int32)""".strip())  # Keep the middle newlines.
    else:
      self.assertEqual(repr(a), (
          "NamedNumpyArray("
          "[ 0,  0,  0, 50,  0,  0,  0,  0,  0,  0,  0,  0,  0], "
          "['0', '1', '2', '3', '4', '...', '8', '9', '10', '11', '12'], "
          "dtype=int32)"))  # Note the lack of newlines.

    a = named_array.NamedNumpyArray([list(range(50))] * 50,
                                    [None, ["a%s" % i for i in range(50)]])
    self.assertIn("49", str(a))
    self.assertIn("49", repr(a))
    self.assertIn("a4", repr(a))
    self.assertIn("a49", repr(a))

    a = named_array.NamedNumpyArray([list(range(50))] * 50,
                                    [["a%s" % i for i in range(50)], None])
    self.assertIn("49", str(a))
    self.assertIn("49", repr(a))
    self.assertIn("a4", repr(a))
    self.assertIn("a49", repr(a))

  def test_pickle(self):
    # A round trip through pickle must preserve both values and names.
    arr = named_array.NamedNumpyArray([1, 3, 6], ["a", "b", "c"])
    pickled = pickle.loads(pickle.dumps(arr))
    self.assertTrue(np.all(arr == pickled))
    self.assertEqual(repr(pickled),
                     "NamedNumpyArray([1, 3, 6], ['a', 'b', 'c'])")
# Run the test suite when executed as a script.
if __name__ == "__main__":
  absltest.main()
| pysc2-master | pysc2/lib/named_array_test.py |
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Render feature layers from SC2 Observation protos into numpy arrays."""
# pylint: disable=g-complex-comprehension
import collections
import enum
import random
from absl import logging
import numpy as np
from pysc2.lib import actions
from pysc2.lib import colors
from pysc2.lib import named_array
from pysc2.lib import point
from pysc2.lib import static_data
from pysc2.lib import stopwatch
from pysc2.lib import transform
from s2clientprotocol import raw_pb2 as sc_raw
from s2clientprotocol import sc2api_pb2 as sc_pb
# Shared stopwatch, used below to profile the unpack functions via
# @sw.decorate.
sw = stopwatch.sw

# Small float epsilon constant; its consumers are elsewhere in this module.
EPSILON = 1e-5
class FeatureType(enum.Enum):
  """How a feature layer's values should be interpreted.

  SCALAR layers hold magnitudes (e.g. unit_hit_points); CATEGORICAL layers
  hold class labels (e.g. unit_type).
  """
  SCALAR = 1
  CATEGORICAL = 2
class PlayerRelative(enum.IntEnum):
  """The values for the `player_relative` feature layers.

  Alliance of each unit relative to the player making the observation.
  """
  NONE = 0
  SELF = 1
  ALLY = 2
  NEUTRAL = 3
  ENEMY = 4
class Visibility(enum.IntEnum):
  """Values for the `visibility` feature layers."""
  # Note: the visibility_map layers below are declared with scale=4, so a
  # fourth (unnamed) value may appear in the raw data.
  HIDDEN = 0
  SEEN = 1
  VISIBLE = 2
class Effects(enum.IntEnum):
  """Values for the `effects` feature layer."""
  # pylint: disable=invalid-name
  # CamelCase members intentionally mirror the in-game effect names, hence
  # the pylint disable.
  none = 0
  PsiStorm = 1
  GuardianShield = 2
  TemporalFieldGrowing = 3
  TemporalField = 4
  ThermalLance = 5
  ScannerSweep = 6
  NukeDot = 7
  LiberatorDefenderZoneSetup = 8
  LiberatorDefenderZone = 9
  BlindingCloud = 10
  CorrosiveBile = 11
  LurkerSpines = 12
  # pylint: enable=invalid-name
class ScoreCumulative(enum.IntEnum):
  """Indices into the `score_cumulative` observation.

  One entry per element of the 1-D `score_cumulative` vector.
  """
  score = 0
  idle_production_time = 1
  idle_worker_time = 2
  total_value_units = 3
  total_value_structures = 4
  killed_value_units = 5
  killed_value_structures = 6
  collected_minerals = 7
  collected_vespene = 8
  collection_rate_minerals = 9
  collection_rate_vespene = 10
  spent_minerals = 11
  spent_vespene = 12
class ScoreByCategory(enum.IntEnum):
  """Indices for the `score_by_category` observation's first dimension.

  The second dimension is indexed by `ScoreCategories`.
  """
  food_used = 0
  killed_minerals = 1
  killed_vespene = 2
  lost_minerals = 3
  lost_vespene = 4
  friendly_fire_minerals = 5
  friendly_fire_vespene = 6
  used_minerals = 7
  used_vespene = 8
  total_used_minerals = 9
  total_used_vespene = 10
class ScoreCategories(enum.IntEnum):
  """Indices for the `score_by_category` observation's second dimension.

  The first dimension is indexed by `ScoreByCategory`.
  """
  none = 0
  army = 1
  economy = 2
  technology = 3
  upgrade = 4
class ScoreByVital(enum.IntEnum):
  """Indices for the `score_by_vital` observation's first dimension.

  The second dimension is indexed by `ScoreVitals`.
  """
  total_damage_dealt = 0
  total_damage_taken = 1
  total_healed = 2
class ScoreVitals(enum.IntEnum):
  """Indices for the `score_by_vital` observation's second dimension.

  The first dimension is indexed by `ScoreByVital`.
  """
  life = 0
  shields = 1
  energy = 2
class Player(enum.IntEnum):
  """Indices into the 1-D `player` observation vector."""
  player_id = 0
  minerals = 1
  vespene = 2
  food_used = 3  # "food" is SC2's supply mechanic.
  food_cap = 4
  food_army = 5
  food_workers = 6
  idle_worker_count = 7
  army_count = 8
  warp_gate_count = 9
  larva_count = 10
class UnitLayer(enum.IntEnum):
  """Indices into the unit layers in the observations.

  NOTE(review): presumably the per-unit rows of the select/multi-select
  observations — confirm against the observation builder.
  """
  unit_type = 0
  player_relative = 1
  health = 2
  shields = 3
  energy = 4
  transport_slots_taken = 5
  build_progress = 6
class UnitCounts(enum.IntEnum):
  """Indices into the `unit_counts` observations.

  Each row is a (unit_type, count) pair.
  """
  unit_type = 0
  count = 1
class FeatureUnit(enum.IntEnum):
  """Indices for the `feature_unit` observations."""
  unit_type = 0
  alliance = 1
  health = 2
  shield = 3
  energy = 4
  cargo_space_taken = 5
  build_progress = 6
  health_ratio = 7
  shield_ratio = 8
  energy_ratio = 9
  display_type = 10
  owner = 11
  # Position: screen coordinates for feature units; world coordinates
  # (possibly discretized via raw_resolution) for raw units.
  x = 12
  y = 13
  facing = 14
  radius = 15
  cloak = 16
  is_selected = 17
  is_blip = 18
  is_powered = 19
  mineral_contents = 20
  vespene_contents = 21
  cargo_space_max = 22
  assigned_harvesters = 23
  ideal_harvesters = 24
  weapon_cooldown = 25
  order_length = 26  # If zero, the unit is idle.
  order_id_0 = 27
  order_id_1 = 28
  tag = 29  # Unique identifier for a unit (only populated for raw units).
  hallucination = 30
  buff_id_0 = 31
  buff_id_1 = 32
  addon_unit_type = 33
  active = 34
  is_on_screen = 35
  order_progress_0 = 36
  order_progress_1 = 37
  order_id_2 = 38
  order_id_3 = 39
  is_in_cargo = 40
  buff_duration_remain = 41
  buff_duration_max = 42
  attack_upgrade_level = 43
  armor_upgrade_level = 44
  shield_upgrade_level = 45
class EffectPos(enum.IntEnum):
  """Positions of the active effects.

  Column indices into each active effect's row.
  """
  effect = 0
  alliance = 1
  owner = 2
  radius = 3
  x = 4
  y = 5
class Radar(enum.IntEnum):
  """Positions of the Sensor towers.

  Column indices: (x, y) position and radius of each tower.
  """
  x = 0
  y = 1
  radius = 2
class ProductionQueue(enum.IntEnum):
  """Indices for the `production_queue` observations.

  Each row is an (ability_id, build_progress) pair.
  """
  ability_id = 0
  build_progress = 1
class Feature(collections.namedtuple(
    "Feature", ["index", "name", "layer_set", "full_name", "scale", "type",
                "palette", "clip"])):
  """Define properties of a feature layer.

  Attributes:
    index: Index of this layer into the set of layers.
    name: The name of the layer within the set.
    layer_set: Which set of feature layers to look at in the observation proto.
    full_name: The full name including for visualization.
    scale: Max value (+1) of this layer, used to scale the values.
    type: A FeatureType for scalar vs categorical.
    palette: A color palette for rendering.
    clip: Whether to clip the values for coloring.
  """
  __slots__ = ()

  # Maps the proto's bits_per_pixel to the numpy dtype used for decoding.
  dtypes = {
      1: np.uint8,
      8: np.uint8,
      16: np.uint16,
      32: np.int32,
  }

  def unpack(self, obs):
    """Return a correctly shaped numpy array for this feature."""
    # `layer_set` selects the proto field (e.g. "renders"/"minimap_renders");
    # `name` selects the individual layer within that set.
    planes = getattr(obs.feature_layer_data, self.layer_set)
    plane = getattr(planes, self.name)
    return self.unpack_layer(plane)

  @staticmethod
  @sw.decorate
  def unpack_layer(plane):
    """Return a correctly shaped numpy array given the feature layer bytes."""
    size = point.Point.build(plane.size)
    if size == (0, 0):
      # New layer that isn't implemented in this SC2 version.
      return None
    data = np.frombuffer(plane.data, dtype=Feature.dtypes[plane.bits_per_pixel])
    if plane.bits_per_pixel == 1:
      # Bit-packed layers arrive 8 pixels per byte; expand to one per byte.
      data = np.unpackbits(data)
      if data.shape[0] != size.x * size.y:
        # This could happen if the correct length isn't a multiple of 8, leading
        # to some padding bits at the end of the string which are incorrectly
        # interpreted as data.
        data = data[:size.x * size.y]
    return data.reshape(size.y, size.x)

  @staticmethod
  @sw.decorate
  def unpack_rgb_image(plane):
    """Return a correctly shaped numpy array given the image bytes."""
    assert plane.bits_per_pixel == 24, "{} != 24".format(plane.bits_per_pixel)
    size = point.Point.build(plane.size)
    data = np.frombuffer(plane.data, dtype=np.uint8)
    return data.reshape(size.y, size.x, 3)

  @sw.decorate
  def color(self, plane):
    """Map layer values to colors via this feature's palette."""
    # Clip values into the palette's index range when requested.
    if self.clip:
      plane = np.clip(plane, 0, self.scale - 1)
    return self.palette[plane]
class ScreenFeatures(collections.namedtuple("ScreenFeatures", [
    "height_map", "visibility_map", "creep", "power", "player_id",
    "player_relative", "unit_type", "selected", "unit_hit_points",
    "unit_hit_points_ratio", "unit_energy", "unit_energy_ratio", "unit_shields",
    "unit_shields_ratio", "unit_density", "unit_density_aa", "effects",
    "hallucinations", "cloaked", "blip", "buffs", "buff_duration", "active",
    "build_progress", "pathable", "buildable", "placeholder"])):
  """The set of screen feature layers."""
  __slots__ = ()

  def __new__(cls, **kwargs):
    # Each kwarg maps a layer name to a (scale, type, palette, clip) tuple;
    # expand each into a full Feature whose index is the field's position in
    # this namedtuple.
    feats = {}
    for name, (scale, type_, palette, clip) in kwargs.items():
      feats[name] = Feature(
          index=ScreenFeatures._fields.index(name),
          name=name,
          layer_set="renders",
          full_name="screen " + name,
          scale=scale,
          type=type_,
          # Palettes may be builder functions parameterized by the scale.
          palette=palette(scale) if callable(palette) else palette,
          clip=clip)
    return super(ScreenFeatures, cls).__new__(cls, **feats)  # pytype: disable=missing-parameter
class MinimapFeatures(collections.namedtuple("MinimapFeatures", [
    "height_map", "visibility_map", "creep", "camera", "player_id",
    "player_relative", "selected", "unit_type", "alerts", "pathable",
    "buildable"])):
  """The set of minimap feature layers."""
  __slots__ = ()

  def __new__(cls, **kwargs):
    # Each kwarg maps a layer name to a (scale, type, palette) tuple; minimap
    # layers are never clipped, unlike screen layers.
    feats = {}
    for name, (scale, type_, palette) in kwargs.items():
      feats[name] = Feature(
          index=MinimapFeatures._fields.index(name),
          name=name,
          layer_set="minimap_renders",
          full_name="minimap " + name,
          scale=scale,
          type=type_,
          # Palettes may be builder functions parameterized by the scale.
          palette=palette(scale) if callable(palette) else palette,
          clip=False)
    return super(MinimapFeatures, cls).__new__(cls, **feats)  # pytype: disable=missing-parameter
# Every screen feature layer, declared as (scale, type, palette, clip) and
# expanded into `Feature` instances by ScreenFeatures.__new__.
SCREEN_FEATURES = ScreenFeatures(
    height_map=(256, FeatureType.SCALAR, colors.height_map, False),
    visibility_map=(4, FeatureType.CATEGORICAL,
                    colors.VISIBILITY_PALETTE, False),
    creep=(2, FeatureType.CATEGORICAL, colors.CREEP_PALETTE, False),
    power=(2, FeatureType.CATEGORICAL, colors.POWER_PALETTE, False),
    player_id=(17, FeatureType.CATEGORICAL,
               colors.PLAYER_ABSOLUTE_PALETTE, False),
    player_relative=(5, FeatureType.CATEGORICAL,
                     colors.PLAYER_RELATIVE_PALETTE, False),
    unit_type=(max(static_data.UNIT_TYPES) + 1, FeatureType.CATEGORICAL,
               colors.unit_type, False),
    selected=(2, FeatureType.CATEGORICAL, colors.SELECTED_PALETTE, False),
    unit_hit_points=(1600, FeatureType.SCALAR, colors.hot, True),
    unit_hit_points_ratio=(256, FeatureType.SCALAR, colors.hot, False),
    unit_energy=(1000, FeatureType.SCALAR, colors.hot, True),
    unit_energy_ratio=(256, FeatureType.SCALAR, colors.hot, False),
    unit_shields=(1000, FeatureType.SCALAR, colors.hot, True),
    unit_shields_ratio=(256, FeatureType.SCALAR, colors.hot, False),
    unit_density=(16, FeatureType.SCALAR, colors.hot, True),
    unit_density_aa=(256, FeatureType.SCALAR, colors.hot, False),
    effects=(16, FeatureType.CATEGORICAL, colors.effects, False),
    hallucinations=(2, FeatureType.CATEGORICAL, colors.POWER_PALETTE, False),
    cloaked=(2, FeatureType.CATEGORICAL, colors.POWER_PALETTE, False),
    blip=(2, FeatureType.CATEGORICAL, colors.POWER_PALETTE, False),
    buffs=(max(static_data.BUFFS) + 1, FeatureType.CATEGORICAL,
           colors.buffs, False),
    buff_duration=(256, FeatureType.SCALAR, colors.hot, False),
    active=(2, FeatureType.CATEGORICAL, colors.POWER_PALETTE, False),
    build_progress=(256, FeatureType.SCALAR, colors.hot, False),
    pathable=(2, FeatureType.CATEGORICAL, colors.winter, False),
    buildable=(2, FeatureType.CATEGORICAL, colors.winter, False),
    placeholder=(2, FeatureType.CATEGORICAL, colors.winter, False),
)
# Every minimap feature layer, declared as (scale, type, palette) and
# expanded into `Feature` instances by MinimapFeatures.__new__.
MINIMAP_FEATURES = MinimapFeatures(
    height_map=(256, FeatureType.SCALAR, colors.height_map),
    visibility_map=(4, FeatureType.CATEGORICAL, colors.VISIBILITY_PALETTE),
    creep=(2, FeatureType.CATEGORICAL, colors.CREEP_PALETTE),
    camera=(2, FeatureType.CATEGORICAL, colors.CAMERA_PALETTE),
    player_id=(17, FeatureType.CATEGORICAL, colors.PLAYER_ABSOLUTE_PALETTE),
    player_relative=(5, FeatureType.CATEGORICAL,
                     colors.PLAYER_RELATIVE_PALETTE),
    selected=(2, FeatureType.CATEGORICAL, colors.winter),
    unit_type=(max(static_data.UNIT_TYPES) + 1, FeatureType.CATEGORICAL,
               colors.unit_type),
    alerts=(2, FeatureType.CATEGORICAL, colors.winter),
    pathable=(2, FeatureType.CATEGORICAL, colors.winter),
    buildable=(2, FeatureType.CATEGORICAL, colors.winter),
)
def _to_point(dims):
  """Convert (width, height) or size -> point.Point."""
  assert dims

  if isinstance(dims, (tuple, list)):
    if len(dims) != 2:
      raise ValueError(
          "A two element tuple or list is expected here, got {}.".format(dims))
    width, height = int(dims[0]), int(dims[1])
    if width <= 0 or height <= 0:
      raise ValueError("Must specify +ve dims, got {}.".format(dims))
    return point.Point(width, height)

  # A single number means a square.
  size = int(dims)
  if size <= 0:
    raise ValueError(
        "Must specify a +ve value for size, got {}.".format(dims))
  return point.Point(size, size)
class Dimensions(object):
  """Screen and minimap dimensions configuration.

  Both screen and minimap must be specified. Sizes must be positive.
  Screen size must be greater than or equal to minimap size in both dimensions.

  Attributes:
    screen: A (width, height) int tuple or a single int to be used for both.
    minimap: A (width, height) int tuple or a single int to be used for both.
  """

  def __init__(self, screen=None, minimap=None):
    # Both arguments are mandatory; the None defaults only allow the
    # friendlier combined error message below.
    if not (screen and minimap):
      raise ValueError(
          "screen and minimap must both be set, screen={}, minimap={}".format(
              screen, minimap))
    self._screen = _to_point(screen)
    self._minimap = _to_point(minimap)

  @property
  def screen(self):
    return self._screen

  @property
  def minimap(self):
    return self._minimap

  def __repr__(self):
    return "Dimensions(screen={}, minimap={})".format(self.screen, self.minimap)

  def __eq__(self, other):
    if not isinstance(other, Dimensions):
      return False
    return self.screen == other.screen and self.minimap == other.minimap

  def __ne__(self, other):
    return not self == other
class AgentInterfaceFormat(object):
"""Observation and action interface format specific to a particular agent."""
def __init__(
self,
feature_dimensions=None,
rgb_dimensions=None,
raw_resolution=None,
action_space=None,
camera_width_world_units=None,
use_feature_units=False,
use_raw_units=False,
use_raw_actions=False,
max_raw_actions=512,
max_selected_units=30,
use_unit_counts=False,
use_camera_position=False,
show_cloaked=False,
show_burrowed_shadows=False,
show_placeholders=False,
hide_specific_actions=True,
action_delay_fn=None,
send_observation_proto=False,
crop_to_playable_area=False,
raw_crop_to_playable_area=False,
allow_cheating_layers=False,
add_cargo_to_units=False):
"""Initializer.
Args:
feature_dimensions: Feature layer `Dimension`s. Either this or
rgb_dimensions (or both) must be set.
rgb_dimensions: RGB `Dimension`. Either this or feature_dimensions
(or both) must be set.
raw_resolution: Discretize the `raw_units` observation's x,y to this
resolution. Default is the map_size.
action_space: If you pass both feature and rgb sizes, then you must also
specify which you want to use for your actions as an ActionSpace enum.
camera_width_world_units: The width of your screen in world units. If your
feature_dimensions.screen=(64, 48) and camera_width is 24, then each
px represents 24 / 64 = 0.375 world units in each of x and y.
It'll then represent a camera of size (24, 0.375 * 48) = (24, 18)
world units.
use_feature_units: Whether to include feature_unit observations.
use_raw_units: Whether to include raw unit data in observations. This
differs from feature_units because it includes units outside the
screen and hidden units, and because unit positions are given in
terms of world units instead of screen units.
use_raw_actions: [bool] Whether to use raw actions as the interface.
Same as specifying action_space=ActionSpace.RAW.
max_raw_actions: [int] Maximum number of raw actions
max_selected_units: [int] The maximum number of selected units in the
raw interface.
use_unit_counts: Whether to include unit_counts observation. Disabled by
default since it gives information outside the visible area.
use_camera_position: Whether to include the camera's position (in minimap
coordinates) in the observations.
show_cloaked: Whether to show limited information for cloaked units.
show_burrowed_shadows: Whether to show limited information for burrowed
units that leave a shadow on the ground (ie widow mines and moving
roaches and infestors).
show_placeholders: Whether to show buildings that are queued for
construction.
hide_specific_actions: [bool] Some actions (eg cancel) have many
specific versions (cancel this building, cancel that spell) and can
be represented in a more general form. If a specific action is
available, the general will also be available. If you set
`hide_specific_actions` to False, the specific versions will also be
available, but if it's True, the specific ones will be hidden.
Similarly, when transforming back, a specific action will be returned
as the general action. This simplifies the action space, though can
lead to some actions in replays not being exactly representable using
only the general actions.
action_delay_fn: A callable which when invoked returns a delay in game
loops to apply to a requested action. Defaults to None, meaning no
delays are added (actions will be executed on the next game loop,
hence with the minimum delay of 1).
send_observation_proto: Whether or not to send the raw observation
response proto in the observations.
crop_to_playable_area: Crop the feature layer minimap observations down
from the full map area to just the playable area. Also improves the
heightmap rendering.
raw_crop_to_playable_area: Crop the raw units to the playable area. This
means units will show up closer to the origin with less dead space
around their valid locations.
allow_cheating_layers: Show the unit types and potentially other cheating
layers on the minimap.
add_cargo_to_units: Whether to add the units that are currently in cargo
to the feature_units and raw_units lists.
Raises:
ValueError: if the parameters are inconsistent.
"""
if not (feature_dimensions or rgb_dimensions or use_raw_units):
raise ValueError("Must set either the feature layer or rgb dimensions, "
"or use raw units.")
if action_space:
if not isinstance(action_space, actions.ActionSpace):
raise ValueError("action_space must be of type ActionSpace.")
if action_space == actions.ActionSpace.RAW:
use_raw_actions = True
elif ((action_space == actions.ActionSpace.FEATURES and
not feature_dimensions) or
(action_space == actions.ActionSpace.RGB and
not rgb_dimensions)):
raise ValueError(
"Action space must match the observations, action space={}, "
"feature_dimensions={}, rgb_dimensions={}".format(
action_space, feature_dimensions, rgb_dimensions))
else:
if use_raw_actions:
action_space = actions.ActionSpace.RAW
elif feature_dimensions and rgb_dimensions:
raise ValueError(
"You must specify the action space if you have both screen and "
"rgb observations.")
elif feature_dimensions:
action_space = actions.ActionSpace.FEATURES
else:
action_space = actions.ActionSpace.RGB
if raw_resolution:
raw_resolution = _to_point(raw_resolution)
if use_raw_actions:
if not use_raw_units:
raise ValueError(
"You must set use_raw_units if you intend to use_raw_actions")
if action_space != actions.ActionSpace.RAW:
raise ValueError(
"Don't specify both an action_space and use_raw_actions.")
if (rgb_dimensions and
(rgb_dimensions.screen.x < rgb_dimensions.minimap.x or
rgb_dimensions.screen.y < rgb_dimensions.minimap.y)):
raise ValueError(
"RGB Screen (%s) can't be smaller than the minimap (%s)." % (
rgb_dimensions.screen, rgb_dimensions.minimap))
self._feature_dimensions = feature_dimensions
self._rgb_dimensions = rgb_dimensions
self._action_space = action_space
self._camera_width_world_units = camera_width_world_units or 24
self._use_feature_units = use_feature_units
self._use_raw_units = use_raw_units
self._raw_resolution = raw_resolution
self._use_raw_actions = use_raw_actions
self._max_raw_actions = max_raw_actions
self._max_selected_units = max_selected_units
self._use_unit_counts = use_unit_counts
self._use_camera_position = use_camera_position
self._show_cloaked = show_cloaked
self._show_burrowed_shadows = show_burrowed_shadows
self._show_placeholders = show_placeholders
self._hide_specific_actions = hide_specific_actions
self._action_delay_fn = action_delay_fn
self._send_observation_proto = send_observation_proto
self._add_cargo_to_units = add_cargo_to_units
self._crop_to_playable_area = crop_to_playable_area
self._raw_crop_to_playable_area = raw_crop_to_playable_area
self._allow_cheating_layers = allow_cheating_layers
if action_space == actions.ActionSpace.FEATURES:
self._action_dimensions = feature_dimensions
else:
self._action_dimensions = rgb_dimensions
  # Read-only accessors mirroring the constructor arguments. The one mutable
  # exception is `raw_resolution`, which has a setter because `Features`
  # defaults it to the map size when it wasn't specified explicitly.

  @property
  def feature_dimensions(self):
    return self._feature_dimensions

  @property
  def rgb_dimensions(self):
    return self._rgb_dimensions

  @property
  def action_space(self):
    return self._action_space

  @property
  def camera_width_world_units(self):
    return self._camera_width_world_units

  @property
  def use_feature_units(self):
    return self._use_feature_units

  @property
  def use_raw_units(self):
    return self._use_raw_units

  @property
  def raw_resolution(self):
    return self._raw_resolution

  @raw_resolution.setter
  def raw_resolution(self, value):
    # Settable so `Features.__init__` can fill in a default from the map size.
    self._raw_resolution = value

  @property
  def use_raw_actions(self):
    return self._use_raw_actions

  @property
  def max_raw_actions(self):
    return self._max_raw_actions

  @property
  def max_selected_units(self):
    return self._max_selected_units

  @property
  def use_unit_counts(self):
    return self._use_unit_counts

  @property
  def use_camera_position(self):
    return self._use_camera_position

  @property
  def show_cloaked(self):
    return self._show_cloaked

  @property
  def show_burrowed_shadows(self):
    return self._show_burrowed_shadows

  @property
  def show_placeholders(self):
    return self._show_placeholders

  @property
  def hide_specific_actions(self):
    return self._hide_specific_actions

  @property
  def action_delay_fn(self):
    return self._action_delay_fn

  @property
  def send_observation_proto(self):
    return self._send_observation_proto

  @property
  def add_cargo_to_units(self):
    return self._add_cargo_to_units

  @property
  def action_dimensions(self):
    # Either the feature or the rgb dimensions, depending on action_space.
    return self._action_dimensions

  @property
  def crop_to_playable_area(self):
    return self._crop_to_playable_area

  @property
  def raw_crop_to_playable_area(self):
    return self._raw_crop_to_playable_area

  @property
  def allow_cheating_layers(self):
    return self._allow_cheating_layers
def parse_agent_interface_format(
    feature_screen=None,
    feature_minimap=None,
    rgb_screen=None,
    rgb_minimap=None,
    action_space=None,
    action_delays=None,
    **kwargs):
  """Build an `AgentInterfaceFormat` from keyword arguments.

  Convenient when the configuration comes from dictionaries or command-line
  flags. The feature_* and rgb_* values define the corresponding spatial
  observation dimensions and accept:
    * None or 0 to disable that spatial observation.
    * A single int for a square observation with that side length.
    * A (int, int) tuple for a rectangular (width, height) observation.

  Args:
    feature_screen: If specified, so must feature_minimap be.
    feature_minimap: If specified, so must feature_screen be.
    rgb_screen: If specified, so must rgb_minimap be.
    rgb_minimap: If specified, so must rgb_screen be.
    action_space: ["FEATURES", "RGB", "RAW"].
    action_delays: List of relative frequencies for each of [1, 2, 3, ...]
      game loop delays on executed actions. Only used when the environment
      is non-realtime. Intended to simulate the delays which can be
      experienced when playing in realtime. Note that 1 is the minimum
      possible delay; as actions can only ever be executed on a subsequent
      game loop.
    **kwargs: Anything else is passed through to AgentInterfaceFormat.

  Returns:
    An `AgentInterfaceFormat` object.

  Raises:
    ValueError: If an invalid parameter is specified.
  """
  feature_dimensions = None
  if feature_screen or feature_minimap:
    feature_dimensions = Dimensions(feature_screen, feature_minimap)

  rgb_dimensions = None
  if rgb_screen or rgb_minimap:
    rgb_dimensions = Dimensions(rgb_screen, rgb_minimap)

  def _delay_sampler(frequencies):
    """Relative delay frequencies -> fn returning a sampled game loop delay."""
    if not frequencies:
      return None
    total = sum(frequencies)
    cumulative = np.cumsum([f / total for f in frequencies])

    def sample_delay():
      # Nudge the draw below 1.0 so it always lands inside the last bucket.
      draw = random.uniform(0, 1) - EPSILON
      for index, bound in enumerate(cumulative):
        if draw <= bound:
          return index + 1
      raise ValueError("Failed to sample action delay??")

    return sample_delay

  return AgentInterfaceFormat(
      feature_dimensions=feature_dimensions,
      rgb_dimensions=rgb_dimensions,
      action_space=(action_space and actions.ActionSpace[action_space.upper()]),
      action_delay_fn=_delay_sampler(action_delays),
      **kwargs)
def features_from_game_info(game_info, agent_interface_format=None,
                            map_name=None, **kwargs):
  """Construct a Features object using data extracted from game info.

  Args:
    game_info: A `sc_pb.ResponseGameInfo` from the game.
    agent_interface_format: an optional AgentInterfaceFormat.
    map_name: an optional map name, which overrides the one in game_info.
    **kwargs: Anything else is passed through to AgentInterfaceFormat. It's an
      error to send any kwargs if you pass an agent_interface_format.

  Returns:
    A features object matching the specified parameterisation.

  Raises:
    ValueError: if you pass both agent_interface_format and kwargs.
    ValueError: if you pass an agent_interface_format that doesn't match
      game_info's resolutions.
  """
  if isinstance(agent_interface_format, sc_pb.InterfaceOptions):
    return Passthrough()

  map_name = map_name or game_info.map_name

  options = game_info.options

  # Resolutions as reported by the game itself.
  feature_dimensions = None
  camera_width_world_units = None
  if options.HasField("feature_layer"):
    fl = options.feature_layer
    feature_dimensions = Dimensions(
        screen=(fl.resolution.x, fl.resolution.y),
        minimap=(fl.minimap_resolution.x, fl.minimap_resolution.y))
    camera_width_world_units = fl.width

  rgb_dimensions = None
  if options.HasField("render"):
    render = options.render
    rgb_dimensions = Dimensions(
        screen=(render.resolution.x, render.resolution.y),
        minimap=(render.minimap_resolution.x, render.minimap_resolution.y))

  map_size = game_info.start_raw.map_size

  requested_races = {
      info.player_id: info.race_requested for info in game_info.player_info
      if info.type != sc_pb.Observer}

  if agent_interface_format:
    if kwargs:
      raise ValueError(
          "Either give an agent_interface_format or kwargs, not both.")
    aif = agent_interface_format
    # The supplied format must agree with what the game reports.
    if (aif.rgb_dimensions != rgb_dimensions or
        aif.feature_dimensions != feature_dimensions or
        (feature_dimensions and
         aif.camera_width_world_units != camera_width_world_units)):
      raise ValueError("""
The supplied agent_interface_format doesn't match the resolutions computed from
the game_info:
  rgb_dimensions: %s vs %s
  feature_dimensions: %s vs %s
  camera_width_world_units: %s vs %s
""" % (aif.rgb_dimensions, rgb_dimensions,
       aif.feature_dimensions, feature_dimensions,
       aif.camera_width_world_units, camera_width_world_units))
  else:
    agent_interface_format = AgentInterfaceFormat(
        feature_dimensions=feature_dimensions,
        rgb_dimensions=rgb_dimensions,
        camera_width_world_units=camera_width_world_units,
        **kwargs)

  return Features(
      agent_interface_format=agent_interface_format,
      map_size=map_size,
      map_name=map_name,
      requested_races=requested_races)
def _init_valid_functions(action_dimensions):
  """Initialize ValidActions for the spatial (feature/rgb) action space."""
  screen_size = tuple(int(i) for i in action_dimensions.screen)
  minimap_size = tuple(int(i) for i in action_dimensions.minimap)
  # Spatial argument types get their sizes from the chosen action dimensions;
  # everything else keeps its declared size.
  overrides = {
      "screen": screen_size,
      "screen2": screen_size,
      "minimap": minimap_size,
  }
  arg_types = actions.Arguments(*[
      actions.ArgumentType.spec(t.id, t.name, overrides.get(t.name, t.sizes))
      for t in actions.TYPES])
  functions = actions.Functions([
      actions.Function.spec(f.id, f.name,
                            tuple(arg_types[t.id] for t in f.args))
      for f in actions.FUNCTIONS])
  return actions.ValidActions(arg_types, functions)
def _init_valid_raw_functions(raw_resolution, max_selected_units):
  """Initialize ValidActions for the raw action space."""
  # "world" coordinates are bounded by the raw resolution, and "unit_tags"
  # by how many units may be selected at once.
  overrides = {
      "world": tuple(int(i) for i in raw_resolution),
      "unit_tags": (max_selected_units,),
  }
  arg_types = actions.RawArguments(*[
      actions.ArgumentType.spec(t.id, t.name, overrides.get(t.name, t.sizes))
      for t in actions.RAW_TYPES])
  functions = actions.Functions([
      actions.Function.spec(f.id, f.name,
                            tuple(arg_types[t.id] for t in f.args))
      for f in actions.RAW_FUNCTIONS])
  return actions.ValidActions(arg_types, functions)
class Features(object):
  """Render feature layers from SC2 Observation protos into numpy arrays.

  This has the implementation details of how to render a StarCraft environment.
  It translates between agent action/observation formats and StarCraft
  action/observation formats, which should not be seen by agent authors. The
  StarCraft protos contain more information than they should have access to.

  This is outside of the environment so that it can also be used in other
  contexts, eg a supervised dataset pipeline.
  """
  def __init__(self, agent_interface_format=None, map_size=None,
               requested_races=None, map_name="unknown"):
    """Initialize a Features instance matching the specified interface format.

    Args:
      agent_interface_format: See the documentation for `AgentInterfaceFormat`.
      map_size: The size of the map in world units, needed for feature_units.
      requested_races: Optional. Dict mapping `player_id`s to that player's
        requested race. If present, will send player races in observation.
      map_name: Optional name of the map, to be added to the observation.

    Raises:
      ValueError: if agent_interface_format isn't specified.
      ValueError: if map_size isn't specified when use_feature_units or
        use_camera_position is.
    """
    if not agent_interface_format:
      raise ValueError("Please specify agent_interface_format")

    self._agent_interface_format = agent_interface_format
    aif = self._agent_interface_format
    if not aif.raw_resolution and map_size:
      # Default the raw resolution to the full map size when unspecified.
      aif.raw_resolution = point.Point.build(map_size)
    self._map_size = map_size
    self._map_name = map_name

    if (aif.use_feature_units
        or aif.use_camera_position
        or aif.use_raw_units):
      # These observations need world <-> pixel coordinate transforms.
      self.init_camera(
          aif.feature_dimensions,
          map_size,
          aif.camera_width_world_units,
          aif.raw_resolution)

    self._send_observation_proto = aif.send_observation_proto
    self._raw = aif.use_raw_actions
    if self._raw:
      self._valid_functions = _init_valid_raw_functions(
          aif.raw_resolution, aif.max_selected_units)
      self._raw_tags = []  # Unit tags from the last observation, by index.
    else:
      self._valid_functions = _init_valid_functions(aif.action_dimensions)

    self._requested_races = requested_races
    if requested_races is not None:
      assert len(requested_races) <= 2
  def init_camera(
      self, feature_dimensions, map_size, camera_width_world_units,
      raw_resolution):
    """Initialize the camera (especially for feature_units).

    This is called in the constructor and may be called repeatedly after
    `Features` is constructed, since it deals with rescaling coordinates and
    not changing environment/action specs.

    Args:
      feature_dimensions: See the documentation in `AgentInterfaceFormat`.
      map_size: The size of the map in world units.
      camera_width_world_units: See the documentation in `AgentInterfaceFormat`.
      raw_resolution: See the documentation in `AgentInterfaceFormat`.

    Raises:
      ValueError: If map_size or camera_width_world_units are falsey (which
        should mainly happen if called by the constructor).
    """
    if not map_size or not camera_width_world_units:
      raise ValueError(
          "Either pass the game_info with raw enabled, or map_size and "
          "camera_width_world_units in order to use feature_units or camera"
          "position.")
    map_size = point.Point.build(map_size)
    # Flip the y axis: scale (1, -1) with offset map_size.y maps world
    # coordinates to a top-left-origin coordinate system.
    self._world_to_world_tl = transform.Linear(point.Point(1, -1),
                                               point.Point(0, map_size.y))
    # Initial offset only; _update_camera recomputes it from the observed
    # camera center on every transform_obs with feature_units enabled.
    self._world_tl_to_world_camera_rel = transform.Linear(offset=-map_size / 4)
    if feature_dimensions:
      world_camera_rel_to_feature_screen = transform.Linear(
          feature_dimensions.screen / camera_width_world_units,
          feature_dimensions.screen / 2)
      self._world_to_feature_screen_px = transform.Chain(
          self._world_to_world_tl,
          self._world_tl_to_world_camera_rel,
          world_camera_rel_to_feature_screen,
          transform.PixelToCoord())
    # If we don't have a specified raw resolution, we do no transform.
    world_tl_to_feature_minimap = transform.Linear(
        scale=raw_resolution / map_size.max_dim() if raw_resolution else None)
    self._world_to_minimap_px = transform.Chain(
        self._world_to_world_tl,
        world_tl_to_feature_minimap,
        transform.PixelToCoord())
    # Camera footprint in minimap pixels.
    self._camera_size = (
        raw_resolution / map_size.max_dim() * camera_width_world_units)
  def _update_camera(self, camera_center):
    """Update the camera transform based on the new camera center."""
    # Re-anchor the camera-relative transform so world coordinates are
    # expressed relative to the current camera center.
    self._world_tl_to_world_camera_rel.offset = (
        -self._world_to_world_tl.fwd_pt(camera_center) *
        self._world_tl_to_world_camera_rel.scale)
  def observation_spec(self):
    """The observation spec for the SC2 environment.

    It's worth noting that the image-like observations are in y,x/row,column
    order which is different than the actions which are in x,y order. This is
    due to conflicting conventions, and to facilitate printing of the images.

    Returns:
      The dict of observation names to their tensor shapes. Shapes with a 0 can
      vary in length, for example the number of valid actions depends on which
      units you have selected.
    """
    # pytype: disable=wrong-arg-types
    obs_spec = named_array.NamedDict({
        "action_result": (0,),  # See error.proto: ActionResult.
        "alerts": (0,),  # See sc2api.proto: Alert.
        "build_queue": (0, len(UnitLayer)),
        "cargo": (0, len(UnitLayer)),
        "cargo_slots_available": (1,),
        "control_groups": (10, 2),
        "game_loop": (1,),
        "last_actions": (0,),
        "map_name": (0,),
        "multi_select": (0, len(UnitLayer)),
        "player": (len(Player),),
        "production_queue": (0, len(ProductionQueue)),
        "score_cumulative": (len(ScoreCumulative),),
        "score_by_category": (len(ScoreByCategory), len(ScoreCategories)),
        "score_by_vital": (len(ScoreByVital), len(ScoreVitals)),
        "single_select": (0, len(UnitLayer)),  # Only (n, 7) for n in (0, 1).
    })
    # pytype: enable=wrong-arg-types

    if not self._raw:
      obs_spec["available_actions"] = (0,)

    aif = self._agent_interface_format

    # Spatial observations depend on the configured resolutions.
    if aif.feature_dimensions:
      obs_spec["feature_screen"] = (len(SCREEN_FEATURES),
                                    aif.feature_dimensions.screen.y,
                                    aif.feature_dimensions.screen.x)
      obs_spec["feature_minimap"] = (len(MINIMAP_FEATURES),
                                     aif.feature_dimensions.minimap.y,
                                     aif.feature_dimensions.minimap.x)
    if aif.rgb_dimensions:
      obs_spec["rgb_screen"] = (aif.rgb_dimensions.screen.y,
                                aif.rgb_dimensions.screen.x,
                                3)
      obs_spec["rgb_minimap"] = (aif.rgb_dimensions.minimap.y,
                                 aif.rgb_dimensions.minimap.x,
                                 3)
    if aif.use_feature_units:
      obs_spec["feature_units"] = (0, len(FeatureUnit))  # pytype: disable=wrong-arg-types
      obs_spec["feature_effects"] = (0, len(EffectPos))
    if aif.use_raw_units:
      obs_spec["raw_units"] = (0, len(FeatureUnit))
      obs_spec["raw_effects"] = (0, len(EffectPos))
    if aif.use_feature_units or aif.use_raw_units:
      obs_spec["radar"] = (0, len(Radar))
      obs_spec["upgrades"] = (0,)
    if aif.use_unit_counts:
      obs_spec["unit_counts"] = (0, len(UnitCounts))
    if aif.use_camera_position:
      obs_spec["camera_position"] = (2,)
      obs_spec["camera_size"] = (2,)
    if self._send_observation_proto:
      obs_spec["_response_observation"] = (0,)
    obs_spec["home_race_requested"] = (1,)
    obs_spec["away_race_requested"] = (1,)
    return obs_spec
  def action_spec(self):
    """The action spec: a `ValidActions` with all function and argument types.

    The action space is pretty complicated, so this exposes the precomputed
    `ValidActions` built in the constructor.
    """
    return self._valid_functions
  @property
  def map_size(self):
    # The `map_size` passed to the constructor (size of the map, world units).
    return self._map_size

  @property
  def requested_races(self):
    # Dict of player_id -> requested race, or None if it wasn't provided.
    return self._requested_races
  @sw.decorate
  def transform_obs(self, obs):
    """Render some SC2 observations into something an agent can handle."""
    empty_unit = np.array([], dtype=np.int32).reshape((0, len(UnitLayer)))
    out = named_array.NamedDict({  # Fill out some that are sometimes empty.
        "single_select": empty_unit,
        "multi_select": empty_unit,
        "build_queue": empty_unit,
        "cargo": empty_unit,
        "production_queue": np.array([], dtype=np.int32).reshape(
            (0, len(ProductionQueue))),
        "last_actions": np.array([], dtype=np.int32),
        "cargo_slots_available": np.array([0], dtype=np.int32),
        "home_race_requested": np.array([0], dtype=np.int32),
        "away_race_requested": np.array([0], dtype=np.int32),
        "map_name": self._map_name,
    })

    def or_zeros(layer, size):
      # Missing feature layers come back as None; substitute a zero plane.
      if layer is not None:
        return layer.astype(np.int32, copy=False)
      else:
        return np.zeros((size.y, size.x), dtype=np.int32)

    aif = self._agent_interface_format

    if aif.feature_dimensions:
      with sw("feature_screen"):
        out["feature_screen"] = named_array.NamedNumpyArray(
            np.stack([or_zeros(f.unpack(obs.observation),
                               aif.feature_dimensions.screen)
                      for f in SCREEN_FEATURES]),
            names=[ScreenFeatures, None, None])
      with sw("feature_minimap"):
        out["feature_minimap"] = named_array.NamedNumpyArray(
            np.stack([or_zeros(f.unpack(obs.observation),
                               aif.feature_dimensions.minimap)
                      for f in MINIMAP_FEATURES]),
            names=[MinimapFeatures, None, None])

    if aif.rgb_dimensions:
      with sw("rgb_screen"):
        out["rgb_screen"] = Feature.unpack_rgb_image(
            obs.observation.render_data.map).astype(np.int32)
      with sw("rgb_minimap"):
        out["rgb_minimap"] = Feature.unpack_rgb_image(
            obs.observation.render_data.minimap).astype(np.int32)

    if not self._raw:
      with sw("last_actions"):
        out["last_actions"] = np.array(
            [self.reverse_action(a).function for a in obs.actions],
            dtype=np.int32)

    out["action_result"] = np.array([o.result for o in obs.action_errors],
                                    dtype=np.int32)

    out["alerts"] = np.array(obs.observation.alerts, dtype=np.int32)

    out["game_loop"] = np.array([obs.observation.game_loop], dtype=np.int32)

    with sw("score"):
      score_details = obs.observation.score.score_details
      out["score_cumulative"] = named_array.NamedNumpyArray([
          obs.observation.score.score,
          score_details.idle_production_time,
          score_details.idle_worker_time,
          score_details.total_value_units,
          score_details.total_value_structures,
          score_details.killed_value_units,
          score_details.killed_value_structures,
          score_details.collected_minerals,
          score_details.collected_vespene,
          score_details.collection_rate_minerals,
          score_details.collection_rate_vespene,
          score_details.spent_minerals,
          score_details.spent_vespene,
      ], names=ScoreCumulative, dtype=np.int32)

      def get_score_details(key, details, categories):
        # Pull one row of the score proto and expand it per category.
        row = getattr(details, key.name)
        return [getattr(row, category.name) for category in categories]

      out["score_by_category"] = named_array.NamedNumpyArray([
          get_score_details(key, score_details, ScoreCategories)
          for key in ScoreByCategory
      ], names=[ScoreByCategory, ScoreCategories], dtype=np.int32)

      out["score_by_vital"] = named_array.NamedNumpyArray([
          get_score_details(key, score_details, ScoreVitals)
          for key in ScoreByVital
      ], names=[ScoreByVital, ScoreVitals], dtype=np.int32)

    player = obs.observation.player_common
    out["player"] = named_array.NamedNumpyArray([
        player.player_id,
        player.minerals,
        player.vespene,
        player.food_used,
        player.food_cap,
        player.food_army,
        player.food_workers,
        player.idle_worker_count,
        player.army_count,
        player.warp_gate_count,
        player.larva_count,
    ], names=Player, dtype=np.int32)

    def unit_vec(u):
      # Short unit summary used by the UI (selection/cargo/production) panels.
      return np.array((
          u.unit_type,
          u.player_relative,
          u.health,
          u.shields,
          u.energy,
          u.transport_slots_taken,
          int(u.build_progress * 100),  # discretize
      ), dtype=np.int32)

    ui = obs.observation.ui_data

    with sw("ui"):
      groups = np.zeros((10, 2), dtype=np.int32)
      for g in ui.groups:
        groups[g.control_group_index, :] = (g.leader_unit_type, g.count)
      out["control_groups"] = groups

      if ui.HasField("single"):
        out["single_select"] = named_array.NamedNumpyArray(
            [unit_vec(ui.single.unit)], [None, UnitLayer])
      elif ui.HasField("multi"):
        out["multi_select"] = named_array.NamedNumpyArray(
            [unit_vec(u) for u in ui.multi.units], [None, UnitLayer])
      elif ui.HasField("cargo"):
        out["single_select"] = named_array.NamedNumpyArray(
            [unit_vec(ui.cargo.unit)], [None, UnitLayer])
        out["cargo"] = named_array.NamedNumpyArray(
            [unit_vec(u) for u in ui.cargo.passengers], [None, UnitLayer])
        out["cargo_slots_available"] = np.array([ui.cargo.slots_available],
                                                dtype=np.int32)
      elif ui.HasField("production"):
        out["single_select"] = named_array.NamedNumpyArray(
            [unit_vec(ui.production.unit)], [None, UnitLayer])
        if ui.production.build_queue:
          out["build_queue"] = named_array.NamedNumpyArray(
              [unit_vec(u) for u in ui.production.build_queue],
              [None, UnitLayer], dtype=np.int32)
        if ui.production.production_queue:
          out["production_queue"] = named_array.NamedNumpyArray(
              [(item.ability_id, item.build_progress * 100)
               for item in ui.production.production_queue],
              [None, ProductionQueue], dtype=np.int32)

    tag_types = {}  # Only populate the cache if it's needed.

    def get_addon_type(tag):
      # Lazily build tag -> unit_type for resolving add-on (eg reactor) types.
      if not tag_types:
        for u in raw.units:
          tag_types[u.tag] = u.unit_type
      return tag_types.get(tag, 0)

    def full_unit_vec(u, pos_transform, is_raw=False):
      """Compute unit features."""
      screen_pos = pos_transform.fwd_pt(
          point.Point.build(u.pos))
      screen_radius = pos_transform.fwd_dist(u.radius)

      def raw_order(i):
        if len(u.orders) > i:
          # TODO(tewalds): Return a generalized func id.
          return actions.RAW_ABILITY_ID_TO_FUNC_ID.get(
              u.orders[i].ability_id, 0)
        return 0

      features = [
          # Match unit_vec order
          u.unit_type,
          u.alliance,  # Self = 1, Ally = 2, Neutral = 3, Enemy = 4
          u.health,
          u.shield,
          u.energy,
          u.cargo_space_taken,
          int(u.build_progress * 100),  # discretize
          # Resume API order
          int(u.health / u.health_max * 255) if u.health_max > 0 else 0,
          int(u.shield / u.shield_max * 255) if u.shield_max > 0 else 0,
          int(u.energy / u.energy_max * 255) if u.energy_max > 0 else 0,
          u.display_type,  # Visible = 1, Snapshot = 2, Hidden = 3
          u.owner,  # 1-15, 16 = neutral
          screen_pos.x,
          screen_pos.y,
          u.facing,
          screen_radius,
          u.cloak,  # Cloaked = 1, CloakedDetected = 2, NotCloaked = 3
          u.is_selected,
          u.is_blip,
          u.is_powered,
          u.mineral_contents,
          u.vespene_contents,
          # Not populated for enemies or neutral
          u.cargo_space_max,
          u.assigned_harvesters,
          u.ideal_harvesters,
          u.weapon_cooldown,
          len(u.orders),
          raw_order(0),
          raw_order(1),
          u.tag if is_raw else 0,
          u.is_hallucination,
          u.buff_ids[0] if len(u.buff_ids) >= 1 else 0,
          u.buff_ids[1] if len(u.buff_ids) >= 2 else 0,
          get_addon_type(u.add_on_tag) if u.add_on_tag else 0,
          u.is_active,
          u.is_on_screen,
          int(u.orders[0].progress * 100) if len(u.orders) >= 1 else 0,
          int(u.orders[1].progress * 100) if len(u.orders) >= 2 else 0,
          raw_order(2),
          raw_order(3),
          0,  # is_in_cargo; cargo_units() emits 1 in the same slot.
          u.buff_duration_remain,
          u.buff_duration_max,
          u.attack_upgrade_level,
          u.armor_upgrade_level,
          u.shield_upgrade_level,
      ]
      return features

    raw = obs.observation.raw_data

    if aif.use_feature_units:
      with sw("feature_units"):
        # Update the camera location so we can calculate world to screen pos
        self._update_camera(point.Point.build(raw.player.camera))
        feature_units = [full_unit_vec(u, self._world_to_feature_screen_px)
                         for u in raw.units if u.is_on_screen]
        out["feature_units"] = named_array.NamedNumpyArray(
            feature_units, [None, FeatureUnit], dtype=np.int64)

        feature_effects = []
        feature_screen_size = aif.feature_dimensions.screen
        for effect in raw.effects:
          for pos in effect.pos:
            screen_pos = self._world_to_feature_screen_px.fwd_pt(
                point.Point.build(pos))
            # Only keep effects that land inside the visible screen.
            if (0 <= screen_pos.x < feature_screen_size.x and
                0 <= screen_pos.y < feature_screen_size.y):
              feature_effects.append([
                  effect.effect_id,
                  effect.alliance,
                  effect.owner,
                  effect.radius,
                  screen_pos.x,
                  screen_pos.y,
              ])
        out["feature_effects"] = named_array.NamedNumpyArray(
            feature_effects, [None, EffectPos], dtype=np.int32)

    if aif.use_raw_units:
      with sw("raw_units"):
        with sw("to_list"):
          raw_units = [full_unit_vec(u, self._world_to_minimap_px, is_raw=True)
                       for u in raw.units]
        with sw("to_numpy"):
          out["raw_units"] = named_array.NamedNumpyArray(
              raw_units, [None, FeatureUnit], dtype=np.int64)
        # Remember unit tags by row index so raw actions can map indices back
        # to real tags (see transform_action's find_original_tag).
        if raw_units:
          self._raw_tags = out["raw_units"][:, FeatureUnit.tag]
        else:
          self._raw_tags = np.array([])

        raw_effects = []
        for effect in raw.effects:
          for pos in effect.pos:
            raw_pos = self._world_to_minimap_px.fwd_pt(point.Point.build(pos))
            raw_effects.append([
                effect.effect_id,
                effect.alliance,
                effect.owner,
                effect.radius,
                raw_pos.x,
                raw_pos.y,
            ])
        out["raw_effects"] = named_array.NamedNumpyArray(
            raw_effects, [None, EffectPos], dtype=np.int32)
      # NOTE(review): observation_spec declares "upgrades" whenever
      # use_feature_units OR use_raw_units is set, but it is only populated
      # on the raw path here — confirm feature-units-only configs.
      out["upgrades"] = np.array(raw.player.upgrade_ids, dtype=np.int32)

    def cargo_units(u, pos_transform, is_raw=False):
      """Compute unit features."""
      screen_pos = pos_transform.fwd_pt(
          point.Point.build(u.pos))
      features = []
      for v in u.passengers:
        # Passengers expose only a few proto fields; everything else is
        # zero-filled to keep the FeatureUnit layout.
        features.append([
            v.unit_type,
            u.alliance,  # Self = 1, Ally = 2, Neutral = 3, Enemy = 4
            v.health,
            v.shield,
            v.energy,
            0,  # cargo_space_taken
            0,  # build_progress
            int(v.health / v.health_max * 255) if v.health_max > 0 else 0,
            int(v.shield / v.shield_max * 255) if v.shield_max > 0 else 0,
            int(v.energy / v.energy_max * 255) if v.energy_max > 0 else 0,
            0,  # display_type
            u.owner,  # 1-15, 16 = neutral
            screen_pos.x,
            screen_pos.y,
            0,  # facing
            0,  # screen_radius
            0,  # cloak
            0,  # is_selected
            0,  # is_blip
            0,  # is powered
            0,  # mineral_contents
            0,  # vespene_contents
            0,  # cargo_space_max
            0,  # assigned_harvesters
            0,  # ideal_harvesters
            0,  # weapon_cooldown
            0,  # order_length
            0,  # order_id_0
            0,  # order_id_1
            v.tag if is_raw else 0,
            0,  # is hallucination
            0,  # buff_id_1
            0,  # buff_id_2
            0,  # addon_unit_type
            0,  # active
            0,  # is_on_screen
            0,  # order_progress_1
            0,  # order_progress_2
            0,  # order_id_2
            0,  # order_id_3
            1,  # is_in_cargo
            0,  # buff_duration_remain
            0,  # buff_duration_max
            0,  # attack_upgrade_level
            0,  # armor_upgrade_level
            0,  # shield_upgrade_level
        ])
      return features

    if aif.add_cargo_to_units:
      with sw("add_cargo_to_units"):
        if aif.use_feature_units:
          with sw("feature_units"):
            with sw("to_list"):
              feature_cargo_units = []
              for u in raw.units:
                if u.is_on_screen:
                  feature_cargo_units += cargo_units(
                      u, self._world_to_feature_screen_px)
            with sw("to_numpy"):
              if feature_cargo_units:
                # NOTE(review): this np.array result is unused — the
                # concatenate below receives the raw list (which numpy
                # accepts), unlike the raw branch which concatenates the
                # converted array.
                all_feature_units = np.array(
                    feature_cargo_units, dtype=np.int64)
                all_feature_units = np.concatenate(
                    [out["feature_units"], feature_cargo_units], axis=0)
                out["feature_units"] = named_array.NamedNumpyArray(
                    all_feature_units, [None, FeatureUnit], dtype=np.int64)
        if aif.use_raw_units:
          with sw("raw_units"):
            with sw("to_list"):
              raw_cargo_units = []
              for u in raw.units:
                raw_cargo_units += cargo_units(
                    u, self._world_to_minimap_px, is_raw=True)
            with sw("to_numpy"):
              if raw_cargo_units:
                raw_cargo_units = np.array(raw_cargo_units, dtype=np.int64)
                all_raw_units = np.concatenate(
                    [out["raw_units"], raw_cargo_units], axis=0)
                out["raw_units"] = named_array.NamedNumpyArray(
                    all_raw_units, [None, FeatureUnit], dtype=np.int64)
                self._raw_tags = out["raw_units"][:, FeatureUnit.tag]

    if aif.use_unit_counts:
      with sw("unit_counts"):
        unit_counts = collections.defaultdict(int)
        for u in raw.units:
          if u.alliance == sc_raw.Self:
            unit_counts[u.unit_type] += 1
        out["unit_counts"] = named_array.NamedNumpyArray(
            sorted(unit_counts.items()), [None, UnitCounts], dtype=np.int32)

    if aif.use_camera_position:
      camera_position = self._world_to_minimap_px.fwd_pt(
          point.Point.build(raw.player.camera))
      out["camera_position"] = np.array((camera_position.x, camera_position.y),
                                        dtype=np.int32)
      out["camera_size"] = np.array((self._camera_size.x, self._camera_size.y),
                                    dtype=np.int32)

    if not self._raw:
      out["available_actions"] = np.array(
          self.available_actions(obs.observation), dtype=np.int32)

    if self._requested_races is not None:
      out["home_race_requested"] = np.array(
          (self._requested_races[player.player_id],), dtype=np.int32)
      for player_id, race in self._requested_races.items():
        if player_id != player.player_id:
          out["away_race_requested"] = np.array((race,), dtype=np.int32)

    if aif.use_feature_units or aif.use_raw_units:
      def transform_radar(radar):
        p = self._world_to_minimap_px.fwd_pt(point.Point.build(radar.pos))
        return p.x, p.y, radar.radius
      out["radar"] = named_array.NamedNumpyArray(
          list(map(transform_radar, obs.observation.raw_data.radar)),
          [None, Radar], dtype=np.int32)

    # Send the entire proto as well (in a function, so it isn't copied).
    if self._send_observation_proto:
      out["_response_observation"] = lambda: obs

    return out
@sw.decorate
def available_actions(self, obs):
"""Return the list of available action ids."""
available_actions = set()
hide_specific_actions = self._agent_interface_format.hide_specific_actions
for i, func in actions.FUNCTIONS_AVAILABLE.items():
if func.avail_fn(obs):
available_actions.add(i)
for a in obs.abilities:
if a.ability_id not in actions.ABILITY_IDS:
logging.warning("Unknown ability %s seen as available.", a.ability_id)
continue
found_applicable = False
for func in actions.ABILITY_IDS[a.ability_id]:
if func.function_type in actions.POINT_REQUIRED_FUNCS[a.requires_point]:
if func.general_id == 0 or not hide_specific_actions:
available_actions.add(func.id)
found_applicable = True
if func.general_id != 0: # Always offer generic actions.
for general_func in actions.ABILITY_IDS[func.general_id]:
if general_func.function_type is func.function_type:
# Only the right type. Don't want to expose the general action
# to minimap if only the screen version is available.
available_actions.add(general_func.id)
found_applicable = True
break
if not found_applicable:
raise ValueError("Failed to find applicable action for {}".format(a))
return list(available_actions)
@sw.decorate
def transform_action(self, obs, func_call, skip_available=False):
"""Transform an agent-style action to one that SC2 can consume.
Args:
obs: a `sc_pb.Observation` from the previous frame.
func_call: a `FunctionCall` to be turned into a `sc_pb.Action`.
skip_available: If True, assume the action is available. This should only
be used for testing or if you expect to make actions that weren't
valid at the last observation.
Returns:
a corresponding `sc_pb.Action`.
Raises:
ValueError: if the action doesn't pass validation.
"""
# Ignore sc_pb.Action's to make the env more flexible, eg raw actions.
if isinstance(func_call, sc_pb.Action):
return func_call
func_id = func_call.function
try:
if self._raw:
func = actions.RAW_FUNCTIONS[func_id]
else:
func = actions.FUNCTIONS[func_id]
except KeyError:
raise ValueError("Invalid function id: %s." % func_id)
# Available?
if not (skip_available or self._raw or
func_id in self.available_actions(obs)):
raise ValueError("Function %s/%s is currently not available" % (
func_id, func.name))
# Right number of args?
if len(func_call.arguments) != len(func.args):
raise ValueError(
"Wrong number of arguments for function: %s, got: %s" % (
func, func_call.arguments))
# Args are valid?
aif = self._agent_interface_format
for t, arg in zip(func.args, func_call.arguments):
if t.count:
if 1 <= len(arg) <= t.count:
continue
else:
raise ValueError(
"Wrong number of values for argument of %s, got: %s" % (
func, func_call.arguments))
if t.name in ("screen", "screen2"):
sizes = aif.action_dimensions.screen
elif t.name == "minimap":
sizes = aif.action_dimensions.minimap
elif t.name == "world":
sizes = aif.raw_resolution
else:
sizes = t.sizes
if len(sizes) != len(arg):
raise ValueError(
"Wrong number of values for argument of %s, got: %s" % (
func, func_call.arguments))
for s, a in zip(sizes, arg):
if not np.all(0 <= a) and np.all(a < s):
raise ValueError("Argument is out of range for %s, got: %s" % (
func, func_call.arguments))
# Convert them to python types.
kwargs = {type_.name: type_.fn(a)
for type_, a in zip(func.args, func_call.arguments)}
# Call the right callback to get an SC2 action proto.
sc2_action = sc_pb.Action()
kwargs["action"] = sc2_action
if func.ability_id:
kwargs["ability_id"] = func.ability_id
if self._raw:
if "world" in kwargs:
kwargs["world"] = self._world_to_minimap_px.back_pt(kwargs["world"])
def find_original_tag(position):
if position >= len(self._raw_tags): # Assume it's a real unit tag.
return position
original_tag = self._raw_tags[position]
if original_tag == 0:
logging.warning("Tag not found: %s", original_tag)
return original_tag
if "target_unit_tag" in kwargs:
kwargs["target_unit_tag"] = find_original_tag(
kwargs["target_unit_tag"][0])
if "unit_tags" in kwargs:
kwargs["unit_tags"] = [find_original_tag(t)
for t in kwargs["unit_tags"]]
actions.RAW_FUNCTIONS[func_id].function_type(**kwargs)
else:
kwargs["action_space"] = aif.action_space
actions.FUNCTIONS[func_id].function_type(**kwargs)
return sc2_action
  @sw.decorate
  def reverse_action(self, action):
    """Transform an SC2-style action into an agent-style action.

    This should be the inverse of `transform_action`.

    Args:
      action: a `sc_pb.Action` to be transformed.

    Returns:
      A corresponding `actions.FunctionCall`.

    Raises:
      ValueError: if it doesn't know how to transform this action.
    """
    FUNCTIONS = actions.FUNCTIONS  # pylint: disable=invalid-name
    aif = self._agent_interface_format

    def func_call_ability(ability_id, cmd_type, *args):
      """Get the function id for a specific ability id and action type."""
      if ability_id not in actions.ABILITY_IDS:
        logging.warning("Unknown ability_id: %s. This is probably dance or "
                        "cheer, or some unknown new or map specific ability. "
                        "Treating it as a no-op.", ability_id)
        return FUNCTIONS.no_op()

      if aif.hide_specific_actions:
        # Collapse a specific ability onto its general form (if it has one)
        # so that specific actions stay hidden from the agent.
        general_id = next(iter(actions.ABILITY_IDS[ability_id])).general_id
        if general_id:
          ability_id = general_id

      for func in actions.ABILITY_IDS[ability_id]:
        if func.function_type is cmd_type:
          return FUNCTIONS[func.id](*args)
      raise ValueError("Unknown ability_id: %s, type: %s. Likely a bug." % (
          ability_id, cmd_type.__name__))

    if action.HasField("action_ui"):
      act_ui = action.action_ui
      # The "- 1" below shifts 1-indexed proto enum values to the 0-indexed
      # values used by FunctionCall arguments.
      if act_ui.HasField("multi_panel"):
        return FUNCTIONS.select_unit(act_ui.multi_panel.type - 1,
                                     act_ui.multi_panel.unit_index)
      if act_ui.HasField("control_group"):
        return FUNCTIONS.select_control_group(
            act_ui.control_group.action - 1,
            act_ui.control_group.control_group_index)
      if act_ui.HasField("select_idle_worker"):
        return FUNCTIONS.select_idle_worker(act_ui.select_idle_worker.type - 1)
      if act_ui.HasField("select_army"):
        return FUNCTIONS.select_army(act_ui.select_army.selection_add)
      if act_ui.HasField("select_warp_gates"):
        return FUNCTIONS.select_warp_gates(
            act_ui.select_warp_gates.selection_add)
      if act_ui.HasField("select_larva"):
        return FUNCTIONS.select_larva()
      if act_ui.HasField("cargo_panel"):
        return FUNCTIONS.unload(act_ui.cargo_panel.unit_index)
      if act_ui.HasField("production_panel"):
        return FUNCTIONS.build_queue(act_ui.production_panel.unit_index)
      if act_ui.HasField("toggle_autocast"):
        return func_call_ability(act_ui.toggle_autocast.ability_id,
                                 actions.autocast)

    if (action.HasField("action_feature_layer") or
        action.HasField("action_render")):
      # Pick whichever spatial action proto matches the configured space.
      act_sp = actions.spatial(action, aif.action_space)
      if act_sp.HasField("camera_move"):
        coord = point.Point.build(act_sp.camera_move.center_minimap)
        return FUNCTIONS.move_camera(coord)
      if act_sp.HasField("unit_selection_point"):
        select_point = act_sp.unit_selection_point
        coord = point.Point.build(select_point.selection_screen_coord)
        return FUNCTIONS.select_point(select_point.type - 1, coord)
      if act_sp.HasField("unit_selection_rect"):
        select_rect = act_sp.unit_selection_rect
        # TODO(tewalds): After looking at some replays we should decide if
        # this is good enough. Maybe we need to simulate multiple actions or
        # merge the selection rects into a bigger one.
        tl = point.Point.build(select_rect.selection_screen_coord[0].p0)
        br = point.Point.build(select_rect.selection_screen_coord[0].p1)
        return FUNCTIONS.select_rect(select_rect.selection_add, tl, br)
      if act_sp.HasField("unit_command"):
        cmd = act_sp.unit_command
        queue = int(cmd.queue_command)
        if cmd.HasField("target_screen_coord"):
          coord = point.Point.build(cmd.target_screen_coord)
          return func_call_ability(cmd.ability_id, actions.cmd_screen,
                                   queue, coord)
        elif cmd.HasField("target_minimap_coord"):
          coord = point.Point.build(cmd.target_minimap_coord)
          return func_call_ability(cmd.ability_id, actions.cmd_minimap,
                                   queue, coord)
        else:
          # No target coordinate: a quick (instant) command.
          return func_call_ability(cmd.ability_id, actions.cmd_quick, queue)
    if action.HasField("action_raw") or action.HasField("action_render"):
      raise ValueError("Unknown action:\n%s" % action)
    return FUNCTIONS.no_op()
  @sw.decorate
  def reverse_raw_action(self, action, prev_obs):
    """Transform an SC2-style action into an agent-style action.

    This should be the inverse of `transform_action`.

    Args:
      action: a `sc_pb.Action` to be transformed.
      prev_obs: an obs to figure out tags.

    Returns:
      A corresponding `actions.FunctionCall`.

    Raises:
      ValueError: if it doesn't know how to transform this action.
    """
    aif = self._agent_interface_format
    # Tags of the units in the previous observation; agent-side raw actions
    # reference units by index into this array rather than by raw tag.
    raw_tags = prev_obs["raw_units"][:, FeatureUnit.tag]

    def find_tag_position(original_tag):
      """Return the index of `original_tag` in `raw_tags`, or -1 if absent."""
      for i, tag in enumerate(raw_tags):
        if tag == original_tag:
          return i
      logging.warning("Not found tag! %s", original_tag)
      return -1

    def func_call_ability(ability_id, cmd_type, *args):
      """Get the function id for a specific ability id and action type."""
      if ability_id not in actions.RAW_ABILITY_IDS:
        logging.warning("Unknown ability_id: %s. This is probably dance or "
                        "cheer, or some unknown new or map specific ability. "
                        "Treating it as a no-op.", ability_id)
        return actions.RAW_FUNCTIONS.no_op()

      if aif.hide_specific_actions:
        # Collapse a specific ability onto its general form (if it has one).
        general_id = next(iter(actions.RAW_ABILITY_IDS[ability_id])).general_id
        if general_id:
          ability_id = general_id

      for func in actions.RAW_ABILITY_IDS[ability_id]:
        if func.function_type is cmd_type:
          return actions.RAW_FUNCTIONS[func.id](*args)
      raise ValueError("Unknown ability_id: %s, type: %s. Likely a bug." % (
          ability_id, cmd_type.__name__))

    if action.HasField("action_raw"):
      raw_act = action.action_raw
      if raw_act.HasField("unit_command"):
        uc = raw_act.unit_command
        ability_id = uc.ability_id
        queue_command = uc.queue_command
        unit_tags = (find_tag_position(t) for t in uc.unit_tags)
        # Remove invalid units.
        unit_tags = [t for t in unit_tags if t != -1]
        if not unit_tags:
          return actions.RAW_FUNCTIONS.no_op()

        if uc.HasField("target_unit_tag"):
          target_unit_tag = find_tag_position(uc.target_unit_tag)
          if target_unit_tag == -1:
            return actions.RAW_FUNCTIONS.no_op()
          return func_call_ability(ability_id, actions.raw_cmd_unit,
                                   queue_command, unit_tags, target_unit_tag)
        if uc.HasField("target_world_space_pos"):
          # World coordinates are mapped to minimap pixels, mirroring the
          # back_pt conversion done in `transform_action`.
          coord = point.Point.build(uc.target_world_space_pos)
          coord = self._world_to_minimap_px.fwd_pt(coord)
          return func_call_ability(ability_id, actions.raw_cmd_pt,
                                   queue_command, unit_tags, coord)
        else:
          return func_call_ability(ability_id, actions.raw_cmd,
                                   queue_command, unit_tags)

      if raw_act.HasField("toggle_autocast"):
        uc = raw_act.toggle_autocast
        ability_id = uc.ability_id
        unit_tags = (find_tag_position(t) for t in uc.unit_tags)
        # Remove invalid units.
        unit_tags = [t for t in unit_tags if t != -1]
        if not unit_tags:
          return actions.RAW_FUNCTIONS.no_op()
        return func_call_ability(ability_id, actions.raw_autocast, unit_tags)

      if raw_act.HasField("unit_command"):
        # NOTE(review): this check looks unreachable -- every path in the
        # unit_command branch above already returns. Kept as-is.
        raise ValueError("Unknown action:\n%s" % action)

      if raw_act.HasField("camera_move"):
        coord = point.Point.build(raw_act.camera_move.center_world_space)
        coord = self._world_to_minimap_px.fwd_pt(coord)
        return actions.RAW_FUNCTIONS.raw_move_camera(coord)

    return actions.RAW_FUNCTIONS.no_op()
class Passthrough:
  """Alternative to `Features` which passes actions and observations through."""

  def observation_spec(self):
    """No spec; observations are passed through untouched."""
    return {}

  def transform_obs(self, observation):
    """Returns `observation` unchanged."""
    return observation

  def action_spec(self):
    """No spec; actions are passed through untouched."""
    return {}

  def transform_action(self, observation, action, skip_available):
    """Returns `action` unchanged; the other arguments are ignored."""
    del observation
    del skip_available
    return action

  def available_actions(self, observation):
    """Not supported in passthrough mode."""
    del observation
    raise NotImplementedError(
        "available_actions isn't supported for passthrough")

  def reverse_action(self, action):
    """Not supported in passthrough mode."""
    del action
    raise NotImplementedError("reverse_action isn't supported for passthrough")
| pysc2-master | pysc2/lib/features.py |
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A stopwatch to check how much time is used by bits of code."""
import collections
import functools
import math
import os
import sys
import threading
import time
class Stat(object):
  """Accumulates count/min/max/sum/sum-of-squares over a series of values."""

  __slots__ = ("num", "min", "max", "sum", "sum_sq")

  def __init__(self):
    self.reset()

  def reset(self):
    """Clear all accumulated statistics."""
    self.num = 0
    self.min = 1000000000  # Sentinel; any realistic observation is smaller.
    self.max = 0
    self.sum = 0
    self.sum_sq = 0

  def add(self, val):
    """Fold a single observation into the running statistics."""
    self.num += 1
    self.min = min(self.min, val)
    self.max = max(self.max, val)
    self.sum += val
    self.sum_sq += val**2

  @property
  def avg(self):
    """Mean of the observations, or 0 if there are none."""
    return self.sum / self.num if self.num else 0

  @property
  def dev(self):
    """Standard deviation, or 0 if there are no observations."""
    if not self.num:
      return 0
    mean = self.sum / self.num
    # max() guards against tiny negative values from float rounding.
    return math.sqrt(max(0, self.sum_sq / self.num - mean**2))

  def merge(self, other):
    """Combine another Stat's observations into this one."""
    self.num += other.num
    if other.min < self.min:
      self.min = other.min
    if other.max > self.max:
      self.max = other.max
    self.sum += other.sum
    self.sum_sq += other.sum_sq

  @staticmethod
  def build(summation, average, standard_deviation, minimum, maximum, number):
    """Reconstruct a Stat from its summary statistics."""
    stat = Stat()
    if number > 0:
      stat.num = number
      stat.min = minimum
      stat.max = maximum
      stat.sum = summation
      # Recover the sum of squares from the variance identity.
      stat.sum_sq = number * (standard_deviation**2 + average**2)
    return stat

  @staticmethod
  def parse(s):
    """Inverse of __str__: rebuild a Stat from its string representation."""
    if s == "num=0":
      return Stat()
    return Stat.build(*(float(part.split(":")[1]) for part in s.split(", ")))

  def __str__(self):
    if not self.num:
      return "num=0"
    return "sum: %.4f, avg: %.4f, dev: %.4f, min: %.4f, max: %.4f, num: %d" % (
        self.sum, self.avg, self.dev, self.min, self.max, self.num)
class StopWatchContext(object):
  """Context manager that records the duration of one named scope.

  The name is pushed onto the stopwatch's per-thread stack at construction
  time so that nested scopes yield dotted names (eg "outer.inner"); on exit
  the elapsed wall-clock time is recorded against that dotted name.
  """

  __slots__ = ("_sw", "_start")

  def __init__(self, stopwatch, name):
    self._sw = stopwatch
    self._sw.push(name)

  def __enter__(self):
    self._start = time.time()

  def __exit__(self, unused_exception_type, unused_exc_value, unused_traceback):
    # pop() returns the full dotted name before removing this scope.
    self._sw.add(self._sw.pop(), time.time() - self._start)
class TracingStopWatchContext(StopWatchContext):
  """Like `StopWatchContext`, but also logs every scope entry/exit."""

  def __enter__(self):
    super(TracingStopWatchContext, self).__enter__()
    self._log(">>> %s" % self._sw.cur_stack())

  def __exit__(self, *args, **kwargs):
    self._log("<<< %s: %.6f secs" % (self._sw.cur_stack(),
                                     time.time() - self._start))
    super(TracingStopWatchContext, self).__exit__(*args, **kwargs)

  def _log(self, s):
    # Write to stderr so tracing doesn't pollute stdout output.
    print(s, file=sys.stderr)
class FakeStopWatchContext(object):
  """No-op context manager used when timing is disabled or too costly."""

  __slots__ = ()

  def __enter__(self):
    return None

  def __exit__(self, unused_exception_type, unused_exc_value, unused_traceback):
    return None


# A single shared instance, handed out for every scope when timing is off.
fake_context = FakeStopWatchContext()
class StopWatch(object):
  """A context manager that tracks call count and latency, and other stats.

  Usage:
      sw = stopwatch.Stopwatch()
      with sw("foo"):
        foo()
      with sw("bar"):
        bar()
      @sw.decorate
      def func():
        pass
      func()
      print(sw)
  """
  __slots__ = ("_times", "_local", "_factory")

  def __init__(self, enabled=True, trace=False):
    # Maps dotted scope name -> Stat of observed durations.
    self._times = collections.defaultdict(Stat)
    # Per-thread stack of active scope names, so nesting works per thread.
    self._local = threading.local()
    if trace:
      self.trace()
    elif enabled:
      self.enable()
    else:
      self.disable()

  def disable(self):
    """Make timing a no-op; a shared fake context is used for every scope."""
    self._factory = lambda _: fake_context

  def enable(self):
    """Record timings for every scope."""
    self._factory = lambda name: StopWatchContext(self, name)

  def trace(self):
    """Record timings and also log scope entry/exit to stderr."""
    self._factory = lambda name: TracingStopWatchContext(self, name)

  def custom(self, factory):
    """Use a caller-supplied factory mapping name -> context manager."""
    self._factory = factory

  def __call__(self, name):
    return self._factory(name)

  def decorate(self, name_or_func):
    """Decorate a function/method to check its timings.

    To use the function's name:
      @sw.decorate
      def func():
        pass

    To name it explicitly:
      @sw.decorate("name")
      def random_func_name():
        pass

    Args:
      name_or_func: the name or the function to decorate.

    Returns:
      If a name is passed, returns this as a decorator, otherwise returns the
      decorated function.
    """
    # Escape hatch: strip all stopwatch overhead via the environment.
    if os.environ.get("SC2_NO_STOPWATCH"):
      return name_or_func if callable(name_or_func) else lambda func: func

    def decorator(name, func):
      @functools.wraps(func)
      def _stopwatch(*args, **kwargs):
        with self(name):
          return func(*args, **kwargs)
      return _stopwatch
    if callable(name_or_func):
      return decorator(name_or_func.__name__, name_or_func)
    else:
      return lambda func: decorator(name_or_func, func)

  def push(self, name):
    """Push a scope name onto this thread's stack."""
    try:
      self._local.stack.append(name)
    except AttributeError:
      # Using an exception is faster than using hasattr.
      self._local.stack = [name]

  def pop(self):
    """Return the current dotted scope name, then drop the innermost scope."""
    stack = self._local.stack
    ret = ".".join(stack)
    stack.pop()
    return ret

  def cur_stack(self):
    """The current dotted scope name for this thread."""
    return ".".join(self._local.stack)

  def clear(self):
    self._times.clear()

  def add(self, name, duration):
    self._times[name].add(duration)

  def __getitem__(self, name):
    return self._times[name]

  @property
  def times(self):
    return self._times

  def merge(self, other):
    """Merge another StopWatch's accumulated stats into this one."""
    for k, v in other.times.items():
      self._times[k].merge(v)

  @staticmethod
  def parse(s):
    """Parse the output below to create a new StopWatch."""
    stopwatch = StopWatch()
    for line in s.splitlines():
      if line.strip():
        parts = line.split(None)
        name = parts[0]
        if name != "%":  # ie not the header line
          rest = (float(v) for v in parts[2:])
          stopwatch.times[parts[0]].merge(Stat.build(*rest))
    return stopwatch

  def str(self, threshold=0.1):
    """Return a string representation of the timings."""
    if not self._times:
      return ""
    # Only top-level scopes (no ".") contribute to the total, so nested
    # scopes aren't double counted.
    total = sum(s.sum for k, s in self._times.items() if "." not in k)
    table = [["", "% total", "sum", "avg", "dev", "min", "max", "num"]]
    for k, v in sorted(self._times.items()):
      percent = 100 * v.sum / (total or 1)
      if percent > threshold:  # ignore anything below the threshold
        table.append([
            k,
            "%.2f%%" % percent,
            "%.4f" % v.sum,
            "%.4f" % v.avg,
            "%.4f" % v.dev,
            "%.4f" % v.min,
            "%.4f" % v.max,
            "%d" % v.num,
        ])
    # Right-align each column to its widest entry.
    col_widths = [max(len(row[i]) for row in table)
                  for i in range(len(table[0]))]
    out = ""
    for row in table:
      out += " " + row[0].ljust(col_widths[0]) + " "
      out += " ".join(
          val.rjust(width) for val, width in zip(row[1:], col_widths[1:]))
      out += "\n"
    return out

  def __str__(self):
    return self.str()
# Global stopwatch is disabled by default to not incur the performance hit if
# it's not wanted. Call `sw.enable()` (or `sw.trace()`) to turn it on.
sw = StopWatch(enabled=False)
| pysc2-master | pysc2/lib/stopwatch.py |
#!/usr/bin/python
# Copyright 2019 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Compare the observations from multiple binaries."""
from pysc2.lib import features
from pysc2.lib import np_util
from pysc2.lib import proto_diff
from s2clientprotocol import common_pb2
def image_differencer(path, proto_a, proto_b):
  """proto_diff differencer for PySC2 image data.

  Args:
    path: A ProtoPath-style tuple of field names/indices being compared.
    proto_a: First proto message.
    proto_b: Second proto message.

  Returns:
    A string summarizing element-level differences when `path` addresses the
    `data` field of an `ImageData` message, else None.
  """
  if path[-1] != "data" or len(path) < 2:
    return None

  parent_path = proto_diff.ProtoPath(path[:-1])
  parent_a = parent_path.get_field(proto_a)
  if not isinstance(parent_a, common_pb2.ImageData):
    return None

  parent_b = parent_path.get_field(proto_b)
  return np_util.summarize_array_diffs(
      features.Feature.unpack_layer(parent_a),
      features.Feature.unpack_layer(parent_b))
| pysc2-master | pysc2/lib/image_differencer.py |
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Basic Point and Rect classes."""
import collections
import math
import random
class Point(collections.namedtuple("Point", ["x", "y"])):
  """A 2d point, implemented as an immutable (x, y) named tuple."""

  __slots__ = ()

  @classmethod
  def build(cls, obj):
    """Build a Point from an object that has properties `x` and `y`."""
    return cls(obj.x, obj.y)

  @classmethod
  def unit_rand(cls):
    """Return a Point with x, y chosen randomly with 0 <= x < 1, 0 <= y < 1."""
    return cls(random.random(), random.random())

  def assign_to(self, obj):
    """Assign `x` and `y` to an object that has properties `x` and `y`."""
    obj.x, obj.y = self.x, self.y

  def dist(self, other):
    """Euclidean distance to some other point."""
    return math.sqrt((self.x - other.x)**2 + (self.y - other.y)**2)

  def dist_sq(self, other):
    """Squared distance to some other point (avoids the sqrt)."""
    return (self.x - other.x)**2 + (self.y - other.y)**2

  def round(self):
    """Round `x` and `y` to integers."""
    return Point(int(round(self.x)), int(round(self.y)))

  def floor(self):
    """Round `x` and `y` down to integers."""
    return Point(int(math.floor(self.x)), int(math.floor(self.y)))

  def ceil(self):
    """Round `x` and `y` up to integers."""
    return Point(int(math.ceil(self.x)), int(math.ceil(self.y)))

  def abs(self):
    """Take the absolute value of `x` and `y`."""
    return Point(abs(self.x), abs(self.y))

  def len(self):
    """Length of the vector from the origin to this point."""
    return math.sqrt(self.x**2 + self.y**2)

  def scale(self, target_len):
    """Scale the vector to have the target length."""
    return self * (target_len / self.len())

  def scale_max_size(self, max_size):
    """Scale this value, keeping aspect ratio, but fitting inside `max_size`."""
    return self * (max_size / self).min_dim()

  def scale_min_size(self, min_size):
    """Scale this value, keeping aspect ratio, but fitting around `min_size`."""
    return self * (min_size / self).max_dim()

  def min_dim(self):
    """The smaller of the two coordinates."""
    return self.x if self.x < self.y else self.y

  def max_dim(self):
    """The larger of the two coordinates."""
    return self.x if self.x > self.y else self.y

  def transpose(self):
    """Flip x and y."""
    return Point(self.y, self.x)

  def rotate_deg(self, angle):
    """Rotate counter-clockwise by `angle` degrees."""
    return self.rotate_rad(math.radians(angle))

  def rotate_rad(self, angle):
    """Rotate counter-clockwise by `angle` radians."""
    return Point(self.x * math.cos(angle) - self.y * math.sin(angle),
                 self.x * math.sin(angle) + self.y * math.cos(angle))

  def rotate_rand(self, angle=180):
    """Rotate by a random amount within +/- `angle` degrees."""
    return self.rotate_deg(random.uniform(-angle, angle))

  def contained_circle(self, pt, radius):
    """Is this point inside the circle defined by (`pt`, `radius`)?"""
    return self.dist(pt) < radius

  def bound(self, p1, p2=None):
    """Bound this point within the rect defined by (`p1`, `p2`)."""
    r = Rect(p1, p2)
    return Point(min(max(self.x, r.l), r.r), min(max(self.y, r.t), r.b))

  def __str__(self):
    fmt = "%d,%d" if all(isinstance(v, int) for v in self) else "%.6f,%.6f"
    return fmt % self

  def __neg__(self):
    return Point(-self.x, -self.y)

  def __add__(self, pt_or_val):
    if isinstance(pt_or_val, Point):
      return Point(self.x + pt_or_val.x, self.y + pt_or_val.y)
    return Point(self.x + pt_or_val, self.y + pt_or_val)

  def __sub__(self, pt_or_val):
    if isinstance(pt_or_val, Point):
      return Point(self.x - pt_or_val.x, self.y - pt_or_val.y)
    return Point(self.x - pt_or_val, self.y - pt_or_val)

  def __mul__(self, pt_or_val):
    if isinstance(pt_or_val, Point):
      return Point(self.x * pt_or_val.x, self.y * pt_or_val.y)
    return Point(self.x * pt_or_val, self.y * pt_or_val)

  def __truediv__(self, pt_or_val):
    if isinstance(pt_or_val, Point):
      return Point(self.x / pt_or_val.x, self.y / pt_or_val.y)
    return Point(self.x / pt_or_val, self.y / pt_or_val)

  def __floordiv__(self, pt_or_val):
    if isinstance(pt_or_val, Point):
      return Point(int(self.x // pt_or_val.x), int(self.y // pt_or_val.y))
    return Point(int(self.x // pt_or_val), int(self.y // pt_or_val))

  __div__ = __truediv__  # Legacy Python 2 alias.


origin = Point(0, 0)
class Rect(collections.namedtuple("Rect", ["t", "l", "b", "r"])):
  """A basic Rect class. Assumes tl <= br."""

  __slots__ = ()

  def __new__(cls, *args):
    """Create a Rect from 1 or 2 Points, or from 4 coords (t, l, b, r)."""
    if len(args) == 1 or (len(args) == 2 and args[1] is None):
      # A single point (or a point plus None) means a rect from the origin.
      args = (origin, args[0])
    if len(args) == 2:
      p1, p2 = args
      if not isinstance(p1, Point) or not isinstance(p2, Point):
        raise TypeError("Rect expected Points")
      # Normalize so (t, l) is always the min corner and (b, r) the max.
      return super(Rect, cls).__new__(
          cls,
          min(p1.y, p2.y),
          min(p1.x, p2.x),
          max(p1.y, p2.y),
          max(p1.x, p2.x))
    if len(args) == 4:
      if args[0] > args[2] or args[1] > args[3]:
        raise TypeError("Rect requires: t <= b and l <= r")
      # TODO(b/117657518): Remove the disable once the pytype bug is fixed.
      return super(Rect, cls).__new__(cls, *args)  # pytype: disable=missing-parameter
    raise TypeError(
        "Unexpected arguments to Rect. Takes 1 or 2 Points, or 4 coords.")

  def __str__(self):
    # Integer rects render without decimals; everything else as floats.
    if all(isinstance(v, int) for v in self):
      return "%d,%d,%d,%d" % self
    else:
      return "%.6f,%.6f,%.6f,%.6f" % self

  @property
  def center(self):
    """Center of the rect, as a Point."""
    return Point(self.l + self.r, self.t + self.b) / 2

  @property
  def top(self):
    return self.t

  @property
  def left(self):
    return self.l

  @property
  def bottom(self):
    return self.b

  @property
  def right(self):
    return self.r

  @property
  def width(self):
    return self.r - self.l

  @property
  def height(self):
    return self.b - self.t

  @property
  def tl(self):
    """Top-left corner as a Point."""
    return Point(self.l, self.t)

  @property
  def br(self):
    """Bottom-right corner as a Point."""
    return Point(self.r, self.b)

  @property
  def tr(self):
    """Top-right corner as a Point."""
    return Point(self.r, self.t)

  @property
  def bl(self):
    """Bottom-left corner as a Point."""
    return Point(self.l, self.b)

  @property
  def diagonal(self):
    """The (width, height) vector, equivalent to `size`."""
    return Point(self.width, self.height)

  @property
  def size(self):
    """The (width, height) vector, equivalent to `diagonal`."""
    return self.br - self.tl

  @property
  def area(self):
    size = self.size
    return size.x * size.y

  def round(self):
    return Rect(self.tl.round(), self.br.round())

  def floor(self):
    return Rect(self.tl.floor(), self.br.floor())

  def ceil(self):
    return Rect(self.tl.ceil(), self.br.ceil())

  def contains_point(self, pt):
    """Is the point inside this rect?"""
    return (self.l < pt.x and self.r > pt.x and
            self.t < pt.y and self.b > pt.y)

  def contains_circle(self, pt, radius):
    """Is the circle completely inside this rect?"""
    return (self.l < pt.x - radius and self.r > pt.x + radius and
            self.t < pt.y - radius and self.b > pt.y + radius)

  def intersects_circle(self, pt, radius):
    """Does the circle intersect with this rect?"""
    # How this works: http://stackoverflow.com/a/402010
    rect_corner = self.size / 2  # relative to the rect center
    circle_center = (pt - self.center).abs()  # relative to the rect center

    # Is the circle far from the rect?
    if (circle_center.x > rect_corner.x + radius or
        circle_center.y > rect_corner.y + radius):
      return False

    # Is the circle center inside the rect or near one of the edges?
    if (circle_center.x <= rect_corner.x or
        circle_center.y <= rect_corner.y):
      return True

    # Does the circle contain the corner of the rect?
    return circle_center.dist_sq(rect_corner) <= radius**2
| pysc2-master | pysc2/lib/point.py |
#!/usr/bin/python
# Copyright 2019 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for np_util.py."""
from absl.testing import absltest
from absl.testing import parameterized
import numpy as np
from pysc2.lib import np_util
class NpUtilTest(parameterized.TestCase):
  """Tests for np_util.summarize_array_diffs."""

  @parameterized.named_parameters(
      ("no_diff_1d", [1, 2, 3, 4], [1, 2, 3, 4], ""),
      ("no_diff_2d", [[1, 2], [3, 4]], [[1, 2], [3, 4]], ""),
      ("diff_1d", [1, 2, 3, 4], [1, 3, 2, 4],
       "2 element(s) changed - [1]: 2 -> 3; [2]: 3 -> 2"),
      ("diff_2d", [[1, 2], [3, 4]], [[1, 3], [2, 4]],
       "2 element(s) changed - [0][1]: 2 -> 3; [1][0]: 3 -> 2"))
  def testSummarizeArrayDiffs(self, lhs, rhs, expected):
    """Checks the human-readable summary for matching/differing arrays."""
    a = np.array(lhs)
    b = np.array(rhs)
    result = np_util.summarize_array_diffs(a, b)
    self.assertEqual(result, expected)


if __name__ == "__main__":
  absltest.main()
| pysc2-master | pysc2/lib/np_util_test.py |
#!/usr/bin/python
# Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for portspicker.py."""
from absl.testing import absltest
from absl.testing import parameterized
from pysc2.lib import portspicker
class PortsTest(parameterized.TestCase):
  """Tests for portspicker's port reservation functions."""

  @parameterized.parameters(range(1, 10))
  def testNonContiguousReservation(self, num_ports):
    """Reserving 1..9 ports should yield exactly that many, then free them."""
    reserved = portspicker.pick_unused_ports(num_ports)
    self.assertLen(reserved, num_ports)
    portspicker.return_ports(reserved)

  @parameterized.parameters(range(2, 5))
  def testContiguousReservation(self, num_ports):
    """Contiguous reservations of 2..4 ports should succeed, then free them."""
    reserved = portspicker.pick_contiguous_unused_ports(num_ports)
    self.assertLen(reserved, num_ports)
    portspicker.return_ports(reserved)

  def testInvalidReservation(self):
    """Requesting zero ports is rejected."""
    with self.assertRaises(ValueError):
      portspicker.pick_unused_ports(0)

  def testInvalidContiguousReservation(self):
    """Requesting zero contiguous ports is rejected."""
    with self.assertRaises(ValueError):
      portspicker.pick_contiguous_unused_ports(0)


if __name__ == "__main__":
  absltest.main()
| pysc2-master | pysc2/lib/portspicker_test.py |
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Give a crude ascii rendering of the feature_screen."""
from pysc2.lib import units
def get_printable_unit_types():
  """Build a map from unit type to a single printable character."""
  # Hand-picked characters for a few common units.
  types = {
      units.Protoss.Assimilator: "a",
      units.Protoss.Probe: "p",
      units.Protoss.Stalker: "s",
      units.Terran.SCV: "s",
      units.Terran.Marine: "m",
      units.Terran.SupplyDepot: "D",
      units.Terran.SupplyDepotLowered: "D",
  }

  # Neutral units are matched by name fragment (eg all mineral fields -> "$").
  neutral_chars = {
      "MineralField": "$",
      "VespeneGeyser": "&",
      "Collapsible": "@",
      "Debris": "@",
      "Destructible": "@",
      "Rock": "@",
  }
  for name, unit_type in units.Neutral.__members__.items():
    for fragment, char in neutral_chars.items():
      if fragment in name:
        types[unit_type] = char

  # Everything else falls back to the first letter of its name.
  for race in (units.Protoss, units.Terran, units.Zerg):
    for name, unit_type in race.__members__.items():
      types.setdefault(unit_type, name[0])

  return types
# Unit type -> printable char mapping, computed once at import time.
_printable_unit_types = get_printable_unit_types()

VISIBILITY = "#+."  # Fogged, seen, visible.
PLAYER_RELATIVE = ".SANE"  # background/none, self, allied, neutral, enemy.
def _summary(obs, view, width):
  """Build a dash-padded one-line header describing `obs` for `view`."""
  player = obs.player
  header = " %s: p%s; step: %s; money: %s, %s; food: %s/%s " % (
      view, player.player_id, obs.game_loop[0], player.minerals,
      player.vespene, player.food_used, player.food_cap)
  # Pad with at least 3 dashes on each side, but fill to `width` if wider.
  pad_to = max(len(header) + 6, width)
  return header.center(pad_to, "-")
def screen(obs):
  """Give a crude ascii rendering of feature_screen."""
  unit_type = obs.feature_screen.unit_type
  selected = obs.feature_screen.selected
  visibility = obs.feature_screen.visibility_map
  max_y, max_x = unit_type.shape
  rows = [_summary(obs, "screen", max_y * 2)]
  for y in range(max_y):
    row = []
    in_selection = False
    for x in range(max_x):
      sel = bool(selected[y, x])
      # Wrap runs of selected units in parentheses.
      if in_selection and not sel:
        row.append(")")
      elif sel and not in_selection:
        row.append("(")
      else:
        row.append(" ")
      unit = unit_type[y, x]
      if unit:
        # Unknown unit types fall back to their numeric id.
        row.append(_printable_unit_types.get(unit, str(unit)))
      else:
        row.append(VISIBILITY[visibility[y, x]])
      in_selection = sel
    if in_selection:
      row.append(")")
    rows.append("".join(row))
  return "\n".join(rows) + "\n"
def minimap(obs):
  """Give a crude ascii rendering of feature_minimap."""
  player = obs.feature_minimap.player_relative
  selected = obs.feature_minimap.selected
  visibility = obs.feature_minimap.visibility_map
  max_y, max_x = visibility.shape
  rows = [_summary(obs, "minimap", max_y * 2)]
  for y in range(max_y):
    row = []
    in_selection = False
    for x in range(max_x):
      sel = bool(selected[y, x])
      # Wrap runs of selected units in parentheses.
      if in_selection and not sel:
        row.append(")")
      elif sel and not in_selection:
        row.append("(")
      else:
        row.append(" ")
      vis = visibility[y, x]
      if vis:
        row.append(PLAYER_RELATIVE[player[y, x]])
      else:
        row.append(VISIBILITY[vis])
      in_selection = sel
    if in_selection:
      row.append(")")
    rows.append("".join(row))
  return "\n".join(rows) + "\n"
| pysc2-master | pysc2/lib/renderer_ascii.py |
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Expose static data in a more useful form than the raw protos."""
class StaticData(object):
  """Expose static data in a more useful form than the raw protos."""

  def __init__(self, data):
    """Takes data from RequestData."""
    self._units = {unit.unit_id: unit.name for unit in data.units}
    self._unit_stats = {unit.unit_id: unit for unit in data.units}
    self._upgrades = {up.upgrade_id: up for up in data.upgrades}
    self._abilities = {ab.ability_id: ab for ab in data.abilities}
    # Ability ids that at least one specific ability remaps to.
    self._general_abilities = {ab.remaps_to_ability_id
                               for ab in data.abilities
                               if ab.remaps_to_ability_id}

    # Normalize hotkeys to lowercase.
    for ability in self._abilities.values():
      ability.hotkey = ability.hotkey.lower()

  @property
  def abilities(self):
    """Maps ability_id -> ability proto (hotkeys lowercased)."""
    return self._abilities

  @property
  def upgrades(self):
    """Maps upgrade_id -> upgrade proto."""
    return self._upgrades

  @property
  def units(self):
    """Maps unit_id -> unit name."""
    return self._units

  @property
  def unit_stats(self):
    """Maps unit_id -> full unit proto."""
    return self._unit_stats

  @property
  def general_abilities(self):
    """Set of ability ids that specific abilities remap to."""
    return self._general_abilities
# List of used/available abilities found by parsing replays.
# Ids correspond to AbilityData.ability_id in the s2client protos.
ABILITIES = [
    1, 4, 6, 7, 16, 17, 18, 19, 23, 26, 28, 30, 32, 36, 38, 42, 44, 46, 74, 76,
    78, 80, 110, 140, 142, 144, 146, 148, 150, 152, 154, 156, 158, 160, 162,
    164, 166, 167, 169, 171, 173, 174, 181, 195, 199, 203, 207, 211, 212, 216,
    217, 247, 249, 250, 251, 253, 255, 261, 263, 265, 295, 296, 298, 299, 304,
    305, 306, 307, 308, 309, 312, 313, 314, 315, 316, 318, 319, 320, 321, 322,
    323, 324, 326, 327, 328, 329, 331, 333, 348, 380, 382, 383, 386, 388, 390,
    392, 393, 394, 396, 397, 399, 401, 403, 405, 407, 408, 410, 413, 415, 416,
    417, 419, 421, 422, 451, 452, 454, 455, 484, 485, 487, 488, 517, 518, 520,
    522, 524, 554, 556, 558, 560, 561, 562, 563, 591, 594, 595, 596, 597, 614,
    620, 621, 622, 623, 624, 626, 650, 651, 652, 653, 654, 655, 656, 657, 658,
    710, 730, 731, 732, 761, 764, 766, 768, 769, 790, 793, 799, 803, 804, 805,
    820, 822, 855, 856, 857, 861, 862, 863, 864, 865, 866, 880, 881, 882, 883,
    884, 885, 886, 887, 889, 890, 891, 892, 893, 894, 895, 911, 913, 914, 916,
    917, 919, 920, 921, 922, 946, 948, 950, 954, 955, 976, 977, 978, 979, 994,
    1006, 1036, 1038, 1039, 1042, 1062, 1063, 1064, 1065, 1066, 1067, 1068,
    1069, 1070, 1093, 1094, 1097, 1126, 1152, 1154, 1155, 1156, 1157, 1158,
    1159, 1160, 1161, 1162, 1163, 1165, 1166, 1167, 1183, 1184, 1186, 1187,
    1188, 1189, 1190, 1191, 1192, 1193, 1194, 1216, 1217, 1218, 1219, 1220,
    1221, 1223, 1225, 1252, 1253, 1282, 1283, 1312, 1313, 1314, 1315, 1316,
    1317, 1342, 1343, 1344, 1345, 1346, 1348, 1351, 1352, 1353, 1354, 1356,
    1372, 1373, 1374, 1376, 1378, 1380, 1382, 1384, 1386, 1388, 1390, 1392,
    1394, 1396, 1406, 1408, 1409, 1413, 1414, 1416, 1417, 1418, 1419, 1433,
    1435, 1437, 1438, 1440, 1442, 1444, 1446, 1448, 1449, 1450, 1451, 1454,
    1455, 1482, 1512, 1514, 1516, 1517, 1518, 1520, 1522, 1524, 1526, 1528,
    1530, 1532, 1562, 1563, 1564, 1565, 1566, 1567, 1568, 1592, 1593, 1594,
    1622, 1623, 1628, 1632, 1664, 1682, 1683, 1684, 1691, 1692, 1693, 1694,
    1725, 1727, 1729, 1730, 1731, 1732, 1733, 1763, 1764, 1766, 1768, 1819,
    1825, 1831, 1832, 1833, 1834, 1847, 1848, 1853, 1974, 1978, 1998, 2014,
    2016, 2048, 2057, 2063, 2067, 2073, 2081, 2082, 2095, 2097, 2099, 2108,
    2110, 2112, 2113, 2114, 2116, 2146, 2162, 2244, 2324, 2328, 2330, 2331,
    2332, 2333, 2338, 2340, 2342, 2346, 2350, 2354, 2358, 2362, 2364, 2365,
    2368, 2370, 2371, 2373, 2375, 2376, 2387, 2389, 2391, 2393, 2505, 2535,
    2542, 2544, 2550, 2552, 2558, 2560, 2588, 2594, 2596, 2700, 2704, 2708,
    2709, 2714, 2720, 3707, 3709, 3739, 3741, 3743, 3745, 3747, 3749, 3751,
    3753, 3755, 3757, 3765, 3771, 3776, 3777, 3778, 3783,
]
# List of known unit types. It is generated by parsing replays and from:
# https://github.com/Blizzard/s2client-api/blob/master/include/sc2api/sc2_typeenums.h
UNIT_TYPES = [
    4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23,
    24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42,
    43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61,
    62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80,
    81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99,
    100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114,
    115, 116, 117, 118, 119, 120, 125, 126, 127, 128, 129, 130, 131, 132, 133,
    134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 149,
    150, 151, 268, 289, 311, 321, 322, 324, 330, 335, 336, 341, 342, 343, 344,
    350, 364, 365, 371, 372, 373, 376, 377, 472, 473, 474, 475, 483, 484, 485,
    486, 487, 488, 489, 490, 493, 494, 495, 496, 498, 499, 500, 501, 502, 503,
    504, 517, 518, 559, 560, 561, 562, 563, 564, 588, 589, 590, 591, 608, 609,
    610, 612, 628, 629, 630, 638, 639, 640, 641, 642, 643, 648, 649, 651, 661,
    662, 663, 664, 665, 666, 687, 688, 689, 690, 691, 692, 693, 694, 732, 733,
    734, 796, 797, 801, 824, 830, 877, 880, 881, 884, 885, 886, 887, 892, 893,
    894, 1904, 1908, 1910, 1911, 1912, 1913, 1955, 1956, 1957, 1958, 1960, 1961,
]
# List of used buffs found by parsing replays.
# Ids correspond to BuffData.buff_id in the s2client protos.
BUFFS = [
    5, 6, 7, 8, 11, 12, 13, 16, 17, 18, 22, 24, 25, 27, 28, 29, 30, 33, 36, 38,
    49, 59, 83, 89, 99, 102, 116, 121, 122, 129, 132, 133, 134, 136, 137, 145,
    271, 272, 273, 274, 275, 277, 279, 280, 281, 288, 289,
]
# List of used upgrades found by parsing replays.
# Ids correspond to UpgradeData.upgrade_id in the s2client protos.
UPGRADES = [
    1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 15, 16, 17, 19, 20, 22, 25, 30,
    31, 32, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 52, 53,
    54, 55, 56, 57, 58, 59, 60, 61, 62, 64, 65, 66, 68, 69, 70, 71, 72, 73, 74,
    75, 76, 78, 79, 80, 81, 82, 83, 84, 86, 87, 88, 99, 101, 116, 117, 118, 122,
    130, 134, 135, 136, 139, 140, 141, 144, 289, 291, 293, 296,
]
| pysc2-master | pysc2/lib/static_data.py |
#!/usr/bin/python
# Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for replays."""
import io
import json
import mpyq
from pysc2.run_configs import lib as run_configs_lib
def get_replay_version(replay_data):
  """Returns the game `Version` that generated the given replay bytes.

  Args:
    replay_data: The bytes of an SC2Replay file (an MPQ archive).

  Returns:
    A `run_configs_lib.Version` describing the game build, with `binary=None`.
  """
  buffer = io.BytesIO(replay_data)
  extracted = mpyq.MPQArchive(buffer).extract()
  raw_metadata = extracted[b"replay.gamemetadata.json"].decode("utf-8")
  metadata = json.loads(raw_metadata)
  # "GameVersion" has one more dotted component than we want; drop the last.
  game_version = ".".join(metadata["GameVersion"].split(".")[:-1])
  # "BaseBuild" is of the form "Base<build>"; strip the 4-char prefix.
  build_version = int(metadata["BaseBuild"][4:])
  return run_configs_lib.Version(
      game_version=game_version,
      build_version=build_version,
      data_version=metadata.get("DataVersion"),  # Only in replays version 4.1+.
      binary=None)
| pysc2-master | pysc2/lib/replay.py |
#!/usr/bin/python
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for features."""
import copy
import pickle
from absl.testing import absltest
from absl.testing import parameterized
import numpy
from pysc2.lib import actions
from pysc2.lib import features
from pysc2.lib import point
from google.protobuf import text_format
from s2clientprotocol import sc2api_pb2 as sc_pb
# Heavily trimmed, so this is useful for testing actions, but not observations.
observation_text_proto = """
player_common {
player_id: 1
minerals: 0
vespene: 0
food_cap: 10
food_used: 0
food_army: 0
food_workers: 0
idle_worker_count: 0
army_count: 0
warp_gate_count: 0
larva_count: 0
}
game_loop: 20
"""
RECTANGULAR_DIMENSIONS = features.Dimensions(screen=(84, 80), minimap=(64, 67))
SQUARE_DIMENSIONS = features.Dimensions(screen=84, minimap=64)
class AvailableActionsTest(absltest.TestCase):
  """Tests `Features.available_actions` against hand-built observations."""

  # Functions that are available regardless of the observation's contents.
  always_expected = {
      "no_op", "move_camera", "select_point", "select_rect",
      "select_control_group"
  }

  def setUp(self):
    super(AvailableActionsTest, self).setUp()
    self.obs = text_format.Parse(observation_text_proto, sc_pb.Observation())
    self.hideSpecificActions(True)

  def hideSpecificActions(self, hide_specific_actions):  # pylint: disable=invalid-name
    # Rebuilds self.features with the given hide_specific_actions setting.
    self.features = features.Features(features.AgentInterfaceFormat(
        feature_dimensions=RECTANGULAR_DIMENSIONS,
        hide_specific_actions=hide_specific_actions))

  def assertAvail(self, expected):
    # Asserts the available function names are exactly `expected` plus the
    # always-available set.
    actual = self.features.available_actions(self.obs)
    actual_names = {actions.FUNCTIONS[i].name for i in actual}
    self.assertEqual(actual_names, set(expected) | self.always_expected)

  def testAlways(self):
    self.assertAvail([])

  def testSelectUnit(self):
    self.obs.ui_data.multi.units.add(unit_type=1)
    self.assertAvail(["select_unit"])

  def testSelectIdleWorkder(self):
    # NOTE(review): method name has a typo ("Workder"); kept so the test id
    # stays stable.
    self.obs.player_common.idle_worker_count = 1
    self.assertAvail(["select_idle_worker"])

  def testSelectArmy(self):
    self.obs.player_common.army_count = 3
    self.assertAvail(["select_army"])

  def testSelectWarpGates(self):
    self.obs.player_common.warp_gate_count = 1
    self.assertAvail(["select_warp_gates"])

  def testSelectLarva(self):
    self.obs.player_common.larva_count = 2
    self.assertAvail(["select_larva"])

  def testQuick(self):
    self.obs.abilities.add(ability_id=32)
    self.assertAvail(["Effect_Salvage_quick"])

  def testScreen(self):
    self.obs.abilities.add(ability_id=326, requires_point=True)
    self.assertAvail(["Build_SensorTower_screen"])

  def testScreenMinimap(self):
    self.obs.abilities.add(ability_id=17, requires_point=True)
    self.assertAvail(["Patrol_screen", "Patrol_minimap"])

  def testScreenAutocast(self):
    self.obs.abilities.add(ability_id=386, requires_point=True)
    self.assertAvail(["Effect_Heal_screen", "Effect_Heal_autocast"])

  def testScreenQuick(self):
    a = self.obs.abilities.add(ability_id=421)

    self.hideSpecificActions(True)
    a.requires_point = False
    self.assertAvail(["Build_TechLab_quick"])
    a.requires_point = True
    self.assertAvail(["Build_TechLab_screen"])

    self.hideSpecificActions(False)
    a.requires_point = False
    self.assertAvail(["Build_TechLab_Barracks_quick", "Build_TechLab_quick"])
    a.requires_point = True
    self.assertAvail(["Build_TechLab_Barracks_screen", "Build_TechLab_screen"])

  def testGeneral(self):
    self.obs.abilities.add(ability_id=1374)
    self.hideSpecificActions(False)
    self.assertAvail(["BurrowDown_quick", "BurrowDown_Baneling_quick"])
    self.hideSpecificActions(True)
    self.assertAvail(["BurrowDown_quick"])

  def testGeneralType(self):
    a = self.obs.abilities.add(ability_id=1376)
    self.hideSpecificActions(False)
    self.assertAvail(["BurrowUp_quick", "BurrowUp_Baneling_quick",
                      "BurrowUp_autocast", "BurrowUp_Baneling_autocast"])
    self.hideSpecificActions(True)
    self.assertAvail(["BurrowUp_quick", "BurrowUp_autocast"])

    a.ability_id = 2110
    self.hideSpecificActions(False)
    self.assertAvail(["BurrowUp_quick", "BurrowUp_Lurker_quick"])
    self.hideSpecificActions(True)
    self.assertAvail(["BurrowUp_quick"])

  def testMany(self):
    add = [
        (23, True),   # Attack
        (318, True),  # Build_CommandCenter
        (320, True),  # Build_Refinery
        (319, True),  # Build_SupplyDepot
        (316, True),  # Effect_Repair_SCV
        (295, True),  # Harvest_Gather_SCV
        (16, True),   # Move
        (17, True),   # Patrol
        (4, False),   # Stop
    ]
    for a, r in add:
      self.obs.abilities.add(ability_id=a, requires_point=r)
    self.hideSpecificActions(False)
    self.assertAvail([
        "Attack_Attack_minimap",
        "Attack_Attack_screen",
        "Attack_minimap",
        "Attack_screen",
        "Build_CommandCenter_screen",
        "Build_Refinery_screen",
        "Build_SupplyDepot_screen",
        "Effect_Repair_screen",
        "Effect_Repair_autocast",
        "Effect_Repair_SCV_autocast",
        "Effect_Repair_SCV_screen",
        "Harvest_Gather_screen",
        "Harvest_Gather_SCV_screen",
        "Move_minimap",
        "Move_screen",
        "Move_Move_minimap",
        "Move_Move_screen",
        "Patrol_minimap",
        "Patrol_screen",
        "Patrol_Patrol_minimap",
        "Patrol_Patrol_screen",
        "Stop_quick",
        "Stop_Stop_quick"
    ])
    self.hideSpecificActions(True)
    self.assertAvail([
        "Attack_minimap",
        "Attack_screen",
        "Build_CommandCenter_screen",
        "Build_Refinery_screen",
        "Build_SupplyDepot_screen",
        "Effect_Repair_screen",
        "Effect_Repair_autocast",
        "Harvest_Gather_screen",
        "Move_minimap",
        "Move_screen",
        "Patrol_minimap",
        "Patrol_screen",
        "Stop_quick",
    ])
class ToPointTest(absltest.TestCase):
  """Tests input coercion and validation in `features._to_point`."""

  def testIntAsString(self):
    value = features._to_point("32")
    self.assertEqual(value, point.Point(32, 32))

  def testIntStringTwoTuple(self):
    value = features._to_point(("32", 64))
    self.assertEqual(value, point.Point(32, 64))

  def testNoneInputReturnsNoneOutput(self):
    # NOTE(review): name is stale — None input asserts rather than returning
    # None; kept so the test id stays stable.
    with self.assertRaises(AssertionError):
      features._to_point(None)

  def testNoneAsFirstElementOfTupleRaises(self):
    with self.assertRaises(TypeError):
      features._to_point((None, 32))

  def testNoneAsSecondElementOfTupleRaises(self):
    with self.assertRaises(TypeError):
      features._to_point((32, None))

  def testSingletonTupleRaises(self):
    with self.assertRaises(ValueError):
      features._to_point((32,))

  def testThreeTupleRaises(self):
    with self.assertRaises(ValueError):
      features._to_point((32, 32, 32))
class DimensionsTest(absltest.TestCase):
  """Tests validation and equality of `features.Dimensions`."""

  def testScreenSizeWithoutMinimapRaises(self):
    with self.assertRaises(ValueError):
      features.Dimensions(screen=84)

  def testScreenWidthWithoutHeightRaises(self):
    with self.assertRaises(ValueError):
      features.Dimensions(screen=(84, 0), minimap=64)

  def testScreenWidthHeightWithoutMinimapRaises(self):
    with self.assertRaises(ValueError):
      features.Dimensions(screen=(84, 80))

  def testMinimapWidthAndHeightWithoutScreenRaises(self):
    with self.assertRaises(ValueError):
      features.Dimensions(minimap=(64, 67))

  def testNoneNoneRaises(self):
    with self.assertRaises(ValueError):
      features.Dimensions(screen=None, minimap=None)

  def testSingularZeroesRaises(self):
    with self.assertRaises(ValueError):
      features.Dimensions(screen=0, minimap=0)

  def testTwoZeroesRaises(self):
    with self.assertRaises(ValueError):
      features.Dimensions(screen=(0, 0), minimap=(0, 0))

  def testThreeTupleScreenRaises(self):
    with self.assertRaises(ValueError):
      features.Dimensions(screen=(1, 2, 3), minimap=32)

  def testThreeTupleMinimapRaises(self):
    with self.assertRaises(ValueError):
      features.Dimensions(screen=64, minimap=(1, 2, 3))

  def testNegativeScreenRaises(self):
    with self.assertRaises(ValueError):
      features.Dimensions(screen=-64, minimap=32)

  def testNegativeMinimapRaises(self):
    with self.assertRaises(ValueError):
      features.Dimensions(screen=64, minimap=-32)

  def testNegativeScreenTupleRaises(self):
    with self.assertRaises(ValueError):
      features.Dimensions(screen=(-64, -64), minimap=32)

  def testNegativeMinimapTupleRaises(self):
    with self.assertRaises(ValueError):
      features.Dimensions(screen=64, minimap=(-32, -32))

  def testEquality(self):
    self.assertEqual(features.Dimensions(screen=64, minimap=64),
                     features.Dimensions(screen=64, minimap=64))
    self.assertNotEqual(features.Dimensions(screen=64, minimap=64),
                        features.Dimensions(screen=64, minimap=32))
    self.assertNotEqual(features.Dimensions(screen=64, minimap=64), None)
class TestParseAgentInterfaceFormat(parameterized.TestCase):
  """Tests for `features.parse_agent_interface_format`."""

  def test_no_arguments_raises(self):
    with self.assertRaises(ValueError):
      features.parse_agent_interface_format()

  @parameterized.parameters((32, None), (None, 32))
  def test_invalid_feature_combinations_raise(self, screen, minimap):
    # Feature screen and minimap must be specified together.
    with self.assertRaises(ValueError):
      features.parse_agent_interface_format(
          feature_screen=screen,
          feature_minimap=minimap)

  def test_valid_feature_specification_is_parsed(self):
    agent_interface_format = features.parse_agent_interface_format(
        feature_screen=32,
        feature_minimap=(24, 24))
    self.assertEqual(
        agent_interface_format.feature_dimensions.screen,
        point.Point(32, 32))
    self.assertEqual(
        agent_interface_format.feature_dimensions.minimap,
        point.Point(24, 24))

  @parameterized.parameters((32, None), (None, 32), (32, 64))
  def test_invalid_minimap_combinations_raise(self, screen, minimap):
    # RGB minimap must be specified with, and no larger than, the screen.
    with self.assertRaises(ValueError):
      features.parse_agent_interface_format(
          rgb_screen=screen,
          rgb_minimap=minimap)

  def test_valid_minimap_specification_is_parsed(self):
    agent_interface_format = features.parse_agent_interface_format(
        rgb_screen=32,
        rgb_minimap=(24, 24))
    self.assertEqual(
        agent_interface_format.rgb_dimensions.screen,
        point.Point(32, 32))
    self.assertEqual(
        agent_interface_format.rgb_dimensions.minimap,
        point.Point(24, 24))

  def test_invalid_action_space_raises(self):
    with self.assertRaises(KeyError):
      features.parse_agent_interface_format(
          feature_screen=64,
          feature_minimap=64,
          action_space="UNKNOWN_ACTION_SPACE")

  @parameterized.parameters(actions.ActionSpace.__members__.keys())
  def test_valid_action_space_is_parsed(self, action_space):
    agent_interface_format = features.parse_agent_interface_format(
        feature_screen=32,
        feature_minimap=(24, 24),
        rgb_screen=64,
        rgb_minimap=(48, 48),
        use_raw_units=True,
        action_space=action_space)
    self.assertEqual(
        agent_interface_format.action_space,
        actions.ActionSpace[action_space])

  def test_camera_width_world_units_are_parsed(self):
    agent_interface_format = features.parse_agent_interface_format(
        feature_screen=32,
        feature_minimap=(24, 24),
        camera_width_world_units=77)
    self.assertEqual(agent_interface_format.camera_width_world_units, 77)

  def test_use_feature_units_is_parsed(self):
    agent_interface_format = features.parse_agent_interface_format(
        feature_screen=32,
        feature_minimap=(24, 24),
        use_feature_units=True)
    self.assertEqual(agent_interface_format.use_feature_units, True)
class FeaturesTest(absltest.TestCase):
  """Tests for `features.Features` specs and action transforms."""

  def testFunctionsIdsAreConsistent(self):
    for i, f in enumerate(actions.FUNCTIONS):
      self.assertEqual(i, f.id, "id doesn't match for %s" % f.id)

  def testAllVersionsOfAnAbilityHaveTheSameGeneral(self):
    for ability_id, funcs in actions.ABILITY_IDS.items():
      self.assertLen({f.general_id for f in funcs}, 1,
                     "Multiple generals for %s" % ability_id)

  def testValidFunctionsAreConsistent(self):
    feats = features.Features(features.AgentInterfaceFormat(
        feature_dimensions=RECTANGULAR_DIMENSIONS))

    valid_funcs = feats.action_spec()
    for func_def in valid_funcs.functions:
      func = actions.FUNCTIONS[func_def.id]
      self.assertEqual(func_def.id, func.id)
      self.assertEqual(func_def.name, func.name)
      self.assertEqual(len(func_def.args), len(func.args))  # pylint: disable=g-generic-assert

  def gen_random_function_call(self, action_spec, func_id):
    # Helper: a FunctionCall for `func_id` with uniformly random arguments.
    args = [[numpy.random.randint(0, size) for size in arg.sizes]  # pylint: disable=g-complex-comprehension
            for arg in action_spec.functions[func_id].args]
    return actions.FunctionCall(func_id, args)

  def testIdsMatchIndex(self):
    feats = features.Features(features.AgentInterfaceFormat(
        feature_dimensions=RECTANGULAR_DIMENSIONS))
    action_spec = feats.action_spec()
    for func_index, func_def in enumerate(action_spec.functions):
      self.assertEqual(func_index, func_def.id)
    for type_index, type_def in enumerate(action_spec.types):
      self.assertEqual(type_index, type_def.id)

  def testReversingUnknownAction(self):
    feats = features.Features(features.AgentInterfaceFormat(
        feature_dimensions=RECTANGULAR_DIMENSIONS,
        hide_specific_actions=False))
    sc2_action = sc_pb.Action()
    sc2_action.action_feature_layer.unit_command.ability_id = 6  # Cheer
    func_call = feats.reverse_action(sc2_action)
    self.assertEqual(func_call.function, 0)  # No-op

  def testSpecificActionsAreReversible(self):
    """Test that the `transform_action` and `reverse_action` are inverses."""
    feats = features.Features(features.AgentInterfaceFormat(
        feature_dimensions=RECTANGULAR_DIMENSIONS,
        hide_specific_actions=False))
    action_spec = feats.action_spec()

    for func_def in action_spec.functions:
      for _ in range(10):
        func_call = self.gen_random_function_call(action_spec, func_def.id)

        sc2_action = feats.transform_action(
            None, func_call, skip_available=True)
        func_call2 = feats.reverse_action(sc2_action)
        sc2_action2 = feats.transform_action(
            None, func_call2, skip_available=True)
        if func_def.id == actions.FUNCTIONS.select_rect.id:
          # Need to check this one manually since the same rect can be
          # defined in multiple ways.
          def rect(a):
            return point.Rect(point.Point(*a[1]).floor(),
                              point.Point(*a[2]).floor())

          self.assertEqual(func_call.function, func_call2.function)
          self.assertEqual(len(func_call.arguments), len(func_call2.arguments))  # pylint: disable=g-generic-assert
          self.assertEqual(func_call.arguments[0], func_call2.arguments[0])
          self.assertEqual(rect(func_call.arguments),
                           rect(func_call2.arguments))
        else:
          self.assertEqual(func_call, func_call2, msg=sc2_action)
        self.assertEqual(sc2_action, sc2_action2)

  def testRawActionUnitTags(self):
    feats = features.Features(
        features.AgentInterfaceFormat(
            use_raw_units=True,
            action_space=actions.ActionSpace.RAW),
        map_size=point.Point(100, 100))

    tags = [numpy.random.randint(2**20, 2**24) for _ in range(10)]
    ntags = numpy.array(tags, dtype=numpy.int64)
    tag = tags[0]
    ntag = numpy.array(tag, dtype=numpy.int64)

    def transform(fn, *args):
      # Helper: transform a raw function call, returning the unit_command.
      func_call = actions.RAW_FUNCTIONS[fn]("now", *args)
      proto = feats.transform_action(None, func_call, skip_available=True)
      return proto.action_raw.unit_command

    self.assertEqual(transform("Attack_pt", tag, [15, 20]).unit_tags, [tag])
    self.assertEqual(transform("Attack_pt", ntag, [15, 20]).unit_tags, [tag])
    self.assertEqual(transform("Attack_pt", [tag], [15, 20]).unit_tags, [tag])
    self.assertEqual(transform("Attack_pt", [ntag], [15, 20]).unit_tags, [tag])
    self.assertEqual(transform("Attack_pt", tags, [15, 20]).unit_tags, tags)
    self.assertEqual(transform("Attack_pt", ntags, [15, 20]).unit_tags, tags)
    # Weird, but needed for backwards compatibility
    self.assertEqual(transform("Attack_pt", [tags], [15, 20]).unit_tags, tags)
    self.assertEqual(transform("Attack_pt", [ntags], [15, 20]).unit_tags, tags)

    self.assertEqual(transform("Attack_unit", tag, tag).target_unit_tag, tag)
    self.assertEqual(transform("Attack_unit", tag, ntag).target_unit_tag, tag)
    self.assertEqual(transform("Attack_unit", tag, [tag]).target_unit_tag, tag)
    self.assertEqual(transform("Attack_unit", tag, [ntag]).target_unit_tag, tag)

  def testCanPickleSpecs(self):
    feats = features.Features(features.AgentInterfaceFormat(
        feature_dimensions=SQUARE_DIMENSIONS))
    action_spec = feats.action_spec()
    observation_spec = feats.observation_spec()

    self.assertEqual(action_spec, pickle.loads(pickle.dumps(action_spec)))
    self.assertEqual(observation_spec,
                     pickle.loads(pickle.dumps(observation_spec)))

  def testCanPickleFunctionCall(self):
    func = actions.FUNCTIONS.select_point("select", [1, 2])
    self.assertEqual(func, pickle.loads(pickle.dumps(func)))

  def testCanDeepcopyNumpyFunctionCall(self):
    arguments = [numpy.float32] * len(actions.Arguments._fields)
    dtypes = actions.FunctionCall(
        function=numpy.float32,
        arguments=actions.Arguments(*arguments))
    self.assertEqual(dtypes, copy.deepcopy(dtypes))

  def testSizeConstructors(self):
    feats = features.Features(features.AgentInterfaceFormat(
        feature_dimensions=SQUARE_DIMENSIONS))
    spec = feats.action_spec()
    self.assertEqual(spec.types.screen.sizes, (84, 84))
    self.assertEqual(spec.types.screen2.sizes, (84, 84))
    self.assertEqual(spec.types.minimap.sizes, (64, 64))

    feats = features.Features(features.AgentInterfaceFormat(
        feature_dimensions=RECTANGULAR_DIMENSIONS))
    spec = feats.action_spec()
    self.assertEqual(spec.types.screen.sizes, (84, 80))
    self.assertEqual(spec.types.screen2.sizes, (84, 80))
    self.assertEqual(spec.types.minimap.sizes, (64, 67))

    # NOTE(review): this paragraph duplicates the one above verbatim.
    feats = features.Features(features.AgentInterfaceFormat(
        feature_dimensions=RECTANGULAR_DIMENSIONS))
    spec = feats.action_spec()
    self.assertEqual(spec.types.screen.sizes, (84, 80))
    self.assertEqual(spec.types.screen2.sizes, (84, 80))
    self.assertEqual(spec.types.minimap.sizes, (64, 67))

    # Missing one or the other of game_info and dimensions.
    with self.assertRaises(ValueError):
      features.Features()

    # Resolution/action space mismatch.
    with self.assertRaises(ValueError):
      features.Features(features.AgentInterfaceFormat(
          feature_dimensions=RECTANGULAR_DIMENSIONS,
          action_space=actions.ActionSpace.RGB))
    with self.assertRaises(ValueError):
      features.Features(features.AgentInterfaceFormat(
          rgb_dimensions=RECTANGULAR_DIMENSIONS,
          action_space=actions.ActionSpace.FEATURES))
    with self.assertRaises(ValueError):
      features.Features(features.AgentInterfaceFormat(
          feature_dimensions=RECTANGULAR_DIMENSIONS,
          rgb_dimensions=RECTANGULAR_DIMENSIONS))

  def testFlRgbActionSpec(self):
    feats = features.Features(features.AgentInterfaceFormat(
        feature_dimensions=RECTANGULAR_DIMENSIONS,
        rgb_dimensions=features.Dimensions(screen=(128, 132), minimap=(74, 77)),
        action_space=actions.ActionSpace.FEATURES))
    spec = feats.action_spec()
    self.assertEqual(spec.types.screen.sizes, (84, 80))
    self.assertEqual(spec.types.screen2.sizes, (84, 80))
    self.assertEqual(spec.types.minimap.sizes, (64, 67))

    feats = features.Features(features.AgentInterfaceFormat(
        feature_dimensions=RECTANGULAR_DIMENSIONS,
        rgb_dimensions=features.Dimensions(screen=(128, 132), minimap=(74, 77)),
        action_space=actions.ActionSpace.RGB))
    spec = feats.action_spec()
    self.assertEqual(spec.types.screen.sizes, (128, 132))
    self.assertEqual(spec.types.screen2.sizes, (128, 132))
    self.assertEqual(spec.types.minimap.sizes, (74, 77))

  def testFlRgbObservationSpec(self):
    feats = features.Features(features.AgentInterfaceFormat(
        feature_dimensions=RECTANGULAR_DIMENSIONS,
        rgb_dimensions=features.Dimensions(screen=(128, 132), minimap=(74, 77)),
        action_space=actions.ActionSpace.FEATURES))
    obs_spec = feats.observation_spec()
    self.assertEqual(obs_spec["feature_screen"],  # pylint: disable=g-generic-assert
                     (len(features.SCREEN_FEATURES), 80, 84))
    self.assertEqual(obs_spec["feature_minimap"],  # pylint: disable=g-generic-assert
                     (len(features.MINIMAP_FEATURES), 67, 64))
    self.assertEqual(obs_spec["rgb_screen"], (132, 128, 3))
    self.assertEqual(obs_spec["rgb_minimap"], (77, 74, 3))
# Run the tests via absltest when executed directly.
if __name__ == "__main__":
  absltest.main()
| pysc2-master | pysc2/lib/features_test.py |
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Transform coordinates for rendering in various ways.
It's best to name these as `a_to_b` for example `screen_to_world`. The
`fwd` methods take a point or distance in coordinate system `a` and
convert it to a point or distance in coordinate system `b`. The `back` methods
do the reverse going from `b` to `a`.
These can then be chained as b_to_c.fwd(a_to_b.fwd(pt)) which will take
something in `a` and return something in `c`. It's better to use the Chain
transform to create `a_to_c`.
"""
import numbers
from pysc2.lib import point
class Transform(object):
  """Base class for coordinate transforms.

  `fwd_*` maps a point/distance from the source coordinate system to the
  target; `back_*` maps it back. Subclasses must implement all four.
  """

  def fwd_dist(self, dist):
    raise NotImplementedError()

  def fwd_pt(self, pt):
    raise NotImplementedError()

  def back_dist(self, dist):
    raise NotImplementedError()

  def back_pt(self, pt):
    raise NotImplementedError()
class Linear(Transform):
  """A linear transform with a scale and offset.

  Forward maps `pt -> pt * scale + offset`; distances scale by the x factor.
  """

  def __init__(self, scale=None, offset=None):
    """Initializes the transform.

    Args:
      scale: None for identity, a scalar applied to both axes, or a Point of
        per-axis factors. Both axes must be non-zero.
      offset: A Point added after scaling, or None for no offset.
    """
    if scale is None:
      scale = point.Point(1, 1)
    elif isinstance(scale, numbers.Number):
      scale = point.Point(scale, scale)
    self.scale = scale
    assert self.scale.x != 0 and self.scale.y != 0
    self.offset = offset or point.Point(0, 0)

  def fwd_dist(self, dist):
    # Distances are scaled by the x-axis factor only.
    return dist * self.scale.x

  def fwd_pt(self, pt):
    return pt * self.scale + self.offset

  def back_dist(self, dist):
    return dist / self.scale.x

  def back_pt(self, pt):
    return (pt - self.offset) / self.scale

  def __str__(self):
    return "Linear(scale=%s, offset=%s)" % (self.scale, self.offset)
class Chain(Transform):
  """Chain a set of transforms: Chain(a_to_b, b_to_c) => a_to_c."""

  def __init__(self, *args):
    self.transforms = args

  def fwd_dist(self, dist):
    # Apply each stage in order.
    for stage in self.transforms:
      dist = stage.fwd_dist(dist)
    return dist

  def fwd_pt(self, pt):
    for stage in self.transforms:
      pt = stage.fwd_pt(pt)
    return pt

  def back_dist(self, dist):
    # Undo the stages in the opposite order.
    for stage in reversed(self.transforms):
      dist = stage.back_dist(dist)
    return dist

  def back_pt(self, pt):
    for stage in reversed(self.transforms):
      pt = stage.back_pt(pt)
    return pt

  def __str__(self):
    return "Chain(%s)" % (self.transforms,)
class PixelToCoord(Transform):
  """Take a point within a pixel and use the tl, or tl to pixel center."""

  def fwd_dist(self, dist):
    # Distances are unaffected.
    return dist

  def fwd_pt(self, pt):
    # Any point inside a pixel maps to the pixel's top-left corner.
    return pt.floor()

  def back_dist(self, dist):
    return dist

  def back_pt(self, pt):
    # A pixel maps back to its center.
    return pt.floor() + 0.5

  def __str__(self):
    return "PixelToCoord()"
| pysc2-master | pysc2/lib/transform.py |
# Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""portpicker for multiple ports."""
import time
import portpicker
# The set of ports handed out by pick_contiguous_unused_ports that were NOT
# reserved through the underlying portpicker (every port of a contiguous range
# except the first). return_ports releases these locally rather than handing
# them back to portpicker.
_contiguous_ports = set()
def pick_unused_ports(num_ports, retry_interval_secs=1, retry_attempts=5):
  """Reserves and returns a list of `num_ports` unused ports."""
  if num_ports <= 0:
    raise ValueError("Number of ports, must be >= 1, got: %s" % num_ports)
  ports = set()
  for _ in range(retry_attempts):
    missing = num_ports - len(ports)
    for _ in range(missing):
      ports.add(portpicker.pick_unused_port())
    ports.discard(None)  # portpicker returns None on error.
    if len(ports) == num_ports:
      return list(ports)
    # Duplicate ports can be returned, especially when insufficient ports are
    # free. Wait for more ports to be freed and retry.
    time.sleep(retry_interval_secs)

  # Could not obtain enough ports. Release what we do have.
  return_ports(ports)
  raise RuntimeError("Unable to obtain %d unused ports." % num_ports)
def pick_contiguous_unused_ports(
    num_ports,
    retry_interval_secs=1,
    retry_attempts=5):
  """Reserves and returns a list of `num_ports` contiguous unused ports."""
  if num_ports <= 0:
    raise ValueError("Number of ports, must be >= 1, got: %s" % num_ports)
  for _ in range(retry_attempts):
    start_port = portpicker.pick_unused_port()
    if start_port is not None:
      candidates = [start_port + offset for offset in range(num_ports)]
      if all(portpicker.is_port_free(candidate) for candidate in candidates):
        # Only the first port was reserved via portpicker; remember the rest
        # so return_ports knows not to hand them back to portpicker.
        _contiguous_ports.update(candidates[1:])
        return candidates
      portpicker.return_port(start_port)
    time.sleep(retry_interval_secs)

  raise RuntimeError("Unable to obtain %d contiguous unused ports." % num_ports)
def return_ports(ports):
  """Returns previously reserved ports so that may be reused."""
  for port in ports:
    # Ports reserved locally (contiguous ranges) are released here; everything
    # else goes back to portpicker.
    if port in _contiguous_ports:
      _contiguous_ports.remove(port)
    else:
      portpicker.return_port(port)
| pysc2-master | pysc2/lib/portspicker.py |
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Controllers take actions and generates observations in proto format."""
import copy
import functools
import socket
import sys
import time
from absl import flags
from absl import logging
from pysc2.lib import protocol
from pysc2.lib import static_data
from pysc2.lib import stopwatch
import websocket
from s2clientprotocol import debug_pb2 as sc_debug
from s2clientprotocol import sc2api_pb2 as sc_pb
flags.DEFINE_bool("sc2_log_actions", False,
                  ("Print all the actions sent to SC2. If you want observations"
                   " as well, consider using `sc2_verbose_protocol`."))
flags.DEFINE_integer("sc2_timeout", 360,
                     "Timeout to connect and wait for rpc responses.")
FLAGS = flags.FLAGS

# Shared stopwatch instance for profiling (see the stopwatch module).
sw = stopwatch.sw

# Re-export the protocol status enum for convenience.
Status = protocol.Status  # pylint: disable=invalid-name
class ConnectError(Exception):
  """Failed to establish the websocket connection to the SC2 process."""
  pass
class RequestError(Exception):
  """An SC2 response reported an error.

  Attributes:
    res: The full response that carried the error, for caller inspection.
  """

  def __init__(self, description, res):
    super().__init__(description)
    self.res = res
def check_error(res, error_enum):
  """Return `res` unchanged, or raise RequestError if it reports an error."""
  if not res.HasField("error"):
    return res
  enum_name = error_enum.DESCRIPTOR.full_name
  error_name = error_enum.Name(res.error)
  details = getattr(res, "error_details", "<none>")
  raise RequestError("%s.%s: '%s'" % (enum_name, error_name, details), res)
def decorate_check_error(error_enum):
  """Decorator to call `check_error` on the return value."""
  def decorator(func):
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
      res = func(*args, **kwargs)
      return check_error(res, error_enum)
    return wrapper
  return decorator
def skip_status(*skipped):
  """Decorator to skip this call if we're in one of the skipped states."""
  def decorator(func):
    @functools.wraps(func)
    def wrapper(self, *args, **kwargs):
      if self.status in skipped:
        return None  # Silently do nothing in a skipped state.
      return func(self, *args, **kwargs)
    return wrapper
  return decorator
def valid_status(*valid):
  """Decorator to assert that we're in a valid state."""
  def decorator(func):
    @functools.wraps(func)
    def wrapper(self, *args, **kwargs):
      if self.status in valid:
        return func(self, *args, **kwargs)
      raise protocol.ProtocolError(
          "`%s` called while in state: %s, valid: (%s)" % (
              func.__name__, self.status, ",".join(map(str, valid))))
    return wrapper
  return decorator
def catch_game_end(func):
  """Decorator to handle 'Game has already ended' exceptions."""
  @functools.wraps(func)
  def wrapper(self, *args, **kwargs):
    status_before = self.status
    try:
      return func(self, *args, **kwargs)
    except protocol.ProtocolError as protocol_error:
      is_spurious_game_end = (
          status_before == Status.in_game and
          "Game has already ended" in str(protocol_error))
      if not is_spurious_game_end:
        raise
      # It's currently possible for us to receive this error even though
      # our previous status was in_game. This shouldn't happen according
      # to the protocol. It does happen sometimes when we don't observe on
      # every step (possibly also requiring us to be playing against a
      # built-in bot). To work around the issue, we catch the exception
      # and so let the client code continue.
      logging.warning(
          "Received a 'Game has already ended' error from SC2 whilst status "
          "in_game. Suppressing the exception, returning None.")
      return None
  return wrapper
class RemoteController(object):
  """Implements a python interface to interact with the SC2 binary.

  All of these are implemented as blocking calls, so wait for the response
  before returning.

  Many of these functions take a Request* object and respond with the
  corresponding Response* object as returned from SC2. The simpler functions
  take a value and construct the Request itself, or return something more useful
  than a Response* object.
  """

  def __init__(self, host, port, proc=None, timeout_seconds=None):
    """Connect to SC2 at host:port, then verify the connection with a ping.

    Args:
      host: Hostname or IP address where SC2 is listening.
      port: Websocket port of the SC2 process.
      proc: Optional process handle, used to bail out early if SC2 dies.
      timeout_seconds: Connect/rpc timeout; defaults to --sc2_timeout.
    """
    timeout_seconds = timeout_seconds or FLAGS.sc2_timeout
    sock = self._connect(host, port, proc, timeout_seconds)
    self._client = protocol.StarcraftProtocol(sock)
    # Cache of the last full observation, used by observe() to fill in the
    # stub observation SC2 sends at game end.
    self._last_obs = None
    self.ping()

  @sw.decorate
  def _connect(self, host, port, proc, timeout_seconds):
    """Connect to the websocket, retrying as needed. Returns the socket."""
    if ":" in host and not host.startswith("["):  # Support ipv6 addresses.
      host = "[%s]" % host
    url = "ws://%s:%s/sc2api" % (host, port)

    was_running = False
    for i in range(timeout_seconds):
      is_running = proc and proc.running
      was_running = was_running or is_running
      # Bail out early if the SC2 process died, or never came up within the
      # first quarter of the timeout.
      if (i >= timeout_seconds // 4 or was_running) and not is_running:
        logging.warning(
            "SC2 isn't running, so bailing early on the websocket connection.")
        break
      logging.info("Connecting to: %s, attempt: %s, running: %s", url, i,
                   is_running)
      try:
        return websocket.create_connection(url, timeout=timeout_seconds)
      except socket.error:
        pass  # SC2 hasn't started listening yet.
      except websocket.WebSocketConnectionClosedException:
        raise ConnectError("Connection rejected. Is something else connected?")
      except websocket.WebSocketBadStatusException as err:
        if err.status_code == 404:
          pass  # SC2 is listening, but hasn't set up the /sc2api endpoint yet.
        else:
          raise
      time.sleep(1)
    raise ConnectError("Failed to connect to the SC2 websocket. Is it up?")

  def close(self):
    """Close the websocket connection to SC2."""
    self._client.close()

  @property
  def status_ended(self):
    """Whether the protocol reports the game has ended."""
    return self.status == protocol.Status.ended

  @valid_status(Status.launched, Status.ended, Status.in_game, Status.in_replay)
  @decorate_check_error(sc_pb.ResponseCreateGame.Error)
  @sw.decorate
  def create_game(self, req_create_game):
    """Create a new game. This can only be done by the host."""
    return self._client.send(create_game=req_create_game)

  @valid_status(Status.launched, Status.init_game)
  @decorate_check_error(sc_pb.ResponseSaveMap.Error)
  @sw.decorate
  def save_map(self, map_path, map_data):
    """Save a map into temp dir so create game can access it in multiplayer."""
    return self._client.send(save_map=sc_pb.RequestSaveMap(
        map_path=map_path, map_data=map_data))

  @valid_status(Status.launched, Status.init_game)
  @decorate_check_error(sc_pb.ResponseJoinGame.Error)
  @sw.decorate
  def join_game(self, req_join_game):
    """Join a game, done by all connected clients."""
    return self._client.send(join_game=req_join_game)

  @valid_status(Status.ended, Status.in_game)
  @decorate_check_error(sc_pb.ResponseRestartGame.Error)
  @sw.decorate
  def restart(self):
    """Restart the game. Only done by the host."""
    return self._client.send(restart_game=sc_pb.RequestRestartGame())

  @valid_status(Status.launched, Status.ended, Status.in_game, Status.in_replay)
  @decorate_check_error(sc_pb.ResponseStartReplay.Error)
  @sw.decorate
  def start_replay(self, req_start_replay):
    """Start a replay."""
    return self._client.send(start_replay=req_start_replay)

  @valid_status(Status.in_game, Status.in_replay)
  @sw.decorate
  def game_info(self):
    """Get the basic information about the game."""
    return self._client.send(game_info=sc_pb.RequestGameInfo())

  @valid_status(Status.in_game, Status.in_replay)
  @sw.decorate
  def data_raw(self, ability_id=True, unit_type_id=True, upgrade_id=True,
               buff_id=True, effect_id=True):
    """Get the raw static data for the current game. Prefer `data` instead."""
    return self._client.send(data=sc_pb.RequestData(
        ability_id=ability_id, unit_type_id=unit_type_id, upgrade_id=upgrade_id,
        buff_id=buff_id, effect_id=effect_id))

  def data(self):
    """Get the static data for the current game."""
    return static_data.StaticData(self.data_raw())

  @valid_status(Status.in_game, Status.in_replay, Status.ended)
  @sw.decorate
  def observe(self, disable_fog=False, target_game_loop=0):
    """Get a current observation.

    Args:
      disable_fog: Whether to request the observation with fog of war removed.
      target_game_loop: The game loop to observe at; 0 means "now".

    Returns:
      A `sc_pb.ResponseObservation`. A stub end-of-game observation (marked
      by game_loop == uint32 max) is replaced by a copy of the previous
      observation with the stub's actions and player_result merged in.
    """
    obs = self._client.send(observation=sc_pb.RequestObservation(
        game_loop=target_game_loop,
        disable_fog=disable_fog))

    # game_loop of 2**32 - 1 is SC2's marker for a "stub" observation.
    if obs.observation.game_loop == 2**32 - 1:
      logging.info("Received stub observation.")

      if not obs.player_result:
        raise ValueError("Expect a player result in a stub observation")
      elif self._last_obs is None:
        raise RuntimeError("Received stub observation with no previous obs")

      # Rather than handling empty obs through the code, regurgitate the last
      # observation (+ player result, sub actions).
      new_obs = copy.deepcopy(self._last_obs)
      del new_obs.actions[:]
      new_obs.actions.extend(obs.actions)
      new_obs.player_result.extend(obs.player_result)
      obs = new_obs
      self._last_obs = None
    else:
      self._last_obs = obs

    if FLAGS.sc2_log_actions and obs.actions:
      sys.stderr.write(" Executed actions ".center(60, "<") + "\n")
      for action in obs.actions:
        sys.stderr.write(str(action))
      sys.stderr.flush()

    return obs

  def available_maps(self):
    """Ask SC2 for the list of maps it can play."""
    return self._client.send(available_maps=sc_pb.RequestAvailableMaps())

  @valid_status(Status.in_game, Status.in_replay)
  @catch_game_end
  @sw.decorate
  def step(self, count=1):
    """Step the engine forward by one (or more) step."""
    return self._client.send(step=sc_pb.RequestStep(count=count))

  @skip_status(Status.in_replay)
  @valid_status(Status.in_game)
  @catch_game_end
  @sw.decorate
  def actions(self, req_action):
    """Send a `sc_pb.RequestAction`, which may include multiple actions."""
    if FLAGS.sc2_log_actions and req_action.actions:
      sys.stderr.write(" Sending actions ".center(60, ">") + "\n")
      for action in req_action.actions:
        sys.stderr.write(str(action))
      sys.stderr.flush()
    return self._client.send(action=req_action)

  def act(self, action):
    """Send a single action. This is a shortcut for `actions`."""
    if action and action.ListFields():  # Skip no-ops.
      return self.actions(sc_pb.RequestAction(actions=[action]))

  @skip_status(Status.in_game)
  @valid_status(Status.in_replay)
  @sw.decorate
  def observer_actions(self, req_observer_action):
    """Send a `sc_pb.RequestObserverAction`."""
    if FLAGS.sc2_log_actions and req_observer_action.actions:
      sys.stderr.write(" Sending observer actions ".center(60, ">") + "\n")
      for action in req_observer_action.actions:
        sys.stderr.write(str(action))
      sys.stderr.flush()
    return self._client.send(obs_action=req_observer_action)

  def observer_act(self, action):
    """Send a single observer action. A shortcut for `observer_actions`."""
    if action and action.ListFields():  # Skip no-ops.
      return self.observer_actions(
          sc_pb.RequestObserverAction(actions=[action]))

  def chat(self, message, channel=sc_pb.ActionChat.Broadcast):
    """Send chat message as a broadcast."""
    if message:
      action_chat = sc_pb.ActionChat(
          channel=channel, message=message)
      action = sc_pb.Action(action_chat=action_chat)
      return self.act(action)

  @valid_status(Status.in_game, Status.ended)
  @sw.decorate
  def leave(self):
    """Disconnect from a multiplayer game."""
    return self._client.send(leave_game=sc_pb.RequestLeaveGame())

  @valid_status(Status.in_game, Status.in_replay, Status.ended)
  @sw.decorate
  def save_replay(self):
    """Save a replay, returning the data."""
    res = self._client.send(save_replay=sc_pb.RequestSaveReplay())
    return res.data

  @valid_status(Status.in_game)
  @sw.decorate
  def debug(self, debug_commands):
    """Run a debug command."""
    # Accept either a single DebugCommand or a list of them.
    if isinstance(debug_commands, sc_debug.DebugCommand):
      debug_commands = [debug_commands]
    return self._client.send(debug=sc_pb.RequestDebug(debug=debug_commands))

  @valid_status(Status.in_game, Status.in_replay)
  @sw.decorate
  def query(self, query):
    """Query the game state."""
    return self._client.send(query=query)

  @skip_status(Status.quit)
  @sw.decorate
  def quit(self):
    """Shut down the SC2 process."""
    try:
      # Don't expect a response.
      self._client.write(sc_pb.Request(quit=sc_pb.RequestQuit(), id=999999999))
    except protocol.ConnectionError:
      pass  # It's likely already (shutting) down, so continue as if it worked.
    finally:
      self.close()

  @sw.decorate
  def ping(self):
    """Send a ping; used to verify the connection is alive and responsive."""
    return self._client.send(ping=sc_pb.RequestPing())

  @decorate_check_error(sc_pb.ResponseReplayInfo.Error)
  @sw.decorate
  def replay_info(self, replay_data):
    """Ask SC2 for information about a serialized replay."""
    return self._client.send(replay_info=sc_pb.RequestReplayInfo(
        replay_data=replay_data))

  @property
  def status(self):
    """The current connection status as tracked by the protocol client."""
    return self._client.status
| pysc2-master | pysc2/lib/remote_controller.py |
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A viewer for starcraft observations/replays."""
import collections
import ctypes
import enum
import functools
import itertools
import math
import os
import platform
import queue
import re
import subprocess
import threading
import time
from absl import logging
import numpy as np
import pygame
from pysc2.lib import buffs
from pysc2.lib import colors
from pysc2.lib import features
from pysc2.lib import memoize
from pysc2.lib import point
from pysc2.lib import remote_controller
from pysc2.lib import stopwatch
from pysc2.lib import transform
from pysc2.lib import video_writer
from s2clientprotocol import error_pb2 as sc_err
from s2clientprotocol import raw_pb2 as sc_raw
from s2clientprotocol import sc2api_pb2 as sc_pb
from s2clientprotocol import spatial_pb2 as sc_spatial
from s2clientprotocol import ui_pb2 as sc_ui
# Disable attribute-error because of the multiple stages of initialization for
# RendererHuman.
# pytype: disable=attribute-error
# Module-level stopwatch used by the @sw.decorate profiling decorators.
sw = stopwatch.sw

render_lock = threading.Lock()  # Serialize all window/render operations.
def with_lock(lock):
  """Make sure the lock is held while in this function."""
  def decorator(func):
    @functools.wraps(func)
    def locked_call(*args, **kwargs):
      lock.acquire()
      try:
        return func(*args, **kwargs)
      finally:
        lock.release()
    return locked_call
  return decorator
def clamp(n, smallest, largest):
  """Clamp `n` to the inclusive range [smallest, largest]."""
  upper_bounded = min(n, largest)
  return max(upper_bounded, smallest)
class MouseButtons(enum.IntEnum):
  """Button ids as reported by pygame mouse events."""
  # https://www.pygame.org/docs/ref/mouse.html
  LEFT = 1
  MIDDLE = 2
  RIGHT = 3
  # Wheel motion is delivered by pygame as button events 4 and 5.
  WHEEL_UP = 4
  WHEEL_DOWN = 5
class SurfType(enum.IntEnum):
  """Used to tell what a mouse click refers to."""
  # Values are distinct bits so they can be combined, e.g. RGB | SCREEN.
  CHROME = 1  # ie help, feature layer titles, etc
  SCREEN = 2
  MINIMAP = 4
  FEATURE = 8
  RGB = 16
class ActionCmd(enum.Enum):
  """Top-level commands beyond in-game actions.

  NOTE(review): presumably consumed by the loop driving the renderer
  (step/restart/quit) -- confirm against the caller.
  """
  STEP = 1
  RESTART = 2
  QUIT = 3
class _Ability(collections.namedtuple("_Ability", [
    "ability_id", "name", "footprint_radius", "requires_point", "hotkey"])):
  """Hold the specifics of available abilities."""

  def __new__(cls, ability, static_data):
    # Some abilities are specializations that remap to a general one; when so,
    # take most fields from the general entry but keep the specific hotkey.
    specific_data = static_data[ability.ability_id]
    if specific_data.remaps_to_ability_id:
      general_data = static_data[specific_data.remaps_to_ability_id]
    else:
      general_data = specific_data
    display_name = (general_data.friendly_name or general_data.button_name or
                    general_data.link_name)
    return super(_Ability, cls).__new__(
        cls,
        ability_id=general_data.ability_id,
        name=display_name,
        footprint_radius=general_data.footprint_radius,
        requires_point=ability.requires_point,
        hotkey=specific_data.hotkey)
class _Surface(object):
  """A surface to display on screen."""

  def __init__(self, surf, surf_type, surf_rect, world_to_surf, world_to_obs,
               draw):
    """A surface to display on screen.

    Args:
      surf: The actual pygame.Surface (or subsurface).
      surf_type: A SurfType, used to tell how to treat clicks in that area.
      surf_rect: Rect of the surface relative to the window.
      world_to_surf: Convert a world point to a pixel on the surface.
      world_to_obs: Convert a world point to a pixel in the observation.
      draw: A function that draws onto the surface.
    """
    self.surf = surf
    self.surf_type = surf_type
    self.surf_rect = surf_rect
    self.world_to_surf = world_to_surf
    self.world_to_obs = world_to_obs
    self.draw = draw

  def draw_line(self, color, start_loc, end_loc, thickness=1):
    """Draw a line using world coordinates and thickness."""
    pygame.draw.line(self.surf, color,
                     self.world_to_surf.fwd_pt(start_loc).round(),
                     self.world_to_surf.fwd_pt(end_loc).round(),
                     max(1, thickness))

  def draw_arc(self, color, world_loc, world_radius, start_angle, stop_angle,
               thickness=1):
    """Draw an arc using world coordinates, radius, start and stop angles."""
    center = self.world_to_surf.fwd_pt(world_loc).round()
    radius = max(1, int(self.world_to_surf.fwd_dist(world_radius)))
    rect = pygame.Rect(center - radius, (radius * 2, radius * 2))
    # Pass 0 when the requested thickness would reach past the radius
    # (presumably to avoid pygame drawing artifacts -- confirm).
    pygame.draw.arc(self.surf, color, rect, start_angle, stop_angle,
                    thickness if thickness < radius else 0)

  def draw_circle(self, color, world_loc, world_radius, thickness=0):
    """Draw a circle using world coordinates and radius."""
    if world_radius > 0:
      center = self.world_to_surf.fwd_pt(world_loc).round()
      radius = max(1, int(self.world_to_surf.fwd_dist(world_radius)))
      # thickness=0 means "filled" to pygame; also used when the requested
      # thickness would exceed the radius.
      pygame.draw.circle(self.surf, color, center, radius,
                         thickness if thickness < radius else 0)

  def draw_rect(self, color, world_rect, thickness=0):
    """Draw a rectangle using world coordinates."""
    tl = self.world_to_surf.fwd_pt(world_rect.tl).round()
    br = self.world_to_surf.fwd_pt(world_rect.br).round()
    rect = pygame.Rect(tl, br - tl)
    pygame.draw.rect(self.surf, color, rect, thickness)

  def blit_np_array(self, array):
    """Fill this surface using the contents of a numpy array."""
    with sw("make_surface"):
      # Transpose since pygame's make_surface expects (x, y) ordering while
      # numpy image arrays are (y, x).
      raw_surface = pygame.surfarray.make_surface(array.transpose([1, 0, 2]))
    with sw("draw"):
      pygame.transform.scale(raw_surface, self.surf.get_size(), self.surf)

  def write_screen(self, font, color, screen_pos, text, align="left",
                   valign="top"):
    """Write to the screen in font.size relative coordinates."""
    # The 0.75 horizontal factor narrows the character grid; presumably it
    # approximates glyph width relative to line height -- TODO confirm.
    pos = point.Point(*screen_pos) * point.Point(0.75, 1) * font.get_linesize()
    text_surf = font.render(str(text), True, color)
    rect = text_surf.get_rect()
    # Negative coordinates anchor relative to the right/bottom edges.
    if pos.x >= 0:
      setattr(rect, align, pos.x)
    else:
      setattr(rect, align, self.surf.get_width() + pos.x)
    if pos.y >= 0:
      setattr(rect, valign, pos.y)
    else:
      setattr(rect, valign, self.surf.get_height() + pos.y)
    self.surf.blit(text_surf, rect)

  def write_world(self, font, color, world_loc, text):
    """Write text centered on a world location."""
    text_surf = font.render(text, True, color)
    rect = text_surf.get_rect()
    rect.center = self.world_to_surf.fwd_pt(world_loc)
    self.surf.blit(text_surf, rect)
class MousePos(collections.namedtuple("MousePos", ["world_pos", "surf"])):
  """Holds the mouse position in world coordinates and the surf it came from."""
  __slots__ = ()

  @property
  def surf_pos(self):
    """This position in the surface's pixel coordinates."""
    return self.surf.world_to_surf.fwd_pt(self.world_pos)

  @property
  def obs_pos(self):
    """This position in the observation's pixel coordinates."""
    return self.surf.world_to_obs.fwd_pt(self.world_pos)

  def action_spatial(self, action):
    """Given an Action, return the right spatial action."""
    surf_type = self.surf.surf_type
    if surf_type & SurfType.FEATURE:
      return action.action_feature_layer
    if surf_type & SurfType.RGB:
      return action.action_render
    # A click on a non-spatial surface shouldn't get here.
    assert surf_type & (SurfType.RGB | SurfType.FEATURE)
class PastAction(collections.namedtuple("PastAction", [
    "ability", "color", "pos", "time", "deadline"])):
  """Holds a past action for drawing over time.

  `time` and `deadline` presumably bound how long the action remains
  visible -- confirm against the drawing code that consumes these.
  """
@memoize.memoize
def _get_desktop_size():
  """Get the desktop size as a `point.Point` of pixels.

  On Linux, first try parsing `xrandr --query` for the primary display's
  resolution; otherwise (or on failure) fall back to pygame, which is more
  general but doesn't understand multiple monitors.
  """
  if platform.system() == "Linux":
    try:
      xrandr_query = subprocess.check_output(["xrandr", "--query"])
      sizes = re.findall(r"\bconnected primary (\d+)x(\d+)", str(xrandr_query))
      # Guard against no match: previously `sizes[0]` raised IndexError when
      # xrandr reported no "connected primary" display, which was mis-logged
      # below as an xrandr failure. No match just means: use the fallback.
      if sizes:
        return point.Point(int(sizes[0][0]), int(sizes[0][1]))
    except:  # pylint: disable=bare-except
      logging.error("Failed to get the resolution from xrandr.")

  # Most general, but doesn't understand multiple monitors.
  display_info = pygame.display.Info()
  return point.Point(display_info.current_w, display_info.current_h)
def circle_mask(shape, pt, radius):
  """Return a bool array of size `shape` that is True within the circle.

  Args:
    shape: Point-like with `x`/`y` giving the output array size.
    pt: Point-like with `x`/`y` giving the circle center.
    radius: Radius of the circle in the same units.

  Returns:
    A numpy bool array of shape (shape.y, shape.x).
  """
  # ogrid yields open row/column offset vectors from the center which
  # broadcast into a full grid of squared distances.
  # http://docs.scipy.org/doc/numpy/reference/generated/numpy.ogrid.html
  dy, dx = np.ogrid[-pt.y:shape.y - pt.y, -pt.x:shape.x - pt.x]
  # <= is important as radius will often come in as 0 due to rounding.
  return dx * dx + dy * dy <= radius * radius
class RendererHuman(object):
  """Render starcraft obs with pygame such that it's playable by humans."""
  camera_actions = {  # camera moves by 3 world units.
      pygame.K_LEFT: point.Point(-3, 0),
      pygame.K_RIGHT: point.Point(3, 0),
      pygame.K_UP: point.Point(0, 3),
      pygame.K_DOWN: point.Point(0, -3),
  }

  # Digit keys mapped to ids 0-9; presumably control-group selection --
  # confirm against the key handling code.
  cmd_group_keys = {
      pygame.K_0: 0,
      pygame.K_1: 1,
      pygame.K_2: 2,
      pygame.K_3: 3,
      pygame.K_4: 4,
      pygame.K_5: 5,
      pygame.K_6: 6,
      pygame.K_7: 7,
      pygame.K_8: 8,
      pygame.K_9: 9,
  }

  # (key, description) pairs displayed by the "?" help screen.
  shortcuts = [
      ("F1", "Select idle worker"),
      ("F2", "Select army"),
      ("F3", "Select larva (zerg) or warp gates (protoss)"),
      ("F4", "Quit the game"),
      ("F5", "Restart the map"),
      ("F8", "Save a replay"),
      ("F9", "Toggle RGB rendering"),
      ("F10", "Toggle rendering the player_relative layer."),
      ("F11", "Toggle synchronous rendering"),
      ("F12", "Toggle raw/feature layer actions"),
      ("Ctrl++", "Zoom in"),
      ("Ctrl+-", "Zoom out"),
      ("PgUp/PgDn", "Increase/decrease the max game speed"),
      ("Ctrl+PgUp/PgDn", "Increase/decrease the step multiplier"),
      ("Pause", "Pause the game"),
      ("?", "This help screen"),
  ]

  # Brightness ramp; index 0 is a placeholder since it's marked unused.
  upgrade_colors = [
      colors.black,  # unused...
      colors.white * 0.6,
      colors.white * 0.8,
      colors.white,
  ]
  def __init__(self, fps=22.4, step_mul=1, render_sync=False,
               render_feature_grid=True, video=None):
    """Create a renderer for use by humans.

    Make sure to call `init` with the game info, or just use `run`.

    Args:
      fps: How fast should the game be run.
      step_mul: How many game steps to take per observation.
      render_sync: Whether to wait for the obs to render before continuing.
      render_feature_grid: When RGB and feature layers are available, whether
          to render the grid of feature layers.
      video: A filename to write the video to. Implicitly enables render_sync.
    """
    self._fps = fps
    self._step_mul = step_mul
    self._render_sync = render_sync or bool(video)
    self._raw_actions = False
    self._render_player_relative = False
    self._render_rgb = None
    self._render_feature_grid = render_feature_grid
    self._window = None
    self._window_scale = 0.75
    # Observations are handed to a dedicated render thread via this queue.
    self._obs_queue = queue.Queue()
    self._render_thread = threading.Thread(target=self.render_thread,
                                           name="Renderer")
    self._render_thread.start()
    self._game_times = collections.deque(maxlen=100)  # Avg FPS over 100 frames.  # pytype: disable=wrong-keyword-args
    self._render_times = collections.deque(maxlen=100)  # pytype: disable=wrong-keyword-args
    self._last_time = time.time()
    self._last_game_loop = 0
    self._name_lengths = {}
    self._video_writer = video_writer.VideoWriter(video, fps) if video else None
def close(self):
if self._obs_queue:
self._obs_queue.put(None)
self._render_thread.join()
self._obs_queue = None
self._render_thread = None
if self._video_writer:
self._video_writer.close()
self._video_writer = None
  def init(self, game_info, static_data):
    """Take the game info and the static data needed to set up the game.

    This must be called before render or get_actions for each game or restart.

    Args:
      game_info: A `sc_pb.ResponseGameInfo` object for this game.
      static_data: A `StaticData` object for this game.

    Raises:
      ValueError: if there is nothing to render.
    """
    self._game_info = game_info
    self._static_data = static_data

    if not game_info.HasField("start_raw"):
      raise ValueError("Raw observations are required for the renderer.")

    self._map_size = point.Point.build(game_info.start_raw.map_size)
    self._playable = point.Rect(
        point.Point.build(game_info.start_raw.playable_area.p0),
        point.Point.build(game_info.start_raw.playable_area.p1))

    if game_info.options.HasField("feature_layer"):
      fl_opts = game_info.options.feature_layer
      self._feature_screen_px = point.Point.build(fl_opts.resolution)
      self._feature_minimap_px = point.Point.build(fl_opts.minimap_resolution)
      self._feature_camera_width_world_units = fl_opts.width
      self._render_rgb = False
      if not fl_opts.crop_to_playable_area:
        self._playable = point.Rect(self._map_size)
    else:
      self._feature_screen_px = self._feature_minimap_px = None
    if game_info.options.HasField("render"):
      render_opts = game_info.options.render
      self._rgb_screen_px = point.Point.build(render_opts.resolution)
      self._rgb_minimap_px = point.Point.build(render_opts.minimap_resolution)
      # RGB rendering takes precedence when both option sets are present.
      self._render_rgb = True
    else:
      self._rgb_screen_px = self._rgb_minimap_px = None

    if not self._feature_screen_px and not self._rgb_screen_px:
      raise ValueError("Nothing to render.")

    try:
      self.init_window()
      self._initialized = True
    except pygame.error as e:
      # Keep running headless if the display can't be initialized.
      self._initialized = False
      logging.error("-" * 60)
      logging.error("Failed to initialize pygame: %s", e)
      logging.error("Continuing without pygame.")
      logging.error("If you're using ssh and have an X server, try ssh -X.")
      logging.error("-" * 60)

    # Reset the per-game UI state.
    self._obs = sc_pb.ResponseObservation()
    self._queued_action = None
    self._queued_hotkey = ""
    self._select_start = None
    self._alerts = {}
    self._past_actions = []
    self._help = False
    self._last_zoom_time = 0
@with_lock(render_lock)
@sw.decorate
def init_window(self):
"""Initialize the pygame window and lay out the surfaces."""
if platform.system() == "Windows":
# Enable DPI awareness on Windows to give the correct window size.
ctypes.windll.user32.SetProcessDPIAware() # pytype: disable=module-attr
pygame.init()
if self._render_rgb and self._rgb_screen_px:
main_screen_px = self._rgb_screen_px
else:
main_screen_px = self._feature_screen_px
window_size_ratio = main_screen_px
num_feature_layers = 0
if self._render_feature_grid:
# Want a roughly square grid of feature layers, each being roughly square.
if self._game_info.options.raw:
num_feature_layers += 5
if self._feature_screen_px:
num_feature_layers += len(features.SCREEN_FEATURES)
num_feature_layers += len(features.MINIMAP_FEATURES)
if num_feature_layers > 0:
feature_cols = math.ceil(math.sqrt(num_feature_layers))
feature_rows = math.ceil(num_feature_layers / feature_cols)
features_layout = point.Point(
feature_cols, feature_rows * 1.05) # Make room for titles.
# Scale features_layout to main_screen_px height so we know its width.
features_aspect_ratio = (features_layout * main_screen_px.y /
features_layout.y)
window_size_ratio += point.Point(features_aspect_ratio.x, 0)
window_size_px = window_size_ratio.scale_max_size(
_get_desktop_size() * self._window_scale).ceil()
# Create the actual window surface. This should only be blitted to from one
# of the sub-surfaces defined below.
self._window = pygame.display.set_mode(window_size_px, 0, 32)
pygame.display.set_caption("Starcraft Viewer")
# The sub-surfaces that the various draw functions will draw to.
self._surfaces = []
def add_surface(surf_type, surf_loc, world_to_surf, world_to_obs, draw_fn):
"""Add a surface. Drawn in order and intersect in reverse order."""
sub_surf = self._window.subsurface(
pygame.Rect(surf_loc.tl, surf_loc.size))
self._surfaces.append(_Surface(
sub_surf, surf_type, surf_loc, world_to_surf, world_to_obs, draw_fn))
self._scale = window_size_px.y // 32
self._font_small = pygame.font.Font(None, int(self._scale * 0.5))
self._font_large = pygame.font.Font(None, self._scale)
def check_eq(a, b):
"""Used to run unit tests on the transforms."""
assert (a - b).len() < 0.0001, "%s != %s" % (a, b)
# World has origin at bl, world_tl has origin at tl.
self._world_to_world_tl = transform.Linear(
point.Point(1, -1), point.Point(0, self._map_size.y))
check_eq(self._world_to_world_tl.fwd_pt(point.Point(0, 0)),
point.Point(0, self._map_size.y))
check_eq(self._world_to_world_tl.fwd_pt(point.Point(5, 10)),
point.Point(5, self._map_size.y - 10))
# Move the point to be relative to the camera. This gets updated per frame.
self._world_tl_to_world_camera_rel = transform.Linear(
offset=-self._map_size / 4)
check_eq(self._world_tl_to_world_camera_rel.fwd_pt(self._map_size / 4),
point.Point(0, 0))
check_eq(
self._world_tl_to_world_camera_rel.fwd_pt(
(self._map_size / 4) + point.Point(5, 10)),
point.Point(5, 10))
if self._feature_screen_px:
# Feature layer locations in continuous space.
feature_world_per_pixel = (self._feature_screen_px /
self._feature_camera_width_world_units)
world_camera_rel_to_feature_screen = transform.Linear(
feature_world_per_pixel, self._feature_screen_px / 2)
check_eq(world_camera_rel_to_feature_screen.fwd_pt(point.Point(0, 0)),
self._feature_screen_px / 2)
check_eq(
world_camera_rel_to_feature_screen.fwd_pt(
point.Point(-0.5, -0.5) * self._feature_camera_width_world_units),
point.Point(0, 0))
self._world_to_feature_screen = transform.Chain(
self._world_to_world_tl,
self._world_tl_to_world_camera_rel,
world_camera_rel_to_feature_screen)
self._world_to_feature_screen_px = transform.Chain(
self._world_to_feature_screen,
transform.PixelToCoord())
world_tl_to_feature_minimap = transform.Linear(
self._feature_minimap_px / self._playable.diagonal.max_dim())
world_tl_to_feature_minimap.offset = world_tl_to_feature_minimap.fwd_pt(
-self._world_to_world_tl.fwd_pt(self._playable.bl))
self._world_to_feature_minimap = transform.Chain(
self._world_to_world_tl,
world_tl_to_feature_minimap)
self._world_to_feature_minimap_px = transform.Chain(
self._world_to_feature_minimap,
transform.PixelToCoord())
# These are confusing since self._playable is in world coords which is
# (bl <= tr), but stored in a Rect that is (tl <= br).
check_eq(self._world_to_feature_minimap.fwd_pt(self._playable.bl),
point.Point(0, 0))
check_eq(self._world_to_feature_minimap.fwd_pt(self._playable.tr),
self._playable.diagonal.scale_max_size(self._feature_minimap_px))
if self._rgb_screen_px:
# RGB pixel locations in continuous space.
# TODO(tewalds): Use a real 3d projection instead of orthogonal.
rgb_world_per_pixel = (self._rgb_screen_px / 24)
world_camera_rel_to_rgb_screen = transform.Linear(
rgb_world_per_pixel, self._rgb_screen_px / 2)
check_eq(world_camera_rel_to_rgb_screen.fwd_pt(point.Point(0, 0)),
self._rgb_screen_px / 2)
check_eq(
world_camera_rel_to_rgb_screen.fwd_pt(
point.Point(-0.5, -0.5) * 24),
point.Point(0, 0))
self._world_to_rgb_screen = transform.Chain(
self._world_to_world_tl,
self._world_tl_to_world_camera_rel,
world_camera_rel_to_rgb_screen)
self._world_to_rgb_screen_px = transform.Chain(
self._world_to_rgb_screen,
transform.PixelToCoord())
world_tl_to_rgb_minimap = transform.Linear(
self._rgb_minimap_px / self._map_size.max_dim())
check_eq(world_tl_to_rgb_minimap.fwd_pt(point.Point(0, 0)),
point.Point(0, 0))
check_eq(world_tl_to_rgb_minimap.fwd_pt(self._map_size),
self._map_size.scale_max_size(self._rgb_minimap_px))
self._world_to_rgb_minimap = transform.Chain(
self._world_to_world_tl,
world_tl_to_rgb_minimap)
self._world_to_rgb_minimap_px = transform.Chain(
self._world_to_rgb_minimap,
transform.PixelToCoord())
# Renderable space for the screen.
screen_size_px = main_screen_px.scale_max_size(window_size_px)
minimap_size_px = self._playable.diagonal.scale_max_size(screen_size_px / 4)
minimap_offset = point.Point(0, (screen_size_px.y - minimap_size_px.y))
if self._render_rgb:
rgb_screen_to_main_screen = transform.Linear(
screen_size_px / self._rgb_screen_px)
add_surface(SurfType.RGB | SurfType.SCREEN,
point.Rect(point.origin, screen_size_px),
transform.Chain( # surf
self._world_to_rgb_screen,
rgb_screen_to_main_screen),
self._world_to_rgb_screen_px,
self.draw_screen)
rgb_minimap_to_main_minimap = transform.Linear(
minimap_size_px / self._rgb_minimap_px)
add_surface(SurfType.RGB | SurfType.MINIMAP,
point.Rect(minimap_offset,
minimap_offset + minimap_size_px),
transform.Chain( # surf
self._world_to_rgb_minimap,
rgb_minimap_to_main_minimap),
self._world_to_rgb_minimap_px,
self.draw_mini_map)
else: # Feature layer main screen
feature_screen_to_main_screen = transform.Linear(
screen_size_px / self._feature_screen_px)
add_surface(SurfType.FEATURE | SurfType.SCREEN,
point.Rect(point.origin, screen_size_px),
transform.Chain( # surf
self._world_to_feature_screen,
feature_screen_to_main_screen),
self._world_to_feature_screen_px,
self.draw_screen)
feature_minimap_to_main_minimap = transform.Linear(
minimap_size_px.max_dim() / self._feature_minimap_px.max_dim())
add_surface(SurfType.FEATURE | SurfType.MINIMAP,
point.Rect(minimap_offset,
minimap_offset + minimap_size_px),
transform.Chain( # surf
self._world_to_feature_minimap,
feature_minimap_to_main_minimap),
self._world_to_feature_minimap_px,
self.draw_mini_map)
if self._render_feature_grid and num_feature_layers > 0:
# Add the raw and feature layers
features_loc = point.Point(screen_size_px.x, 0)
feature_pane = self._window.subsurface(
pygame.Rect(features_loc, window_size_px - features_loc))
feature_pane.fill(colors.white / 2)
feature_pane_size = point.Point(*feature_pane.get_size())
feature_grid_size = feature_pane_size / point.Point(feature_cols,
feature_rows)
feature_layer_area = point.Point(1, 1).scale_max_size(
feature_grid_size)
feature_layer_padding = feature_layer_area // 20
feature_layer_size = feature_layer_area - feature_layer_padding * 2
feature_font_size = int(feature_grid_size.y * 0.09)
feature_font = pygame.font.Font(None, feature_font_size)
feature_counter = itertools.count()
def add_layer(surf_type, world_to_surf, world_to_obs, name, fn):
"""Add a layer surface."""
i = next(feature_counter)
grid_offset = point.Point(i % feature_cols,
i // feature_cols) * feature_grid_size
text = feature_font.render(name, True, colors.white)
rect = text.get_rect()
rect.center = grid_offset + point.Point(feature_grid_size.x / 2,
feature_font_size)
feature_pane.blit(text, rect)
surf_loc = (features_loc + grid_offset + feature_layer_padding +
point.Point(0, feature_font_size))
add_surface(surf_type,
point.Rect(surf_loc, surf_loc + feature_layer_size).round(),
world_to_surf, world_to_obs, fn)
raw_world_to_obs = transform.Linear()
raw_world_to_surf = transform.Linear(feature_layer_size / self._map_size)
def add_raw_layer(from_obs, name, color):
add_layer(SurfType.FEATURE | SurfType.MINIMAP,
raw_world_to_surf, raw_world_to_obs, "raw " + name,
lambda surf: self.draw_raw_layer(surf, from_obs, name, color))
if self._game_info.options.raw:
add_raw_layer(False, "terrain_height", colors.height_map(256))
add_raw_layer(False, "pathing_grid", colors.winter(2))
add_raw_layer(False, "placement_grid", colors.winter(2))
add_raw_layer(True, "visibility", colors.VISIBILITY_PALETTE)
add_raw_layer(True, "creep", colors.CREEP_PALETTE)
def add_feature_layer(feature, surf_type, world_to_surf, world_to_obs):
add_layer(surf_type, world_to_surf, world_to_obs, feature.full_name,
lambda surf: self.draw_feature_layer(surf, feature))
if self._feature_minimap_px:
# Add the minimap feature layers
feature_minimap_to_feature_minimap_surf = transform.Linear(
feature_layer_size / self._feature_minimap_px)
world_to_feature_minimap_surf = transform.Chain(
self._world_to_feature_minimap,
feature_minimap_to_feature_minimap_surf)
for feature in features.MINIMAP_FEATURES:
add_feature_layer(feature, SurfType.FEATURE | SurfType.MINIMAP,
world_to_feature_minimap_surf,
self._world_to_feature_minimap_px)
if self._feature_screen_px:
# Add the screen feature layers
feature_screen_to_feature_screen_surf = transform.Linear(
feature_layer_size / self._feature_screen_px)
world_to_feature_screen_surf = transform.Chain(
self._world_to_feature_screen,
feature_screen_to_feature_screen_surf)
for feature in features.SCREEN_FEATURES:
add_feature_layer(feature, SurfType.FEATURE | SurfType.SCREEN,
world_to_feature_screen_surf,
self._world_to_feature_screen_px)
# Add the help screen
help_size = point.Point(
(max(len(s) for s, _ in self.shortcuts) +
max(len(s) for _, s in self.shortcuts)) * 0.4 + 4,
len(self.shortcuts) + 3) * self._scale
help_rect = point.Rect(window_size_px / 2 - help_size / 2,
window_size_px / 2 + help_size / 2)
add_surface(SurfType.CHROME, help_rect, None, None, self.draw_help)
# Arbitrarily set the initial camera to the center of the map.
self._update_camera(self._map_size / 2)
def _update_camera(self, camera_center):
  """Update the camera transform based on the new camera center.

  Args:
    camera_center: `point.Point` of the new camera center in world units.
  """
  # Shift the camera-relative transform so `camera_center` maps to its origin.
  self._world_tl_to_world_camera_rel.offset = (
      -self._world_to_world_tl.fwd_pt(camera_center) *
      self._world_tl_to_world_camera_rel.scale)

  if self._feature_screen_px:
    # Half-extent of the camera view in world units, derived from the screen
    # aspect ratio and the configured camera width.
    camera_radius = (self._feature_screen_px / self._feature_screen_px.x *
                     self._feature_camera_width_world_units / 2)
    # Clamp the center so the camera rect stays within the map.
    center = camera_center.bound(camera_radius,
                                 self._map_size - camera_radius)
    self._camera = point.Rect(
        (center - camera_radius).bound(self._map_size),
        (center + camera_radius).bound(self._map_size))
def zoom(self, factor):
  """Zoom the window in/out by `factor` (e.g. 1.1 to zoom in, 1/1.1 out)."""
  self._window_scale *= factor
  if time.time() - self._last_zoom_time < 1:
    # Avoid a deadlock in pygame if you zoom too quickly.
    # NOTE(review): this sleeps for the *elapsed* time since the last zoom
    # rather than the remaining fraction of the 1s window -- presumably
    # still enough delay to avoid the deadlock; confirm before changing.
    time.sleep(time.time() - self._last_zoom_time)
  self.init_window()  # Rebuild all surfaces at the new scale.
  self._last_zoom_time = time.time()
def get_mouse_pos(self, window_pos=None):
  """Return a MousePos with the world position and surface under the mouse.

  Args:
    window_pos: Optional (x, y) window coordinate; defaults to the live
      pygame mouse position.

  Returns:
    A `MousePos`, or None if the point is over no actionable surface.
  """
  if window_pos is None:
    window_pos = pygame.mouse.get_pos()
  # +0.5 to center the point on the middle of the pixel.
  window_pt = point.Point(*window_pos) + 0.5
  # Walk surfaces from top-most (drawn last) to bottom-most.
  for surf in reversed(self._surfaces):
    if surf.surf_type == SurfType.CHROME:
      continue  # Chrome overlays aren't clickable.
    if not surf.surf_rect.contains_point(window_pt):
      continue
    surf_rel_pt = window_pt - surf.surf_rect.tl
    return MousePos(surf.world_to_surf.back_pt(surf_rel_pt), surf)
def clear_queued_action(self):
  """Drop the pending targeted ability and any partially-typed hotkey."""
  self._queued_action = None
  self._queued_hotkey = ""
def save_replay(self, run_config, controller):
  """Save a replay of the current game and print where it was written.

  Only does anything while in-game or after the game has ended.

  Args:
    run_config: Provides `save_replay` for writing the file to disk.
    controller: The controller whose game state is serialized.
  """
  if controller.status in (remote_controller.Status.in_game,
                           remote_controller.Status.ended):
    # Name the replay after the map file (without its extension).
    prefix, _ = os.path.splitext(
        os.path.basename(self._game_info.local_map_path))
    replay_path = run_config.save_replay(
        controller.save_replay(), "local", prefix)
    print("Wrote replay to:", replay_path)
@sw.decorate
def get_actions(self, run_config, controller):
  """Get actions from the UI, apply to controller, and return an ActionCmd.

  Drains the pygame event queue, translating keyboard and mouse input into
  SC2 API actions which are sent through `controller` immediately.

  Args:
    run_config: Used to save replays (F8).
    controller: Receives the generated actions/observer actions.

  Returns:
    An `ActionCmd`: STEP to continue, RESTART, or QUIT.
  """
  if not self._initialized:
    return ActionCmd.STEP

  for event in pygame.event.get():
    ctrl = pygame.key.get_mods() & pygame.KMOD_CTRL
    shift = pygame.key.get_mods() & pygame.KMOD_SHIFT
    alt = pygame.key.get_mods() & pygame.KMOD_ALT
    if event.type == pygame.QUIT:
      return ActionCmd.QUIT
    elif event.type == pygame.KEYDOWN:
      if self._help:
        self._help = False  # Any key dismisses the help overlay.
      elif event.key in (pygame.K_QUESTION, pygame.K_SLASH):
        self._help = True
      elif event.key == pygame.K_PAUSE:
        # Block in a nested event loop until unpaused (or quit/restart).
        pause = True
        while pause:
          time.sleep(0.1)
          for event2 in pygame.event.get():
            if event2.type == pygame.KEYDOWN:
              if event2.key in (pygame.K_PAUSE, pygame.K_ESCAPE):
                pause = False
              elif event2.key == pygame.K_F4:
                return ActionCmd.QUIT
              elif event2.key == pygame.K_F5:
                return ActionCmd.RESTART
      elif event.key == pygame.K_F4:
        return ActionCmd.QUIT
      elif event.key == pygame.K_F5:
        return ActionCmd.RESTART
      elif event.key == pygame.K_F9:  # Toggle rgb rendering.
        if self._rgb_screen_px and self._feature_screen_px:
          self._render_rgb = not self._render_rgb
          print("Rendering", self._render_rgb and "RGB" or "Feature Layers")
          self.init_window()  # Surfaces differ between the two modes.
      elif event.key == pygame.K_F11:  # Toggle synchronous rendering.
        self._render_sync = not self._render_sync
        print("Rendering", self._render_sync and "Sync" or "Async")
      elif event.key == pygame.K_F12:
        self._raw_actions = not self._raw_actions
        print("Action space:", self._raw_actions and "Raw" or "Spatial")
      elif event.key == pygame.K_F10:  # Toggle player_relative layer.
        self._render_player_relative = not self._render_player_relative
      elif event.key == pygame.K_F8:  # Save a replay.
        self.save_replay(run_config, controller)
      elif event.key in (pygame.K_PLUS, pygame.K_EQUALS) and ctrl:
        self.zoom(1.1)  # zoom in
      elif event.key in (pygame.K_MINUS, pygame.K_UNDERSCORE) and ctrl:
        self.zoom(1 / 1.1)  # zoom out
      elif event.key in (pygame.K_PAGEUP, pygame.K_PAGEDOWN):
        if ctrl:
          # Ctrl+PgUp/PgDn: change game steps per observation.
          if event.key == pygame.K_PAGEUP:
            self._step_mul += 1
          elif self._step_mul > 1:
            self._step_mul -= 1
          print("New step mul:", self._step_mul)
        else:
          # Plain PgUp/PgDn: scale the max rendering/game speed.
          self._fps *= 1.25 if event.key == pygame.K_PAGEUP else 1 / 1.25
          print("New max game speed: %.1f" % self._fps)
      elif event.key == pygame.K_F1:
        if self._obs.observation.player_common.idle_worker_count > 0:
          controller.act(self.select_idle_worker(ctrl, shift))
      elif event.key == pygame.K_F2:
        if self._obs.observation.player_common.army_count > 0:
          controller.act(self.select_army(shift))
      elif event.key == pygame.K_F3:
        # F3 handles both warp gates (Protoss) and larva (Zerg).
        if self._obs.observation.player_common.warp_gate_count > 0:
          controller.act(self.select_warp_gates(shift))
        if self._obs.observation.player_common.larva_count > 0:
          controller.act(self.select_larva())
      elif event.key in self.cmd_group_keys:
        controller.act(self.control_group(self.cmd_group_keys[event.key],
                                          ctrl, shift, alt))
      elif event.key in self.camera_actions:
        if self._obs:
          # Nudge the camera from its current raw position.
          pt = point.Point.build(self._obs.observation.raw_data.player.camera)
          pt += self.camera_actions[event.key]
          controller.act(self.camera_action_raw(pt))
          controller.observer_act(self.camera_action_observer_pt(pt))
      elif event.key == pygame.K_ESCAPE:
        controller.observer_act(self.camera_action_observer_player(
            self._obs.observation.player_common.player_id))
        if self._queued_action:
          self.clear_queued_action()
        else:
          cmds = self._abilities(lambda cmd: cmd.hotkey == "escape")  # Cancel
          for cmd in cmds:  # There could be multiple cancels.
            assert not cmd.requires_point
            controller.act(self.unit_action(cmd, None, shift))
      else:
        # Any other key extends the in-progress multi-key ability hotkey.
        if not self._queued_action:
          key = pygame.key.name(event.key).lower()
          new_cmd = self._queued_hotkey + key
          cmds = self._abilities(lambda cmd, n=new_cmd: (  # pylint: disable=g-long-lambda
              cmd.hotkey != "escape" and cmd.hotkey.startswith(n)))
          if cmds:
            self._queued_hotkey = new_cmd
            if len(cmds) == 1:
              cmd = cmds[0]
              if cmd.hotkey == self._queued_hotkey:
                if cmd.requires_point:
                  # Hold the ability until the user clicks a target.
                  self.clear_queued_action()
                  self._queued_action = cmd
                else:
                  controller.act(self.unit_action(cmd, None, shift))
    elif event.type == pygame.MOUSEBUTTONDOWN:
      mouse_pos = self.get_mouse_pos(event.pos)
      if event.button == MouseButtons.LEFT and mouse_pos:
        if self._queued_action:
          controller.act(self.unit_action(
              self._queued_action, mouse_pos, shift))
        elif mouse_pos.surf.surf_type & SurfType.MINIMAP:
          controller.act(self.camera_action(mouse_pos))
          controller.observer_act(self.camera_action_observer_pt(
              mouse_pos.world_pos))
        else:
          self._select_start = mouse_pos  # Begin a drag-select.
      elif event.button == MouseButtons.RIGHT:
        if self._queued_action:
          self.clear_queued_action()
        cmds = self._abilities(lambda cmd: cmd.name == "Smart")
        if cmds:
          controller.act(self.unit_action(cmds[0], mouse_pos, shift))
    elif event.type == pygame.MOUSEBUTTONUP:
      mouse_pos = self.get_mouse_pos(event.pos)
      if event.button == MouseButtons.LEFT and self._select_start:
        # Only complete the selection if the release is on the same screen
        # surface where the drag started.
        if (mouse_pos and mouse_pos.surf.surf_type & SurfType.SCREEN and
            mouse_pos.surf.surf_type == self._select_start.surf.surf_type):
          controller.act(self.select_action(
              self._select_start, mouse_pos, ctrl, shift))
        self._select_start = None
  return ActionCmd.STEP
def camera_action(self, mouse_pos):
  """Build a `sc_pb.Action` that recenters the camera on `mouse_pos`."""
  act = sc_pb.Action()
  spatial = mouse_pos.action_spatial(act)
  target = mouse_pos.obs_pos.round()
  target.assign_to(spatial.camera_move.center_minimap)
  return act
def camera_action_raw(self, world_pos):
  """Build a raw-interface `sc_pb.Action` moving the camera to `world_pos`."""
  act = sc_pb.Action()
  dest = act.action_raw.camera_move.center_world_space
  world_pos.assign_to(dest)
  return act
def camera_action_observer_pt(self, world_pos):
  """Build a `sc_pb.ObserverAction` moving the observer camera to a point."""
  act = sc_pb.ObserverAction()
  dest = act.camera_move.world_pos
  world_pos.assign_to(dest)
  return act
def camera_action_observer_player(self, player_id):
  """Build a `sc_pb.ObserverAction` that follows `player_id`'s camera."""
  act = sc_pb.ObserverAction()
  act.camera_follow_player.player_id = player_id
  return act
def select_action(self, pos1, pos2, ctrl, shift):
  """Return a `sc_pb.Action` with the selection filled.

  Args:
    pos1: `MousePos` where the drag-select started.
    pos2: `MousePos` where it ended; equal to `pos1` for a click-select.
    ctrl: Ctrl modifier held (spatial mode: select all of that unit type).
    shift: Shift modifier held (add/toggle rather than replace selection).

  Returns:
    The populated `sc_pb.Action` (raw or spatial, depending on the mode).
  """
  assert pos1.surf.surf_type == pos2.surf.surf_type
  assert pos1.surf.world_to_obs == pos2.surf.world_to_obs

  action = sc_pb.Action()
  if self._raw_actions:
    unit_command = action.action_raw.unit_command
    unit_command.ability_id = 0  # no-op
    player_id = self._obs.observation.player_common.player_id
    if pos1.world_pos == pos2.world_pos:  # select a point
      # Reversed so the top-most (drawn last) owned unit is picked.
      for u, p in reversed(list(self._visible_units())):
        if (pos1.world_pos.contained_circle(p, u.radius) and
            u.owner == player_id):
          unit_command.unit_tags.append(u.tag)
          break
    else:
      # Rect select: every owned unit whose circle touches the rect.
      rect = point.Rect(pos1.world_pos, pos2.world_pos)
      unit_command.unit_tags.extend([
          u.tag for u, p in self._visible_units()
          if u.owner == player_id and rect.intersects_circle(p, u.radius)])
  else:
    action_spatial = pos1.action_spatial(action)
    if pos1.world_pos == pos2.world_pos:  # select a point
      select = action_spatial.unit_selection_point
      pos1.obs_pos.assign_to(select.selection_screen_coord)
      mod = sc_spatial.ActionSpatialUnitSelectionPoint
      if ctrl:
        select.type = mod.AddAllType if shift else mod.AllType
      else:
        select.type = mod.Toggle if shift else mod.Select
    else:
      select = action_spatial.unit_selection_rect
      rect = select.selection_screen_coord.add()
      pos1.obs_pos.assign_to(rect.p0)
      pos2.obs_pos.assign_to(rect.p1)
      select.selection_add = shift

  # Clear the queued action if something will be selected. An alternative
  # implementation may check whether the selection changed next frame.
  units = self._units_in_area(point.Rect(pos1.world_pos, pos2.world_pos))
  if units:
    self.clear_queued_action()

  return action
def select_idle_worker(self, ctrl, shift):
  """Select an idle worker.

  Ctrl selects all idle workers (vs one); shift adds to the current
  selection (vs replacing it).
  """
  mod = sc_ui.ActionSelectIdleWorker
  # (ctrl, shift) -> selection type.
  selection_type = {
      (True, True): mod.AddAll,
      (True, False): mod.All,
      (False, True): mod.Add,
      (False, False): mod.Set,
  }[(bool(ctrl), bool(shift))]
  action = sc_pb.Action()
  action.action_ui.select_idle_worker.type = selection_type
  return action
def select_army(self, shift):
  """Select the whole army; shift adds it to the current selection."""
  act = sc_pb.Action()
  act.action_ui.select_army.selection_add = shift
  return act
def select_warp_gates(self, shift):
  """Select all warp gates; shift adds them to the current selection."""
  act = sc_pb.Action()
  act.action_ui.select_warp_gates.selection_add = shift
  return act
def select_larva(self):
  """Select all larva."""
  act = sc_pb.Action()
  # The request carries no parameters; the field's presence is the request.
  act.action_ui.select_larva.SetInParent()
  return act
def control_group(self, control_group_id, ctrl, shift, alt):
  """Act on a control group: recall, set, append, or steal.

  Args:
    control_group_id: Index of the control group to act on.
    ctrl: Ctrl modifier (set the group).
    shift: Shift modifier (append to the group).
    alt: Alt modifier (steal units into the group).

  Returns:
    A populated `sc_pb.Action`, or None for an unknown modifier combo.
  """
  mod = sc_ui.ActionControlGroup
  # (ctrl, shift, alt) -> control group action.
  dispatch = {
      (False, False, False): mod.Recall,
      (True, False, False): mod.Set,
      (False, True, False): mod.Append,
      (False, False, True): mod.SetAndSteal,
      (False, True, True): mod.AppendAndSteal,
  }
  combo = (bool(ctrl), bool(shift), bool(alt))
  if combo not in dispatch:
    return  # unknown
  action = sc_pb.Action()
  select = action.action_ui.control_group
  select.action = dispatch[combo]
  select.control_group_index = control_group_id
  return action
def unit_action(self, cmd, pos, shift):
  """Return a `sc_pb.Action` filled with the cmd and appropriate target.

  Args:
    cmd: The ability to issue (has `ability_id`).
    pos: Target `MousePos`, or None for an untargeted ability.
    shift: Whether to queue the command after the unit's current orders.
  """
  action = sc_pb.Action()
  if self._raw_actions:
    unit_command = action.action_raw.unit_command
    unit_command.ability_id = cmd.ability_id
    unit_command.queue_command = shift
    player_id = self._obs.observation.player_common.player_id
    # Raw actions must name their units explicitly: use the selection.
    unit_command.unit_tags.extend([u.tag for u, _ in self._visible_units()
                                   if u.is_selected and u.owner == player_id])
    if pos:
      # Prefer targeting the top-most unit under the cursor; fall back to
      # the ground position (for-else fires when no unit matched).
      for u, p in reversed(list(self._visible_units())):
        if pos.world_pos.contained_circle(p, u.radius):
          unit_command.target_unit_tag = u.tag
          break
      else:
        pos.world_pos.assign_to(unit_command.target_world_space_pos)
  else:
    if pos:
      action_spatial = pos.action_spatial(action)
      unit_command = action_spatial.unit_command
      unit_command.ability_id = cmd.ability_id
      unit_command.queue_command = shift
      if pos.surf.surf_type & SurfType.SCREEN:
        pos.obs_pos.assign_to(unit_command.target_screen_coord)
      elif pos.surf.surf_type & SurfType.MINIMAP:
        pos.obs_pos.assign_to(unit_command.target_minimap_coord)
    else:
      # Untargeted ability: route through whichever spatial interface
      # is available.
      if self._feature_screen_px:
        action.action_feature_layer.unit_command.ability_id = cmd.ability_id
      else:
        action.action_render.unit_command.ability_id = cmd.ability_id

  self.clear_queued_action()
  return action
def _abilities(self, fn=None):
  """Return the available abilities, deduped by id, filtered by `fn`."""
  by_id = {}
  for cmd in self._obs.observation.abilities:
    ability = _Ability(cmd, self._static_data.abilities)
    if fn is None or fn(ability):
      by_id[ability.ability_id] = ability  # Later entries win per id.
  return list(by_id.values())
def _visible_units(self):
  """Yield (unit, Point) pairs for visible units, in draw order."""
  def draw_order(u):
    # Low draws first: elevation, then world-owned (owner 16, eg geyser)
    # under player-owned (eg refinery), big under small, tag as a stable
    # tiebreak.
    return (u.pos.z, u.owner != 16, -u.radius, u.tag)

  for unit in sorted(self._obs.observation.raw_data.units, key=draw_order):
    yield unit, point.Point.build(unit.pos)
def _units_in_area(self, rect):
  """Return the player's own units whose circles intersect `rect`."""
  player_id = self._obs.observation.player_common.player_id
  found = []
  for unit, pos in self._visible_units():
    if unit.owner == player_id and rect.intersects_circle(pos, unit.radius):
      found.append(unit)
  return found
def get_unit_name(self, surf, name, radius):
  """Return `name` truncated to fit a unit of `radius` on `surf`, cached."""
  key = (name, radius)
  if key not in self._name_lengths:
    # Width budget in pixels for this unit size on this surface.
    max_len = surf.world_to_surf.fwd_dist(radius * 1.6)
    fitted = name
    for i in range(len(name)):
      if self._font_small.size(name[:i + 1])[0] > max_len:
        fitted = name[:i]  # Longest prefix that still fits.
        break
    self._name_lengths[key] = fitted
  return self._name_lengths[key]
@sw.decorate
def draw_units(self, surf):
  """Draw the units and buildings with their status arcs and labels."""
  unit_dict = None  # Cache the units {tag: unit_proto} for orders.
  tau = 2 * math.pi
  for u, p in self._visible_units():
    if self._camera.intersects_circle(p, u.radius):
      fraction_damage = clamp((u.health_max - u.health) / (u.health_max or 1),
                              0, 1)
      if u.display_type == sc_raw.Placeholder:
        # Placeholders render at one third brightness.
        surf.draw_circle(colors.PLAYER_ABSOLUTE_PALETTE[u.owner] // 3, p,
                         u.radius)
      else:
        surf.draw_circle(colors.PLAYER_ABSOLUTE_PALETTE[u.owner], p, u.radius)

        if fraction_damage > 0:
          # Darker inner circle whose size grows with damage taken.
          surf.draw_circle(colors.PLAYER_ABSOLUTE_PALETTE[u.owner] // 2,
                           p, u.radius * fraction_damage)
      surf.draw_circle(colors.black, p, u.radius, thickness=1)

      if self._static_data.unit_stats[u.unit_type].movement_speed > 0:
        # Small white tick showing which way a mobile unit faces.
        surf.draw_arc(colors.white, p, u.radius, u.facing - 0.1,
                      u.facing + 0.1, thickness=1)

      def draw_arc_ratio(color, world_loc, radius, start, end, thickness=1):
        # start/end are fractions of a full revolution.
        surf.draw_arc(color, world_loc, radius, start * tau, end * tau,
                      thickness)

      if u.shield and u.shield_max:
        draw_arc_ratio(colors.blue, p, u.radius - 0.05, 0,
                       u.shield / u.shield_max)
      if u.energy and u.energy_max:
        draw_arc_ratio(colors.purple * 0.9, p, u.radius - 0.1, 0,
                       u.energy / u.energy_max)
      if 0 < u.build_progress < 1:
        draw_arc_ratio(colors.cyan, p, u.radius - 0.15, 0, u.build_progress)
      elif u.orders and 0 < u.orders[0].progress < 1:
        draw_arc_ratio(colors.cyan, p, u.radius - 0.15, 0,
                       u.orders[0].progress)
      if u.buff_duration_remain and u.buff_duration_max:
        draw_arc_ratio(colors.white, p, u.radius - 0.2, 0,
                       u.buff_duration_remain / u.buff_duration_max)
      # Short arcs at fixed angles marking upgrade levels.
      if u.attack_upgrade_level:
        draw_arc_ratio(self.upgrade_colors[u.attack_upgrade_level], p,
                       u.radius - 0.25, 0.18, 0.22, thickness=3)
      if u.armor_upgrade_level:
        draw_arc_ratio(self.upgrade_colors[u.armor_upgrade_level], p,
                       u.radius - 0.25, 0.23, 0.27, thickness=3)
      if u.shield_upgrade_level:
        draw_arc_ratio(self.upgrade_colors[u.shield_upgrade_level], p,
                       u.radius - 0.25, 0.28, 0.32, thickness=3)

      def write_small(loc, s):
        surf.write_world(self._font_small, colors.white, loc, str(s))

      name = self.get_unit_name(
          surf, self._static_data.units.get(u.unit_type, "<none>"), u.radius)
      if name:
        write_small(p, name)
      if u.ideal_harvesters > 0:
        write_small(p + point.Point(0, 0.5),
                    "%s / %s" % (u.assigned_harvesters, u.ideal_harvesters))
      # One annotation below the unit, in priority order.
      if u.mineral_contents > 0:
        write_small(p - point.Point(0, 0.5), u.mineral_contents)
      elif u.vespene_contents > 0:
        write_small(p - point.Point(0, 0.5), u.vespene_contents)
      elif u.display_type == sc_raw.Snapshot:
        write_small(p - point.Point(0, 0.5), "snapshot")
      elif u.display_type == sc_raw.Placeholder:
        write_small(p - point.Point(0, 0.5), "placeholder")
      elif u.is_hallucination:
        write_small(p - point.Point(0, 0.5), "hallucination")
      elif u.is_burrowed:
        write_small(p - point.Point(0, 0.5), "burrowed")
      elif u.cloak != sc_raw.NotCloaked:
        write_small(p - point.Point(0, 0.5), "cloaked")

      if u.is_selected:
        surf.draw_circle(colors.green, p, u.radius + 0.1, 1)

        # Draw the orders of selected units.
        start_point = p
        for o in u.orders:
          target_point = None
          if o.HasField("target_world_space_pos"):
            target_point = point.Point.build(o.target_world_space_pos)
          elif o.HasField("target_unit_tag"):
            if unit_dict is None:
              # Build the tag -> unit lookup lazily, at most once per frame.
              unit_dict = {t.tag: t
                           for t in self._obs.observation.raw_data.units}
            target_unit = unit_dict.get(o.target_unit_tag)
            if target_unit:
              target_point = point.Point.build(target_unit.pos)
          if target_point:
            # Chain order lines: each order starts at the previous target.
            surf.draw_line(colors.cyan * 0.75, start_point, target_point)
            start_point = target_point
          else:
            break
        for rally in u.rally_targets:
          surf.draw_line(colors.cyan * 0.75, p,
                         point.Point.build(rally.point))
@sw.decorate
def draw_effects(self, surf):
  """Draw the visible effects (eg storms) as labeled concentric rings."""
  for effect in self._obs.observation.raw_data.effects:
    # Alternate the ring colors: effect color twice, then the owner's color.
    color = [
        colors.effects[effect.effect_id],
        colors.effects[effect.effect_id],
        colors.PLAYER_ABSOLUTE_PALETTE[effect.owner],
    ]
    name = self.get_unit_name(
        surf, features.Effects(effect.effect_id).name, effect.radius)
    for pos in effect.pos:
      p = point.Point.build(pos)
      # pygame alpha transparency doesn't work, so just draw thin circles.
      for r in range(1, int(effect.radius * 3)):
        surf.draw_circle(color[r % 3], p, r / 3, thickness=2)
      if name:
        surf.write_world(self._font_small, colors.white, p, name)
@sw.decorate
def draw_selection(self, surf):
  """Draw the in-progress drag-selection rectangle, if any."""
  start = self._select_start  # Cache to avoid a race condition.
  if not start:
    return
  mouse_pos = self.get_mouse_pos()
  # Only draw while the mouse is on the same screen surface the drag
  # started on.
  if (mouse_pos and mouse_pos.surf.surf_type & SurfType.SCREEN and
      mouse_pos.surf.surf_type == start.surf.surf_type):
    selection = point.Rect(start.world_pos, mouse_pos.world_pos)
    surf.draw_rect(colors.green, selection, 1)
@sw.decorate
def draw_build_target(self, surf):
  """Draw the footprint of the queued building at the (snapped) cursor."""
  # Snap to cell centers, or to half-cell centers for odd-sized footprints.
  round_half = lambda v, cond: round(v - 0.5) + 0.5 if cond else round(v)

  queued_action = self._queued_action
  if queued_action:
    radius = queued_action.footprint_radius
    if radius:
      pos = self.get_mouse_pos()
      if pos:
        pos = point.Point(round_half(pos.world_pos.x, (radius * 2) % 2),
                          round_half(pos.world_pos.y, (radius * 2) % 2))
        surf.draw_circle(
            colors.PLAYER_ABSOLUTE_PALETTE[
                self._obs.observation.player_common.player_id],
            pos, radius)
@sw.decorate
def draw_overlay(self, surf):
  """Draw the overlay: resources, score/speed stats, and recent alerts."""
  obs = self._obs.observation
  player = obs.player_common
  surf.write_screen(
      self._font_large, colors.green, (0.2, 0.2),
      "Minerals: %s, Vespene: %s, Food: %s / %s" % (
          player.minerals, player.vespene, player.food_used, player.food_cap))
  times, steps = zip(*self._game_times)
  sec = obs.game_loop // 22.4  # http://liquipedia.net/starcraft2/Game_Speed
  surf.write_screen(
      self._font_large, colors.green, (-0.2, 0.2),
      "Score: %s, Step: %s, %.1f/s, Time: %d:%02d" % (
          obs.score.score, obs.game_loop, sum(steps) / (sum(times) or 1),
          sec // 60, sec % 60),
      align="right")
  surf.write_screen(
      self._font_large, colors.green * 0.8, (-0.2, 1.2),
      "APM: %d, EPM: %d, FPS: O:%.1f, R:%.1f" % (
          obs.score.score_details.current_apm,
          obs.score.score_details.current_effective_apm,
          len(times) / (sum(times) or 1),
          len(self._render_times) / (sum(self._render_times) or 1)),
      align="right")
  line = 3
  # sorted() materializes the items, so deleting from _alerts here is safe.
  for alert, ts in sorted(self._alerts.items(), key=lambda item: item[1]):
    if time.time() < ts + 3:  # Show for 3 seconds.
      surf.write_screen(self._font_large, colors.red, (20, line), alert)
      line += 1
    else:
      del self._alerts[alert]
@sw.decorate
def draw_help(self, surf):
  """Draw the help dialog listing the keyboard/mouse shortcuts."""
  if not self._help:
    return

  def write(loc, text):
    surf.write_screen(self._font_large, colors.black, loc, text)

  surf.surf.fill(colors.white * 0.8)
  write((1, 1), "Shortcuts:")

  # Align the descriptions past the longest hotkey.
  max_len = max(len(s) for s, _ in self.shortcuts)
  row = 2
  for hotkey, description in self.shortcuts:
    write((2, row), hotkey)
    write((3 + max_len * 0.7, row), description)
    row += 1
@sw.decorate
def draw_commands(self, surf):
  """Draw the list of upgrades and available commands."""
  line = itertools.count(2)  # Shared row counter for both sections.

  def write(loc, text, color=colors.yellow):
    surf.write_screen(self._font_large, color, loc, text)
  def write_line(x, *args, **kwargs):
    write((x, next(line)), *args, **kwargs)

  action_count = len(self._obs.observation.abilities)
  if action_count > 0:
    write_line(0.2, "Available Actions:", colors.green)
    past_abilities = {act.ability
                      for act in self._past_actions if act.ability}
    for cmd in sorted(self._abilities(lambda c: c.name != "Smart"),
                      key=lambda c: c.name):
      # Color encodes state: green = queued, dim green = partial hotkey
      # match, red = recently issued, yellow = available.
      if self._queued_action and cmd == self._queued_action:
        color = colors.green
      elif self._queued_hotkey and cmd.hotkey.startswith(self._queued_hotkey):
        color = colors.green * 0.75
      elif cmd.ability_id in past_abilities:
        color = colors.red
      else:
        color = colors.yellow
      hotkey = cmd.hotkey[0:3]  # truncate "escape" -> "esc"
      y = next(line)
      write((1, y), hotkey, color)
      write((4, y), cmd.name, color)
    next(line)  # Blank row between sections.

  upgrade_count = len(self._obs.observation.raw_data.player.upgrade_ids)
  if upgrade_count > 0:
    write_line(0.2, "Upgrades: %s" % upgrade_count, colors.green)
    upgrades = [
        self._static_data.upgrades[upgrade_id].name
        for upgrade_id in self._obs.observation.raw_data.player.upgrade_ids]
    for name in sorted(upgrades):
      write_line(1, name)
@sw.decorate
def draw_panel(self, surf):
  """Draw the unit selection or build queue."""
  left = -14  # How far from the right border
  line = itertools.count(3)  # Shared row counter for the whole panel.

  def unit_name(unit_type):
    return self._static_data.units.get(unit_type, "<unknown>")

  def write(loc, text, color=colors.yellow):
    surf.write_screen(self._font_large, color, loc, text)
  def write_line(x, *args, **kwargs):
    write((left + x, next(line)), *args, **kwargs)

  def write_single(unit):
    """Write a description of a single selected unit."""
    write_line(1, unit_name(unit.unit_type), colors.cyan)
    write_line(1, "Health: %s / %s" % (unit.health, unit.max_health))
    if unit.max_shields:
      write_line(1, "Shields: %s / %s" % (unit.shields, unit.max_shields))
    if unit.max_energy:
      write_line(1, "Energy: %s / %s" % (unit.energy, unit.max_energy))
    if unit.build_progress > 0:
      write_line(1, "Progress: %d%%" % (unit.build_progress * 100))
    if unit.transport_slots_taken > 0:
      write_line(1, "Slots: %s" % unit.transport_slots_taken)

  def write_multi(units):
    """Write a description of multiple selected units."""
    # Aggregate by unit name: "count name" per row.
    counts = collections.defaultdict(int)
    for unit in units:
      counts[unit_name(unit.unit_type)] += 1
    for name, count in sorted(counts.items()):
      y = next(line)
      write((left + 1, y), count)
      write((left + 3, y), name)

  ui = self._obs.observation.ui_data

  if ui.groups:
    write_line(0, "Control Groups:", colors.green)
    for group in ui.groups:
      y = next(line)
      write((left + 1, y), "%s:" % group.control_group_index, colors.green)
      write((left + 3, y), "%s %s" % (group.count,
                                      unit_name(group.leader_unit_type)))
    next(line)  # Blank row before the selection section.

  # The ui proto carries exactly one of these selection panels.
  if ui.HasField("single"):
    write_line(0, "Selection:", colors.green)
    write_single(ui.single.unit)
    if (ui.single.attack_upgrade_level or
        ui.single.armor_upgrade_level or
        ui.single.shield_upgrade_level):
      write_line(1, "Upgrades:")
      if ui.single.attack_upgrade_level:
        write_line(2, "Attack: %s" % ui.single.attack_upgrade_level)
      if ui.single.armor_upgrade_level:
        write_line(2, "Armor: %s" % ui.single.armor_upgrade_level)
      if ui.single.shield_upgrade_level:
        write_line(2, "Shield: %s" % ui.single.shield_upgrade_level)
    if ui.single.buffs:
      write_line(1, "Buffs:")
      for b in ui.single.buffs:
        write_line(2, buffs.Buffs(b).name)
  elif ui.HasField("multi"):
    write_line(0, "Selection:", colors.green)
    write_multi(ui.multi.units)
  elif ui.HasField("cargo"):
    write_line(0, "Selection:", colors.green)
    write_single(ui.cargo.unit)
    next(line)
    write_line(0, "Cargo:", colors.green)
    write_line(1, "Empty slots: %s" % ui.cargo.slots_available)
    write_multi(ui.cargo.passengers)
  elif ui.HasField("production"):
    write_line(0, "Selection:", colors.green)
    write_single(ui.production.unit)
    next(line)
    if ui.production.production_queue:
      write_line(0, "Production:", colors.green)
      for item in ui.production.production_queue:
        # Prefer the general ability name over the specific remapped one.
        specific_data = self._static_data.abilities[item.ability_id]
        if specific_data.remaps_to_ability_id:
          general_data = self._static_data.abilities[
              specific_data.remaps_to_ability_id]
        else:
          general_data = specific_data
        s = (general_data.friendly_name or general_data.button_name or
             general_data.link_name)
        s = s.replace("Research ", "").replace("Train ", "")
        if item.build_progress > 0:
          s += ": %d%%" % (item.build_progress * 100)
        write_line(1, s)
    elif ui.production.build_queue:  # Handle old binaries, no research.
      write_line(0, "Build Queue:", colors.green)
      for unit in ui.production.build_queue:
        s = unit_name(unit.unit_type)
        if unit.build_progress > 0:
          s += ": %d%%" % (unit.build_progress * 100)
        write_line(1, s)
@sw.decorate
def draw_actions(self):
  """Draw the actions so that they can be inspected for accuracy."""
  now = time.time()
  for act in self._past_actions:
    if act.pos and now < act.deadline:
      # Fraction of the action's lifetime remaining, 1.0 -> 0.0.
      remain = (act.deadline - now) / (act.deadline - act.time)
      if isinstance(act.pos, point.Point):
        # Point target: shrinking circle on every surface.
        size = remain / 3
        self.all_surfs(_Surface.draw_circle, act.color, act.pos, size, 1)
      else:
        # Rect target (a selection box).
        # Fade with alpha would be nice, but doesn't seem to work.
        self.all_surfs(_Surface.draw_rect, act.color, act.pos, 1)
@sw.decorate
def prepare_actions(self, obs):
  """Keep a list of the past actions so they can be drawn.

  Expires old entries, then records each action in `obs.actions` (raw,
  feature layer and rgb variants) as a `PastAction` with a color, a
  position (point or rect, or None) and a display deadline.
  """
  now = time.time()
  # Expire actions whose display window has passed (list is append-ordered,
  # so the oldest deadlines are at the front).
  while self._past_actions and self._past_actions[0].deadline < now:
    self._past_actions.pop(0)

  def add_act(ability_id, color, pos, timeout=1):
    if ability_id:
      ability = self._static_data.abilities[ability_id]
      if ability.remaps_to_ability_id:  # Prefer general abilities.
        ability_id = ability.remaps_to_ability_id
    self._past_actions.append(
        PastAction(ability_id, color, pos, now, now + timeout))

  for act in obs.actions:
    # Raw actions: world space targets, drawn yellow.
    if (act.HasField("action_raw") and
        act.action_raw.HasField("unit_command") and
        act.action_raw.unit_command.HasField("target_world_space_pos")):
      pos = point.Point.build(
          act.action_raw.unit_command.target_world_space_pos)
      add_act(act.action_raw.unit_command.ability_id, colors.yellow, pos)
    # Feature layer actions: screen/minimap pixels mapped back to world
    # space, drawn cyan.
    if act.HasField("action_feature_layer"):
      act_fl = act.action_feature_layer
      if act_fl.HasField("unit_command"):
        if act_fl.unit_command.HasField("target_screen_coord"):
          pos = self._world_to_feature_screen_px.back_pt(
              point.Point.build(act_fl.unit_command.target_screen_coord))
          add_act(act_fl.unit_command.ability_id, colors.cyan, pos)
        elif act_fl.unit_command.HasField("target_minimap_coord"):
          pos = self._world_to_feature_minimap_px.back_pt(
              point.Point.build(act_fl.unit_command.target_minimap_coord))
          add_act(act_fl.unit_command.ability_id, colors.cyan, pos)
        else:
          add_act(act_fl.unit_command.ability_id, None, None)
      if (act_fl.HasField("unit_selection_point") and
          act_fl.unit_selection_point.HasField("selection_screen_coord")):
        pos = self._world_to_feature_screen_px.back_pt(point.Point.build(
            act_fl.unit_selection_point.selection_screen_coord))
        add_act(None, colors.cyan, pos)
      if act_fl.HasField("unit_selection_rect"):
        for r in act_fl.unit_selection_rect.selection_screen_coord:
          rect = point.Rect(
              self._world_to_feature_screen_px.back_pt(
                  point.Point.build(r.p0)),
              self._world_to_feature_screen_px.back_pt(
                  point.Point.build(r.p1)))
          add_act(None, colors.cyan, rect, 0.3)
    # RGB actions: same mapping via the rgb transforms, drawn red.
    if act.HasField("action_render"):
      act_rgb = act.action_render
      if act_rgb.HasField("unit_command"):
        if act_rgb.unit_command.HasField("target_screen_coord"):
          pos = self._world_to_rgb_screen_px.back_pt(
              point.Point.build(act_rgb.unit_command.target_screen_coord))
          add_act(act_rgb.unit_command.ability_id, colors.red, pos)
        elif act_rgb.unit_command.HasField("target_minimap_coord"):
          pos = self._world_to_rgb_minimap_px.back_pt(
              point.Point.build(act_rgb.unit_command.target_minimap_coord))
          add_act(act_rgb.unit_command.ability_id, colors.red, pos)
        else:
          add_act(act_rgb.unit_command.ability_id, None, None)
      if (act_rgb.HasField("unit_selection_point") and
          act_rgb.unit_selection_point.HasField("selection_screen_coord")):
        pos = self._world_to_rgb_screen_px.back_pt(point.Point.build(
            act_rgb.unit_selection_point.selection_screen_coord))
        add_act(None, colors.red, pos)
      if act_rgb.HasField("unit_selection_rect"):
        for r in act_rgb.unit_selection_rect.selection_screen_coord:
          rect = point.Rect(
              self._world_to_rgb_screen_px.back_pt(
                  point.Point.build(r.p0)),
              self._world_to_rgb_screen_px.back_pt(
                  point.Point.build(r.p1)))
          add_act(None, colors.red, rect, 0.3)
@sw.decorate
def draw_base_map(self, surf):
  """Draw the base map: terrain height, creep, power and visibility."""
  hmap_feature = features.SCREEN_FEATURES.height_map
  hmap = hmap_feature.unpack(self._obs.observation)
  if not hmap.any():
    # All-zero height map: shift it so it doesn't render at the palette's
    # lowest value.
    hmap = hmap + 100
  hmap_color = hmap_feature.color(hmap)
  out = hmap_color * 0.6  # Darken the terrain so overlays stand out.

  # Blend creep over the terrain.
  creep_feature = features.SCREEN_FEATURES.creep
  creep = creep_feature.unpack(self._obs.observation)
  creep_mask = creep > 0
  creep_color = creep_feature.color(creep)
  out[creep_mask, :] = (0.4 * out[creep_mask, :] +
                        0.6 * creep_color[creep_mask, :])

  # Blend powered areas (more subtly than creep).
  power_feature = features.SCREEN_FEATURES.power
  power = power_feature.unpack(self._obs.observation)
  power_mask = power > 0
  power_color = power_feature.color(power)
  out[power_mask, :] = (0.7 * out[power_mask, :] +
                        0.3 * power_color[power_mask, :])

  if self._render_player_relative:
    # Overwrite (not blend) with the player_relative colors where set.
    player_rel_feature = features.SCREEN_FEATURES.player_relative
    player_rel = player_rel_feature.unpack(self._obs.observation)
    player_rel_mask = player_rel > 0
    player_rel_color = player_rel_feature.color(player_rel)
    out[player_rel_mask, :] = player_rel_color[player_rel_mask, :]

  # Dim by fog of war: hidden=0.5, seen=0.75, visible=1.0 brightness.
  visibility = features.SCREEN_FEATURES.visibility_map.unpack(
      self._obs.observation)
  visibility_fade = np.array([[0.5] * 3, [0.75]*3, [1]*3])
  out *= visibility_fade[visibility]
  surf.blit_np_array(out)
  @sw.decorate
  def draw_mini_map(self, surf):
    """Draw the minimap, either the game's rgb render or from feature layers."""
    if (self._render_rgb and self._obs.observation.HasField("render_data") and
        self._obs.observation.render_data.HasField("minimap")):
      # Draw the rendered version.
      surf.blit_np_array(features.Feature.unpack_rgb_image(
          self._obs.observation.render_data.minimap))
    else:  # Render it manually from feature layer data.
      hmap_feature = features.MINIMAP_FEATURES.height_map
      hmap = hmap_feature.unpack(self._obs.observation)
      if not hmap.any():  # Completely flat map; lift it so it isn't all black.
        hmap = hmap + 100
      hmap_color = hmap_feature.color(hmap)
      creep_feature = features.MINIMAP_FEATURES.creep
      creep = creep_feature.unpack(self._obs.observation)
      creep_mask = creep > 0
      creep_color = creep_feature.color(creep)
      if self._obs.observation.player_common.player_id in (0, 16):  # observer
        # If we're the observer, show the absolute since otherwise all player
        # units are friendly, making it pretty boring.
        player_feature = features.MINIMAP_FEATURES.player_id
      else:
        player_feature = features.MINIMAP_FEATURES.player_relative
      player_data = player_feature.unpack(self._obs.observation)
      player_mask = player_data > 0
      player_color = player_feature.color(player_data)
      # Fade factors indexed by visibility level 0..2 (hidden darkest).
      visibility = features.MINIMAP_FEATURES.visibility_map.unpack(
          self._obs.observation)
      visibility_fade = np.array([[0.5] * 3, [0.75]*3, [1]*3])
      # Compose and color the different layers.
      out = hmap_color * 0.6
      out[creep_mask, :] = (0.4 * out[creep_mask, :] +
                            0.6 * creep_color[creep_mask, :])
      out[player_mask, :] = player_color[player_mask, :]
      out *= visibility_fade[visibility]
      # Render the bit of the composited layers that actually correspond to the
      # map. This isn't all of it on non-square maps.
      shape = self._playable.diagonal.scale_max_size(
          self._feature_minimap_px).floor()
      surf.blit_np_array(out[:shape.y, :shape.x, :])
      surf.draw_rect(colors.white * 0.8, self._camera, 1)  # Camera
      # Sensor rings.
      for radar in self._obs.observation.raw_data.radar:
        surf.draw_circle(colors.white / 2, point.Point.build(radar.pos),
                         radar.radius, 1)
    # Highlight start locations early in the game (22.4 game loops per second).
    if self._obs.observation.game_loop < 22.4 * 20:
      for loc in self._game_info.start_raw.start_locations:
        surf.draw_circle(colors.red, point.Point.build(loc), 5, 1)
    pygame.draw.rect(surf.surf, colors.red, surf.surf.get_rect(), 1)  # Border
def check_valid_queued_action(self):
# Make sure the existing command is still valid
if (self._queued_hotkey and not self._abilities(
lambda cmd: cmd.hotkey.startswith(self._queued_hotkey))):
self._queued_hotkey = ""
if (self._queued_action and not self._abilities(
lambda cmd: self._queued_action == cmd)):
self._queued_action = None
  @sw.decorate
  def draw_rendered_map(self, surf):
    """Draw the rendered pixels."""
    # Blit the game's own rgb render of the main screen onto the surface.
    surf.blit_np_array(features.Feature.unpack_rgb_image(
        self._obs.observation.render_data.map))
  def draw_screen(self, surf):
    """Draw the screen area."""
    # surf.fill(colors.black)
    if (self._render_rgb and self._obs.observation.HasField("render_data") and
        self._obs.observation.render_data.HasField("map")):
      # Prefer the game's own rgb render when it's available.
      self.draw_rendered_map(surf)
    else:
      # Composite from feature layers, back to front.
      self.draw_base_map(surf)
      self.draw_effects(surf)
      self.draw_units(surf)
    # UI overlays drawn regardless of the render mode.
    self.draw_selection(surf)
    self.draw_build_target(surf)
    self.draw_overlay(surf)
    self.draw_commands(surf)
    self.draw_panel(surf)
@sw.decorate
def draw_feature_layer(self, surf, feature):
"""Draw a feature layer."""
layer = feature.unpack(self._obs.observation)
if layer is not None:
surf.blit_np_array(feature.color(layer))
else: # Ignore layers that aren't in this version of SC2.
surf.surf.fill(colors.black)
@sw.decorate
def draw_raw_layer(self, surf, from_obs, name, color):
"""Draw a raw layer."""
if from_obs:
layer = getattr(self._obs.observation.raw_data.map_state, name)
else:
layer = getattr(self._game_info.start_raw, name)
layer = features.Feature.unpack_layer(layer)
if layer is not None:
surf.blit_np_array(color[layer])
else: # Ignore layers that aren't in this version of SC2.
surf.surf.fill(colors.black)
  def all_surfs(self, fn, *args, **kwargs):
    # Apply fn to every surface that has a world-to-surface transform.
    for surf in self._surfaces:
      if surf.world_to_surf:
        fn(surf, *args, **kwargs)
  @sw.decorate
  def render(self, obs):
    """Push an observation onto the queue to be rendered."""
    if not self._initialized:
      return
    now = time.time()
    # Record (wall-clock delta, game loops advanced) pairs for timing stats.
    self._game_times.append(
        (now - self._last_time,
         max(1, obs.observation.game_loop - self._obs.observation.game_loop)))
    self._last_time = now
    self._last_game_loop = self._obs.observation.game_loop
    self._obs_queue.put(obs)
    if self._render_sync:
      self._obs_queue.join()  # Block until the render thread has consumed it.
  def render_thread(self):
    """A render loop that pulls observations off the queue to render."""
    obs = True
    while obs:  # Send something falsy through the queue to shut down.
      obs = self._obs_queue.get()
      if obs:
        # Timestamp alerts and failed actions so they can be shown on screen.
        for alert in obs.observation.alerts:
          self._alerts[sc_pb.Alert.Name(alert)] = time.time()
        for err in obs.action_errors:
          if err.result != sc_err.Success:
            self._alerts[sc_err.ActionResult.Name(err.result)] = time.time()
        self.prepare_actions(obs)
        if self._obs_queue.empty():
          # Only render the latest observation so we keep up with the game.
          self.render_obs(obs)
          if self._video_writer:
            # pygame arrays are (x, y, 3); video frames want row-major order.
            self._video_writer.add(np.transpose(
                pygame.surfarray.pixels3d(self._window), axes=(1, 0, 2)))
        self._obs_queue.task_done()
  @with_lock(render_lock)
  @sw.decorate
  def render_obs(self, obs):
    """Render a frame given an observation."""
    start_time = time.time()
    self._obs = obs
    # The available abilities may have changed; invalidate stale queued input.
    self.check_valid_queued_action()
    self._update_camera(point.Point.build(
        self._obs.observation.raw_data.player.camera))
    for surf in self._surfaces:
      # Render that surface.
      surf.draw(surf)
    mouse_pos = self.get_mouse_pos()
    if mouse_pos:
      # Draw a small mouse cursor
      self.all_surfs(_Surface.draw_circle, colors.green, mouse_pos.world_pos,
                     0.1)
    self.draw_actions()
    with sw("flip"):
      pygame.display.flip()  # Push the composed frame to the display.
    self._render_times.append(time.time() - start_time)  # For timing stats.
  def run(self, run_config, controller, max_game_steps=0, max_episodes=0,
          game_steps_per_episode=0, save_replay=False):
    """Run loop that gets observations, renders them, and sends back actions.

    Args:
      run_config: The run config, used when saving replays.
      controller: A remote controller connected to the game.
      max_game_steps: Stop after this many total game steps; 0 for no limit.
      max_episodes: Stop after this many episodes; 0 for no limit.
      game_steps_per_episode: End an episode after this many steps; 0 for no
          limit.
      save_replay: Whether to save a replay when the run ends.
    """
    is_replay = (controller.status == remote_controller.Status.in_replay)
    total_game_steps = 0
    start_time = time.time()
    num_episodes = 0
    try:
      while True:  # One iteration per episode.
        self.init(controller.game_info(), controller.data())
        episode_steps = 0
        num_episodes += 1
        controller.step()
        while True:  # One iteration per rendered frame.
          total_game_steps += self._step_mul
          episode_steps += self._step_mul
          frame_start_time = time.time()
          obs = controller.observe()
          self.render(obs)
          if obs.player_result:  # Episode is over (win/loss/tie).
            break
          cmd = self.get_actions(run_config, controller)
          if cmd == ActionCmd.STEP:
            pass
          elif cmd == ActionCmd.QUIT:
            if not is_replay and save_replay:
              self.save_replay(run_config, controller)
            return
          elif cmd == ActionCmd.RESTART:
            break
          else:
            raise Exception("Unexpected command: %s" % cmd)
          controller.step(self._step_mul)
          if max_game_steps and total_game_steps >= max_game_steps:
            if not is_replay and save_replay:
              self.save_replay(run_config, controller)
            return
          if game_steps_per_episode and episode_steps >= game_steps_per_episode:
            break
          with sw("sleep"):
            # Sleep the remainder of the frame to run at roughly self._fps.
            elapsed_time = time.time() - frame_start_time
            time.sleep(max(0, 1 / self._fps - elapsed_time))
        if is_replay:  # Replays run once; don't restart.
          break
        if save_replay:
          self.save_replay(run_config, controller)
        if max_episodes and num_episodes >= max_episodes:
          break
        print("Restarting")
        controller.restart()
    except KeyboardInterrupt:
      pass
    finally:
      self.close()
      elapsed_time = time.time() - start_time
      print("took %.3f seconds for %s steps: %.3f fps" %
            (elapsed_time, total_game_steps, total_game_steps / elapsed_time))
  def __del__(self):
    self.close()  # Best-effort cleanup if the renderer is garbage collected.
| pysc2-master | pysc2/lib/renderer_human.py |
# Copyright 2021 DeepMind Technologies Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This replaces google's resources used for locating data deps."""
def GetResourceFilename(path):
  """Return the filename for a resource; an identity outside google infra."""
  return path
| pysc2-master | pysc2/lib/resources.py |
# Copyright 2019 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Diff proto objects returning paths to changed attributes."""
import numpy as np
def summarize_array_diffs(lhs, rhs):
  """Output value differences, with index for each, between two arrays.

  Returns "" if the arrays are equal, otherwise a string like
  "2 element(s) changed - [0]: 1 -> 2; [3]: 4 -> 5".
  """
  changed = np.transpose(np.nonzero(lhs - rhs))
  if not changed.size:
    return ""
  descriptions = []
  for multi_index in changed:
    pos = tuple(int(i) for i in multi_index)
    label = "".join("[{}]".format(i) for i in pos)
    descriptions.append("{}: {} -> {}".format(label, lhs[pos], rhs[pos]))
  return "{} element(s) changed - ".format(len(changed)) + "; ".join(
      descriptions)
| pysc2-master | pysc2/lib/np_util.py |
# Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Define the static list of upgrades for SC2."""
import enum
# pylint: disable=invalid-name
class Upgrades(enum.IntEnum):
  """The list of upgrades, as generated by bin/gen_data.py."""
  # Values are the upgrade ids used by the SC2 API.
  AdaptiveTalons = 293
  AdrenalGlands = 65
  AdvancedBallistics = 140
  AnabolicSynthesis = 88
  AnionPulseCrystals = 99
  Blink = 87
  Burrow = 64
  CentrificalHooks = 75
  Charge = 86
  ChitinousPlating = 4
  CloakingField = 20
  CombatShield = 16
  ConcussiveShells = 17
  CorvidReactor = 22
  CycloneRapidFireLaunchers = 291
  DrillingClaws = 122
  EnhancedShockwaves = 296
  ExtendedThermalLance = 50
  GlialReconstitution = 2
  GraviticBooster = 48
  GraviticDrive = 49
  GravitonCatapult = 1
  GroovedSpines = 134
  HiSecAutoTracking = 5
  HighCapacityFuelTanks = 139
  HyperflightRotors = 136
  InfernalPreigniter = 19
  LockOn = 144
  MetabolicBoost = 66
  MuscularAugments = 135
  NeosteelFrame = 10
  NeuralParasite = 101
  PathogenGlands = 74
  PersonalCloaking = 25
  PneumatizedCarapace = 62
  ProtossAirArmorsLevel1 = 81
  ProtossAirArmorsLevel2 = 82
  ProtossAirArmorsLevel3 = 83
  ProtossAirWeaponsLevel1 = 78
  ProtossAirWeaponsLevel2 = 79
  ProtossAirWeaponsLevel3 = 80
  ProtossGroundArmorsLevel1 = 42
  ProtossGroundArmorsLevel2 = 43
  ProtossGroundArmorsLevel3 = 44
  ProtossGroundWeaponsLevel1 = 39
  ProtossGroundWeaponsLevel2 = 40
  ProtossGroundWeaponsLevel3 = 41
  ProtossShieldsLevel1 = 45
  ProtossShieldsLevel2 = 46
  ProtossShieldsLevel3 = 47
  PsiStorm = 52
  ResonatingGlaives = 130
  ShadowStrike = 141
  SmartServos = 289
  Stimpack = 15
  TerranInfantryArmorsLevel1 = 11
  TerranInfantryArmorsLevel2 = 12
  TerranInfantryArmorsLevel3 = 13
  TerranInfantryWeaponsLevel1 = 7
  TerranInfantryWeaponsLevel2 = 8
  TerranInfantryWeaponsLevel3 = 9
  TerranShipWeaponsLevel1 = 36
  TerranShipWeaponsLevel2 = 37
  TerranShipWeaponsLevel3 = 38
  TerranStructureArmor = 6
  TerranVehicleAndShipArmorsLevel1 = 116
  TerranVehicleAndShipArmorsLevel2 = 117
  TerranVehicleAndShipArmorsLevel3 = 118
  TerranVehicleWeaponsLevel1 = 30
  TerranVehicleWeaponsLevel2 = 31
  TerranVehicleWeaponsLevel3 = 32
  TunnelingClaws = 3
  WarpGateResearch = 84
  WeaponRefit = 76
  ZergFlyerArmorsLevel1 = 71
  ZergFlyerArmorsLevel2 = 72
  ZergFlyerArmorsLevel3 = 73
  ZergFlyerWeaponsLevel1 = 68
  ZergFlyerWeaponsLevel2 = 69
  ZergFlyerWeaponsLevel3 = 70
  ZergGroundArmorsLevel1 = 56
  ZergGroundArmorsLevel2 = 57
  ZergGroundArmorsLevel3 = 58
  ZergMeleeWeaponsLevel1 = 53
  ZergMeleeWeaponsLevel2 = 54
  ZergMeleeWeaponsLevel3 = 55
  ZergMissileWeaponsLevel1 = 59
  ZergMissileWeaponsLevel2 = 60
  ZergMissileWeaponsLevel3 = 61
| pysc2-master | pysc2/lib/upgrades.py |
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A thread pool for running a set of functions synchronously in parallel.
This is mainly intended for use where the functions have a barrier and none will
return until all have been called.
"""
from concurrent import futures
import functools
class RunParallel(object):
  """Run all funcs in parallel."""

  def __init__(self, timeout=None):
    self._timeout = timeout  # Seconds to wait in run(), or None for no limit.
    self._executor = None  # Lazily created thread pool.
    self._workers = 0  # Current thread pool size.

  def run(self, funcs):
    """Run a set of functions in parallel, returning their results.

    Make sure any function you pass exits with a reasonable timeout. If it
    doesn't return within the timeout or the result is ignored due an exception
    in a separate thread it will continue to stick around until it finishes,
    including blocking process exit.

    Args:
      funcs: An iterable of functions or iterable of args to functools.partial.

    Returns:
      A list of return values with the values matching the order in funcs.

    Raises:
      Propagates the first exception encountered in one of the functions.
    """
    funcs = [f if callable(f) else functools.partial(*f) for f in funcs]
    if len(funcs) == 1:  # Ignore threads if it's not needed.
      return [funcs[0]()]
    if len(funcs) > self._workers:  # Lazy init and grow as needed.
      self.shutdown()
      self._workers = len(funcs)
      while True:
        try:
          # Temporary workaround for "<frozen importlib._bootstrap>", line 110.
          # Race condition on import of ThreadPoolExecutor.
          self._executor = futures.ThreadPoolExecutor(self._workers)
          break
        except KeyError:
          pass
    futs = [self._executor.submit(f) for f in funcs]
    done, not_done = futures.wait(futs, self._timeout, futures.FIRST_EXCEPTION)
    # Make sure to propagate any exceptions.
    for f in done:
      if not f.cancelled() and f.exception() is not None:
        if not_done:
          # If there are some calls that haven't finished, cancel and recreate
          # the thread pool. Otherwise we may have a thread running forever
          # blocking parallel calls.
          for nd in not_done:
            nd.cancel()
          self.shutdown(False)  # Don't wait, they may be deadlocked.
        raise f.exception()
    # Either done or timed out, so don't wait again.
    return [f.result(timeout=0) for f in futs]

  def shutdown(self, wait=True):
    """Tear down the thread pool, optionally waiting for threads to finish."""
    if self._executor:
      self._executor.shutdown(wait)
      self._executor = None
      self._workers = 0

  def __del__(self):
    self.shutdown()  # Best-effort cleanup on garbage collection.
| pysc2-master | pysc2/lib/run_parallel.py |
#!/usr/bin/python
# Copyright 2019 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for proto_diff.py."""
from absl.testing import absltest
from pysc2.lib import proto_diff
from s2clientprotocol import sc2api_pb2 as sc_pb
from s2clientprotocol import score_pb2
class ProtoPathTest(absltest.TestCase):
  """Tests for proto_diff.ProtoPath: construction, ordering, and lookup."""

  def testCreationFromTuple(self):
    self.assertEqual(
        str(proto_diff.ProtoPath(("observation", "actions"))),
        "observation.actions")

  def testCreationFromList(self):
    self.assertEqual(
        str(proto_diff.ProtoPath(["observation", "actions"])),
        "observation.actions")

  def testCreationFromGenerator(self):
    self.assertEqual(
        str(proto_diff.ProtoPath(a for a in "abc")),
        "a.b.c")

  def testStringRepr(self):
    # Integer components render as array indices.
    self.assertEqual(
        str(proto_diff.ProtoPath(("observation", "actions", 1, "target"))),
        "observation.actions[1].target")

  def testOrdering(self):
    self.assertLess(
        proto_diff.ProtoPath(("observation", "actions", 1, "game_loop")),
        proto_diff.ProtoPath(("observation", "actions", 1, "target")))
    self.assertLess(
        proto_diff.ProtoPath(("observation", "actions", 1)),
        proto_diff.ProtoPath(("observation", "actions", 1, "target")))
    self.assertGreater(
        proto_diff.ProtoPath(("observation", "actions", 1)),
        proto_diff.ProtoPath(("observation",)))

  def testEquals(self):
    a = proto_diff.ProtoPath(("observation", "actions", 1))
    b = proto_diff.ProtoPath(("observation", "actions", 1))
    self.assertEqual(a, b)
    self.assertEqual(hash(a), hash(b))

  def testNotEqual(self):
    a = proto_diff.ProtoPath(("observation", "actions", 1))
    b = proto_diff.ProtoPath(("observation", "actions", 2))
    self.assertNotEqual(a, b)
    self.assertNotEqual(hash(a), hash(b))

  def testIndexing(self):
    # Supports both positive and negative indexing into components.
    path = proto_diff.ProtoPath(("observation", "actions", 1))
    self.assertEqual(path[0], "observation")
    self.assertEqual(path[1], "actions")
    self.assertEqual(path[-2], "actions")
    self.assertEqual(path[-1], 1)

  def testGetField(self):
    proto = sc_pb.ResponseObservation(
        observation=sc_pb.Observation(game_loop=1, alerts=[sc_pb.AlertError]))
    game_loop = proto_diff.ProtoPath(("observation", "game_loop"))
    alert = proto_diff.ProtoPath(("observation", "alerts", 0))
    self.assertEqual(game_loop.get_field(proto), 1)
    self.assertEqual(alert.get_field(proto), sc_pb.AlertError)
    self.assertEqual(
        proto_diff.ProtoPath(game_loop.path[:-1]).get_field(proto),
        sc_pb.Observation(game_loop=1, alerts=[sc_pb.AlertError]))

  def testWithAnonymousArrayIndices(self):
    a = proto_diff.ProtoPath(("observation", "actions"))
    b = proto_diff.ProtoPath(("observation", "actions", 1))
    c = proto_diff.ProtoPath(("observation", "actions", 2))
    self.assertEqual(str(a), "observation.actions")
    self.assertEqual(
        str(b.with_anonymous_array_indices()), "observation.actions[*]")
    self.assertEqual(
        b.with_anonymous_array_indices(),
        c.with_anonymous_array_indices())
def _alert_formatter(path, proto_a, proto_b):
  """Format a changed alert field by enum name; None for non-alert paths."""
  value_a = path.get_field(proto_a)
  if path[-2] != "alerts":
    return None
  value_b = path.get_field(proto_b)
  return "{} -> {}".format(
      sc_pb.Alert.Name(value_a), sc_pb.Alert.Name(value_b))
class ProtoDiffTest(absltest.TestCase):
  """Tests for proto_diff.compute_diff and its report formatting."""

  def testNoDiffs(self):
    # Identical protos compare as None, not an empty diff.
    a = sc_pb.ResponseObservation()
    b = sc_pb.ResponseObservation()
    diff = proto_diff.compute_diff(a, b)
    self.assertIsNone(diff)

  def testAddedField(self):
    a = sc_pb.ResponseObservation()
    b = sc_pb.ResponseObservation(
        observation=sc_pb.Observation(game_loop=1))
    diff = proto_diff.compute_diff(a, b)
    self.assertIsNotNone(diff)
    self.assertLen(diff.added, 1, diff)
    self.assertEqual(str(diff.added[0]), "observation")
    self.assertEqual(diff.added, diff.all_diffs())
    self.assertEqual(diff.report(), "Added observation.")

  def testAddedFields(self):
    a = sc_pb.ResponseObservation(
        observation=sc_pb.Observation(
            alerts=[sc_pb.AlertError]))
    b = sc_pb.ResponseObservation(
        observation=sc_pb.Observation(
            alerts=[sc_pb.AlertError, sc_pb.MergeComplete]),
        player_result=[sc_pb.PlayerResult()])
    diff = proto_diff.compute_diff(a, b)
    self.assertIsNotNone(diff)
    self.assertLen(diff.added, 2, diff)
    self.assertEqual(str(diff.added[0]), "observation.alerts[1]")
    self.assertEqual(str(diff.added[1]), "player_result")
    self.assertEqual(diff.added, diff.all_diffs())
    self.assertEqual(
        diff.report(),
        "Added observation.alerts[1].\n"
        "Added player_result.")

  def testRemovedField(self):
    a = sc_pb.ResponseObservation(observation=sc_pb.Observation(game_loop=1))
    b = sc_pb.ResponseObservation(observation=sc_pb.Observation())
    diff = proto_diff.compute_diff(a, b)
    self.assertIsNotNone(diff)
    self.assertLen(diff.removed, 1, diff)
    self.assertEqual(str(diff.removed[0]), "observation.game_loop")
    self.assertEqual(diff.removed, diff.all_diffs())
    self.assertEqual(
        diff.report(),
        "Removed observation.game_loop.")

  def testRemovedFields(self):
    a = sc_pb.ResponseObservation(observation=sc_pb.Observation(
        game_loop=1,
        score=score_pb2.Score(),
        alerts=[sc_pb.AlertError, sc_pb.MergeComplete]))
    b = sc_pb.ResponseObservation(observation=sc_pb.Observation(
        alerts=[sc_pb.AlertError]))
    diff = proto_diff.compute_diff(a, b)
    self.assertIsNotNone(diff)
    self.assertLen(diff.removed, 3, diff)
    self.assertEqual(str(diff.removed[0]), "observation.alerts[1]")
    self.assertEqual(str(diff.removed[1]), "observation.game_loop")
    self.assertEqual(str(diff.removed[2]), "observation.score")
    self.assertEqual(diff.removed, diff.all_diffs())
    self.assertEqual(
        diff.report(),
        "Removed observation.alerts[1].\n"
        "Removed observation.game_loop.\n"
        "Removed observation.score.")

  def testChangedField(self):
    a = sc_pb.ResponseObservation(observation=sc_pb.Observation(game_loop=1))
    b = sc_pb.ResponseObservation(observation=sc_pb.Observation(game_loop=2))
    diff = proto_diff.compute_diff(a, b)
    self.assertIsNotNone(diff)
    self.assertLen(diff.changed, 1, diff)
    self.assertEqual(str(diff.changed[0]), "observation.game_loop")
    self.assertEqual(diff.changed, diff.all_diffs())
    self.assertEqual(diff.report(), "Changed observation.game_loop: 1 -> 2.")

  def testChangedFields(self):
    a = sc_pb.ResponseObservation(observation=sc_pb.Observation(
        game_loop=1, alerts=[sc_pb.AlertError, sc_pb.LarvaHatched]))
    b = sc_pb.ResponseObservation(observation=sc_pb.Observation(
        game_loop=2, alerts=[sc_pb.AlertError, sc_pb.MergeComplete]))
    diff = proto_diff.compute_diff(a, b)
    self.assertIsNotNone(diff)
    self.assertLen(diff.changed, 2, diff)
    self.assertEqual(str(diff.changed[0]), "observation.alerts[1]")
    self.assertEqual(str(diff.changed[1]), "observation.game_loop")
    self.assertEqual(diff.changed, diff.all_diffs())
    # Custom formatters can render enum values by name.
    self.assertEqual(
        diff.report(),
        "Changed observation.alerts[1]: 7 -> 8.\n"
        "Changed observation.game_loop: 1 -> 2.")
    self.assertEqual(
        diff.report([_alert_formatter]),
        "Changed observation.alerts[1]: LarvaHatched -> MergeComplete.\n"
        "Changed observation.game_loop: 1 -> 2.")

  def testTruncation(self):
    a = sc_pb.ResponseObservation(observation=sc_pb.Observation(
        game_loop=1, alerts=[sc_pb.AlertError, sc_pb.LarvaHatched]))
    b = sc_pb.ResponseObservation(observation=sc_pb.Observation(
        game_loop=2, alerts=[sc_pb.AlertError, sc_pb.MergeComplete]))
    diff = proto_diff.compute_diff(a, b)
    self.assertIsNotNone(diff)
    self.assertEqual(
        diff.report([_alert_formatter], truncate_to=9),
        "Changed observation.alerts[1]: LarvaH....\n"
        "Changed observation.game_loop: 1 -> 2.")
    self.assertEqual(
        diff.report([_alert_formatter], truncate_to=-1),
        "Changed observation.alerts[1]: ....\n"
        "Changed observation.game_loop: ... -> ....")
if __name__ == "__main__":
absltest.main()
| pysc2-master | pysc2/lib/proto_diff_test.py |
#!/usr/bin/python
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for lib.run_parallel."""
import threading
from absl.testing import absltest
from pysc2.lib import run_parallel
class Barrier(object):
  """A reusable barrier: wait() blocks until n threads have arrived.

  wait() returns each thread's arrival order (0..n-1) within the round.
  """

  def __init__(self, n):
    self.n = n
    self.count = 0
    self.cond = threading.Condition()

  def wait(self):
    with self.cond:
      arrival = self.count
      self.count += 1
      if self.count >= self.n:
        # Last arrival: reset for the next round and release everyone.
        self.count = 0
        self.cond.notify_all()
      else:
        self.cond.wait()
    return arrival

  def clear(self):
    # Release any waiters without resetting the count.
    with self.cond:
      self.cond.notify_all()
def bad():
  """Always raises, to exercise exception propagation in the tests."""
  raise ValueError()
class RunParallelTest(absltest.TestCase):
  """Tests for run_parallel.RunParallel."""

  def test_returns_expected_values(self):
    pool = run_parallel.RunParallel()
    out = pool.run([int])
    self.assertListEqual(out, [0])
    out = pool.run([lambda: 1, lambda: 2, lambda: "asdf", lambda: {1: 2}])
    self.assertListEqual(out, [1, 2, "asdf", {1: 2}])
    pool.shutdown()

  def test_run_in_parallel(self):
    # The barrier only releases once all three run simultaneously, so this
    # deadlocks unless the pool really is parallel.
    b = Barrier(3)
    pool = run_parallel.RunParallel()
    out = pool.run([b.wait, b.wait, b.wait])
    self.assertCountEqual(out, [0, 1, 2])
    pool.shutdown()

  def test_avoids_deadlock(self):
    b = Barrier(2)
    pool = run_parallel.RunParallel(timeout=2)
    with self.assertRaises(ValueError):
      pool.run([int, b.wait, bad])
    # Release the thread waiting on the barrier so the process can exit cleanly.
    b.clear()
    pool.shutdown()

  def test_exception(self):
    pool = run_parallel.RunParallel()
    # An exception instance returned by a func is a value, not a raise.
    out = pool.run([lambda: 1, ValueError])
    self.assertEqual(out[0], 1)
    self.assertIsInstance(out[1], ValueError)
    with self.assertRaises(ValueError):
      pool.run([bad])
    with self.assertRaises(ValueError):
      pool.run([int, bad])
    pool.shutdown()

  def test_partial(self):
    # Non-callable entries are treated as args to functools.partial.
    pool = run_parallel.RunParallel()
    out = pool.run((max, 0, i - 2) for i in range(5))
    self.assertListEqual(out, [0, 0, 0, 1, 2])
    pool.shutdown()
if __name__ == "__main__":
absltest.main()
| pysc2-master | pysc2/lib/run_parallel_test.py |
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A basic Color class."""
import collections
import math
import random
import numpy
from pysc2.lib import static_data
class Color(collections.namedtuple("Color", ["r", "g", "b"])):
  """A basic RGB color with arithmetic and rounding helpers."""
  __slots__ = ()

  def set(self, r=None, g=None, b=None):
    """Return a copy with the given channels replaced.

    Fixed: the g and b arguments were previously transposed
    (`Color(r or self.r, b or self.b, g or self.g)`), and `or` meant a
    channel could never be set to 0. Uses `is None` checks so 0 is valid.
    """
    return Color(self.r if r is None else r,
                 self.g if g is None else g,
                 self.b if b is None else b)

  def round(self):
    """Return a copy with each channel rounded to the nearest int."""
    return Color(int(round(self.r)), int(round(self.g)), int(round(self.b)))

  def floor(self):
    """Return a copy with each channel rounded down to an int."""
    return Color(int(math.floor(self.r)), int(math.floor(self.g)),
                 int(math.floor(self.b)))

  def ceil(self):
    """Return a copy with each channel rounded up to an int."""
    return Color(int(math.ceil(self.r)), int(math.ceil(self.g)),
                 int(math.ceil(self.b)))

  def __str__(self):
    return "%d,%d,%d" % self

  def __add__(self, o):
    return Color(self.r + o.r, self.g + o.g, self.b + o.b)

  def __sub__(self, o):
    return Color(self.r - o.r, self.g - o.g, self.b - o.b)

  def __mul__(self, val):
    return Color(self.r * val, self.g * val, self.b * val)

  def __truediv__(self, val):
    return Color(self.r / val, self.g / val, self.b / val)

  def __floordiv__(self, val):
    return Color(self.r // val, self.g // val, self.b // val)

  __div__ = __truediv__
# Commonly used color constants (channels in 0-255).
black = Color(0, 0, 0)
white = Color(255, 255, 255)
red = Color(255, 0, 0)
green = Color(0, 255, 0)
blue = Color(0, 0, 255)
cyan = Color(0, 255, 255)
yellow = Color(255, 255, 0)
purple = Color(255, 0, 255)
def smooth_hue_palette(scale):
  """Takes an array of ints and returns a corresponding colored rgb array."""
  # http://en.wikipedia.org/wiki/HSL_and_HSV#From_HSL
  # Based on http://stackoverflow.com/a/17382854 , with simplifications and
  # optimizations. Assumes S=1, L=0.5, meaning C=1 and m=0.
  # 0 stays black, everything else moves into a hue.
  hue = numpy.arange(scale) * (6 / scale)  # Hue sector, in [0, 6).
  # The "second" channel value within each sector, scaled to [0, 255].
  ramp = 255 * (1 - numpy.absolute(numpy.mod(hue, 2) - 1))
  palette = numpy.zeros(hue.shape + (3,), float)
  # Each hue sector holds one channel at full brightness and ramps a second
  # one; the third stays 0. Tuples are (mask, full_channel, ramp_channel).
  sectors = [
      ((0 < hue) & (hue < 1), 0, 1),  # Red full, green ramps up.
      ((1 <= hue) & (hue < 2), 1, 0),  # Green full, red ramps down.
      ((2 <= hue) & (hue < 3), 1, 2),  # Green full, blue ramps up.
      ((3 <= hue) & (hue < 4), 2, 1),  # Blue full, green ramps down.
      ((4 <= hue) & (hue < 5), 2, 0),  # Blue full, red ramps up.
      (5 <= hue, 0, 2),  # Red full, blue ramps down.
  ]
  for mask, full_channel, ramp_channel in sectors:
    palette[mask, full_channel] = 255
    palette[mask, ramp_channel] = ramp[mask]
  return palette
def shuffled_hue(scale):
  """Return the hue palette deterministically shuffled.

  Shuffling gives adjacent ids visually distinct colors, while the fixed
  seed keeps renders stable across runs.
  """
  palette = list(smooth_hue_palette(scale))
  random.Random(21).shuffle(palette)  # Return a fixed shuffle
  return numpy.array(palette)
def piece_wise_linear(scale, points):
  """Create a palette that is piece-wise linear given some colors at points.

  Args:
    scale: Number of palette entries to generate. Entry 0 is left black.
    points: Sequence of (position, color) pairs with positions increasing
        from 0 to 1.
  """
  assert len(points) >= 2
  assert points[0][0] == 0
  assert points[-1][0] == 1
  assert all(i < j for i, j in zip(points[:-1], points[1:]))
  palette = numpy.zeros((scale, 3))
  lo_pos, lo_color = points[0]
  hi_pos, hi_color = points[1]
  next_segment = 2
  for idx in range(1, scale):  # Leave index 0 as black.
    t = idx / scale
    if t > hi_pos:  # Advance to the next segment.
      lo_pos, lo_color = hi_pos, hi_color
      hi_pos, hi_color = points[next_segment]
      next_segment += 1
    frac = (t - lo_pos) / (hi_pos - lo_pos)
    palette[idx, :] = lo_color * (1 - frac) + hi_color * frac
  return palette
def winter(scale):
  """Dark teal to yellow linear palette."""
  return piece_wise_linear(scale, [(0, Color(0, 0.5, 0.4) * 255),
                                   (1, Color(1, 1, 0.4) * 255)])
def hot(scale):
  """Heat-map palette: dark red through red and yellow to white."""
  return piece_wise_linear(scale, [(0, Color(0.5, 0, 0) * 255),
                                   (0.2, Color(1, 0, 0) * 255),
                                   (0.6, Color(1, 1, 0) * 255),
                                   (1, Color(1, 1, 1) * 255)])
def height_map(scale):
  """Terrain-style palette for height maps (water/sand/grass/dirt bands)."""
  return piece_wise_linear(scale, [
      (0, Color(0, 0, 0)),  # Abyss
      (40/255, Color(67, 109, 95)),  # Water, little below this height.
      (50/255, Color(168, 152, 129)),  # Beach
      (60/255, Color(154, 124, 90)),  # Sand, the mode height.
      (70/255, Color(117, 150, 96)),  # Grass
      (80/255, Color(166, 98, 97)),  # Dirt, should be the top.
      (1, Color(255, 255, 100)),  # Heaven. Shouldn't be seen.
  ])
# Palette used to color player_relative features.
PLAYER_RELATIVE_PALETTE = numpy.array([
    black,  # Background.
    Color(0, 142, 0),  # Self. (Green).
    yellow,  # Ally.
    Color(129, 166, 196),  # Neutral. (Cyan.)
    Color(113, 25, 34),  # Enemy. (Red).
])

# Palette used to color player_id features; indexed by absolute player id.
PLAYER_ABSOLUTE_PALETTE = numpy.array([
    black,  # Background
    Color(0, 142, 0),  # 1: Green
    Color(113, 25, 34),  # 2: Red
    Color(223, 215, 67),  # 3: Yellow
    Color(66, 26, 121),  # 4: Purple
    Color(222, 144, 50),  # 5: Orange
    Color(46, 72, 237),  # 6: Blue
    Color(207, 111, 176),  # 7: Pink
    Color(189, 251, 157),  # 8: Light green
    white * 0.1,  # 9: Does the game ever have more than 8 players?
    white * 0.1,  # 10: Does the game ever have more than 8 players?
    white * 0.1,  # 11: Does the game ever have more than 8 players?
    white * 0.1,  # 12: Does the game ever have more than 8 players?
    white * 0.1,  # 13: Does the game ever have more than 8 players?
    white * 0.1,  # 14: Does the game ever have more than 8 players?
    white * 0.1,  # 15: Does the game ever have more than 8 players?
    Color(129, 166, 196),  # 16 Neutral: Cyan
])

# One color per visibility level.
VISIBILITY_PALETTE = numpy.array([
    black,  # Hidden
    white * 0.25,  # Fogged
    white * 0.6,  # Visible
])

# Two-entry palettes for binary (off/on) feature layers.
CAMERA_PALETTE = numpy.array([black, white * 0.6])
CREEP_PALETTE = numpy.array([black, purple * 0.4])
POWER_PALETTE = numpy.array([black, cyan * 0.7])
SELECTED_PALETTE = numpy.array([black, green * 0.7])
def unit_type(scale=None):
  """Returns a palette that maps unit types to rgb colors."""
  # scale can be given to match the api size or to accept unknown ids.
  return categorical(static_data.UNIT_TYPES, scale)
def buffs(scale=None):
  """Returns a palette that maps buffs to rgb colors."""
  # scale can be given to match the api size or to accept unknown ids.
  return categorical(static_data.BUFFS, scale)
def categorical(options, scale=None):
  """Returns a palette giving each id in `options` a distinct, stable color.

  Ids outside `options` fall back to hue-shuffled filler colors.
  """
  # A scale can be specified to match the api or to accept unknown ids.
  palette_size = scale if scale else max(options) + 1
  palette = shuffled_hue(palette_size)
  assert len(options) <= len(distinct_colors)
  for index, option in enumerate(options):
    palette[option] = distinct_colors[index]
  return palette
# Palette for effect ids; row 0 (black) means "no effect".
effects = numpy.array([
    [0, 0, 0],
    [72, 173, 207],
    [203, 76, 49],
    [122, 98, 209],
    [109, 183, 67],
    [192, 80, 181],
    [86, 185, 138],
    [211, 63, 115],
    [81, 128, 60],
    [182, 135, 208],
    [182, 174, 73],
    [95, 123, 196],
    [220, 146, 71],
    [187, 102, 147],
    [138, 109, 48],
    [197, 103, 99],
])

# Generated with http://tools.medialab.sciences-po.fr/iwanthue/
# 350 colors: H: 0-360, C: 0-100, L: 35-100; then shuffled.
# Used by `categorical` above to give each known id a stable, distinct color.
distinct_colors = numpy.array([
    [99, 91, 0],
    [195, 211, 0],
    [57, 206, 255],
    [255, 172, 106],
    [255, 187, 77],
    [255, 195, 114],
    [0, 102, 201],
    [3, 249, 197],
    [79, 84, 81],
    [255, 252, 198],
    [0, 132, 134],
    [255, 155, 144],
    [255, 211, 140],
    [41, 91, 83],
    [101, 77, 73],
    [0, 144, 124],
    [146, 41, 97],
    [2, 223, 228],
    [173, 77, 0],
    [255, 93, 193],
    [54, 92, 36],
    [119, 255, 202],
    [154, 0, 183],
    [0, 156, 121],
    [144, 173, 0],
    [255, 254, 173],
    [62, 90, 54],
    [144, 54, 5],
    [2, 169, 191],
    [132, 255, 249],
    [196, 158, 255],
    [187, 8, 0],
    [138, 255, 99],
    [236, 163, 255],
    [78, 255, 187],
    [128, 64, 56],
    [255, 195, 148],
    [0, 101, 209],
    [149, 193, 255],
    [0, 239, 125],
    [134, 65, 240],
    [0, 222, 123],
    [255, 249, 146],
    [0, 247, 164],
    [8, 169, 0],
    [156, 36, 46],
    [255, 174, 81],
    [0, 102, 84],
    [139, 213, 0],
    [142, 87, 0],
    [215, 255, 55],
    [203, 255, 124],
    [0, 96, 93],
    [63, 78, 147],
    [227, 255, 115],
    [160, 0, 131],
    [69, 148, 0],
    [142, 149, 0],
    [255, 72, 70],
    [0, 229, 224],
    [127, 63, 76],
    [248, 139, 255],
    [2, 188, 206],
    [0, 128, 203],
    [113, 151, 0],
    [255, 203, 103],
    [0, 178, 172],
    [128, 53, 122],
    [163, 4, 83],
    [2, 79, 204],
    [235, 128, 0],
    [0, 106, 247],
    [164, 156, 255],
    [179, 173, 0],
    [255, 124, 221],
    [115, 209, 0],
    [62, 249, 255],
    [240, 118, 0],
    [45, 84, 135],
    [106, 96, 255],
    [39, 89, 109],
    [0, 86, 192],
    [255, 133, 151],
    [90, 192, 0],
    [156, 0, 154],
    [127, 51, 133],
    [216, 255, 82],
    [160, 255, 212],
    [106, 43, 191],
    [224, 255, 221],
    [167, 47, 227],
    [255, 217, 85],
    [251, 173, 255],
    [92, 55, 185],
    [162, 28, 1],
    [126, 102, 255],
    [212, 140, 255],
    [113, 66, 111],
    [216, 0, 205],
    [70, 242, 69],
    [120, 109, 255],
    [0, 132, 180],
    [122, 67, 62],
    [255, 166, 54],
    [140, 173, 255],
    [105, 79, 0],
    [39, 227, 55],
    [255, 71, 238],
    [112, 75, 18],
    [149, 83, 255],
    [255, 130, 205],
    [255, 138, 39],
    [0, 184, 21],
    [202, 154, 0],
    [145, 52, 41],
    [185, 255, 85],
    [151, 46, 8],
    [255, 215, 128],
    [2, 192, 148],
    [80, 81, 101],
    [255, 166, 114],
    [0, 161, 80],
    [255, 56, 89],
    [2, 223, 146],
    [98, 246, 255],
    [150, 251, 255],
    [255, 125, 56],
    [144, 51, 53],
    [83, 133, 255],
    [1, 82, 173],
    [122, 118, 0],
    [255, 86, 174],
    [67, 87, 78],
    [131, 65, 4],
    [170, 255, 204],
    [0, 108, 66],
    [248, 96, 255],
    [212, 101, 255],
    [99, 230, 34],
    [140, 41, 121],
    [173, 0, 175],
    [255, 190, 175],
    [186, 179, 255],
    [171, 221, 255],
    [78, 255, 135],
    [220, 0, 32],
    [255, 217, 192],
    [46, 58, 215],
    [68, 255, 230],
    [96, 81, 53],
    [1, 174, 246],
    [72, 70, 166],
    [255, 233, 77],
    [255, 166, 197],
    [255, 208, 241],
    [183, 255, 62],
    [255, 226, 226],
    [107, 255, 119],
    [148, 122, 0],
    [171, 255, 143],
    [255, 109, 232],
    [156, 142, 255],
    [124, 148, 255],
    [178, 236, 255],
    [168, 91, 0],
    [255, 255, 248],
    [255, 92, 91],
    [132, 238, 255],
    [225, 131, 0],
    [255, 149, 111],
    [171, 157, 0],
    [255, 133, 181],
    [196, 158, 0],
    [2, 162, 246],
    [193, 110, 0],
    [255, 243, 244],
    [255, 180, 181],
    [255, 79, 221],
    [255, 211, 109],
    [0, 99, 118],
    [255, 167, 214],
    [89, 81, 83],
    [147, 255, 120],
    [2, 210, 200],
    [255, 244, 113],
    [255, 197, 248],
    [0, 122, 37],
    [255, 194, 57],
    [130, 130, 255],
    [107, 77, 29],
    [255, 153, 56],
    [178, 104, 255],
    [17, 98, 0],
    [0, 119, 128],
    [146, 106, 0],
    [117, 255, 186],
    [255, 155, 232],
    [1, 87, 232],
    [61, 83, 120],
    [200, 255, 187],
    [196, 221, 255],
    [100, 73, 112],
    [115, 218, 255],
    [85, 114, 0],
    [208, 142, 0],
    [255, 30, 147],
    [156, 0, 200],
    [239, 0, 122],
    [255, 43, 170],
    [0, 87, 241],
    [237, 255, 248],
    [0, 151, 44],
    [255, 155, 161],
    [218, 0, 107],
    [139, 57, 29],
    [148, 255, 174],
    [100, 69, 131],
    [195, 0, 29],
    [177, 64, 0],
    [93, 81, 60],
    [2, 162, 172],
    [205, 0, 134],
    [255, 168, 135],
    [225, 93, 0],
    [125, 39, 165],
    [187, 255, 126],
    [2, 196, 237],
    [234, 119, 255],
    [240, 0, 182],
    [115, 181, 0],
    [255, 125, 125],
    [67, 90, 26],
    [242, 255, 69],
    [185, 81, 255],
    [255, 195, 130],
    [32, 95, 35],
    [215, 0, 153],
    [197, 125, 0],
    [46, 104, 0],
    [72, 73, 155],
    [177, 183, 0],
    [149, 40, 81],
    [255, 145, 88],
    [164, 16, 58],
    [215, 187, 255],
    [119, 204, 255],
    [198, 255, 237],
    [255, 92, 65],
    [197, 244, 255],
    [0, 146, 22],
    [118, 179, 255],
    [255, 94, 144],
    [208, 1, 182],
    [28, 200, 0],
    [0, 121, 97],
    [167, 0, 111],
    [25, 84, 143],
    [2, 191, 98],
    [175, 0, 127],
    [48, 92, 57],
    [119, 71, 31],
    [255, 169, 186],
    [2, 115, 247],
    [111, 74, 50],
    [255, 82, 41],
    [41, 94, 11],
    [42, 155, 255],
    [235, 52, 0],
    [243, 167, 0],
    [255, 96, 134],
    [61, 255, 216],
    [220, 255, 177],
    [3, 162, 206],
    [183, 0, 90],
    [255, 237, 208],
    [86, 153, 0],
    [207, 255, 220],
    [255, 194, 229],
    [255, 93, 34],
    [3, 95, 57],
    [0, 160, 99],
    [1, 89, 165],
    [167, 128, 0],
    [1, 215, 245],
    [167, 255, 97],
    [187, 0, 77],
    [173, 0, 32],
    [0, 101, 130],
    [58, 90, 66],
    [255, 102, 112],
    [0, 120, 89],
    [240, 182, 255],
    [125, 90, 0],
    [216, 210, 255],
    [244, 0, 78],
    [88, 85, 18],
    [228, 181, 0],
    [169, 207, 0],
    [24, 134, 0],
    [217, 255, 255],
    [216, 255, 147],
    [133, 55, 93],
    [205, 90, 255],
    [255, 119, 97],
    [255, 227, 164],
    [50, 129, 0],
    [1, 138, 243],
    [0, 134, 68],
    [98, 255, 245],
    [255, 94, 158],
    [186, 204, 255],
    [0, 191, 163],
    [1, 141, 207],
    [2, 228, 103],
    [255, 208, 171],
    [207, 78, 0],
    [0, 147, 86],
    [217, 32, 0],
    [194, 0, 50],
    [0, 122, 68],
    [255, 235, 48],
    [183, 28, 217],
    [193, 167, 0],
    [250, 0, 200],
    [154, 36, 64],
    [126, 58, 107],
    [103, 127, 0],
    [210, 106, 0],
    [220, 0, 49],
    [0, 107, 143],
    [255, 181, 242],
    [166, 255, 183],
    [95, 66, 149],
    [0, 210, 151],
    [1, 217, 81],
    [255, 238, 184],
    [253, 255, 0],
    [201, 0, 75],
    [0, 170, 49],
    [255, 215, 209],
    [94, 61, 168],
    [117, 54, 151],
    [91, 83, 37],
    [190, 1, 209],
    [216, 241, 0],
    [243, 230, 255],
    [233, 255, 193],
    [169, 141, 0],
    [80, 96, 0],
    [0, 101, 34],
])
| pysc2-master | pysc2/lib/colors.py |
# Copyright 2021 DeepMind Technologies Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""SC2 replays -> ResponseObservation proto streams."""
import io
import json
import time
from absl import logging
import mpyq
from pysc2 import run_configs
from s2clientprotocol import sc2api_pb2 as sc_pb
def _get_replay_version(replay_data):
  """Determines the SC2 binary version needed to play back `replay_data`."""
  # The replay file is an MPQ archive; the game metadata is stored as JSON.
  archive = mpyq.MPQArchive(io.BytesIO(replay_data)).extract()
  metadata = json.loads(archive[b"replay.gamemetadata.json"].decode("utf-8"))
  game_version = ".".join(metadata["GameVersion"].split(".")[:-1])
  return run_configs.lib.Version(
      game_version=game_version,
      build_version=int(metadata["BaseBuild"][4:]),
      data_version=metadata.get("DataVersion"),  # Only in replays version 4.1+.
      binary=None)
class ReplayError(Exception):
  """Raised when a replay cannot be parsed or its version determined."""
class ReplayObservationStream(object):
  """Wrapper class for iterating over replay observation data.

  Yields the observations for a replay from player_id's perspective. The class
  can throw connection errors from the controller. These should be caught
  at the top level, since they are unrecoverable (the controller is unusable).

  Determines the replay version from the protocol information in the replay
  file. Uses a cache of processes, one for each binary version required
  to process corresponding replays.

  How to use the class:

  with ReplayObservationStream() as replay_stream:
    replay_stream.start_replay(replay_data, player_id)

    # Get game data if needed.
    info = replay_stream.game_info()

    for observation in replay_stream.observations():
      # Do something with each observation.
  """

  def __init__(self,
               interface_options: sc_pb.InterfaceOptions,
               step_mul: int = 1,
               disable_fog: bool = False,
               game_steps_per_episode: int = 0,
               add_opponent_observations: bool = False):
    """Constructs the replay stream object.

    Args:
      interface_options: Interface format to use.
      step_mul: Number of skipped observations in between environment steps.
      disable_fog: Bool, True to disable fog of war.
      game_steps_per_episode: Int, truncate after this many steps (0 for inf.).
      add_opponent_observations: Bool, True to return the opponent's
        observations in addition to the observing player. Note that this will
        start two SC2 processes simultaneously if set to True. By default is
        False and returns observations from one player's perspective.

    Raises:
      ValueError: If interface_options is unset.
    """
    self._step_mul = step_mul
    self._disable_fog = disable_fog
    self._game_steps_per_episode = game_steps_per_episode
    self._add_opponent_observations = add_opponent_observations

    self._packet_count = 0
    self._info = None
    self._player_id = None

    if not interface_options:
      raise ValueError("Please specify interface_options")
    self._interface = interface_options
    self._want_rgb = self._interface.HasField("render")

    self._run_config = None
    self._sc2_procs = []
    self._controllers = []

  def _get_controllers(self, version):
    """Returns controllers for `version`, restarting processes if needed."""
    if not self._run_config or self._run_config.version != version:
      # Close current process and create a new one.
      self._close()
      self._run_config = run_configs.get(version=version)
      self._sc2_procs = [self._run_config.start(want_rgb=self._want_rgb)]
      if self._add_opponent_observations:
        self._sc2_procs.append(self._run_config.start(want_rgb=self._want_rgb))
      self._controllers = [proc.controller for proc in self._sc2_procs]

    return self._controllers

  def _close(self):
    """Quits controllers and closes any running SC2 processes."""
    self._run_config = None
    for controller in self._controllers:
      if controller:
        controller.quit()
    self._controllers = []
    for proc in self._sc2_procs:
      if proc:
        proc.close()
    self._sc2_procs = []

  def start_replay_from_data(self, replay_data, player_id):
    """Starts the stream of replay observations from an in-memory replay.

    Args:
      replay_data: The contents of a replay file.
      player_id: The id of the player whose perspective is observed.

    Raises:
      ReplayError: If the replay version cannot be read from the data.
    """
    self._player_id = player_id
    try:
      version = _get_replay_version(replay_data)
    except (ValueError, AttributeError) as err:
      logging.exception("Error getting replay version from data: %s", err)
      # Chain the original error explicitly so the root cause is preserved.
      raise ReplayError(err) from err

    if self._add_opponent_observations:
      player_ids = [player_id, (player_id % 2) + 1]
    else:
      player_ids = [player_id]
    start_requests = []
    for p_id in player_ids:
      start_requests.append(
          sc_pb.RequestStartReplay(
              replay_data=replay_data,
              options=self._interface,
              disable_fog=self._disable_fog,
              observed_player_id=p_id))

    logging.info("Starting replay...")
    self._controllers = self._get_controllers(version)
    self._info = info = self._controllers[0].replay_info(replay_data)
    logging.info(" Replay info ".center(60, "-"))
    logging.info(info)
    logging.info("-" * 60)

    if (info.local_map_path and
        info.local_map_path.lower().endswith(".sc2map")):
      logging.info("Map path: %s", info.local_map_path)
      for start_replay in start_requests:
        start_replay.map_data = self._run_config.map_data(info.local_map_path)
    for controller, start_replay in zip(self._controllers, start_requests):
      controller.start_replay(start_replay)
    logging.info("Getting started...")

  def replay_info(self):
    """Returns the ResponseReplayInfo for the current replay."""
    return self._info

  def game_info(self):
    """Returns the game info from the observed player's controller."""
    return self._controllers[0].game_info()

  def static_data(self):
    """Returns the static game data from the observed player's controller."""
    return self._controllers[0].data()

  def observations(self, step_sequence=None):
    """Yields a ResponseObservation proto for each environment step.

    If using the opponent's observations, this will yield a list of
    observations, one for each player.

    Args:
      step_sequence: A list of integers, the step sizes to apply to the stream.
    """
    self._packet_count = 0
    period_start = time.time()
    period = 1000  # log packet rate every 1000 packets
    logging.info("Begin iterating over frames...")
    while True:
      obs = [controller.observe() for controller in self._controllers]
      if self._packet_count == 0:
        logging.info("The first packet has been read")
      self._packet_count += 1
      if len(obs) == 1:
        yield obs[0]
      else:
        yield obs
      if (obs[0].player_result or
          (step_sequence and self._packet_count > len(step_sequence))):
        # End of game.
        break
      if self._game_steps_per_episode > 0:
        if obs[0].observation.game_loop >= self._game_steps_per_episode - 1:
          break

      for controller in self._controllers:
        if step_sequence and self._packet_count <= len(step_sequence):
          step_mul = step_sequence[self._packet_count - 1]
        else:
          step_mul = self._step_mul
        controller.step(step_mul)

      if self._packet_count % period == 0:
        time_taken = time.time() - period_start
        period_start = time.time()
        # Guard against ZeroDivisionError if the period elapsed faster than
        # the clock resolution.
        rate = period / time_taken if time_taken else float("inf")
        logging.info(
            "Frame: %d, packets per sec: %.1f",
            obs[0].observation.game_loop, rate)

  def close(self):
    """Close the replay process connection."""
    logging.info("Quitting...")
    self._close()

  def __enter__(self):
    return self

  def __exit__(self, exception_type, exception_value, traceback):
    if exception_value:
      logging.error("[%s]: %s", exception_type, exception_value)
    self.close()
| pysc2-master | pysc2/lib/replay/replay_observation_stream.py |
# Copyright 2021 DeepMind Technologies Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from absl import flags
from absl.testing import absltest
from absl.testing import parameterized
from pysc2.lib.replay import sc2_replay
from pysc2.lib.replay import sc2_replay_utils
from pysc2.lib import gfile
from pysc2.lib import resources
FLAGS = flags.FLAGS
# Directory holding the checked-in test replays and their golden skip files.
PATH = "pysc2/lib/replay/test_data"
def _read_replay(name):
  """Loads the named test replay and wraps it in an SC2Replay."""
  replay_path = resources.GetResourceFilename(os.path.join(PATH, name))
  with gfile.Open(replay_path, mode="rb") as f:
    return sc2_replay.SC2Replay(f.read())
def _read_skips(name):
  """Reads the comma-separated game loops from the named golden skips file."""
  skips_path = resources.GetResourceFilename(os.path.join(PATH, name))
  with gfile.Open(skips_path, mode="r") as f:
    first_line = f.readlines()[0]
  return [int(token) for token in first_line.split(",")]
class Sc2ReplayUtilsTest(parameterized.TestCase):
  """Checks raw_action_skips against golden data for each test replay."""

  @parameterized.parameters(
      [(f"replay_0{n}.SC2Replay", f"replay_0{n}.skips.txt")
       for n in range(1, 10)])
  def test_raw_action_skips(self, replay_name, skips_file):
    """Player 1's action skips must match the checked-in golden values."""
    expected = _read_skips(skips_file)
    actual = sc2_replay_utils.raw_action_skips(_read_replay(replay_name))
    self.assertEqual(actual[1], expected)


if __name__ == "__main__":
  absltest.main()
| pysc2-master | pysc2/lib/replay/sc2_replay_utils_test.py |
# Copyright 2021 DeepMind Technologies Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| pysc2-master | pysc2/lib/replay/__init__.py |
# Copyright 2021 DeepMind Technologies Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities built on top of sc2_replay."""
import collections
import dataclasses
from typing import List, Mapping
from pysc2.lib.replay import sc2_replay
# Readable game event types that never correspond to raw actions we track.
_EVENT_TYPES_TO_FILTER_OUT = frozenset([
    # Not related to actions.
    "SetSyncLoadingTime",
    "SetSyncPlayingTime",
    "TriggerSoundLengthSync",
    "UserFinishedLoadingSync",
    "UserOptions",
    # Always accompanied by a CommandManagerState, which we track.
    "CmdUpdateTargetPoint",
    # Of interest for the visual interface, but skipped for now as we are
    # targeting raw.
    "CameraSave",
    "ControlGroupUpdate",
    "SelectionDelta",
])
def _readable_event_type(full_event_type):
return full_event_type[len("NNet.Game.S"):-5]
@dataclasses.dataclass
class EventData:
  """A game event of interest, tracked while scanning a replay's events."""
  game_loop: int  # Game loop on which the event occurred.
  event_type: str  # Readable event type, e.g. "Cmd" or "CameraUpdate".
def raw_action_skips(replay: sc2_replay.SC2Replay) -> Mapping[int, List[int]]:
  """Returns player id -> list, the game loops on which each player acted.

  Note that these skips are specific to the raw interface - further work will
  be needed to support visual.

  Args:
    replay: An sc2_replay.SC2Replay instance.

  Returns:
    A mapping from player id (1 or 2) to the ascending game loops on which
    that player acted. The final tracked game loop is always included.
  """
  action_frames = collections.defaultdict(list)
  last_game_loop = None

  # Extract per-user events of interest.
  for event in replay.game_events():
    event_type = _readable_event_type(event["_event"])
    if event_type not in _EVENT_TYPES_TO_FILTER_OUT:
      game_loop = event["_gameloop"]
      last_game_loop = game_loop
      # As soon as anyone leaves, we stop tracking events.
      if event_type == "GameUserLeave":
        break
      # User ids are 0-based; player ids are 1-based.
      user_id = event["_userid"]["m_userId"]
      player_id = user_id + 1
      if player_id < 1 or player_id > 2:
        raise ValueError(f"Unexpected player_id: {player_id}")

      if (action_frames[player_id] and
          action_frames[player_id][-1].game_loop == game_loop):
        # Later (non-camera) events on the same game loop take priority.
        if event_type != "CameraUpdate":
          action_frames[player_id][-1].event_type = event_type
      else:
        action_frames[player_id].append(EventData(game_loop, event_type))

  for player_id in action_frames:
    # Filter out repeated camera updates, keeping the game loop of the last
    # update in each contiguous run.
    filtered = []
    for v in action_frames[player_id]:
      if (v.event_type == "CameraUpdate" and filtered and
          filtered[-1].event_type == "CameraUpdate"):
        filtered[-1].game_loop = v.game_loop
      else:
        filtered.append(v)

    # If the last update is a camera move, remove it (only camera moves with a
    # raw action following them should be added).
    if filtered and filtered[-1].event_type == "CameraUpdate":
      filtered.pop()

    # Extract game loops.
    action_frames[player_id] = [v.game_loop for v in filtered]
    # Make sure the final tracked game loop closes out each player's list.
    if not action_frames[player_id] or (action_frames[player_id][-1] !=
                                        last_game_loop):
      action_frames[player_id].append(last_game_loop)

  return action_frames
| pysc2-master | pysc2/lib/replay/sc2_replay_utils.py |
# Copyright 2021 DeepMind Technologies Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""SC2 replay data -> converted observations."""
import collections
from typing import Any, Dict, Iterable, Sequence
import numpy as np
from pysc2.env.converter import converter as converter_lib
from pysc2.env.converter import derive_interface_options
from pysc2.env.converter.proto import converter_pb2
from pysc2.lib.replay import replay_observation_stream
from pysc2.lib.replay import sc2_replay
from pysc2.lib.replay import sc2_replay_utils
import tree
from s2clientprotocol import sc2api_pb2
def _unconverted_observation(observation, actions):
  """Bundles raw player/opponent protos and the forced actions together."""
  forced = sc2api_pb2.RequestAction(actions=actions)
  return converter_pb2.Observation(
      player=observation[0],
      opponent=observation[1],
      force_action=forced,
      # This will be populated when we look ahead.
      force_action_delay=0)
def get_flat_action(obs: Dict[str, Any]) -> Dict[str, Any]:
  """Extracts the action components, keyed 'action/...', from an observation.

  Args:
    obs: A flat observation dict; action entries have an 'action/' key prefix.

  Returns:
    The action components with the 'action/' prefix stripped from the keys.

  Raises:
    ValueError: If `obs` contains no 'action/'-prefixed keys.
  """
  prefix = 'action/'
  action = {}
  for key, value in obs.items():
    if key.startswith(prefix):
      action[key[len(prefix):]] = value
  if not action:
    raise ValueError(f'Failed to parse action from observation: {obs}')
  return action
def _convert_observation(converter, player_observation, force_action_delay,
                         force_action_fn):
  """Convert a raw observation proto and set action delay."""
  player_observation.force_action_delay = force_action_delay
  converted = converter.convert_observation(player_observation)
  # Feed the forced action through the converter as well (result unused).
  converter.convert_action(force_action_fn(converted))

  def _maybe_squeeze(value):
    # Drop singleton leading dimensions so scalars come out as scalars.
    return np.squeeze(value) if value.shape == (1,) else value

  return tree.map_structure(_maybe_squeeze, converted)
def converted_observations(observations_iterator, converter, accept_step_fn,
                           force_action_fn=get_flat_action):
  """Generator of transformed observations (incl. action and time delay).

  Args:
    observations_iterator: Iterator over (player, opponent) raw observation
      pairs, as produced by ReplayObservationStream.observations.
    converter: Converter used to transform each raw observation.
    accept_step_fn: Callable taking a game loop number and returning True if
      the player acted at that step (i.e. the observation should be emitted).
    force_action_fn: Extracts the flat action from a converted observation.

  Yields:
    Converted observations, each carrying the forced action and the true
    delay (in game loops) to the next emitted observation.
  """
  current_observation = next(observations_iterator)
  current_step = current_observation[0].observation.game_loop
  # The stream is expected to begin at the start of the episode.
  assert current_step == 0
  player_obs_queue = collections.deque()
  for next_observation in observations_iterator:
    step = next_observation[0].observation.game_loop
    if (step == 0 or (current_step > 0 and not accept_step_fn(step - 1))):
      # Save the observation even if it didn't have any actions. The step
      # stream also yields the observations immediately before the actions
      # are reported to capture the time the player actually issued the
      # action. If actions were reported at time steps t1 and t2
      # subsequently, we need to yield observation at step t2-1 instead of
      # t1 (this is also what is recorded in the action skips dataset).
      current_observation = next_observation
      continue
    player_obs_queue.append(_unconverted_observation(
        observation=current_observation,
        actions=next_observation[0].actions))
    while len(player_obs_queue) >= 2:
      # We have saved at least 2 observations in the queue, we can now
      # correctly calculate the true action delay.
      player_obs = player_obs_queue.popleft()
      player_obs_next = player_obs_queue[0]
      converted_observation = _convert_observation(
          converter,
          player_obs,
          force_action_delay=(player_obs_next.player.observation.game_loop -
                              player_obs.player.observation.game_loop),
          force_action_fn=force_action_fn)
      yield converted_observation
    current_step = step
    current_observation = next_observation

  # Always use last observation, it contains the player result.
  player_obs_queue.append(_unconverted_observation(
      observation=current_observation,
      actions=current_observation[0].actions))
  previous_delay = 1
  while player_obs_queue:
    player_obs = player_obs_queue.popleft()
    if len(player_obs_queue) >= 1:
      player_obs_next = player_obs_queue[0]
      force_action_delay = (player_obs_next.player.observation.game_loop -
                            player_obs.player.observation.game_loop)
    else:
      # Use previous force action delay, this is only done in the last step.
      # Preserve for reproducibility. In theory the actual delay value
      # shouldn't matter if we retrain checkpoints, since the actions from
      # the last step are never taken.
      force_action_delay = previous_delay
    converted_observation = _convert_observation(
        converter,
        player_obs,
        force_action_delay=force_action_delay,
        force_action_fn=force_action_fn)
    previous_delay = force_action_delay
    yield converted_observation
def converted_observation_stream(
    replay_data: bytes,
    player_id: int,
    converter_settings: converter_pb2.ConverterSettings,
    disable_fog: bool = False,
    max_steps: int = int(1e6)):
  """Generator of transformed observations (incl. action and time delay).

  Args:
    replay_data: The contents of a replay file.
    player_id: The player (1 or 2) whose perspective is observed.
    converter_settings: Settings for the observation/action converter.
    disable_fog: True to disable fog of war.
    max_steps: Truncate the episode after this many game steps.

  Yields:
    Converted observations for each game loop on which `player_id` acted.
  """
  with replay_observation_stream.ReplayObservationStream(
      step_mul=1,
      game_steps_per_episode=max_steps,
      add_opponent_observations=True,
      interface_options=derive_interface_options.from_settings(
          converter_settings),
      disable_fog=disable_fog,
  ) as replay_stream:
    replay_stream.start_replay_from_data(replay_data, player_id=player_id)

    obs_converter = converter_lib.Converter(
        converter_settings,
        environment_info=converter_pb2.EnvironmentInfo(
            game_info=replay_stream.game_info(),
            replay_info=replay_stream.replay_info()))

    replay_file = sc2_replay.SC2Replay(replay_data)
    action_skips = sc2_replay_utils.raw_action_skips(replay_file)
    player_action_skips = action_skips[player_id]
    step_sequence = get_step_sequence(player_action_skips)
    observations_iterator = replay_stream.observations(
        step_sequence=step_sequence)

    # Membership is queried once per accepted step; use a set for O(1)
    # lookups instead of scanning the game-loop list each time.
    action_skip_set = frozenset(player_action_skips)

    def _accept_step_fn(step):
      return step in action_skip_set

    yield from converted_observations(observations_iterator, obs_converter,
                                      _accept_step_fn)
# NOTE: The current step sequence yields the observation immediately before
# the last camera move in a contiguous run of camera moves. Consider whether
# the camera action should instead be reported at a different observation.
def get_step_sequence(action_skips: Iterable[int]) -> Sequence[int]:
  """Generates a sequence of step muls for the replay stream.

  In SC2 we train on observations with actions, but replays report actions on
  the frame after they were taken. A step sequence lets us advance the SC2
  environment to the observation just before each action was taken, then step
  once more (delta=1) to pick up the actual action on the following frame.
  A step sequence is key from a performance point of view: at steps with no
  actions we do not need to render, which is the expensive part of replay
  processing, and can advance the simulation at relatively low cost.

  An example stream looks like this:
  (obs_{0},)------(obs_{k-1},)---(obs_{k}, a_{k-1})---(obs_{k+1}, a_{k})...

  The first observation where an action was taken is `obs_{k-1}`, but the
  replay will not report the action until we request the next observation
  `obs_{k}`. With another action at timestep k (reported with `obs_{k+1}`),
  a step sequence for the above stream would be `[k-1, 1, 1]`: first step
  k-1 times to reach the first acted-on observation, then step once to
  collect the late-reported action.

  Args:
    action_skips: A sequence of game loops where actions were taken in the
      replay. This contains the game loops of the observations that happened
      before the action was reported by the replay, aligning each action with
      the time step when the player took it (replays report past actions).
      The sequence is assumed to have been pre-filtered to the action types
      of interest (e.g., with or without camera moves).

  Returns:
    A sequence of step_muls to use in the replay stream.
  """
  sequence = []
  previous = 0
  for current in action_skips:
    if previous == 0:
      # First acted-on game loop: jump straight to it from the start.
      sequence.append(current)
    else:
      delta = current - previous
      if delta == 1:
        # Consecutive acted-on loops: a single step suffices.
        sequence.append(1)
      elif delta > 1:
        # Step once to collect the late-reported action for the previous
        # loop, then jump to the observation just before the next action.
        sequence.extend((1, delta - 1))
    previous = current
  return sequence
| pysc2-master | pysc2/lib/replay/replay_converter.py |
# Copyright 2021 DeepMind Technologies Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from absl import flags
from absl.testing import absltest
from pysc2.lib.replay import sc2_replay
from pysc2.lib import gfile
from pysc2.lib import resources
FLAGS = flags.FLAGS
# Checked-in replay used as the fixture for all tests below.
PATH = "pysc2/lib/replay/test_data/replay_01.SC2Replay"
class Sc2ReplayTest(absltest.TestCase):
def setUp(self):
super(Sc2ReplayTest, self).setUp()
replay_path = resources.GetResourceFilename(PATH)
with gfile.Open(replay_path, mode="rb") as f:
replay_data = f.read()
self._replay = sc2_replay.SC2Replay(replay_data)
def testDetails(self):
replay_details = self._replay.details()
self.assertEmpty(replay_details["m_cacheHandles"])
self.assertEqual(replay_details["m_campaignIndex"], 0)
self.assertEqual(replay_details["m_defaultDifficulty"], 3)
self.assertEqual(replay_details["m_description"], "")
self.assertEqual(replay_details["m_difficulty"], "")
self.assertFalse(replay_details["m_disableRecoverGame"])
self.assertEqual(replay_details["m_gameSpeed"], 4)
self.assertEqual(replay_details["m_imageFilePath"], "")
self.assertFalse(replay_details["m_isBlizzardMap"])
self.assertEqual(
replay_details["m_mapFileName"],
"Ladder2019Season1May/CyberForestLE.SC2Map")
self.assertFalse(replay_details["m_miniSave"])
self.assertEqual(
replay_details["m_modPaths"], [
"Mods/Liberty.SC2Mod",
"Mods/Swarm.SC2Mod",
"Mods/Void.SC2Mod",
"Mods/VoidMulti.SC2Mod"
])
# (there is more data here, just listing the most interesting bits)
self.assertEqual(replay_details["m_playerList"][0]["m_name"], "Supervised")
self.assertFalse(replay_details["m_playerList"][0]["m_observe"])
self.assertEqual(replay_details["m_playerList"][0]["m_race"], "Protoss")
self.assertEqual(replay_details["m_playerList"][0]["m_result"], 2)
self.assertEqual(replay_details["m_playerList"][1]["m_name"],
"temp_x1_5_beast3f_6571236_final")
self.assertFalse(replay_details["m_playerList"][1]["m_observe"])
self.assertEqual(replay_details["m_playerList"][1]["m_race"], "Protoss")
self.assertEqual(replay_details["m_playerList"][1]["m_result"], 1)
self.assertFalse(replay_details["m_restartAsTransitionMap"])
self.assertEqual(replay_details["m_thumbnail"]["m_file"], "Minimap.tga")
self.assertEqual(replay_details["m_timeLocalOffset"], 0)
self.assertEqual(replay_details["m_timeUTC"], 132772394814660570)
self.assertEqual(replay_details["m_title"], "Cyber Forest LE")
def testInitData(self):
init_data = self._replay.init_data()
# (there is more data here, just listing the most interesting bits)
game_description = init_data["m_syncLobbyState"]["m_gameDescription"]
self.assertEqual(game_description["m_gameOptions"]["m_fog"], 0)
self.assertEqual(game_description["m_gameSpeed"], 4)
self.assertEqual(game_description["m_isBlizzardMap"], False)
self.assertEqual(game_description["m_isRealtimeMode"], False)
self.assertEqual(
game_description["m_mapFileName"],
"Ladder2019Season1May/CyberForestLE.SC2Map")
def testTrackerEvents(self):
events = list(self._replay.tracker_events())
event_types = set(s["_event"] for s in events)
self.assertEqual(
event_types,
{"NNet.Replay.Tracker.SPlayerSetupEvent",
"NNet.Replay.Tracker.SPlayerStatsEvent",
"NNet.Replay.Tracker.SUnitBornEvent",
"NNet.Replay.Tracker.SUnitDiedEvent",
"NNet.Replay.Tracker.SUnitDoneEvent",
"NNet.Replay.Tracker.SUnitInitEvent",
"NNet.Replay.Tracker.SUnitPositionsEvent",
"NNet.Replay.Tracker.SUnitTypeChangeEvent",
"NNet.Replay.Tracker.SUpgradeEvent"})
  def testGameEvents(self):
    """Checks the set of game event types present in the test replay."""
    events = list(self._replay.game_events())
    event_types = set(s["_event"] for s in events)
    self.assertEqual(
        event_types,
        {"NNet.Game.SCameraUpdateEvent",
         "NNet.Game.SCmdEvent",
         "NNet.Game.SCmdUpdateTargetPointEvent",
         "NNet.Game.SCmdUpdateTargetUnitEvent",
         "NNet.Game.SCommandManagerStateEvent",
         "NNet.Game.SPeerSetSyncLoadingTimeEvent",
         "NNet.Game.SPeerSetSyncPlayingTimeEvent",
         "NNet.Game.SSelectionDeltaEvent",
         "NNet.Game.SUserFinishedLoadingSyncEvent",
         "NNet.Game.SUserOptionsEvent"})
  def testMessageEvents(self):
    """Checks the set of message event types present in the test replay."""
    events = list(self._replay.message_events())
    event_types = set(s["_event"] for s in events)
    self.assertEqual(
        event_types,
        {"NNet.Game.SLoadingProgressMessage"})
  def testAttributesEvents(self):
    """The test replay contains no attributes events at all."""
    events = list(self._replay.attributes_events())
    self.assertEmpty(events)
# Entry point: run the tests via absltest's runner.
if __name__ == "__main__":
  absltest.main()
| pysc2-master | pysc2/lib/replay/sc2_replay_test.py |
# Copyright 2021 DeepMind Technologies Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions for loading replay data using the s2protocol library."""
import io
import json
import types
import mpyq
from s2protocol import versions as s2versions
import tree
def _convert_to_str(s):
if isinstance(s, bytes):
return bytes.decode(s)
else:
return s
def _convert_all_to_str(structure):
  """Recursively decodes every bytes leaf of `structure` to str."""
  # Generators must be materialised before `tree` can traverse them.
  if isinstance(structure, types.GeneratorType):
    structure = list(structure)
  return tree.map_structure(_convert_to_str, structure)
class SC2Replay(object):
  """Helper class for loading and extracting data using s2protocol library."""

  def __init__(self, replay_data):
    """Construct SC2Replay helper for extracting data from a replay."""
    (self._header, self._metadata, self._extracted,
     self._protocol) = _extract(replay_data)

  def details(self):
    """Returns the decoded replay details struct.

    Falls back to the `replay.details.backup` blob when the primary
    `replay.details` entry is absent from the archive.
    """
    details_key = b"replay.details"
    if details_key not in self._extracted:
      details_key = b"replay.details.backup"
    return _convert_all_to_str(
        self._protocol.decode_replay_details(self._extracted[details_key]))

  def init_data(self):
    """Returns the decoded replay init data (sync lobby state etc.)."""
    return _convert_all_to_str(
        self._protocol.decode_replay_initdata(
            self._extracted[b"replay.initData"]))

  def tracker_events(self, filter_fn=None):
    """Yield tracker events from the replay in s2protocol format."""
    # `filter_fn`, if given, receives each decoded event dict and returns
    # whether to yield it; the same contract applies to the methods below.
    for event in _convert_all_to_str(
        self._protocol.decode_replay_tracker_events(
            self._extracted[b"replay.tracker.events"])):
      if not filter_fn or filter_fn(event):
        yield event

  def game_events(self, filter_fn=None):
    """Yield game events from the replay in s2protocol format."""
    for event in _convert_all_to_str(
        self._protocol.decode_replay_game_events(
            self._extracted[b"replay.game.events"])):
      if not filter_fn or filter_fn(event):
        yield event

  def message_events(self, filter_fn=None):
    """Yield message events from the replay in s2protocol format."""
    for event in _convert_all_to_str(
        self._protocol.decode_replay_message_events(
            self._extracted[b"replay.message.events"])):
      if not filter_fn or filter_fn(event):
        yield event

  def attributes_events(self, filter_fn=None):
    """Yield attribute events from the replay in s2protocol format."""
    for event in _convert_all_to_str(
        self._protocol.decode_replay_attributes_events(
            self._extracted[b"replay.attributes.events"])):
      if not filter_fn or filter_fn(event):
        yield event

  @property
  def metadata(self):
    # Parsed contents of the archive's `replay.gamemetadata.json` entry.
    return self._metadata

  @property
  def protocol(self):
    # The build-specific s2protocol module used for all decoding.
    return self._protocol
def _extract(contents):
  """Extract a replay using s2protocol.

  Args:
    contents: raw bytes of an SC2Replay (MPQ archive) file.

  Returns:
    A (header, metadata, extracted, protocol) tuple: the decoded replay
    header, the parsed game metadata JSON, the dict of extracted archive
    entries, and the build-matched s2protocol module.

  Raises:
    ValueError: if no protocol module exists for the replay's base build.
  """
  # MPQArchive wants a file-like object, so wrap the bytes in a buffer.
  buffer = io.BytesIO(contents)
  archive = mpyq.MPQArchive(buffer)
  extracted = archive.extract()
  metadata = json.loads(
      bytes.decode(extracted[b"replay.gamemetadata.json"], "utf-8"))
  # The header lives in the MPQ user data block, decodable by any protocol.
  header_bytes = archive.header["user_data_header"]["content"]
  header = s2versions.latest().decode_replay_header(header_bytes)
  base_build = header["m_version"]["m_baseBuild"]
  protocol = s2versions.build(base_build)
  if protocol is None:
    raise ValueError("Could not load protocol {} for replay".format(base_build))
  return header, metadata, extracted, protocol
| pysc2-master | pysc2/lib/replay/sc2_replay.py |
#!/usr/bin/python
#
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Config file for monatomic water in the cubic or hexagonal ice phases."""
from flows_for_atomic_solids.experiments import utils
from flows_for_atomic_solids.models import attention
from flows_for_atomic_solids.models import coupling_flows
from flows_for_atomic_solids.models import particle_model_constructors
from flows_for_atomic_solids.models import particle_models
from flows_for_atomic_solids.systems.monatomic_water import MonatomicWaterEnergy
import jax.numpy as jnp
from ml_collections import config_dict
# Density, temperature and system shapes and sizes below chosen for comparison
# with the paper by Quigley (https://doi.org/10.1063/1.4896376, see Table 1).
BOLTZMANN_CONSTANT = 0.0019872067 # in units of kcal/mol K
QUIGLEY_DENSITY = 0.033567184 # inverse cubic Angstrom
QUIGLEY_TEMPERATURE = 200. # Kelvin
BOX_FUNS = {
'hex': utils.get_hexagonal_box_lengths,
'cubic': utils.get_cubic_box_lengths,
}
LATTICE_MODELS = {
'hex': particle_models.HexagonalIceLattice,
'cubic': particle_models.DiamondCubicLattice,
}
FREQUENCIES = {
8: 8,
64: 8,
216: 16,
512: 24,
}
def get_config(num_particles: int, lattice: str) -> config_dict.ConfigDict:
  """Returns the config.

  Args:
    num_particles: system size; must be a key of `FREQUENCIES`.
    lattice: 'hex' or 'cubic', selecting the box shape and the base lattice.

  Returns:
    A `ConfigDict` with state, model, energies and train/test settings.
  """
  box_fun = BOX_FUNS[lattice]
  lattice_model = LATTICE_MODELS[lattice]
  box_lengths = box_fun(num_particles, density=QUIGLEY_DENSITY, dim=3)
  num_frequencies = FREQUENCIES[num_particles]
  config = config_dict.ConfigDict()
  config.state = dict(
      num_particles=num_particles,
      # Inverse temperature 1/(kT) in (kcal/mol)^-1.
      beta=1./(QUIGLEY_TEMPERATURE * BOLTZMANN_CONSTANT),
      # The simulation box is centred on the origin.
      lower=-box_lengths/2.,
      upper=box_lengths/2.,
  )
  # Transformer-based conditioner shared by all coupling layers.
  conditioner = dict(
      constructor=coupling_flows.make_equivariant_conditioner,
      kwargs=dict(
          embedding_size=256,
          num_frequencies=num_frequencies,
          conditioner_constructor=attention.Transformer,
          conditioner_kwargs=dict(
              num_heads=2,
              num_layers=2,
              dropout_rate=0.,
              use_layernorm=False,
              # Zero-init of the final layer makes the flow start near the
              # identity transformation.
              w_init_final=jnp.zeros))
  )
  translation_invariant = True
  config.model = dict(
      constructor=particle_model_constructors.make_particle_model,
      kwargs=dict(
          bijector=dict(
              constructor=coupling_flows.make_split_coupling_flow,
              kwargs=dict(
                  num_layers=24,
                  num_bins=16,
                  conditioner=conditioner,
                  permute_variables=True,
                  split_axis=-1,
                  use_circular_shift=True,
                  prng=42,
              ),
          ),
          base=dict(
              constructor=lattice_model,
              kwargs=dict(
                  noise_scale=0.2,
                  remove_corner=translation_invariant,
              ),
          ),
          translation_invariant=translation_invariant,
      ),
  )
  shared_kwargs = dict(box_length=box_lengths)
  # The training energy is regularised (linearised at short distances) for
  # stable gradients; the test energy is the unmodified potential.
  config.train_energy = dict(
      constructor=MonatomicWaterEnergy,
      kwargs=dict(min_distance=0.01, linearize_below=1.2, **shared_kwargs)
  )
  config.test_energy = dict(
      constructor=MonatomicWaterEnergy,
      kwargs=dict(**shared_kwargs)
  )
  config.train = dict(
      batch_size=128,
      learning_rate=7e-5,
      learning_rate_decay_steps=[250000, 500000],
      learning_rate_decay_factor=0.1,
      seed=42,
      max_gradient_norm=10000.,
  )
  config.test = dict(
      test_every=500,
      batch_size=2048,
  )
  return config
| flows_for_atomic_solids-main | experiments/monatomic_water_config.py |
#!/usr/bin/python
#
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| flows_for_atomic_solids-main | experiments/__init__.py |
#!/usr/bin/python
#
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for running experiments."""
from typing import Callable, Optional, Sequence, Union
import chex
import jax.numpy as jnp
import numpy as np
Array = chex.Array
Numeric = Union[Array, int, float]
def get_lr_schedule(base_lr: float,
                    lr_decay_steps: Sequence[int],
                    lr_decay_factor: float) -> Callable[[Numeric], Numeric]:
  """Returns a callable mapping an update step to its learning rate.

  The rate starts at `base_lr` and is multiplied by `lr_decay_factor` once
  for every decay boundary that has been passed.
  """
  if not lr_decay_steps:
    # No boundaries: the rate is constant.
    return lambda _: base_lr
  boundaries = jnp.array(lr_decay_steps)
  if not jnp.all(boundaries[1:] > boundaries[:-1]):
    raise ValueError('Expected learning rate decay steps to be increasing, got '
                     f'{boundaries}.')

  def lr_schedule(update_step: Numeric) -> Array:
    # One decay per boundary already reached.
    num_decays = jnp.sum(boundaries <= update_step)
    return base_lr * lr_decay_factor**num_decays

  return lr_schedule
def get_orthorhombic_box_lengths(
    num_particles: int, density: float, dim: int, shape_factor: Array,
    repeats: Optional[Array]) -> Array:
  """Returns edge lengths of an orthorhombic box.

  The box volume is fixed by `num_particles / density`; its shape is given
  by `shape_factor` scaled per-dimension by the (optional) `repeats`.
  """
  assert dim == len(shape_factor)
  volume = num_particles / density
  if repeats is None:
    repeats = np.ones(dim, dtype=int)
  relative_edges = shape_factor * repeats
  # Uniform scale that makes the box enclose exactly `volume`.
  scale = (volume / np.prod(relative_edges)) ** (1./dim)
  return scale * relative_edges
def get_hexagonal_box_lengths(
    num_particles: int, density: float, dim: int,
    repeats: Optional[Array] = None) -> Array:
  """Returns edge lengths of an orthorhombic box for Ih packing."""
  # Aspect ratios of the orthorhombic cell used to tile the hexagonal lattice.
  hex_aspect = np.array([1.0, np.sqrt(3), np.sqrt(8/3)])
  return get_orthorhombic_box_lengths(
      num_particles, density, dim, hex_aspect, repeats)
def get_cubic_box_lengths(
    num_particles: int, density: float, dim: int) -> Array:
  """Returns the edge lengths of a cubic simulation box."""
  volume = num_particles / density
  edge = volume ** (1./dim)
  return np.full([dim], edge)
| flows_for_atomic_solids-main | experiments/utils.py |
#!/usr/bin/python
#
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Config file for a Lennard-Jones system in the solid FCC phase."""
from flows_for_atomic_solids.experiments import utils
from flows_for_atomic_solids.models import attention
from flows_for_atomic_solids.models import coupling_flows
from flows_for_atomic_solids.models import particle_model_constructors
from flows_for_atomic_solids.models import particle_models
from flows_for_atomic_solids.systems.lennard_jones import LennardJonesEnergy
import jax.numpy as jnp
from ml_collections import config_dict
FREQUENCIES = {
32: 8,
256: 16,
500: 32,
}
def get_config(num_particles: int) -> config_dict.ConfigDict:
  """Returns the config.

  Args:
    num_particles: system size; must be a key of `FREQUENCIES`.

  Returns:
    A `ConfigDict` with state, model, energies and train/test settings.
  """
  box_lengths = utils.get_cubic_box_lengths(num_particles,
                                            density=1.28,
                                            dim=3)
  num_frequencies = FREQUENCIES[num_particles]
  config = config_dict.ConfigDict()
  config.state = dict(
      num_particles=num_particles,
      # Inverse temperature in reduced Lennard-Jones units.
      beta=0.5,
      # The simulation box is centred on the origin.
      lower=-box_lengths/2.,
      upper=box_lengths/2.,
  )
  # Transformer-based conditioner shared by all coupling layers.
  conditioner = dict(
      constructor=coupling_flows.make_equivariant_conditioner,
      kwargs=dict(
          embedding_size=256,
          num_frequencies=num_frequencies,
          conditioner_constructor=attention.Transformer,
          conditioner_kwargs=dict(
              num_heads=2,
              num_layers=2,
              dropout_rate=0.,
              use_layernorm=False,
              # Zero-init of the final layer makes the flow start near the
              # identity transformation.
              w_init_final=jnp.zeros))
  )
  translation_invariant = True
  config.model = dict(
      constructor=particle_model_constructors.make_particle_model,
      kwargs=dict(
          bijector=dict(
              constructor=coupling_flows.make_split_coupling_flow,
              kwargs=dict(
                  num_layers=24,
                  num_bins=16,
                  conditioner=conditioner,
                  permute_variables=True,
                  split_axis=-1,
                  use_circular_shift=True,
                  prng=42,
              ),
          ),
          base=dict(
              constructor=particle_models.FaceCentredCubicLattice,
              kwargs=dict(
                  noise_scale=0.01,
                  remove_corner=translation_invariant,
              ),
          ),
          translation_invariant=translation_invariant,
      ),
  )
  shared_kwargs = dict(
      cutoff=2.7,
      box_length=box_lengths,
      lambda_lj=1.,
      shift_energy=True)
  # The training energy is regularised (linearised at short distances) for
  # stable gradients; the test energy is the unmodified potential.
  config.train_energy = dict(
      constructor=LennardJonesEnergy,
      kwargs=dict(min_distance=0.01, linearize_below=0.8, **shared_kwargs)
  )
  config.test_energy = dict(
      constructor=LennardJonesEnergy,
      kwargs=dict(min_distance=0., **shared_kwargs)
  )
  config.train = dict(
      batch_size=128,
      learning_rate=7e-5,
      learning_rate_decay_steps=[250000, 500000],
      learning_rate_decay_factor=0.1,
      seed=42,
      max_gradient_norm=10000.,
  )
  config.test = dict(
      test_every=500,
      batch_size=2048,
  )
  return config
| flows_for_atomic_solids-main | experiments/lennard_jones_config.py |
#!/usr/bin/python
#
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Energy-based training of a flow model on an atomistic system."""
from typing import Callable, Dict, Tuple, Union
from absl import app
from absl import flags
import chex
import distrax
from flows_for_atomic_solids.experiments import lennard_jones_config
from flows_for_atomic_solids.experiments import monatomic_water_config
from flows_for_atomic_solids.experiments import utils
from flows_for_atomic_solids.utils import observable_utils as obs_utils
import haiku as hk
import jax
import jax.numpy as jnp
import optax
Array = chex.Array
Numeric = Union[Array, float]
flags.DEFINE_enum('system', 'mw_cubic_64',
['mw_cubic_8', 'mw_cubic_64', 'mw_cubic_216', 'mw_cubic_512',
'mw_hex_64', 'mw_hex_216', 'mw_hex_512',
'lj_32', 'lj_256', 'lj_500',
], 'System and number of particles to train.')
flags.DEFINE_integer('num_iterations', int(10**6), 'Number of training steps.')
FLAGS = flags.FLAGS
def _num_particles(system: str) -> int:
return int(system.split('_')[-1])
def _get_loss(
    model: distrax.Distribution,
    energy_fn: Callable[[Array], Array],
    beta: Numeric,
    num_samples: int) -> Tuple[Array, Dict[str, Array]]:
  """Returns the loss and stats.

  Draws `num_samples` configurations from `model` and evaluates the
  energy-based objective E_q[beta * U(x) + log q(x)], which equals
  KL(q || p) up to the unknown log-normaliser of p ∝ exp(-beta * U).

  Args:
    model: distribution supporting `sample_and_log_prob`.
    energy_fn: maps a batch of samples to per-sample energies U(x).
    beta: inverse temperature of the target Boltzmann distribution.
    num_samples: number of samples drawn per evaluation.

  Returns:
    A (loss, stats) tuple; `stats` holds per-sample energies, model
    log-probs and unnormalised target log-probs.
  """
  rng_key = hk.next_rng_key()
  samples, log_prob = model.sample_and_log_prob(
      seed=rng_key, sample_shape=num_samples)
  energies = energy_fn(samples)
  energy_loss = jnp.mean(beta * energies + log_prob)
  loss = energy_loss
  stats = {
      'energy': energies,
      'model_log_prob': log_prob,
      # Unnormalised: the constant -log Z is unknown and omitted.
      'target_log_prob': -beta * energies
  }
  return loss, stats
def main(_):
  """Builds the configured system and runs the training/evaluation loop."""
  system = FLAGS.system
  # Dispatch on the system-name prefix to the matching config module.
  if system.startswith('lj'):
    config = lennard_jones_config.get_config(_num_particles(system))
  elif system.startswith('mw_cubic'):
    config = monatomic_water_config.get_config(_num_particles(system), 'cubic')
  elif system.startswith('mw_hex'):
    config = monatomic_water_config.get_config(_num_particles(system), 'hex')
  else:
    raise KeyError(system)
  state = config.state
  energy_fn_train = config.train_energy.constructor(
      **config.train_energy.kwargs)
  energy_fn_test = config.test_energy.constructor(**config.test_energy.kwargs)
  lr_schedule_fn = utils.get_lr_schedule(
      config.train.learning_rate, config.train.learning_rate_decay_steps,
      config.train.learning_rate_decay_factor)
  # optax applies updates additively, so scale by -1 for gradient descent.
  optimizer = optax.chain(
      optax.scale_by_adam(),
      optax.scale_by_schedule(lr_schedule_fn),
      optax.scale(-1))
  if config.train.max_gradient_norm is not None:
    # Clip before the optimiser transformations.
    optimizer = optax.chain(
        optax.clip_by_global_norm(config.train.max_gradient_norm), optimizer)

  def create_model():
    # Builds the flow model for the configured box and particle count.
    return config.model['constructor'](
        num_particles=state.num_particles,
        lower=state.lower,
        upper=state.upper,
        **config.model['kwargs'])

  def loss_fn():
    """Loss function for training."""
    model = create_model()
    loss, stats = _get_loss(
        model=model,
        energy_fn=energy_fn_train,
        beta=state.beta,
        num_samples=config.train.batch_size,
    )
    metrics = {
        'loss': loss,
        'energy': jnp.mean(stats['energy']),
        'model_entropy': -jnp.mean(stats['model_log_prob']),
    }
    return loss, metrics

  def eval_fn():
    """Evaluation function."""
    model = create_model()
    # Evaluation uses the unregularised test energy and a larger batch.
    loss, stats = _get_loss(
        model=model,
        energy_fn=energy_fn_test,
        beta=state.beta,
        num_samples=config.test.batch_size,
    )
    log_probs = {
        'model_log_probs': stats['model_log_prob'],
        'target_log_probs': stats['target_log_prob'],
    }
    metrics = {
        'loss': loss,
        'energy': jnp.mean(stats['energy']),
        'model_entropy': -jnp.mean(stats['model_log_prob']),
        'ess': obs_utils.compute_ess(**log_probs),
        'logz': obs_utils.compute_logz(**log_probs),
        'logz_per_particle':
            obs_utils.compute_logz(**log_probs) / state.num_particles,
    }
    return metrics

  print(f'Initialising system {system}')
  rng_key = jax.random.PRNGKey(config.train.seed)
  # hk.transform turns the closures above into pure (init, apply) pairs.
  init_fn, apply_fn = hk.transform(loss_fn)
  _, apply_eval_fn = hk.transform(eval_fn)
  rng_key, init_key = jax.random.split(rng_key)
  params = init_fn(init_key)
  opt_state = optimizer.init(params)

  def _loss(params, rng):
    loss, metrics = apply_fn(params, rng)
    return loss, metrics

  jitted_loss = jax.jit(jax.value_and_grad(_loss, has_aux=True))
  jitted_eval = jax.jit(apply_eval_fn)
  step = 0
  print('Beginning of training.')
  while step < FLAGS.num_iterations:
    # Training update.
    rng_key, loss_key = jax.random.split(rng_key)
    (_, metrics), g = jitted_loss(params, loss_key)
    if (step % 50) == 0:
      print(f'Train[{step}]: {metrics}')
    updates, opt_state = optimizer.update(g, opt_state, params)
    params = optax.apply_updates(params, updates)
    if (step % config.test.test_every) == 0:
      rng_key, val_key = jax.random.split(rng_key)
      metrics = jitted_eval(params, val_key)
      print(f'Valid[{step}]: {metrics}')
    step += 1
  print('Done')
# Entry point: parse absl flags and launch training.
if __name__ == '__main__':
  app.run(main)
| flows_for_atomic_solids-main | experiments/train.py |
#!/usr/bin/python
#
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities to build lattice-based particle models."""
from typing import Tuple, Union
import chex
import numpy as np
Array = chex.Array
def gcd(values: Array, tol: float = 3e-2) -> Union[int, float]:
  """GCD of a list of numbers (possibly floats).

  Uses the Euclidean algorithm, treating magnitudes below `tol` as zero so
  that it terminates for floating-point inputs.
  """
  result = values[0]
  for value in values[1:]:
    a, b = result, value
    while np.abs(b) >= tol:
      a, b = b, a % b
    result = a
  return result
def make_simple_lattice(lower: Array,
                        upper: Array,
                        cell_aspect: Array,
                        n: int) -> Tuple[Array, Array, Array]:
  """Returns a shifted cubic lattice with atoms at the unit cell centres.

  Args:
    lower: vector of lower box limits (its length sets the dimensionality).
    upper: vector of upper box limits, same length as `lower`.
    cell_aspect: relative edge lengths of one unit cell.
    n: total number of lattice sites; must tile the box exactly.

  Returns:
    A 3-tuple (lattice, lattice_constant, repeats): the [n, dim] site array,
    the per-dimension cell edge lengths, and the integer cells-per-dimension.

  Raises:
    ValueError: if `n` sites cannot tile the box with the given aspect.
  """
  dim = len(lower)
  assert len(upper) == dim
  assert len(cell_aspect) == dim
  box_size = upper - lower
  # Box measured in units of the cell aspect; expected to have low-integer
  # edge-length ratios (recovered via their approximate float GCD).
  normalized_box = (upper - lower) / cell_aspect
  integer_aspect = np.round(normalized_box / gcd(normalized_box)).astype(int)
  num_per_dim = np.round((n/np.prod(integer_aspect)) ** (1/dim)).astype(int)
  repeats = num_per_dim * integer_aspect
  if np.prod(repeats) != n:
    raise ValueError(f'The number of lattice points {n} does not '
                     f'match the box size {box_size} and cell aspect '
                     f'{cell_aspect}, got integer aspect {integer_aspect}, '
                     f'{repeats} repeats.')
  # endpoint=False keeps the grid half-open so periodic images don't overlap.
  points = [np.linspace(lower[i], upper[i], repeats[i], endpoint=False).T
            for i in range(dim)]
  # NOTE(review): np.meshgrid defaults to 'xy' indexing, which swaps the first
  # two axes; the flattened result still covers the full Cartesian product,
  # only the ordering of rows differs from 'ij' indexing.
  xs = np.meshgrid(*points)
  lattice = np.concatenate([x[..., None] for x in xs], axis=-1)
  lattice = lattice.reshape(np.prod(repeats), dim)
  lattice_constant = (upper - lower) / repeats
  return lattice, lattice_constant, repeats
def make_lattice(lower: Array,
                 upper: Array,
                 cell_aspect: Array,
                 atom_positions_in_cell: Array,
                 n: int) -> Tuple[Array, Array, Array]:
  """An orthorhombic lattice built by repeating a decorated unit cell.

  Args:
    lower: vector of lower box limits (its length sets the dimensionality).
    upper: vector of upper box limits, same length as `lower`.
    cell_aspect: relative edge lengths of the unit cell; `upper - lower`
      divided by `cell_aspect` should have low-integer length ratios.
    atom_positions_in_cell: [n_u, dim] matrix of fractional atom positions
      within one unit cell, so each cell contributes `n_u` atoms.
    n: total number of atoms; must be a multiple of `n_u` and compatible
      with tiling the box by whole cells.

  Returns:
    A 3-tuple (lattice, lattice_constant, repeats):
      lattice: [n, dim] array of atom positions.
      lattice_constant: vector of unit-cell edge lengths.
      repeats: integer vector of cells per dimension, such that
        `repeats * lattice_constant` equals `upper - lower`.

  Raises:
    ValueError: if `n` is not divisible by the number of atoms per cell.
  """
  atoms_per_cell = len(atom_positions_in_cell)
  num_cells, remainder = divmod(n, atoms_per_cell)
  if remainder:
    raise ValueError(f'Number of particles {n} is not divisible by the number '
                     f'of particles per cell {atoms_per_cell}')
  cell_origins, cell_lengths, repeats = make_simple_lattice(
      lower, upper, cell_aspect, num_cells)
  # Decorate every cell origin with the rescaled intra-cell atom sites.
  offsets = atom_positions_in_cell * cell_lengths
  lattice = cell_origins[..., None, :] + offsets
  lattice = lattice.reshape(-1, lattice.shape[-1])
  return lattice, cell_lengths, repeats
| flows_for_atomic_solids-main | utils/lattice_utils.py |
#!/usr/bin/python
#
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utils for computing observables in atomistic systems."""
from typing import Optional, Tuple
import chex
import jax
import jax.numpy as jnp
import scipy.special
Array = chex.Array
def _volume_sphere(radius: Array, dim: int) -> Array:
"""Computes the volume of a Euclidean ball in `dim` dimensions."""
c = jnp.pi**(dim/2) / scipy.special.gamma(1 + dim/2) # Volume of unit sphere.
return c * radius**dim
def _check_same_dim(coordinates: Array, box_length: Array) -> None:
"""Check that `coordinates` and `box_length` have equal spatial dimension."""
if coordinates.ndim < 1:
raise ValueError('The coordinates cannot be a scalar.')
if box_length.ndim < 1:
raise ValueError('The box length cannot be a scalar.')
dim_coords = coordinates.shape[-1]
dim_box = box_length.shape[-1]
if dim_coords != dim_box:
raise ValueError(
f'The dimensionality of the coordinates (got {dim_coords}) must be '
f'equal to the dimensionality of the box (got {dim_box}).')
def _pairwise_difference(coordinates: Array) -> Array:
"""Computes pairwise difference vectors.
Args:
coordinates: array with shape [..., num_particles, dim] containing particle
coordinates.
Returns:
Array with shape [..., num_particles, num_particles, dim], difference
vectors for all pairs of particles.
"""
if coordinates.ndim < 2:
raise ValueError(
f'Expected at least 2 array dimensions, got {coordinates.ndim}.')
x = coordinates[..., :, None, :]
y = coordinates[..., None, :, :]
return x - y
def pairwise_difference_pbc(coordinates: Array, box_length: Array) -> Array:
  """Computes pairwise distance vectors obeying periodic boundary conditions.

  Args:
    coordinates: array with shape [..., num_particles, dim] containing particle
      coordinates.
    box_length: array with shape [..., dim], the edge lengths of the box.

  Returns:
    Array with shape [..., num_particles, num_particles, dim], the pairwise
    distance vectors with respect to periodic boundary conditions.
  """
  _check_same_dim(coordinates, box_length)
  deltas = _pairwise_difference(coordinates)
  chex.assert_is_broadcastable(box_length.shape[:-1], coordinates.shape[:-2])
  # Add the two particle axes so the box broadcasts over all pairs.
  box_length = box_length[..., None, None, :]
  # Subtracting the nearest whole-box multiple wraps each difference onto its
  # nearest periodic image (minimum-image convention).
  return deltas - box_length * jnp.round(deltas / box_length)
def squared_distance_pbc(coordinate_deltas: Array, box_length: Array) -> Array:
  """Computes squared distance obeying periodic boundary conditions.

  Args:
    coordinate_deltas: array with shape [..., dim] containing difference
      vectors.
    box_length: array with shape [..., dim], the edge lengths of the box.

  Returns:
    Array with shape [...], the squared distances with respect to periodic
    boundary conditions.
  """
  _check_same_dim(coordinate_deltas, box_length)
  chex.assert_is_broadcastable(
      box_length.shape[:-1], coordinate_deltas.shape[:-1])
  # Wrap each difference vector onto its nearest periodic image, then square.
  coordinate_deltas_pbc = (coordinate_deltas - box_length *
                           jnp.round(coordinate_deltas / box_length))
  return jnp.sum(coordinate_deltas_pbc**2, axis=-1)
def pairwise_squared_distance_pbc(coordinates: Array,
                                  box_length: Array) -> Array:
  """Computes pairwise squared distance obeying periodic boundary conditions.

  Args:
    coordinates: array with shape [..., num_particles, dim] containing particle
      coordinates.
    box_length: array with shape [..., dim], the edge lengths of the box.

  Returns:
    Array with shape [..., num_particles, num_particles] with pairwise squared
    distances.
  """
  _check_same_dim(coordinates, box_length)
  coordinate_deltas = _pairwise_difference(coordinates)
  chex.assert_is_broadcastable(box_length.shape[:-1], coordinates.shape[:-2])
  # Insert the two particle axes so the box broadcasts over all pairs.
  return squared_distance_pbc(coordinate_deltas, box_length[..., None, None, :])
def pairwise_distance_pbc(coordinates: Array, box_length: Array) -> Array:
  """Computes pairwise distance obeying periodic boundary conditions.

  Args:
    coordinates: array with shape [..., num_particles, dim] containing particle
      coordinates.
    box_length: array with shape [..., dim], the edge lengths of the box.

  Returns:
    Array of shape [..., num_particles, num_particles] with pairwise distances.
  """
  # The diagonal entries are exactly zero (distance of a particle to itself).
  return jnp.sqrt(pairwise_squared_distance_pbc(coordinates, box_length))
def radial_distribution_function(coordinates: Array,
                                 box_length: Array,
                                 num_bins: int = 300) -> Array:
  """Computes the radial distribution function.

  The radial distribution function `g(r)`, also known as the pair correlation
  function, quantifies the variation in density as a function of distance with
  respect to a reference particle.

  Args:
    coordinates: array with shape [..., num_particles, dim] containing particle
      coordinates.
    box_length: array with shape [dim], the edge lengths of the simulation box.
    num_bins: the number of bins (resolution) for computing the radial
      distribution function.

  Returns:
    gr: array with shape [num_bins, 2] where the first column is the centre
      of the bin, `r`, and the second the estimated function value `g(r)`.

  Raises:
    ValueError: if `box_length` is not a vector of length `dim`.
  """
  num_particles, dim = coordinates.shape[-2:]
  if jnp.shape(box_length) != (dim,):
    raise ValueError(
        f'`box_length` must be a vector of length {dim}.'
        f' Got `box_length = {box_length}`.')
  min_box_length = jnp.min(box_length)
  # `jnp.prod` replaces the deprecated `jnp.product` alias, which has been
  # removed in recent JAX/NumPy versions.
  box_volume = jnp.prod(box_length)
  # Flatten all leading batch axes into a single batch axis.
  coordinates = jnp.reshape(coordinates, [-1, num_particles, dim])
  batch_size = coordinates.shape[0]
  dr = pairwise_distance_pbc(coordinates, box_length)
  # Extract all upper triangular matrix elements (each pair counted once).
  dr = jax.vmap(lambda x: x[jnp.triu_indices(x.shape[0], k=1)])(dr)
  # Under periodic boundaries, distances are only meaningful up to half the
  # smallest box edge.
  hist, bins = jnp.histogram(dr, bins=num_bins, range=(0, min_box_length / 2.))
  density = num_particles / box_volume
  volume_shell = _volume_sphere(bins[1:], dim) - _volume_sphere(bins[:-1], dim)
  # Normaliser for histogram so that `g(r)` approaches unity for large `r`.
  normaliser = volume_shell * density * batch_size * (num_particles - 1) / 2
  gr = jnp.column_stack(((bins[:-1] + bins[1:]) / 2, hist / normaliser))
  return gr
def compute_histogram(data: Array,
                      num_bins: int = 100,
                      data_range: Optional[Tuple[float, float]] = None,
                      density: bool = True) -> Array:
  """Returns a two-column array of [bin centre, histogram value] for `data`."""
  counts, edges = jnp.histogram(
      data, bins=num_bins, range=data_range, density=density)
  midpoints = (edges[:-1] + edges[1:]) / 2
  return jnp.column_stack((midpoints, counts))
def free_energy_fep(forward: Array,
                    beta: float) -> Array:
  """Free Energy Perturbation (FEP) estimate of the free energy difference.

  Implements Zwanzig's importance-sampling estimator
  -log<exp(-beta * W)> / beta (see doi.org/10.1063/1.1740409), where W are
  the forward work values.

  Args:
    forward: a one-dimensional array that contains the forward work values.
    beta: the inverse temperature.

  Returns:
    the estimated free energy difference.
  """
  log_mean_exp = (jax.scipy.special.logsumexp(-beta * forward)
                  - jnp.log(forward.size))
  return -log_mean_exp / beta
def free_energy_fep_running(forward: Array,
                            beta: float,
                            num_evals: int = 10) -> Array:
  """Returns a running average of the FEP estimate.

  Args:
    forward: a one-dimensional array that contains the forward work values.
    beta: the inverse temperature.
    num_evals: number of times the free energy difference is evaluated.

  Returns:
    an array with shape [num_evals, 2]; the first column is the number of
    samples used and the second the corresponding free energy estimate.
  """
  increment = forward.size // num_evals
  estimates = []
  for eval_idx in range(1, num_evals + 1):
    num_used = increment * eval_idx
    # Re-estimate using only the first `num_used` work values.
    estimates.append((num_used, free_energy_fep(forward[:num_used], beta)))
  return jnp.array(estimates)
def _compute_importance_weights(model_log_probs: Array,
target_log_probs: Array) -> Array:
"""Returns the normalised importance weights.
Args:
model_log_probs: an array containing the log_probs computed by the model.
target_log_probs: an array containing the target log_probs.
Returns:
the normalised importance weights.
"""
assert model_log_probs.shape == target_log_probs.shape
# Make sure all computations are done in double precision.
model_log_probs = model_log_probs.astype(jnp.float64)
target_log_probs = target_log_probs.astype(jnp.float64)
# Compute the self-normalised importance weights.
return jax.nn.softmax(target_log_probs - model_log_probs, axis=None)
def compute_ess(model_log_probs: Array, target_log_probs: Array) -> Array:
  """Returns the standard estimate of the effective sample size (ESS).

  More details can be found in https://arxiv.org/pdf/1602.03572.pdf (see
  Eq. 6).

  Args:
    model_log_probs: an array containing the log_probs computed by the model.
    target_log_probs: an array containing the target log_probs.

  Returns:
    the ESS as a percentage between 0 and 100.
  """
  weights = _compute_importance_weights(model_log_probs, target_log_probs)
  # ESS = (sum w)^2 / sum(w^2); the weights are normalised, so the numerator
  # is 1. Dividing by the sample count turns it into a fraction.
  return 100. / (jnp.sum(jnp.square(weights)) * weights.size)
def compute_logz(model_log_probs: Array, target_log_probs: Array) -> Array:
  """Estimates the log-ratio of normalisers via importance sampling.

  Args:
    model_log_probs: an array containing the log_probs computed by the model.
    target_log_probs: an array containing the target log_probs.

  Returns:
    the estimated difference of the log normalisers.
  """
  assert model_log_probs.shape == target_log_probs.shape
  # log-mean-exp of the log importance ratios.
  log_ratios = target_log_probs - model_log_probs
  lse = jax.scipy.special.logsumexp(log_ratios, axis=None)
  return lse - jnp.log(target_log_probs.size)
def compute_importance_estimate(x: Array,
                                model_log_probs: Array,
                                target_log_probs: Array) -> Array:
  """Estimates the mean of `x` under the target via importance sampling."""
  weights = _compute_importance_weights(model_log_probs, target_log_probs)
  if x.shape != weights.shape:
    raise ValueError('The shape of the importance sampling weights '
                     f'{weights.shape} differs from the shape of the data '
                     f'{x.shape} but is expected to be the same.')
  weighted_x = weights * x
  return jnp.sum(weighted_x)
| flows_for_atomic_solids-main | utils/observable_utils.py |
#!/usr/bin/python
#
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Distrax bijectors."""
from typing import Union
import chex
import distrax
import jax.numpy as jnp
Array = chex.Array
class CircularShift(distrax.Lambda):
  """Shift with wrapping around. Or, translation on a torus."""

  def __init__(self,
               shift: Array,
               lower: Union[float, Array],
               upper: Union[float, Array]):
    # Validate the ordering only for scalar bounds: comparing arrays causes
    # problems under jit, and wrapping itself works fine when upper < lower.
    both_scalar = jnp.isscalar(lower) and jnp.isscalar(upper)
    if both_scalar and lower >= upper:
      raise ValueError('`lower` must be less than `upper`.')
    try:
      width = upper - lower
    except TypeError as e:
      raise ValueError('`lower` and `upper` must be broadcastable to same '
                       f'shape, but `lower`={lower} and `upper`={upper}') from e

    def wrap_into_box(t):
      return jnp.mod(t - lower, width) + lower

    # Wrap the shift up front: a very large `shift` added directly to `x`
    # would lose precision, and wrap(x + wrap(s)) == wrap(x + s) anyway.
    # The same argument applies to `y - shift` in the inverse.
    wrapped_shift = wrap_into_box(shift)
    super().__init__(
        forward=lambda x: wrap_into_box(x + wrapped_shift),
        inverse=lambda y: wrap_into_box(y - wrapped_shift),
        forward_log_det_jacobian=jnp.zeros_like,
        inverse_log_det_jacobian=jnp.zeros_like,
        event_ndims_in=0,
        is_constant_jacobian=True)
class Rescale(distrax.ScalarAffine):
  """Affine map taking [lower_in, upper_in] onto [lower_out, upper_out]."""

  def __init__(self,
               lower_in: Union[float, Array],
               upper_in: Union[float, Array],
               lower_out: Union[float, Array],
               upper_out: Union[float, Array]):
    # The slope is the ratio of interval widths; the offset is chosen so that
    # `lower_in` maps exactly onto `lower_out`.
    scale = (upper_out - lower_out) / (upper_in - lower_in)
    shift = lower_out - scale * lower_in
    super().__init__(scale=scale, shift=shift)
| flows_for_atomic_solids-main | models/bijectors.py |
#!/usr/bin/python
#
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Attention modules."""
import math
from typing import Optional
import chex
import haiku as hk
import jax
import jax.numpy as jnp
Array = chex.Array
class Attention(hk.Module):
  """Multi-headed dot-product attention."""
  def __init__(self,
               num_heads: int,
               w_init: Optional[hk.initializers.Initializer] = None,
               w_init_final: Optional[hk.initializers.Initializer] = None,
               name: Optional[str] = None):
    """Constructor.

    Args:
      num_heads: number of attention heads. Must divide the feature
        dimensionality of the queries passed to `__call__`.
      w_init: initializer for the query/key/value projection weights.
        Defaults to `VarianceScaling(1.)`.
      w_init_final: initializer for the final output projection weights.
        Defaults to `VarianceScaling(1.)`.
      name: name of the module.
    """
    super().__init__(name=name)
    self._num_heads = num_heads
    default = hk.initializers.VarianceScaling(1.)
    self._w_init = default if w_init is None else w_init
    self._w_init_final = default if w_init_final is None else w_init_final
  @hk.transparent
  def _multihead_linear(self, x: Array, head_size: int) -> Array:
    """Linearly project `x` to have `head_size` dimensions per head."""
    out = hk.Linear(self._num_heads * head_size, w_init=self._w_init)(x)
    # Split the projected features into separate heads:
    # [..., num_heads * head_size] -> [..., num_heads, head_size].
    shape = out.shape[:-1] + (self._num_heads, head_size)
    return jnp.reshape(out, shape)
  def __call__(self, q: Array, k: Array, v: Array) -> Array:
    """Apply attention with queries `q`, keys `k` and values `v`.

    Args:
      q: array of shape [..., N_q, D_q].
      k: array of shape [..., N_k, D_k].
      v: array of shape [..., N_k, D_v].

    Returns:
      Array of shape [..., N_q, D_q].

    Raises:
      ValueError: if D_q is not divisible by `num_heads`.
    """
    num_dims = q.shape[-1]
    if num_dims % self._num_heads != 0:
      raise ValueError(f'The number of dimensions ({num_dims}) is not divisible'
                       f' by the number of heads ({self._num_heads}).')
    head_size = num_dims // self._num_heads
    # Preprocess queries, keys and values.
    q = self._multihead_linear(q, head_size)
    k = self._multihead_linear(k, head_size)
    v = self._multihead_linear(v, head_size)
    # Compute attention matrix. Dividing the logits by sqrt(head_size) keeps
    # their magnitude independent of the per-head dimensionality.
    scale = math.sqrt(head_size)
    attention = jnp.einsum('...thd,...Thd->...htT', q, k) / scale
    attention = jax.nn.softmax(attention)
    # Attend over values and concatenate head outputs.
    attended_v = jnp.einsum('...htT,...Thd->...thd', attention, v)
    attended_v = jnp.reshape(attended_v, attended_v.shape[:-2] + (num_dims,))
    # Final linear layer mixes information across heads.
    return hk.Linear(num_dims, w_init=self._w_init_final)(attended_v)
class SelfAttention(Attention):
  """Multi-headed dot-product self-attention.

  Identical to `Attention`, except that queries, keys and values are all
  derived from the same input.
  """

  def __call__(self, x: Array) -> Array:
    """Applies self-attention to `x` of shape [..., num_points, num_dims]."""
    q = k = v = x
    return super().__call__(q, k, v)
class _DenseBlock(hk.Module):
  """A one-hidden-layer MLP (with GELU) whose output size equals its input."""

  def __init__(self,
               widening_factor: int,
               w_init: hk.initializers.Initializer,
               w_init_final: hk.initializers.Initializer,
               name: Optional[str] = None):
    """Constructor.

    Args:
      widening_factor: ratio of the hidden-layer size to the input size.
      w_init: initializer for the first linear layer.
      w_init_final: initializer for the final linear layer.
      name: name of the module.
    """
    super().__init__(name=name)
    self._widening_factor = widening_factor
    self._w_init = w_init
    self._w_init_final = w_init_final

  def __call__(self, x: Array) -> Array:
    out_size = x.shape[-1]
    # Widen, apply the GELU nonlinearity, then project back down.
    hidden = hk.Linear(self._widening_factor * out_size,
                       w_init=self._w_init)(x)
    hidden = jax.nn.gelu(hidden)
    return hk.Linear(out_size, w_init=self._w_init_final)(hidden)
def _layer_norm(x: Array, name: Optional[str] = None) -> Array:
  """Applies layer normalisation (with learned scale and offset) to `x`."""
  norm = hk.LayerNorm(axis=-1, create_scale=True, create_offset=True,
                      name=name)
  return norm(x)
class Transformer(hk.Module):
  """A transformer model.

  A stack of `num_layers` pre-layernorm blocks; each block applies multi-head
  self-attention and then a position-wise MLP, both with residual connections
  and optional dropout.
  """
  def __init__(self,
               num_heads: int,
               num_layers: int,
               dropout_rate: float = 0.,
               use_layernorm: bool = True,
               w_init: Optional[hk.initializers.Initializer] = None,
               w_init_final: Optional[hk.initializers.Initializer] = None,
               name: Optional[str] = None):
    """Constructor.

    Args:
      num_heads: number of attention heads per self-attention layer.
      num_layers: number of transformer blocks.
      dropout_rate: dropout probability applied after the attention and MLP
        sub-layers. If nonzero, `is_training` must be passed to `__call__`.
      use_layernorm: whether to apply layer normalisation before each
        sub-layer and at the output.
      w_init: initializer for the attention q/k/v projections and the first
        MLP layer of each block. Defaults to
        `VarianceScaling(2 / sqrt(num_layers))`.
      w_init_final: initializer for the attention output projection and the
        last MLP layer of each block. Defaults to
        `VarianceScaling(2 / sqrt(num_layers))`.
      name: name of the module.
    """
    super().__init__(name=name)
    self._num_heads = num_heads
    self._num_layers = num_layers
    self._dropout_rate = dropout_rate
    if use_layernorm:
      self._maybe_layer_norm = _layer_norm
    else:
      # Identity with the same signature as `_layer_norm`.
      self._maybe_layer_norm = lambda h, name: h
    # Default initialisation is scaled down with depth.
    default = hk.initializers.VarianceScaling(2. / math.sqrt(num_layers))
    self._w_init = default if w_init is None else w_init
    self._w_init_final = default if w_init_final is None else w_init_final
  def __call__(self, x: Array, is_training: Optional[bool] = None) -> Array:
    """Applies the transformer.

    Args:
      x: array of shape [..., num_points, num_dims].
      is_training: whether we're training or not. Must be provided when dropout
        is used, otherwise it can be left unspecified.

    Returns:
      Array of same shape as `x`.
    """
    if self._dropout_rate != 0. and is_training is None:
      raise ValueError('`is_training` must be specified when dropout is used.')
    # Dropout is disabled at evaluation time (`is_training` False or None).
    dropout_rate = self._dropout_rate if is_training else 0.
    h = x
    for i in range(self._num_layers):
      # Attention sub-layer (pre-layernorm, residual connection).
      h_norm = self._maybe_layer_norm(h, name=f'h{i}_ln_1')
      h_attn = SelfAttention(num_heads=self._num_heads,
                             w_init=self._w_init,
                             w_init_final=self._w_init_final,
                             name=f'h{i}_attn')(h_norm)
      h_attn = hk.dropout(hk.next_rng_key(), dropout_rate, h_attn)
      h = h + h_attn
      # MLP sub-layer (pre-layernorm, residual connection).
      h_norm = self._maybe_layer_norm(h, name=f'h{i}_ln_2')
      h_dense = _DenseBlock(widening_factor=4,
                            w_init=self._w_init,
                            w_init_final=self._w_init_final,
                            name=f'h{i}_mlp')(h_norm)
      h_dense = hk.dropout(hk.next_rng_key(), dropout_rate, h_dense)
      h = h + h_dense
    return self._maybe_layer_norm(h, name='ln_f')
| flows_for_atomic_solids-main | models/attention.py |
#!/usr/bin/python
#
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Particle models."""
import abc
import math
from typing import Optional, Tuple
import chex
import distrax
from flows_for_atomic_solids.models import distributions
from flows_for_atomic_solids.utils import lattice_utils
from flows_for_atomic_solids.utils import observable_utils as obs_utils
import jax
import jax.numpy as jnp
import numpy as np
from tensorflow_probability.substrates import jax as tfp
Array = chex.Array
PRNGKey = Array
class ParticleModel(distrax.Distribution, metaclass=abc.ABCMeta):
  """A distribution over particles confined in a box.

  It assumes that the box has periodic boundary conditions (it's a torus).

  A sample from this distribution is a set of N particles in a D-dimensional
  box. Each particle is described by a vector of D position coordinates. A
  sample from this distribution has shape [N, D].
  """
  def __init__(self,
               num_particles: int,
               lower: Array,
               upper: Array):
    """Constructor.

    Args:
      num_particles: number of particles.
      lower: array of shape [dim], the lower ranges of the box.
      upper: array of shape [dim], the upper ranges of the box.

    Raises:
      ValueError: if `num_particles` is less than 1, if `lower` or `upper`
        are not one-dimensional, or if their shapes differ or any component
        of `lower` is not strictly below the corresponding `upper`.
    """
    if num_particles < 1:
      raise ValueError(
          f'The number of particles must be at least 1, got {num_particles}.')
    if lower.ndim != 1:
      raise ValueError(f'`lower` must have one array dimension, '
                       f'got `lower.ndim = {lower.ndim}`.')
    if upper.ndim != 1:
      raise ValueError(f'`upper` must have one array dimension, '
                       f'got `upper.ndim = {upper.ndim}`.')
    (dim,) = lower.shape
    if upper.shape != (dim,):
      raise ValueError(
          f'`lower` and `upper` must have the same shape. Got '
          f'`lower.shape = {lower.shape}` and `upper.shape = {upper.shape}`.')
    if np.any(lower >= upper):
      raise ValueError(
          f'`lower` components must be less than `upper` components. '
          f'Got `lower == {lower}` and `upper == {upper}`.')
    self._num_particles = num_particles
    self._dim = dim
    self._lower = lower
    self._upper = upper
    # Box edge lengths along each dimension.
    self._width = upper - lower
    super().__init__()
  @property
  def num_particles(self) -> int:
    """Number of particles N in each event."""
    return self._num_particles
  @property
  def dim(self) -> int:
    """Dimensionality D of the box."""
    return self._dim
  @property
  def lower(self) -> Array:
    """Lower box boundaries, shape [dim]."""
    return self._lower
  @property
  def upper(self) -> Array:
    """Upper box boundaries, shape [dim]."""
    return self._upper
  @property
  def width(self) -> Array:
    """Box edge lengths `upper - lower`, shape [dim]."""
    return self._width
  @property
  def event_shape(self) -> Tuple[int, int]:
    """Shape of a single event: (num_particles, dim)."""
    return (self._num_particles, self._dim)
  def wrap(self, x: Array) -> Array:
    """Wraps `x` back into the box (periodic boundary conditions)."""
    return jnp.mod(x - self._lower, self._width) + self._lower
  def log_prob(self, particles: Array) -> Array:
    """Returns the log-density of `particles` after validating event shape."""
    if particles.shape[-2:] != self.event_shape:
      raise ValueError(
          f'Events of shape {particles.shape[-2:]} were passed to `log_prob`,'
          f' but `{self.name}` expects events of shape {self.event_shape}.')
    return self._log_prob_no_checks(particles)
  def _log_prob_no_checks(self, particles: Array) -> Array:
    """Called by `log_prob`. Should be implemented in a subclass."""
    raise NotImplementedError('`log_prob` is not implemented for '
                              f'`{self.name}`.')
class FlowModel(ParticleModel):
  """The pushforward of a base particle model through a flow.

  Sampling draws particles from the base model and maps them through the
  flow; log-densities follow from the change-of-variables formula. Both are
  delegated to `distrax.Transformed`.

  If the base model is invariant to particle permutations and the flow is
  equivariant to particle permutations, the resulting model will be invariant
  to particle permutations too.
  """

  def __init__(self, base_model: ParticleModel, bijector: distrax.BijectorLike):
    """Constructor.

    Args:
      base_model: the base model to transform with the flow.
      bijector: the flow, a Distrax bijector.
    """
    super().__init__(base_model.num_particles, base_model.lower,
                     base_model.upper)
    self._flow_model = distrax.Transformed(base_model, bijector)

  def _sample_n(self, key: PRNGKey, n: int) -> Array:
    return self._flow_model.sample(sample_shape=n, seed=key)

  def _sample_n_and_log_prob(self, key: PRNGKey, n: int) -> Tuple[Array, Array]:
    samples, log_prob = self._flow_model.sample_and_log_prob(
        sample_shape=n, seed=key)
    return samples, log_prob

  def _log_prob_no_checks(self, particles: Array) -> Array:
    return self._flow_model.log_prob(particles)
class TranslationInvariant(ParticleModel):
  """Translation-invariant particle model: works by inserting an extra particle.

  This model takes a base model with N particles, and produces a translation-
  invariant model with N+1 particles. It works as follows:
  1. We draw N particles from the base model.
  2. We add an extra particle at a fixed location.
  3. We choose a translation uniformly at random and apply it to all particles.

  The probability density of N+1 particles `x` is `p(x) = p(z) p(u)`, where:
  - `u` is the random translation.
  - `p(u) = 1 / (upper - lower) ^ dim` is the uniform density on the box.
  - `z` are the N particles before translation by `u`.
  - `p(z)` is the density of the base model.

  NOTE: the above procedure breaks permutation invariance. If the base model is
  permutation-invariant, the resulting distribution over N+1 particles will be
  invariant to permutations of the first N particles, but not to permutations
  of all N+1 particles.
  """
  def __init__(self, base_model: ParticleModel):
    """Constructor.

    Args:
      base_model: The base model. The number of particles of the translation-
        invariant model is the number of particles of the base model plus one.
    """
    super().__init__(base_model.num_particles + 1, base_model.lower,
                     base_model.upper)
    # We place the extra particle at the corner of the box.
    self._extra_particle = self.lower
    # Log density of the random shift: log uniform on the box.
    self._shift_log_prob = -np.sum(np.log(self.width))
    self._base_model = base_model
  def _add_particle(self, key: PRNGKey, particles: Array) -> Array:
    """Appends the fixed extra particle and applies a random global shift."""
    batch_shape = particles.shape[:-2]
    extra = jnp.tile(self._extra_particle, batch_shape + (1, 1))
    particles = jnp.concatenate((particles, extra), axis=-2)
    # One shift per sample, broadcast over all particles of that sample.
    shift = self.width * jax.random.uniform(key, batch_shape + (1, self.dim))
    particles = self.wrap(particles + shift)
    return particles
  def _sample_n(self, key: PRNGKey, n: int) -> Array:
    key1, key2 = jax.random.split(key)
    samples = self._base_model.sample(seed=key1, sample_shape=n)
    samples = self._add_particle(key2, samples)
    return samples
  def _sample_n_and_log_prob(self, key: PRNGKey, n: int) -> Tuple[Array, Array]:
    key1, key2 = jax.random.split(key)
    samples, log_prob = self._base_model.sample_and_log_prob(
        seed=key1, sample_shape=n)
    samples = self._add_particle(key2, samples)
    # Joint density is p(z) * p(u); see the class docstring.
    log_prob += self._shift_log_prob
    return samples, log_prob
  def _log_prob_no_checks(self, particles: Array) -> Array:
    # Recover the translation from the last particle (whose pre-shift
    # position is fixed), then undo it before querying the base model.
    shift = particles[..., -1:, :] - self._extra_particle
    particles = self.wrap(particles[..., :-1, :] - shift)
    return self._base_model.log_prob(particles) + self._shift_log_prob
def _log_factorial(n: int) -> float:
  """Returns the natural logarithm of `n` factorial.

  Uses `math.lgamma(n + 1)` instead of summing `log(i)` term by term: it is
  O(1) rather than O(n) and avoids the rounding error accumulated by the
  explicit summation for large `n` (e.g. large particle counts).

  Args:
    n: a non-negative integer.

  Returns:
    log(n!), with log(0!) == 0.

  Raises:
    ValueError: if `n` is negative.
  """
  if n < 0:
    raise ValueError(f'`n` must be non-negative, got {n}.')
  return math.lgamma(n + 1)
class Lattice(ParticleModel, metaclass=abc.ABCMeta):
  """A particle model based on a lattice for sampling particles.

  Samples are generated by adding random noise to the lattice followed by a
  random permutation of particles. The noise is drawn i.i.d. from a
  distribution that is defined on a suitable interval such that perturbed
  particles never escape their lattice sites.
  """
  def __init__(self,
               num_particles: int,
               lower: Array,
               upper: Array,
               noise_scale: float,
               cell_aspect: Array,
               atom_positions_in_cell: Array,
               remove_corner: bool,
               spherical_noise: bool):
    """Constructor.

    Args:
      num_particles: number of particles.
      lower: array of shape [dim], the lower ranges of the box.
      upper: array of shape [dim], the upper ranges of the box.
      noise_scale: scale for the noise distribution.
      cell_aspect: vector of length `dim` with the relative length of each
        of the unit cell axes.
      atom_positions_in_cell: [N x dim] matrix of fractional coordinates of
        atoms within each unit cell (N being the number of atoms per cell).
      remove_corner: whether the lattice excludes one particle, at the
        coordinate origin. Notice that if True, `num_particles` should not
        count the missing particle, e.g., a 2x2x2 simple cubic lattice should
        have `num_particles` equal 7 and not 8 if `remove_corner` is True.
      spherical_noise: whether to cut off the noise spherically or
        indepedently across each axis.

    Raises:
      ValueError: if `noise_scale` is negative.
    """
    super().__init__(num_particles, lower, upper)
    if noise_scale < 0.:
      raise ValueError(
          f'`noise_scale` can\'t be negative; got {noise_scale}.')
    self._cell_aspect = cell_aspect
    self._atom_positions_in_cell = atom_positions_in_cell
    self._remove_corner = remove_corner
    self._lattice, self._lattice_constant, self._repeats = self._make_lattice()
    self._spherical_noise = spherical_noise
    self._noise_dist = self._make_noise_dist(noise_scale, spherical_noise)
    # Cached log(N!): accounts for the random permutation in the density.
    self._log_num_permutations = _log_factorial(num_particles)
  @property
  def lattice(self) -> Array:
    """Lattice site positions, shape [num_particles, dim]."""
    return self._lattice
  @property
  def lattice_constant(self) -> Array:
    """Edge lengths of the unit cell."""
    return self._lattice_constant
  def _make_lattice(self) -> Tuple[Array, Array, Array]:
    """Returns a lattice and its lattice constant."""
    lattice, lc, repeats = lattice_utils.make_lattice(
        self.lower, self.upper, self._cell_aspect,
        self._atom_positions_in_cell, self.num_particles + self._remove_corner)
    if self._remove_corner:
      # Make sure the removed site is at the lower corner
      lattice = lattice - lattice[0] + self.lower
      lattice = lattice[1:]
    lattice = self.wrap(lattice)
    return lattice, lc, repeats
  def _get_lattice_cutoff(self) -> float:
    """Returns half the minimal distance between two lattice sites."""
    r_matrix = obs_utils.pairwise_distance_pbc(self.lattice, self.width)
    # Mask out the zero self-distances on the diagonal before taking the min.
    r_matrix = r_matrix + jnp.eye(self.num_particles) * r_matrix.max()
    cutoff = r_matrix.min() / 2
    return cutoff
  def _make_noise_dist(
      self, noise_scale: float, spherical: bool) -> distrax.Distribution:
    """Returns a distribution to sample the noise from."""
    if spherical:
      # Find closest lattice points, cutoff is half of that distance
      cutoff = self._get_lattice_cutoff()
      proposal = distrax.Normal(
          loc=jnp.zeros((self.num_particles, self.dim)),
          scale=noise_scale)
      proposal = distrax.Independent(proposal, 1)
      # We rejection-sample for each particle independently.
      # log_z is the log-mass of the Gaussian inside the cutoff ball (the
      # radial norm of an isotropic Gaussian is Chi-distributed).
      log_z = tfp.distributions.Chi(self.dim).log_cdf(cutoff / noise_scale)
      proposal = distributions.SphericalTruncation(proposal, cutoff, log_z)
      return distrax.Independent(proposal, 1)
    else:
      # Independent noise up to half the smallest distance along each axis
      dx = jnp.abs(obs_utils.pairwise_difference_pbc(self.lattice, self.width))
      # Ignore zeros when computing minimum distance (this is a heuristic
      # to obtain the maximal non-overlapping noise - we presume that
      # atoms that coincide on one axis are well separated on some other axis)
      dx = jnp.where(dx < 1e-9, np.amax(dx), dx)
      cutoffs = jnp.amin(dx.reshape(-1, dx.shape[-1]), axis=0) / 2.0
      # TruncatedNormal does not allow different truncation cutoffs for each
      # axis, so we scale down the std dev by the cutoffs and then scale up the
      # result by the same amount.
      proposal = tfp.distributions.TruncatedNormal(
          loc=jnp.zeros((self.num_particles, self.dim)),
          scale=noise_scale / cutoffs,
          low=-1.0,
          high=1.0)
      return distrax.Independent(
          distrax.Transformed(proposal, distrax.ScalarAffine(0., cutoffs)),
          2)
  def _sample_n_and_log_prob(self, key: PRNGKey, n: int) -> Tuple[Array, Array]:
    """Samples `n` noisy, randomly permuted lattice configurations."""
    keys = jax.random.split(key, 1 + n)
    noise, log_prob = self._noise_dist.sample_and_log_prob(
        sample_shape=n, seed=keys[0])
    samples = self.wrap(noise + self._lattice)
    samples = jax.vmap(jax.random.permutation)(keys[1:], samples)
    # The probability density p(x) is the average of p(x|z), where z is a
    # particle permutation. But because we assume perturbed particles never
    # escape their lattice cell, all but one of p(x|z) are zero, so the average
    # reduces to a division by the number of permutations.
    log_prob -= self._log_num_permutations
    return samples, log_prob
  def _sample_n(self, key: PRNGKey, n: int) -> Array:
    samples, _ = self._sample_n_and_log_prob(key, n)
    return samples
  def _wrap_displacement(self, dx: Array) -> Array:
    """Returns the difference vector to the nearest image under PBCs."""
    return dx - self.width * jnp.round(dx / self.width)
  def _get_nearest_lattice_indices(self, x: Array) -> Array:
    """Returns indices of the nearest lattice sites."""
    deltas = self.lattice[..., :, None, :] - x[..., None, :, :]
    deltas = self._wrap_displacement(deltas)
    sq_dist = jnp.sum(deltas**2, axis=-1)
    return jnp.argmin(sq_dist, axis=-2)
  def _check_single_occupation(self, indices: Array) -> bool:
    """Returns True if each lattice index appears once otherwise False."""
    index_error = jnp.sort(indices) - jnp.arange(self.num_particles)
    return jnp.all(index_error == 0)
  def _log_prob_no_checks(self, particles: Array) -> Array:
    if not self._spherical_noise:
      raise NotImplementedError('The log_prob for non-spherical noise is not '
                                'yet implemented.')
    # Assign each particle to its nearest lattice site; the density is zero
    # unless this assignment is a bijection (one particle per site).
    indices = self._get_nearest_lattice_indices(particles)
    is_valid = jnp.vectorize(self._check_single_occupation,
                             signature='(m)->()')(indices)
    noise = self._wrap_displacement(particles - self.lattice[indices])
    log_prob = self._noise_dist.log_prob(noise) - self._log_num_permutations
    return jnp.where(is_valid, log_prob, -jnp.inf)
class SimpleCubicLattice(Lattice):
  """A particle model based on a simple cubic lattice for sampling particles.

  The noise is drawn i.i.d. from a truncated Gaussian that is defined on a
  suitable interval such that perturbed particles never escape their lattice
  cell.
  """
  def __init__(self,
               num_particles: int,
               lower: Array,
               upper: Array,
               noise_scale: float,
               remove_corner: bool = False,
               cell_aspect: Optional[Array] = None):
    """Constructor.

    Args:
      num_particles: number of particles.
      lower: array of shape [dim], the lower ranges of the box.
      upper: array of shape [dim], the upper ranges of the box.
      noise_scale: scale for the noise distribution.
      remove_corner: if True, we remove the lattice site located at the corner
        of the box.
      cell_aspect: optional [dim] vector with the relative length of each unit
        cell axis; defaults to a cubic cell.
    """
    dim = lower.shape[-1]
    if cell_aspect is None:
      cell_aspect = np.ones(dim)
    # One atom per unit cell, located at the cell centre.
    atom_positions_in_cell = np.ones((1, dim)) * 0.5
    spherical_noise = False
    super().__init__(num_particles, lower, upper, noise_scale, cell_aspect,
                     atom_positions_in_cell, remove_corner, spherical_noise)
  def _log_prob_no_checks(self, particles: Array) -> Array:
    if self._remove_corner:
      raise NotImplementedError(
          '`_log_prob_no_checks` not implemented in '
          f'{self.name} when `remove_corner` is True.')
    # Noise relative to the centre of each particle's lattice cell.
    noise = jnp.mod(particles - self.lower, self._lattice_constant)
    noise -= self._lattice_constant / 2.
    log_prob = self._noise_dist.log_prob(noise)
    # Correct for the random particle permutation used when sampling.
    log_prob -= self._log_num_permutations
    # Bin edges of the lattice cells along each axis.
    bins = [l + jnp.arange(r + 1) * lc for (l, r, lc) in zip(
        self.lower, self._repeats.tolist(), self.lattice_constant)]
    def has_no_overlap(x):
      # True iff every lattice cell contains exactly one particle.
      hist, _ = jnp.histogramdd(x, bins=bins)
      return jnp.all(hist == 1)
    no_overlap = jnp.vectorize(has_no_overlap, signature='(m,n)->()')(particles)
    in_range = (particles >= self.lower) & (particles <= self.upper)
    in_range = jnp.all(in_range, axis=[-2, -1])
    # Configurations the sampler cannot generate have zero density.
    return jnp.where(in_range & no_overlap, log_prob, -jnp.inf)
class FaceCentredCubicLattice(Lattice):
  """A particle model whose sampling lattice is face centred cubic (FCC).

  The lattice is built by tiling a conventional cubic unit cell containing
  four atoms. Noise is drawn i.i.d. for each particle from a spherically
  truncated normal whose support is chosen such that the noise distributions
  of neighbouring lattice sites are pairwise disjoint.

  This lattice requires three spatial dimensions (`dim == 3`).
  """

  def __init__(self,
               num_particles: int,
               lower: Array,
               upper: Array,
               noise_scale: float,
               remove_corner: bool):
    """Constructor.

    Args:
      num_particles: number of particles.
      lower: array of shape [dim], the lower ranges of the box.
      upper: array of shape [dim], the upper ranges of the box.
      noise_scale: scale for the noise distribution.
      remove_corner: if True, we remove the lattice site located at the corner
        of the box.

    Raises:
      ValueError: if the box is not three-dimensional.
    """
    dim = lower.shape[-1]
    if dim != 3:
      raise ValueError(f'Expected the box dimensionality to be 3, got {dim}.')
    # One corner atom plus the three distinct face centres of the cubic cell.
    fcc_cell = 0.5 * np.array([[0., 0., 0.],
                               [1., 1., 0.],
                               [1., 0., 1.],
                               [0., 1., 1.]])
    super().__init__(num_particles, lower, upper, noise_scale,
                     cell_aspect=np.ones(dim),
                     atom_positions_in_cell=fcc_cell,
                     remove_corner=remove_corner,
                     spherical_noise=True)
class DiamondCubicLattice(Lattice):
  """A particle model whose sampling lattice is diamond cubic.

  The unit cell holds eight atoms: the four atoms of the FCC unit cell plus a
  second FCC sublattice displaced by a quarter of the cell diagonal. See
  https://en.wikipedia.org/wiki/Diamond_cubic for the mathematical structure
  and an illustration.

  This lattice requires three spatial dimensions (`dim == 3`).
  """

  def __init__(self,
               num_particles: int,
               lower: Array,
               upper: Array,
               noise_scale: float,
               remove_corner: bool):
    """Constructor.

    Args:
      num_particles: number of particles.
      lower: array of shape [dim], the lower ranges of the box.
      upper: array of shape [dim], the upper ranges of the box.
      noise_scale: scale for the noise distribution.
      remove_corner: if True, we remove the lattice site located at the corner
        of the box.

    Raises:
      ValueError: if the box is not three-dimensional.
    """
    dim = lower.shape[-1]
    if dim != 3:
      raise ValueError(f'Expected the box dimensionality to be 3, got {dim}.')
    # The first sublattice coincides with the FCC cell sites.
    fcc_sites = np.array([[0., 0., 0.],
                          [0., 2., 2.],
                          [2., 0., 2.],
                          [2., 2., 0.]])
    # The second sublattice sits inside the cell, offset by (1, 1, 1)/4.
    interior_sites = np.array([[3., 3., 3.],
                               [3., 1., 1.],
                               [1., 3., 1.],
                               [1., 1., 3.]])
    atom_positions_in_cell = np.concatenate([fcc_sites, interior_sites]) / 4.
    super().__init__(num_particles, lower, upper, noise_scale,
                     cell_aspect=np.ones(dim),
                     atom_positions_in_cell=atom_positions_in_cell,
                     remove_corner=remove_corner,
                     spherical_noise=True)
class HexagonalIceLattice(Lattice):
  """A particle model based on a hexagonal ice (Ice Ih) lattice.

  The hexagonal structure is represented by an orthogonal unit cell with
  eight atoms (see the derivation in the comments below).

  Note that this lattice requires three spatial dimensions (`dim == 3`).
  """
  def __init__(self,
               num_particles: int,
               lower: Array,
               upper: Array,
               noise_scale: float,
               remove_corner: bool):
    """Constructor.

    Args:
      num_particles: number of particles.
      lower: an array of shape [dim], the lower ranges of the box.
      upper: an array of shape [dim], the upper ranges of the box.
      noise_scale: scale for the noise distribution.
      remove_corner: if True, we remove the lattice site located at the corner
        of the box.

    Raises:
      ValueError: if the box is not three-dimensional.
    """
    dim = lower.shape[-1]
    if dim != 3:
      raise ValueError(f'Expected the box dimensionality to be 3, got {dim}.')
    # According to http://people.cs.uchicago.edu/~ridg/digbio12/primerice.pdf
    # within the non-orthogonal base cell:
    #
    #   base = np.array([[1, 0, 0],
    #                    [-0.5, np.sqrt(3)/2, 0],
    #                    [0, 0, 2*np.sqrt(6)/3]])
    #
    # we need 4 atoms at absolute coordinates:
    #
    #   atom_coords = np.array([[0.5, np.sqrt(3)/6, np.sqrt(6)/24],
    #                           [1.0, np.sqrt(3)/3, 15*np.sqrt(6)/24],
    #                           [0.5, np.sqrt(3)/6, 7*np.sqrt(6)/24],
    #                           [1.0, np.sqrt(3)/3, 8*np.sqrt(6)/2]])
    #
    # We make an orthogonal cell from integer multiples of the
    # non-orthogonal base:
    #
    #   ortho_base = np.array([[1, 0, 0],
    #                          [1, 2, 0],
    #                          [0, 0, 1]]) @ base
    #
    # This orthogonal cell has twice the volume of the non-orthogonal base
    # and the relative atom coordinates within it can be easily found by
    # replicating the non-orthogonal cell and retaining only atoms within
    # the orthogonal one. This results in the 8 atom positions below.
    cell_aspect = np.array([1.0, np.sqrt(3), np.sqrt(8/3)])
    # Offset of the atoms along the c-axis (a = 0.375, in units of 1/6 of
    # the cell height once divided by 6 below).
    a = 6 * 0.0625
    atom_positions_in_cell = np.array([
        [3., 5., 3 + a],
        [0., 4., 0 + a],
        [0., 2., 3 + a],
        [3., 1., 0 + a],
        [0., 2., 6 - a],
        [3., 1., 3 - a],
        [3., 5., 6 - a],
        [0., 4., 3 - a],
    ]) / 6.0
    spherical_noise = True
    super().__init__(num_particles, lower, upper, noise_scale, cell_aspect,
                     atom_positions_in_cell, remove_corner, spherical_noise)
class HalfDiamondLattice(Lattice):
  """A particle model based on a half-cell diamond lattice.

  The unit cell is 1/2 of a standard diamond cubic cell (and therefore has
  4 atoms per unit cell).

  If the base vectors of the diamond cubic cell are a=(1, 0, 0), b=(0, 1, 0)
  and c=(0, 0, 1), this cell has basis a'=(1/2, 1/2, 0), b'=(1/2, -1/2, 0),
  and c'=c.

  Note that this lattice requires three spatial dimensions (`dim == 3`).
  """
  def __init__(self,
               num_particles: int,
               lower: Array,
               upper: Array,
               noise_scale: float,
               remove_corner: bool):
    """Constructor.

    Args:
      num_particles: number of particles.
      lower: array of shape [dim], the lower ranges of the box.
      upper: array of shape [dim], the upper ranges of the box.
      noise_scale: scale for the noise distribution.
      remove_corner: if True, we remove the lattice site located at the corner
        of the box.

    Raises:
      ValueError: if the box is not three-dimensional.
    """
    dim = lower.shape[-1]
    if dim != 3:
      raise ValueError(f'Expected the box dimensionality to be 3, got {dim}.')
    # Cell axes have length ratio 1 : 1 : sqrt(2).
    cell_aspect = np.array([1.0, 1.0, np.sqrt(2.0)])
    # Half a diamond cubic cell by using base (0.5, 0.5) and (-0.5, 0.5)
    # in the x-y plane instead of (1, 0) and (0, 1)
    atom_positions_in_cell = np.array([
        [0, 0, 0],
        [0, 2, 1],
        [2, 2, 2],
        [2, 0, 3]
    ]) / 4.0
    spherical_noise = True
    super().__init__(num_particles, lower, upper, noise_scale, cell_aspect,
                     atom_positions_in_cell, remove_corner, spherical_noise)
| flows_for_atomic_solids-main | models/particle_models.py |
#!/usr/bin/python
#
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| flows_for_atomic_solids-main | models/__init__.py |
#!/usr/bin/python
#
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Constructors for particle models."""
from typing import Any, Mapping
import chex
import distrax
from flows_for_atomic_solids.models import bijectors
from flows_for_atomic_solids.models import particle_models
import numpy as np
Array = chex.Array
def make_particle_model(num_particles: int,
                        lower: Array,
                        upper: Array,
                        bijector: Mapping[str, Any],
                        base: Mapping[str, Any],
                        translation_invariant: bool,
                        ) -> particle_models.ParticleModel:
  """Builds a particle model from configurable base and flow components.

  With N particles, the model is implemented as follows:
  1. We draw N particles randomly from a base distribution.
  2. We jointly transform the particles with a flow (a Distrax bijector).

  Optionally, the model can be made invariant to translations. We do this as
  follows:
  1. We draw N-1 particles and transform them with the flow as above.
  2. We add an extra particle at a fixed location.
  3. We choose a translation uniformly at random and apply it to all particles.

  Args:
    num_particles: number of particles.
    lower: array of shape [dim], the lower ranges of the box.
    upper: array of shape [dim], the upper ranges of the box.
    bijector: configures the bijector that transforms particles. Expected to
      have the following keys:
      * 'constructor': a callable that creates the bijector.
      * 'kwargs': keyword arguments to pass to the constructor.
    base: configures the base distribution. Expected to have the following keys:
      * 'constructor': a callable that creates the base distribution.
      * 'kwargs': keyword arguments to pass to the constructor.
    translation_invariant: if True, the model is constructed to be invariant
      to translations.

  Returns:
    A particle model.
  """
  # With translation invariance, one particle is pinned at a fixed location,
  # so the flow only needs to generate N-1 particles.
  num_flow_particles = (
      num_particles - 1 if translation_invariant else num_particles)

  base_model = base['constructor'](
      num_particles=num_flow_particles,
      lower=lower,
      upper=upper,
      **base['kwargs'])

  is_cubic = len(np.unique(lower)) == 1 and len(np.unique(upper)) == 1
  if is_cubic:
    # All box sides are equal, so the flow can act on raw coordinates.
    flow = bijector['constructor'](
        event_shape=base_model.event_shape,
        lower=lower[0],
        upper=upper[0],
        **bijector['kwargs'])
  else:
    # Rectangular box: map coordinates into the unit box, apply the flow
    # there, and map the result back to the original box.
    to_unit_box = distrax.Block(
        bijectors.Rescale(lower_in=lower, upper_in=upper,
                          lower_out=0., upper_out=1.), 2)
    from_unit_box = distrax.Inverse(to_unit_box)
    flow = distrax.Chain([
        from_unit_box,
        bijector['constructor'](
            event_shape=base_model.event_shape,
            lower=0.0,
            upper=1.0,
            **bijector['kwargs']),
        to_unit_box,
    ])

  model = particle_models.FlowModel(base_model, flow)
  if translation_invariant:
    model = particle_models.TranslationInvariant(model)
  return model
| flows_for_atomic_solids-main | models/particle_model_constructors.py |
#!/usr/bin/python
#
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Distrax distributions."""
from typing import Callable, Optional, Tuple, Union
import chex
import distrax
import jax
import jax.numpy as jnp
Array = chex.Array
Numeric = Union[Array, float]
PRNGKey = Array
class RejectionSampling(distrax.Distribution):
  """A rejection-sampling based distribution.

  Samples are drawn from the `proposal` distribution provided and are accepted
  only if they are valid as judged by `are_valid_fn`.

  NOTE: By default, the density of this distribution is unnormalized and doesn't
  integrate to unity. Specifically, calling `prob` returns the density of the
  proposal distribution if the sample is valid, otherwise it returns 0. This
  makes the normalizing constant equal to the acceptance rate. Optionally, the
  user can specify the normalizing constant, which will be used when computing
  probabilities.
  """

  def __init__(self,
               proposal: distrax.Distribution,
               are_valid_fn: Callable[[Array], Array],
               log_z: Optional[Numeric] = None):
    """Constructor.

    Args:
      proposal: the proposal distribution to sample from.
      are_valid_fn: checks whether samples are valid. If the input to the
        function is of shape `batch_shape + event_shape`, then the shape of the
        output should be `batch_shape`.
      log_z: if specified, this value will be used as the log normalizing
        constant when computing probabilities. Should be equal to the log of
        the acceptance probability. Must be broadcastable to the distribution's
        batch shape.
    """
    super().__init__()
    self._proposal = proposal
    self._are_valid_fn = are_valid_fn
    # If no normalizer is given, log_z = 0: probabilities are the unnormalized
    # proposal densities (see class docstring).
    self._log_z = 0. if log_z is None else log_z
    chex.assert_is_broadcastable(jnp.shape(self._log_z), self.batch_shape)

  @property
  def event_shape(self) -> Tuple[int, ...]:
    """Shape of event of distribution samples."""
    return self._proposal.event_shape

  @property
  def batch_shape(self) -> Tuple[int, ...]:
    """Batch shape of distribution samples."""
    return self._proposal.batch_shape

  def _sample_n_and_log_prob(self, key: PRNGKey, n: int) -> Tuple[Array, Array]:
    """Returns samples and log_probs obtained via rejection sampling."""
    # This function uses a jittable implementation of rejection sampling that
    # yields exact independent samples after a (random) number of iterations.
    def body(args):
      # In each iteration we propose `n` new samples and accept those that are
      # valid.
      key, samples, log_probs, accepted = args
      key, subkey = jax.random.split(key)
      proposals, proposal_log_probs = self._proposal.sample_and_log_prob(
          seed=subkey, sample_shape=n)
      valid_sample = self._are_valid_fn(proposals)
      # Broadcast the [n, ...] validity mask over the event dimensions so it
      # can select whole events in `jnp.where` below.
      valid_sample_event = valid_sample.reshape(valid_sample.shape + (1,) *
                                                len(self.event_shape))
      # NOTE: entries accepted in an earlier iteration may be overwritten here
      # by a fresh valid draw; each replacement is an independent valid sample.
      samples = jnp.where(valid_sample_event, proposals, samples)
      log_probs = jnp.where(valid_sample, proposal_log_probs, log_probs)
      accepted = valid_sample | accepted
      return key, samples, log_probs, accepted

    def cond(args):
      # If not all samples have been accepted yet, we continue iterating.
      _, _, _, accepted = args
      return ~jnp.all(accepted)

    # Initial buffers; their contents are only read after being overwritten by
    # accepted proposals in `body`.
    samples = jnp.empty((n,) + self.batch_shape + self.event_shape)
    log_probs = jnp.empty((n,) + self.batch_shape)
    accepted = jnp.full((n,) + self.batch_shape, False)
    _, samples, log_probs, _ = jax.lax.while_loop(
        cond, body, (key, samples, log_probs, accepted))
    return samples, log_probs - self._log_z

  def _sample_n(self, key: PRNGKey, n: int) -> Array:
    samples, _ = self._sample_n_and_log_prob(key, n)
    return samples

  def log_prob(self, x: Array) -> Array:
    """Log density: proposal log-prob minus log_z if valid, else -inf."""
    valid = self._are_valid_fn(x)
    log_prob = self._proposal.log_prob(x) - self._log_z
    return jnp.where(valid, log_prob, -jnp.inf)
class SphericalTruncation(RejectionSampling):
  """Rejection sampling restricted to a sphere in event space.

  A sample drawn from `proposal` is kept only when its norm, computed over the
  entire event, does not exceed `cutoff`; all other samples are rejected.
  """

  def __init__(self,
               proposal: distrax.Distribution,
               cutoff: float,
               log_z: Optional[Numeric] = None):
    """Constructor.

    Args:
      proposal: the proposal distribution to sample from.
      cutoff: the radial cutoff outside which samples are rejected.
      log_z: the log normalizing constant, same as in the parent class.
    """
    cutoff_sq = cutoff**2
    num_event_dims = len(proposal.event_shape)

    def _inside_sphere(x: Array) -> Array:
      # Sum squared coordinates over all (trailing) event axes.
      squared_norm = jnp.sum(x**2, axis=tuple(range(-num_event_dims, 0)))
      return squared_norm <= cutoff_sq

    super().__init__(proposal, _inside_sphere, log_z)
| flows_for_atomic_solids-main | models/distributions.py |
#!/usr/bin/python
#
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Coupling flows for particle models."""
import functools
from typing import Any, Callable, Mapping, Sequence, Union
import chex
import distrax
from flows_for_atomic_solids.models import bijectors
from flows_for_atomic_solids.models import embeddings
from flows_for_atomic_solids.models import utils
import haiku as hk
import jax
import jax.numpy as jnp
from tensorflow_probability.substrates import jax as tfp
Array = chex.Array
PRNGKey = Array
def _reshape_last(x: Array, ndims: int, new_shape: Sequence[int]) -> Array:
"""Reshapes the last `ndims` dimensions of `x` to shape `new_shape`."""
if ndims <= 0:
raise ValueError(
f'Number of dimensions to reshape must be positive, got {ndims}.')
return jnp.reshape(x, x.shape[:-ndims] + tuple(new_shape))
def make_equivariant_conditioner(
    shape_transformed: Sequence[int],
    num_bijector_params: int,
    lower: float,
    upper: float,
    embedding_size: int,
    conditioner_constructor: Callable[..., Any],
    conditioner_kwargs: Mapping[str, Any],
    num_frequencies: int,
    w_init_final: hk.initializers.Initializer = jnp.zeros,
    ) -> Callable[[Array], Array]:
  """Make a permutation-equivariant conditioner for the coupling flow.

  The returned Haiku network maps particle coordinates to the parameters of a
  coupling bijector. It first embeds each coordinate with a periodic
  (circular) embedding on `[lower, upper]`, then applies a linear layer, the
  user-supplied conditioner network, and a final linear projection producing
  the bijector parameters.

  NOTE(review): permutation equivariance over particles is expected to come
  from the user-supplied conditioner network; it is not enforced here —
  confirm the supplied constructor builds an equivariant network.

  Args:
    shape_transformed: event shape of the part of the input that the coupling
      bijector transforms; only its last entry is used to size the output.
    num_bijector_params: number of bijector parameters per transformed
      dimension.
    lower: lower bound of the periodic interval, used by the embedding.
    upper: upper bound of the periodic interval, used by the embedding.
    embedding_size: output size of the linear layer applied to the embedding.
    conditioner_constructor: callable that builds the core conditioner
      network.
    conditioner_kwargs: keyword arguments for `conditioner_constructor`.
    num_frequencies: number of frequencies used by the circular embedding.
    w_init_final: initializer for the final linear layer's weights. The
      default (zeros) makes the network output zero parameters at
      initialization.

  Returns:
    A Haiku callable. Assumes its input is of shape [..., N, D1] and returns
    an output of shape [..., N, D2, K], where D2 = `shape_transformed[-1]`
    and K = `num_bijector_params`.
  """
  output_size = shape_transformed[-1]
  conditioner = conditioner_constructor(**conditioner_kwargs)
  return hk.Sequential([
      # Periodic embedding: maps each coordinate in [lower, upper] to
      # sine/cosine features so the network sees no boundary discontinuity.
      functools.partial(
          embeddings.circular, lower=lower, upper=upper,
          num_frequencies=num_frequencies),
      hk.Linear(embedding_size),
      # Core network, acting jointly on all particles.
      conditioner,
      # Project to all bijector parameters at once, [..., N, D2*K].
      hk.Linear(output_size * num_bijector_params, w_init=w_init_final),
      # Unflatten the parameters: [..., N, D2*K] -> [..., N, D2, K].
      functools.partial(
          _reshape_last, ndims=1, new_shape=(output_size, num_bijector_params)),
  ])
def make_split_coupling_flow(
    event_shape: Sequence[int],
    lower: float,
    upper: float,
    num_layers: int,
    num_bins: int,
    conditioner: Mapping[str, Any],
    permute_variables: bool,
    split_axis: int,
    use_circular_shift: bool,
    prng: Union[int, PRNGKey],
    circular_shift_init: hk.initializers.Initializer = jnp.zeros,
    ) -> distrax.Bijector:
  """Create a flow that consists of a sequence of split coupling layers.

  All coupling layers use rational-quadratic splines. Each layer of the flow
  is composed of two split coupling bijectors, where each coupling bijector
  transforms a different part of the input.

  The flow maps to and from the range `[lower, upper]`, obeying periodic
  boundary conditions.

  Args:
    event_shape: the shape of the event generated by the flow. Does not
      include the batch dimensions.
    lower: lower range of the flow.
    upper: upper range of the flow.
    num_layers: the number of layers to use. Each layer consists of two split
      coupling bijectors, where each coupling bijector transforms a different
      part of the input.
    num_bins: number of bins to use in the rational-quadratic splines.
    conditioner: a Mapping containing 'constructor' and 'kwargs' keys that
      configures the conditioner used in the coupling layers.
    permute_variables: whether to permute the dimensions along the splitting
      axis between successive layers.
    split_axis: a negative int that defines which axis to split along.
    use_circular_shift: if True, add a learned circular shift between successive
      flow layers.
    prng: either a PRNG key, or an integer seed to convert to a PRNG key. The
      PRNG key will be used to construct the permutations, when these are
      random.
    circular_shift_init: initializer for the circular shifts.

  Returns:
    The flow, a Distrax bijector.
  """
  if isinstance(prng, int):
    prng = jax.random.PRNGKey(prng)
  if split_axis >= 0:
    raise ValueError(f'Expected split axis to be negative, got {split_axis}.')

  def bijector_fn(params: Array):
    # Rational-quadratic spline on [lower, upper]. 'circular' boundary slopes
    # make the map compatible with periodic boundary conditions, and
    # `min_bin_size` keeps bins from collapsing to numerically zero width.
    return distrax.RationalQuadraticSpline(
        params,
        range_min=lower,
        range_max=upper,
        boundary_slopes='circular',
        min_bin_size=(upper - lower) * 1e-4)

  # Each coupling bijector conditions one part of the dimensions along
  # `split_axis` on the remaining ones; split roughly in half.
  split_size = event_shape[split_axis]
  split_index = split_size // 2
  # Fixed cyclic permutation, used instead of a random one when the split
  # axis is short (see below).
  cyclic_permutation = jnp.roll(jnp.arange(split_size), shift=1)
  # Number of leading event axes; used with `distrax.Block` to lift per-axis
  # bijectors so that they act on whole events.
  block_ndims = len(event_shape) + split_axis
  layers = []
  for _ in range(num_layers):
    sublayers = []
    # Variable permutation.
    if permute_variables:
      if split_size <= 3:
        # For very short axes, a random permutation adds little variety;
        # use a deterministic cyclic shift of the dimensions instead.
        permutation = cyclic_permutation
      else:
        prng, key = jax.random.split(prng)
        permutation = jax.random.permutation(key, jnp.arange(split_size))
      permute_layer = tfp.bijectors.Permute(permutation, axis=split_axis)
      permute_layer = distrax.Block(permute_layer, block_ndims)
      sublayers.append(permute_layer)
    # Circular shift.
    if use_circular_shift:
      # Learned shift (one per coordinate of the last axis), scaled by the
      # box size and applied with periodic wrap-around.
      shift = utils.Parameter(
          name='circular_shift',
          param_name='shift',
          shape=event_shape[-1:],
          init=circular_shift_init)()
      shift_layer = bijectors.CircularShift(
          (upper - lower) * shift, lower, upper)
      shift_layer = distrax.Block(shift_layer, len(event_shape))
      sublayers.append(shift_layer)
    # Coupling layers.
    for swap in [True, False]:
      # With swap=True the first `split_index` entries along `split_axis` are
      # transformed; with swap=False, the remaining ones. Together the pair
      # transforms every dimension once per layer.
      shape_transformed = list(event_shape)
      shape_transformed[split_axis] = (
          split_index if swap else split_size - split_index)
      coupling_layer = distrax.SplitCoupling(
          swap=swap,
          split_index=split_index,
          split_axis=split_axis,
          event_ndims=len(event_shape),
          bijector=bijector_fn,
          conditioner=conditioner['constructor'](
              shape_transformed=shape_transformed,
              # A rational-quadratic spline with K bins takes 3K + 1
              # parameters: K bin widths, K bin heights, K + 1 knot slopes.
              num_bijector_params=3 * num_bins + 1,
              lower=lower,
              upper=upper,
              **conditioner['kwargs']))
      sublayers.append(coupling_layer)
    layers.append(distrax.Chain(sublayers))
  return distrax.Chain(layers)
| flows_for_atomic_solids-main | models/coupling_flows.py |
#!/usr/bin/python
#
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Embeddings."""
import chex
import jax.numpy as jnp
Array = chex.Array
def circular(x: Array,
             lower: float,
             upper: float,
             num_frequencies: int) -> Array:
  """Embeds periodic coordinates as sine/cosine features on the unit circle.

  The interval [lower, upper] is wrapped exactly once around the unit circle
  at the lowest frequency, so that both endpoints map to the point (1, 0).
  Higher frequencies are integer multiples of 2*pi/(upper - lower), wrapping
  the interval around the circle multiple times.

  Args:
    x: array of shape [..., D].
    lower: lower limit, angles equal to this will be mapped to (1, 0).
    upper: upper limit, angles equal to this will be mapped to (1, 0).
    num_frequencies: number of frequencies to consider in the embedding.

  Returns:
    An array of shape [..., 2*num_frequencies*D].
  """
  fundamental = 2. * jnp.pi / (upper - lower)
  multipliers = fundamental * jnp.arange(1, num_frequencies + 1)
  # Shape [..., D, num_frequencies]: one phase per coordinate and frequency.
  phases = multipliers * (x[..., None] - lower)
  # Flatten the last two axes into [..., D*num_frequencies].
  phases = phases.reshape(x.shape[:-1] + (-1,))
  return jnp.concatenate([jnp.cos(phases), jnp.sin(phases)], axis=-1)
| flows_for_atomic_solids-main | models/embeddings.py |
#!/usr/bin/python
#
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities."""
from typing import Sequence
import chex
import haiku as hk
Array = chex.Array
class Parameter(hk.Module):
  """A Haiku module that wraps a single learnable array.

  Calling the module returns the underlying parameter, which makes it easy to
  expose a bare parameter as part of a larger Haiku model.
  """

  def __init__(self,
               name: str,
               param_name: str,
               shape: Sequence[int],
               init: hk.initializers.Initializer):
    """Creates and registers the parameter.

    Args:
      name: name of this Haiku module.
      param_name: name under which the parameter is registered.
      shape: shape of the parameter array.
      init: initializer used to create the parameter value.
    """
    super().__init__(name=name)
    self._value = hk.get_parameter(param_name, shape=shape, init=init)

  def __call__(self) -> Array:
    """Returns the parameter array."""
    return self._value
| flows_for_atomic_solids-main | models/utils.py |
#!/usr/bin/python
#
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""General-purpose energy functions."""
import abc
from typing import Optional
import chex
from flows_for_atomic_solids.utils import observable_utils as obs_utils
import jax
import jax.numpy as jnp
Array = chex.Array
def _check_same_dim(coordinates: Array, box_length: Array) -> None:
"""Check that `coordinates` and `box_length` have the same dimensionality."""
if coordinates.ndim < 1:
raise ValueError('The coordinates cannot be a scalar.')
if box_length.ndim < 1:
raise ValueError('The box length cannot be a scalar.')
dim1 = coordinates.shape[-1]
dim2 = box_length.shape[-1]
if dim1 != dim2:
raise ValueError(
f'The dimensionality of the coordinates (got {dim1}) must be equal '
f'to the dimensionality of the box (got {dim2}).')
class PotentialEnergy(abc.ABC):
  """Potential energy function of N interacting particles confined in a box.

  It assumes periodic boundary conditions.
  """

  def __init__(self, box_length: Optional[Array] = None):
    """Initializer.

    Args:
      box_length: array of shape [dim], the side lengths of the box that
        contains the particles. If None, the box length must be passed as an
        argument to the class methods.
    """
    if box_length is not None and box_length.ndim != 1:
      raise ValueError(
          f'`box_length` must be a vector. Got `box_length = {box_length}`.')
    self._box_length = box_length

  @property
  def box_length(self) -> Array:
    """Default box side lengths; raises ValueError if none was provided."""
    if self._box_length is None:
      raise ValueError('This class does not have a default box length.')
    return self._box_length

  def __call__(self,
               coordinates: Array,
               box_length: Optional[Array] = None) -> Array:
    """Alias for `energy`."""
    return self.energy(coordinates, box_length)

  @abc.abstractmethod
  def energy(self,
             coordinates: Array,
             box_length: Optional[Array] = None) -> Array:
    """Computes the potential energy.

    Args:
      coordinates: array with shape [..., num_atoms, dim] containing the
        particle coordinates.
      box_length: array with shape [..., dim] containing the box length. If
        None, the default box length will be used instead.

    Returns:
      an array with shape [...] containing the computed energy values.
    """

  def forces(self,
             coordinates: Array,
             box_length: Optional[Array] = None) -> Array:
    """Computes the forces exerted on each particle for a batch of coordinates.

    Args:
      coordinates: array with shape [..., num_atoms, dim] containing the
        particle coordinates.
      box_length: array with shape [..., dim] containing the box length. If
        None, the default box length will be used instead.

    Returns:
      an array with shape [..., num_atoms, dim] whose [..., i, :] entry contains
      the total force exerted on particle i.
    """
    if box_length is None:
      box_length = self.box_length
    _check_same_dim(coordinates, box_length)
    # Gradient of the scalar energy w.r.t. the coordinates; `vectorize` maps
    # the per-configuration gradient over any leading batch dimensions.
    grad_fn = jnp.vectorize(
        jax.grad(self.energy, argnums=0), signature='(n,d),(d)->(n,d)')
    # The force is the negative gradient of the potential energy.
    return -grad_fn(coordinates, box_length)

  def pressure(self,
               coordinates: Array,
               box_length: Optional[Array] = None) -> Array:
    """Computes the excess virial pressure for a batch of coordinates.

    The virial pressure is computed according to the formula
      p_virial = (sum_i f_i . r_i - dim * V ∂U/∂V) / (dim * V)
    where r_i is the position of particle i, f_i is the total force acting on
    particle i, . is the dot-product, V the box volume and dim the number of
    dimensions. We assume that volume change occurs via a homogeneous affine
    expansion or contraction, so that
      dim * V ∂U/∂V = sum_i Li * ∂U/∂(Li)
    where Li is the i-th sidelength of the box. More details can be found in
    Thompson et al. (2009) https://doi.org/10.1063/1.3245303 (eqs. 1 and 13).

    Note that the above formula only contains the "excess" contribution due to
    the potential energy but not the "ideal" contribution due to the kinetic
    energy. The latter can be computed analytically.

    Args:
      coordinates: array with shape [..., num_atoms, dim] containing the
        particle coordinates.
      box_length: array with shape [..., dim] containing the box length. If
        None, the default box length will be used instead.

    Returns:
      an array with shape [...] containing the computed pressure.
    """
    if box_length is None:
      box_length = self.box_length
    _check_same_dim(coordinates, box_length)
    dim = coordinates.shape[-1]
    # Gradient of the energy w.r.t. the box side lengths, batched over any
    # leading dimensions.
    energy_grad = jnp.vectorize(
        jax.grad(self.energy, argnums=1), signature='(n,d),(d)->(d)')
    forces = self.forces(coordinates, box_length)
    # sum_i f_i . r_i term of the virial.
    virial = jnp.sum(coordinates * forces, axis=[-1, -2])
    # Explicit volume dependence: subtract sum_i Li * ∂U/∂(Li).
    virial -= jnp.sum(
        box_length * energy_grad(coordinates, box_length), axis=-1)
    return virial / (dim * jnp.prod(box_length, axis=-1))
class PairwisePotentialEnergy(PotentialEnergy):
  """Energy function based on a pairwise potential between particles.

  This is a base class for any energy function that can be written as a sum
  of pairwise potentials:
    E(x_1,...,x_N) = sum_{i<j} U(|x_i - x_j|^2).
  Distances are computed using periodic boundary conditions. The pairwise
  potential U should be implemented in a subclass.
  """

  def __init__(self,
               box_length: Optional[Array] = None,
               min_distance: float = 0.,
               linearize_below: Optional[float] = None):
    """Constructor.

    Args:
      box_length: array of shape [dim], the side lengths of the box that
        contains the particles. If None, the box length must be passed as an
        argument to the class methods.
      min_distance: the pairwise distance is clipped to this value whenever it
        falls below it, which means that the pairwise potential is constant
        below this value. This can be used for numerical reasons, for example to
        avoid singularities for potentials that diverge at zero distance.
      linearize_below: below this value, the potential is linearized, i.e. it
        becomes a linear function of the pairwise distance. This can be used for
        numerical reasons, for example when the potential grows too fast for
        small distances. If None, no linearization is done. NOTE: with
        linearization, the pairwise force at zero distance is undefined (becomes
        NaN). To have a well-defined force at zero distance, you need to set
        `min_distance > 0`.
    """
    super().__init__(box_length)
    self._min_distance = min_distance
    self._linearize_below = linearize_below

  @abc.abstractmethod
  def _unclipped_pairwise_potential(self, r2: Array) -> Array:
    """Scalar pairwise potential, to be implemented in a subclass.

    Args:
      r2: a scalar (0-dim array), the squared distance between particles.

    Returns:
      a scalar, the pairwise potential at that squared distance.
    """

  def _pairwise_potential(self, r2: Array) -> Array:
    """Numerically stable (clipped and linearized) version of the potential.

    Args:
      r2: a scalar (0-dim array), the squared distance between particles.

    Returns:
      a scalar, the pairwise potential at that squared distance.
    """
    # Clip the squared distance from below so that the potential is constant
    # (and finite) for separations smaller than `min_distance`.
    r2 = jnp.clip(r2, self._min_distance ** 2)
    if self._linearize_below is None:
      return self._unclipped_pairwise_potential(r2)
    else:
      # First-order Taylor expansion of the potential, as a function of the
      # distance r (not r^2), around r = `linearize_below`.
      e0, g0 = jax.value_and_grad(
          lambda r: self._unclipped_pairwise_potential(r**2))(
              self._linearize_below)
      return jnp.where(
          r2 < self._linearize_below**2,
          e0 + (jnp.sqrt(r2) - self._linearize_below) * g0,
          self._unclipped_pairwise_potential(r2))

  def energy(self,
             coordinates: Array,
             box_length: Optional[Array] = None) -> Array:
    """Computes the potential energy.

    Args:
      coordinates: array with shape [..., num_atoms, dim] containing the
        particle coordinates.
      box_length: array with shape [..., dim] containing the box length. If
        None, the default box length will be used instead.

    Returns:
      an array with shape [...] containing the computed energy values.
    """
    if box_length is None:
      box_length = self.box_length
    _check_same_dim(coordinates, box_length)
    r2 = obs_utils.pairwise_squared_distance_pbc(coordinates, box_length)
    # We set the diagonal to a non-zero value to avoid infinities for pairwise
    # potentials that diverge at zero distances.
    r2 += jnp.eye(r2.shape[-1])
    energies = jnp.vectorize(self._pairwise_potential, signature='()->()')(r2)
    # Count each unordered pair once: sum the strict upper triangle, which
    # also excludes the (modified) diagonal.
    return jnp.sum(jnp.triu(energies, k=1), axis=[-2, -1])

  def pairwise_forces(self,
                      coordinates: Array,
                      box_length: Optional[Array] = None) -> Array:
    """Computes the pairwise forces for a batch of coordinates.

    Args:
      coordinates: array with shape [..., num_atoms, dim] containing the
        particle coordinates.
      box_length: array with shape [..., dim] containing the box length. If
        None, the default box length will be used instead.

    Returns:
      an array with shape [..., num_atoms, num_atoms, dim] whose [..., i, j, :]
      entry contains the force exerted on particle i from particle j.
    """
    if box_length is None:
      box_length = self.box_length
    _check_same_dim(coordinates, box_length)
    coordinate_deltas = obs_utils.pairwise_difference_pbc(
        coordinates, box_length)
    r2 = jnp.sum(coordinate_deltas**2, axis=-1)
    # Shift the diagonal so the potential gradient stays finite at zero
    # self-distance; the diagonal deltas are zero, so self-forces vanish.
    r2 += jnp.eye(r2.shape[-1])
    potential_gradient = jnp.vectorize(
        jax.grad(self._pairwise_potential), signature='()->()')
    # Chain rule: f = -dU/dr_vec = -2 * delta * dU/d(r^2).
    # NOTE(review): assumes `pairwise_difference_pbc` returns r_i - r_j in its
    # [..., i, j, :] entry -- confirm against `observable_utils`.
    forces = -2. * coordinate_deltas * potential_gradient(r2)[..., None]
    return forces

  def pairwise_pressure(self,
                        coordinates: Array,
                        box_length: Optional[Array] = None) -> Array:
    """Computes the excess virial pressure for a batch of coordinates.

    This function returns exactly the same result as `pressure` (up to numerical
    differences), but it's implemented in a way that is only valid for pairwise
    potentials. It's computed according to the formula
      p_virial = 1 / (dim * V) sum_i<j f_ij . r_ij,
    where r_ij is the distance vector between two particles, f_ij the force
    acting between this pair, . is the dot-product, V the box volume and dim the
    number of dimensions. More details can be found in Thompson et al. (2009)
    https://doi.org/10.1063/1.3245303 (see eqs. 1 and 7).

    Note that the above formula only contains the "excess" contribution due to
    the potential energy but not the "ideal" contribution due to the kinetic
    energy. The latter can be computed analytically.

    Args:
      coordinates: array with shape [..., num_atoms, dim] containing the
        particle coordinates.
      box_length: array with shape [..., dim] containing the box length. If
        None, the default box length will be used instead.

    Returns:
      an array with shape [...] containing the computed pressure.
    """
    if box_length is None:
      box_length = self.box_length
    _check_same_dim(coordinates, box_length)
    dim = coordinates.shape[-1]
    coordinate_deltas = obs_utils.pairwise_difference_pbc(
        coordinates, box_length)
    forces = self.pairwise_forces(coordinates, box_length)
    # f_ij . r_ij for every ordered pair; each unordered pair is counted once
    # via the strict upper triangle below.
    dot_product = jnp.sum(coordinate_deltas * forces, axis=-1)
    virial = jnp.sum(jnp.triu(dot_product, k=1), axis=[-2, -1])
    pressure = virial / (dim * jnp.prod(box_length, axis=-1))
    return pressure
| flows_for_atomic_solids-main | systems/energies.py |
#!/usr/bin/python
#
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Lennard-Jones system."""
from typing import Optional
import chex
from flows_for_atomic_solids.systems import energies
import jax.numpy as jnp
Array = chex.Array
class LennardJonesEnergy(energies.PairwisePotentialEnergy):
  """Evaluates the Lennard-Jones (LJ) energy with periodic boundary conditions.

  This class implements a "soft" version of the original LJ potential and uses a
  scalar parameter `lambda_lj` in [0, 1] to interpolate between a uniform
  distribution (for `lambda_lj=0`) and the standard LJ potential (for
  `lambda_lj=1`).

  Two LJ particles, separated by a distance `r`, interact with each other via
  a radially symmetric, pairwise potential
  ```
  g(r) = 0.5 * (1 - lambda_lj)**2 + (r/sigma)**6
  u(r) = 4 * lambda_lj * epsilon * [1/g(r)**2 - 1/g(r)]
  ```
  where `epsilon` and `sigma` are two Lennard-Jones parameters defining the
  scales of energy and length.

  For `lambda_lj=1`, the pairwise potential above exhibits a singularity at
  `r=0`, so that the energy diverges whenever any two particles coincide. The
  option `min_distance` can be used to clip the pairwise distance to avoid this
  problem.

  Optionally, the energy can be shifted so that it is zero at and beyond the
  cutoff
  ```
  u_shifted(r) = u(r) - u(cutoff) if r <= cutoff and 0 otherwise.
  ```
  """

  def __init__(self,
               cutoff: float,
               box_length: Optional[Array] = None,
               epsilon: float = 1.,
               sigma: float = 1.,
               min_distance: float = 0.,
               lambda_lj: float = 1.,
               linearize_below: Optional[float] = None,
               shift_energy: bool = False):
    """Initializer.

    Args:
      cutoff: above this value, the pairwise potential is set equal to 0. A
        cutoff is typically employed in simulation for performance reasons, so
        we also support this option here.
      box_length: array of shape [dim], side lengths of the simulation box. If
        None, the box length must be passed as an argument to the class methods.
      epsilon: determines the scale of the potential. See class docstring.
      sigma: determines the scale of the pairwise distance. See class docstring.
      min_distance: the pairwise distance is clipped to this value whenever it
        falls below it, which means that the pairwise potential is constant
        below this value. This is used for numerical reasons, to avoid the
        singularity at zero distance.
      lambda_lj: parameter for soft-core Lennard-Jones. Interpolates between
        a constant pairwise potential (lambda_lj = 0) and proper Lennard-Jones
        potential (lambda_lj = 1). See class docstring.
      linearize_below: below this value, the potential is linearized, i.e. it
        becomes a linear function of the pairwise distance. This can be used for
        numerical reasons, to avoid the potential growing too fast for small
        distances. If None, no linearization is done. NOTE: linearization
        removes the singularity at zero distance, but the pairwise force at
        zero distance is still undefined (becomes NaN). To have a well-defined
        force at zero distance, you need to set `min_distance > 0`.
      shift_energy: whether to shift the energy by a constant such that the
        potential is zero at the cutoff (spherically truncated and shifted LJ).
    """
    super().__init__(box_length, min_distance, linearize_below)
    self._cutoff = cutoff
    self._epsilon = epsilon
    self._sigma = sigma
    self._lambda_lj = lambda_lj
    # Constant subtracted from the potential so that u(cutoff) = 0 when
    # `shift_energy` is True; 0 otherwise.
    self._shift = self._soft_core_lj_potential(cutoff**2) if shift_energy else 0

  def _soft_core_lj_potential(self, r2: Array) -> Array:
    """Soft-core LJ potential (unshifted, no cutoff) at squared distance."""
    # g(r) = (r/sigma)^6 + 0.5*(1 - lambda_lj)^2; the soft-core offset vanishes
    # for lambda_lj = 1, recovering the standard LJ potential.
    r6 = r2**3 / self._sigma**6
    r6 += 0.5 * (1. - self._lambda_lj)**2
    r6inv = 1. / r6
    # u = 4 * lambda_lj * epsilon * (1/g^2 - 1/g), cf. the class docstring.
    energy = r6inv * (r6inv - 1.)
    energy *= 4. * self._lambda_lj * self._epsilon
    return energy

  def _unclipped_pairwise_potential(self, r2: Array) -> Array:
    """Soft-core LJ potential with radial cutoff (and optional shift)."""
    energy = self._soft_core_lj_potential(r2)
    # Apply radial cutoff and shift.
    energy = jnp.where(r2 <= self._cutoff**2, energy - self._shift, 0.)
    return energy
| flows_for_atomic_solids-main | systems/lennard_jones.py |
#!/usr/bin/python
#
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| flows_for_atomic_solids-main | systems/__init__.py |
#!/usr/bin/python
#
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Monatomic (mW) water system."""
import math
from typing import Optional
import chex
from flows_for_atomic_solids.systems import energies
from flows_for_atomic_solids.utils import observable_utils as obs_utils
import jax
import jax.numpy as jnp
Array = chex.Array
MW_A = 7.049556277
MW_B = 0.6022245584
MW_GAMMA = 1.2
MW_EPSILON = 6.189 # Kcal/mol
MW_SIGMA = 2.3925 # Angstrom
MW_REDUCED_CUTOFF = 1.8
MW_COS = math.cos(109.47 / 180. * math.pi)
MW_LAMBDA = 23.15
class _TwoBodyEnergy(energies.PairwisePotentialEnergy):
  """Implements the two-body component of the monatomic-water energy."""

  def _unclipped_pairwise_potential(self, r2: Array) -> Array:
    """Returns the mW two-body energy as a function of squared distances."""
    reduced_r2 = r2 / MW_SIGMA**2
    reduced_r = jnp.sqrt(reduced_r2)
    within_cutoff = jnp.array(reduced_r < MW_REDUCED_CUTOFF)
    # Distances on or above the cutoff can cause NaNs in the gradient of
    # the exponential term below, even though they're masked out in the
    # forward computation. Replace them with a safe value first.
    reduced_r = jnp.where(within_cutoff, reduced_r, 2. * MW_REDUCED_CUTOFF)
    polynomial_term = MW_A * MW_EPSILON * (MW_B / reduced_r2**2 - 1.)
    exponential_term = jnp.where(
        within_cutoff, jnp.exp(1. / (reduced_r - MW_REDUCED_CUTOFF)), 0.)
    return polynomial_term * exponential_term
class MonatomicWaterEnergy(energies.PotentialEnergy):
  """Evaluates the monatomic water energy with periodic boundary conditions.

  The monatomic water model, or mW model, consists of point particles that
  interact with each other via two-body interactions (between pairs of
  particles) and three-body interactions (between triplets of particles).
  The energy is decomposed as follows:
  ```
    energy = sum of all two-body interactions over distinct pairs +
             sum of all three-body interactions over distinct triplets
  ```
  More details on the specific functional form of the individual interaction
  terms can be found in the paper of Molinero and Moore (2009):
  https://arxiv.org/abs/0809.2811.
  """

  def __init__(self,
               box_length: Optional[Array] = None,
               min_distance: float = 0.,
               linearize_below: Optional[float] = None):
    """Constructor.

    Args:
      box_length: array of shape [dim], side lengths of the simulation box. If
        None, the box length must be passed as an argument to the class methods.
      min_distance: we clip the pairwise distance to this value in the
        calculation of the two-body term. This can be used to remove the
        singularity of the two-body term at zero distance.
      linearize_below: we linearize the two-body term below this value. If None,
        no linearization is done.
    """
    super().__init__(box_length)
    # Clipping/linearization only affect the two-body term; the three-body
    # term is computed directly in `_three_body_energy` below.
    self._two_body_energy = _TwoBodyEnergy(
        min_distance=min_distance, linearize_below=linearize_below)

  def _three_body_energy(self, dr: Array) -> Array:
    """Compute three-body term for one sample.

    Args:
      dr: [num_particles, num_particles, 3] array of distance vectors
        between the particles. NOTE(review): `energy` below divides these by
        MW_SIGMA first, so this expects reduced units.

    Returns:
      The three-body energy contribution of the sample (a scalar).
    """
    def _one_particle_contribution(dri: Array) -> Array:
      # dri is (num_particles-1) x 3: distance vectors from one central
      # particle to every other particle.
      raw_norms = jnp.linalg.norm(dri, axis=-1)
      keep = raw_norms < MW_REDUCED_CUTOFF
      # Out-of-cutoff distances are replaced by a huge value so the exponent
      # below underflows to zero instead of overflowing.
      norms = jnp.where(keep, raw_norms, 1e20)
      norm_energy = jnp.exp(MW_GAMMA/(norms - MW_REDUCED_CUTOFF))
      norm_energy = jnp.where(keep, norm_energy, 0.)
      # normprods[j, k] = |r_j| * |r_k|, used to normalize the dot products.
      normprods = norms[None, :] * norms[:, None]
      # Note: the sum below is equivalent to:
      # dotprods = jnp.dot(dri, dri[..., None]).squeeze(-1)
      # but using jnp.dot results in loss of precision on TPU,
      # as evaluated by comparing to MD samples.
      dotprods = jnp.sum(dri[:, None, :] * dri[None, :, :], axis=-1)
      # cos_ijk[j, k]: cosine of the angle between neighbours j and k as seen
      # from the central particle; MW_COS is the tetrahedral reference angle.
      cos_ijk = dotprods / normprods
      energy = MW_LAMBDA * MW_EPSILON * (MW_COS - cos_ijk)**2
      energy *= norm_energy
      # Keep each unordered neighbour pair (j, k) only once (strict upper
      # triangle), then weight by the radial decay of the second neighbour.
      energy = jnp.triu(energy, 1)
      energy = jnp.sum(energy, axis=-1)
      return jnp.dot(energy, norm_energy)

    # Remove diagonal elements [i, i, :], changing the shape from
    # [num_particles, num_particles, 3] to [num_particles, num_particles-1, 3].
    clean_dr = jnp.rollaxis(jnp.triu(jnp.rollaxis(dr, -1), 1)[..., 1:]+
                            jnp.tril(jnp.rollaxis(dr, -1), -1)[..., :-1],
                            0, dr.ndim)
    # Vectorize over particles.
    energy = jnp.sum(jax.vmap(_one_particle_contribution)(clean_dr))
    return energy

  def energy(self,
             coordinates: Array,
             box_length: Optional[Array] = None) -> Array:
    """Computes energies for an entire batch of particles.

    Args:
      coordinates: array with shape [..., num_particles, dim] containing the
        particle coordinates.
      box_length: array with shape [..., dim], side lengths of the simulation
        box. If None, the default box length will be used instead.

    Returns:
      energy: array with shape [...] containing the computed energies.
    """
    if box_length is None:
      box_length = self.box_length
    dr = obs_utils.pairwise_difference_pbc(coordinates, box_length)
    # Work in reduced units for the three-body term.
    dr /= MW_SIGMA
    two_body_energy = self._two_body_energy(coordinates, box_length)
    # Vectorize over samples.
    three_body_energy = jnp.vectorize(self._three_body_energy,
                                      signature='(m,m,n)->()')(dr)
    return two_body_energy + three_body_energy
| flows_for_atomic_solids-main | systems/monatomic_water.py |
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
#!/usr/bin/env python
"""A package to implement the BraVe model.
Broaden Your Views for Self-Supervised Video Learning.
"""
import setuptools
# Note: every dependency is pinned, either exactly (==) or compatibly (~=),
# so that installing this package reproduces a known-good environment.
setuptools.setup(
    name='brave',
    python_requires='>3.7.0',
    version='0.0.1',
    description='Broaden Your Views for Self-Supervised Video Learning',
    author='Ross Hemsley',
    url='https://arxiv.org/abs/2103.16559',
    packages=setuptools.find_packages(),
    install_requires=[
        'absl-py==0.12.0',
        'astunparse~=1.6.3',
        'attrs~=21.2.0',
        'cached-property~=1.5.2',
        'cachetools~=4.2.2',
        'certifi~=2021.5.30',
        'charset-normalizer~=2.0.4',
        'chex==0.0.8',
        'contextlib2~=21.6.0',
        'dill==0.3.4',
        'dm-haiku==0.0.4',
        'dm-tree==0.1.6',
        'ffmpeg-python==0.2.0',
        'flatbuffers~=1.12',
        'future==0.18.2',
        'gast==0.4.0',
        'google-auth~=1.34.0',
        'google-auth-oauthlib==0.4.5',
        'google-pasta==0.2.0',
        'googleapis-common-protos~=1.53.0',
        'grpcio~=1.34.1',
        'h5py~=3.1.0',
        'idna~=3.2',
        'importlib-metadata~=4.6.3',
        'importlib-resources~=5.2.2',
        'iniconfig~=1.1.1',
        'jax==0.2.17',
        'jaxlib==0.1.69',
        'jaxline==0.0.3',
        'joblib~=1.0.1',
        'keras-nightly~=2.5.0.dev2021032900',
        'Keras-Preprocessing~=1.1.2',
        'Markdown~=3.3.4',
        'ml-collections==0.1.0',
        'mock~=4.0.3',
        'numpy~=1.19.5',
        'oauthlib~=3.1.1',
        'opt-einsum~=3.3.0',
        'optax==0.0.9',
        'packaging~=21.0',
        'pluggy==0.13.1',
        'promise~=2.3',
        'protobuf~=3.17.3',
        'py~=1.10.0',
        'pyasn1==0.4.8',
        'pyasn1-modules==0.2.8',
        'pyparsing~=2.4.7',
        'pytest~=6.2.4',
        'PyYAML~=5.4.1',
        'requests~=2.26.0',
        'requests-oauthlib~=1.3.0',
        'rsa~=4.7.2',
        'scikit-learn==0.24.2',
        'scipy~=1.5.4',
        'six~=1.15.0',
        'sklearn==0.0',
        'tabulate==0.8.9',
        'tensorboard~=2.5.0',
        'tensorboard-data-server==0.6.1',
        'tensorboard-plugin-wit~=1.8.0',
        'tensorflow~=2.5.0',
        'tensorflow-datasets~=4.4.0',
        'tensorflow-estimator~=2.5.0',
        'tensorflow-metadata~=1.2.0',
        'termcolor~=1.1.0',
        'threadpoolctl~=2.2.0',
        'toml==0.10.2',
        'toolz==0.11.1',
        'tqdm~=4.62.0',
        'typing-extensions~=3.7.4.3',
        'urllib3~=1.26.6',
        'Werkzeug~=2.0.1',
        'wrapt~=1.12.1',
        'zipp~=3.5.0',
    ])
| brave-main | setup.py |
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Configuration for the Brave experiment."""
import glob
from jaxline import base_config as jaxline_base_config
import ml_collections
from brave.models.brave import config as brave_config
def get_config() -> ml_collections.ConfigDict:
  """Build and lock the jaxline config used to train the Brave model."""
  cfg = jaxline_base_config.get_base_config()

  # Checkpointing and logging cadence.
  cfg.checkpoint_dir = '/tmp/jaxline/brave'
  cfg.train_checkpoint_all_hosts = False
  cfg.training_steps = 300_000
  cfg.log_tensors_interval = 60
  cfg.save_checkpoint_interval = 600
  cfg.eval_specific_checkpoint_dir = ''
  cfg.best_model_eval_metric = 'multiple_of_saving_period'

  # Experiment-specific configuration.
  cfg.experiment_kwargs.experiment_name = 'brave'
  cfg.experiment_kwargs.config = brave_config.get_experiment_config()
  cfg.eval_modes = cfg.experiment_kwargs.config.eval_modes

  # Fill in this to set the training shards for training.
  cfg.experiment_kwargs.config.model.dataset_shards = glob.glob(
      '<path/to/train/shards/*.tfrecord>')

  cfg.lock()
  return cfg
| brave-main | brave/config.py |
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test the experiment."""
import tempfile
from unittest import mock
from absl import flags
from absl.testing import absltest
from absl.testing import parameterized
import chex
from jaxline import train
from brave import config
from brave import experiment
from brave.datasets import fixtures
FLAGS = flags.FLAGS
DEVICE_COUNT = 1
chex.set_n_cpu_devices(DEVICE_COUNT)
class ExperimentTest(parameterized.TestCase):
  """Smoke test: one training step of the Brave experiment on fixture data."""

  def setUp(self):
    super().setUp()
    # Needed to make absl work with pytest.
    FLAGS.mark_as_parsed()

  def test_train(self):
    with chex.fake_pmap(), tempfile.TemporaryDirectory() as fixture_dir:
      shard_paths = fixtures.write_tf_record_dataset_fixture(fixture_dir)
      train.train(
          experiment.Experiment,
          _lightweight_brave_config(shard_paths),
          checkpointer=mock.Mock(),
          writer=mock.Mock())
def _lightweight_brave_config(shards):
  """Return a minimal Brave config that still exercises the training path."""
  cfg = config.get_config()
  cfg.unlock()
  cfg.training_steps = 1

  # Do whatever we can to make running this test as fast as possible.
  # without changing the experiment too much from a real run.
  experiment_cfg = cfg.experiment_kwargs.config
  experiment_cfg.global_batch_size = 2

  model_cfg = experiment_cfg.model
  model_cfg.include_video_in_broad_view = True
  model_cfg.include_audio_in_broad_view = False
  model_cfg.output_dims = 2
  model_cfg.image_size_broad = 2
  model_cfg.num_frames_broad = 2
  model_cfg.image_size_narrow = 2
  model_cfg.num_frames_narrow = 1
  model_cfg.dataset_shards = shards

  cfg.experiment_kwargs.experiment_name = 'brave_test'
  print(cfg)
  cfg.lock()
  return cfg
# Allow running this test module directly, outside of a test runner.
if __name__ == '__main__':
  absltest.main()
| brave-main | brave/experiment_test.py |
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
| brave-main | brave/__init__.py |
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A runnable program to evaluate video embeddings.
Given a model checkpoint, and the location of the shards for a dataset,
computes the performance of the Brave video embeddings. This code
may be used to evaluate both UCF101 and HMDB51, as long as they are both
given in the appropriate input format. The only hyperparameter to this program
is the svm_regularization constant, which can impact the performance of the
linear classification.
"""
import glob
import json
from absl import app
from absl import flags
import chex
import jax
import numpy as np
import tensorflow as tf
from brave.datasets import datasets
from brave.evaluate import evaluate_video_embedding
from brave.models.brave import brave
FLAGS = flags.FLAGS

# Flags defaulting to None are marked as required in the __main__ block below.
flags.DEFINE_string('checkpoint_path', None, 'Checkpoint to evaluate.')
flags.DEFINE_integer('batch_size', None, 'The size of the batches to use.')

# Hyperparameters
flags.DEFINE_float('svm_regularization', None, 'Regularization constant.')

# Datasets
flags.DEFINE_string('train_dataset_shards', None,
                    'Glob pattern for train shards.')
flags.DEFINE_string('test_dataset_shards', None,
                    'Glob pattern for test shards.')

# Transformations to apply to video before running network.
flags.DEFINE_integer('num_video_frames', 32, 'Number of frames in eval videos.')
flags.DEFINE_integer('video_step', 2, 'The step to use in the eval videos.')
flags.DEFINE_integer('image_size', 224, 'The size of the video to evaluate.')
def main(_):
  """Evaluate a Brave checkpoint and print the metrics as JSON on stdout."""
  train_shards = glob.glob(FLAGS.train_dataset_shards)
  test_shards = glob.glob(FLAGS.test_dataset_shards)

  eval_video_config = evaluate_video_embedding.VideoConfig(
      num_frames=FLAGS.num_video_frames,
      image_size=FLAGS.image_size,
      video_step=FLAGS.video_step,
  )

  results = evaluate_video_embedding.evaluate_video_embedding(
      train_dataset_shards=train_shards,
      test_dataset_shards=test_shards,
      embedding_fn=_video_embedding(FLAGS.checkpoint_path),
      config=eval_video_config,
      svm_regularization=FLAGS.svm_regularization,
      batch_size=FLAGS.batch_size)

  # Write the results to stdout in a way that can be used as input to other
  # programs.
  print(
      json.dumps({
          'top_1_train': results.train.top_one_accuracy,
          'top_5_train': results.train.top_five_accuracy,
          'top_1_test': results.test.top_one_accuracy,
          'top_5_test': results.test.top_five_accuracy,
      }))
def _video_embedding(checkpoint_path: str):
  """Build a synchronous narrow-view embedding function from a checkpoint.

  Args:
    checkpoint_path: Path to a numpy checkpoint holding the trained params,
      network state and the model config used to rebuild the network.

  Returns:
    A function mapping a `datasets.View` to its embedding array. The returned
    function blocks until the embedding has been fetched back to the host.
  """
  checkpoint = np.load(checkpoint_path, allow_pickle=True).item()
  params, state = checkpoint['params'], checkpoint['state']
  model = brave.get_model(brave.BraveConfig(**checkpoint['config']))

  @jax.jit
  def _embed(view: datasets.View) -> chex.Array:
    forward = model.forward_fns['narrow_video']
    embedding, _ = forward(params, state, None, view, False)
    return embedding

  def _embed_and_wait(view: datasets.View) -> chex.Array:
    # jax.jit executes lazily; device_get forces the computation to finish
    # and moves the result to the host before returning.
    return jax.device_get(_embed(view))

  return _embed_and_wait
if __name__ == '__main__':
  # NOTE(review): presumably this keeps the GPU free for JAX, which runs the
  # model — TF is only needed for input pipelines here. Confirm if changed.
  try:
    tf.config.set_visible_devices([], 'GPU')  # Prevent TF from using the GPU.
  except tf.errors.NotFoundError:
    pass

  flags.mark_flag_as_required('checkpoint_path')
  flags.mark_flag_as_required('batch_size')
  flags.mark_flag_as_required('train_dataset_shards')
  flags.mark_flag_as_required('test_dataset_shards')
  flags.mark_flag_as_required('svm_regularization')
  app.run(main)
| brave-main | brave/evaluate_video_embeddings.py |
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A jaxline experiment to train the Brave model."""
import functools
from typing import Sequence
from absl import app
from absl import flags
from absl import logging
import chex
import jax
from jaxline import experiment
from jaxline import platform
from jaxline import utils
import ml_collections
import tensorflow as tf
import tensorflow_datasets as tfds
from brave.models.brave import brave
from brave.training import optimizers
from brave.training import trainer
FLAGS = flags.FLAGS
@chex.dataclass
class ExperimentConfig:
  """The config class for the experiment.

  Attributes:
    model_name: The name of the model to initialize and train.
    global_batch_size: The size of the batches to take from the train dataset.
      This will be split amongst all of the devices.
    model: Model configuration; its fields are passed as keyword arguments to
      `brave.BraveConfig` when the experiment constructs the model.
    optimizer: The configuration to use for the optimizer.
    eval_modes: For each value in this sequence, a new evaluation process will
      be started, with the given mode. This allows running multiple parallel
      evaluation processes.
  """
  model_name: str
  global_batch_size: int
  model: ml_collections.ConfigDict
  optimizer: ml_collections.ConfigDict
  eval_modes: Sequence[str]
class Experiment(experiment.AbstractExperiment):
  """Experiment to train Brave."""

  # Maps attribute names on this object to the keys used for them when
  # writing/restoring jaxline checkpoints.
  CHECKPOINT_ATTRS = {
      '_params': 'params',
      '_state': 'state',
      '_opt_state': 'opt_state',
  }

  def __init__(self, mode: str, init_rng: chex.Array,
               config: ml_collections.ConfigDict, experiment_name: str):
    """Initialize the experiment.

    Args:
      mode: Either 'train' or one of the configured eval modes.
      init_rng: PRNG key used to initialize model parameters.
      config: Dict matching the fields of `ExperimentConfig`.
      experiment_name: Unused here; accepted for the jaxline interface.
    """
    super().__init__(mode=mode, init_rng=init_rng)
    self._mode = mode
    self._init_rng = init_rng
    self._config = ExperimentConfig(**config)
    self._model = brave.get_model(brave.BraveConfig(**config.model))
    # Populated by `_init_train` (train mode) or restored from a checkpoint.
    self._opt_state = None
    self._params = None
    self._state = None
    logging.info('Running experiment in mode %s.', mode)
    if mode == 'train':
      self._init_train_dataset()
      self._init_train()

  #  _
  # | |_ _ __ __ _(_)_ __
  # | __| '__/ _` | | '_ \
  # | |_| | | (_| | | | | |
  #  \__|_|  \__,_|_|_| |_|
  #

  def _init_train(self):
    """Prepare the model for training."""
    # Note: We store a copy of the learning rate schedule for logging only.
    optimizer_config = optimizers.OptimizerConfig(**self._config.optimizer)
    optimizer, self._lr_schedule = optimizers.get_optimizer(optimizer_config)
    # Parameters and optimizer state are replicated across local devices.
    init_fn = jax.pmap(self._model.init_fn, axis_name='i')
    optimizer_init_fn = jax.pmap(optimizer.init, axis_name='i')
    broadcasted_key = utils.bcast_local_devices(self._init_rng)
    self._params, self._state = init_fn(broadcasted_key)
    self._opt_state = optimizer_init_fn(self._params)
    # donate_argnums lets XLA reuse the buffers of the previous
    # batch/params/state/opt_state when computing the update.
    self._update_fn = jax.pmap(
        trainer.build_update_fn(optimizer, self._model.loss_fn),
        axis_name='i',
        donate_argnums=(1, 2, 3, 4))

  def _init_train_dataset(self):
    """Build the (host-local) training data iterator."""
    # Leading batch dimensions: [local_device_count, per_device_batch].
    batch_dims = trainer.get_batch_dims(self._config.global_batch_size,
                                        jax.device_count(),
                                        jax.local_device_count())
    logging.info('This host batch dimensions: %s.', batch_dims)
    ds = self._model.train_dataset_builder_fn()
    for batch_size in reversed(batch_dims):
      ds = ds.batch(batch_size, drop_remainder=True)
    ds = ds.repeat()
    ds = ds.prefetch(tf.data.AUTOTUNE)
    ds = tfds.as_numpy(ds)
    # Prefetch on a background thread to overlap input with computation.
    self._train_data_iter = utils.py_prefetch(lambda: iter(ds))

  def step(self, *, rng, writer, global_step):
    """Perform one step of the optimization."""
    del writer
    batch = next(self._train_data_iter)
    logging.log_every_n(logging.INFO, 'Batch shape: %s', 10, batch)
    updates = self._update_fn(rng, batch, self._params, self._state,
                              self._opt_state)
    self._params = updates.params
    self._state = updates.state
    self._opt_state = updates.opt_state
    scalars = updates.scalars
    # The schedule value is logged alongside the loss scalars.
    scalars['learning_rate'] = self._lr_schedule(global_step)
    return utils.get_first(scalars)

  #                  _
  #   _____   ____ _| |
  #  / _ \ \ / / _` | |
  # |  __/\ V / (_| | |
  #  \___| \_/ \__,_|_|
  #

  def evaluate(self, *, rng, writer, global_step):
    """Run the model's evaluation function for the current eval mode."""
    # Parameters are replicated across devices; evaluation uses one copy.
    params = utils.get_first(self._params)
    state = utils.get_first(self._state)
    return self._model.evaluate_fn(global_step, self._mode, params, state)
if __name__ == '__main__':
  flags.mark_flag_as_required('config')
  # NOTE(review): presumably this keeps the GPU free for JAX; TF is only used
  # for the input pipeline in this experiment.
  try:
    tf.config.set_visible_devices([], 'GPU')  # Prevent TF from using the GPU.
  except tf.errors.NotFoundError:
    pass
  app.run(functools.partial(platform.main, Experiment))
| brave-main | brave/experiment.py |
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Fetch and write HMDB51.
This binary can be run with
python -m brave.download_hmdb --output_dir <desired/output/directory>
This will write a sharded tfrecords version of HMDB to the destination,
with each of the test and train splits separated out.
This binary requires that the ffmpeg and rar commands be callable as
subprocesses.
Note that a local temporary working directory is used to fetch and decompress
files at /tmp/hdmb. This can be set using the `work_dir` flag.
"""
import contextlib
import enum
import glob
import hashlib
import os
import re
import shutil
import subprocess
import tempfile
from typing import Dict, List, NamedTuple, Sequence
from absl import app
from absl import flags
from absl import logging
import ffmpeg
import requests
import tensorflow as tf
import tqdm
DEFAULT_UNRAR_COMMAND = 'unrar'
# Upstream archives hosting the official HMDB51 splits and videos.
DEFAULT_HMDB_SPLITS_URL = 'https://serre-lab.clps.brown.edu/wp-content/uploads/2013/10/test_train_splits.rar'
DEFAULT_HMDB_DATA_URL = 'https://serre-lab.clps.brown.edu/wp-content/uploads/2013/10/hmdb51_org.rar'
DEFAULT_WORK_DIR = os.path.join(tempfile.gettempdir(), 'hmdb')

# ffmpeg transcoding settings used when converting videos to jpeg frames.
DEFAULT_VIDEO_QUALITY = 1
DEFAULT_VIDEO_FRAMERATE = 25
DEFAULT_VIDEO_MIN_RESIZE = 256
DEFAULT_MAX_SAMPLE_LENGTH_SECONDS = 20.0

# Expected checksums of the downloaded archives; a mismatch only produces a
# warning (see the fetch functions below).
MD5_OF_SPLITS_FILE = '15e67781e70dcfbdce2d7dbb9b3344b5'
MD5_OF_VIDEOS_FILE = '517d6f1f19f215c45cdd4d25356be1fb'

FLAGS = flags.FLAGS

flags.DEFINE_string(
    'work_dir', DEFAULT_WORK_DIR,
    'Temporary working directory for downloading and processing files')
flags.DEFINE_string('output_dir', None,
                    'Where to write the dataset shards at the end')
flags.DEFINE_string('unrar_command', DEFAULT_UNRAR_COMMAND,
                    'Path to call unrar')
flags.DEFINE_integer('num_shards', 10, 'Number of shards to write')
class Split(enum.Enum):
  """Test/train membership of a video within one of the three HMDB splits.

  The numeric values match the index column of the official split files
  (see `_get_all_hmdb_videos`).
  """
  UNUSED = 0
  TRAIN = 1
  TEST = 2
class HMDBVideo(NamedTuple):
  """Metadata for a single HMDB51 video clip."""
  hmdb_split_index: int  # Which of the three official splits (1, 2 or 3).
  split: Split  # Whether the clip is train/test/unused within that split.
  full_video_path: str  # Path to the video file on disk.
  action_name: str  # Action class, derived from the split file's name.
def main(argv: Sequence[str]) -> None:
  """Download HMDB51 and write it out as sharded tfrecords.

  Args:
    argv: Remaining command line arguments; no positional args are accepted.

  Raises:
    SystemExit: if the output directory is not empty.
  """
  if len(argv) > 1:
    raise app.UsageError('Too many command-line arguments')

  output_dir = FLAGS.output_dir
  os.makedirs(output_dir, exist_ok=True)
  if os.listdir(output_dir):
    logging.error('Output directory `%s` must be empty', output_dir)
    # `exit` is a site-module convenience that may not exist in all
    # environments; raising SystemExit is the portable equivalent.
    raise SystemExit(1)

  work_dir = FLAGS.work_dir
  video_dir = os.path.join(work_dir, 'videos')
  os.makedirs(work_dir, exist_ok=True)
  os.makedirs(video_dir, exist_ok=True)
  logging.info('Using `%s` as a temporary work_dir', work_dir)

  _fetch_dataset_splits(work_dir)
  _fetch_and_decompress_videos(video_dir)

  videos = _get_all_hmdb_videos(work_dir, video_dir)
  _write_dataset(videos, output_dir, num_shards=FLAGS.num_shards)
  logging.info('Finished writing tfrecords dataset to %s', output_dir)
def _write_dataset(videos: Sequence[HMDBVideo], output_dir: str,
num_shards: int) -> None:
"""Write a tfrecords dataset for the given videos.
Args:
videos: The sequence of all videos in the dataset to write.
output_dir: The destination where the dataset will be written.
num_shards: The number of independent shards to write the dataset to.
"""
for split_index in [1, 2, 3]:
split_index_name = f'split_{split_index}'
for split in [Split.TEST, Split.TRAIN]:
split_name = 'test' if split == split.TEST else 'train'
current_dir = os.path.join(output_dir, split_index_name, split_name)
os.makedirs(current_dir, exist_ok=True)
videos_to_write = [
video for video in videos
if video.hmdb_split_index == split_index and video.split == split
]
logging.info('Writing %d videos to %d shards at %s', len(videos_to_write),
num_shards, current_dir)
_write_shards(videos_to_write, current_dir, num_shards)
def _fetch_and_decompress_videos(video_dir: str) -> None:
  """Download and extract rarfiles containing videos.

  At the end of this command, all videos will be stored in the given
  video_dir. This function also leaves the original .rar files in place.

  Args:
    video_dir: The directory in the workspace where the video should be
      downloaded and extracted.
  """
  videos_rar_path = os.path.join(video_dir, 'hmdb51_org.rar')
  _download_data(DEFAULT_HMDB_DATA_URL, videos_rar_path)

  videos_md5 = md5(videos_rar_path)
  logging.info('MD5 of videos rar: %s', videos_md5)
  if videos_md5 != MD5_OF_VIDEOS_FILE:
    # Bug fix: previously logged MD5_OF_SPLITS_FILE here, which reported the
    # wrong expected checksum for the videos archive.
    logging.warning(
        'MD5 did not match expected value (%s)'
        ' - this may cause this script to fail', MD5_OF_VIDEOS_FILE)

  logging.info('Extracting rarfile `%s` in `%s`', videos_rar_path, video_dir)
  _extract_rar(videos_rar_path, video_dir)

  logging.info('Extracting video rar files')
  for path in tqdm.tqdm(os.listdir(video_dir)):
    # Only extract rar archives: skipping other files makes re-running this
    # script safe after a previous (partial) extraction left videos behind.
    if path == 'hmdb51_org.rar' or not path.endswith('.rar'):
      continue
    _extract_rar(path, video_dir)
def _fetch_dataset_splits(work_dir: str) -> None:
  """Download and extract the rar archive describing the dataset splits.

  Args:
    work_dir: The location where the temporary files may be downloaded and
      decompressed.
  """
  splits_rar_path = os.path.join(work_dir, 'test_train_splits.rar')
  _download_data(DEFAULT_HMDB_SPLITS_URL, splits_rar_path)

  checksum = md5(splits_rar_path)
  logging.info('MD5 of splits file: %s', checksum)
  if checksum != MD5_OF_SPLITS_FILE:
    # A mismatch is not fatal, but the extraction below may then fail.
    logging.warning(
        'MD5 did not match expected value (%s)'
        ' - this may cause this script to fail', MD5_OF_SPLITS_FILE)

  logging.info('Extracting rarfile `%s` in `%s`', splits_rar_path, work_dir)
  _extract_rar(splits_rar_path, work_dir)
def _write_shards(hmdb_videos: Sequence[HMDBVideo], output_dir: str,
                  num_shards: int) -> None:
  """Write tfrecord shards to the output dir.

  Videos are assigned to shards round-robin; labels are the index of the
  action name in the sorted set of all action names.

  Args:
    hmdb_videos: The videos to write to the output directory.
    output_dir: The location where the shards will be written.
    num_shards: The number of shards to write.
  """
  shard_paths = [
      os.path.join(output_dir, f'hmdb51-{shard:05d}-of-{num_shards:05d}')
      for shard in range(num_shards)
  ]

  label_names = sorted({video.action_name for video in hmdb_videos})
  action_lookup = {name: index for index, name in enumerate(label_names)}

  with contextlib.ExitStack() as stack:
    writers = [
        stack.enter_context(tf.io.TFRecordWriter(path)) for path in shard_paths
    ]
    for index, video in enumerate(tqdm.tqdm(hmdb_videos)):
      example = _create_sequence_example(video, action_lookup)
      writers[index % num_shards].write(example.SerializeToString())
def _download_data(url: str, destination: str) -> None:
  """Fetch data from a url to the given destination.

  If the destination file is found to exist, no download will take place.
  The download is written to a temporary file which is atomically renamed to
  `destination` once complete, so an interrupted download no longer leaves a
  partial file that would make later runs skip the fetch.

  Args:
    url: The resource to fetch.
    destination: the full path where the output should be written.

  Raises:
    requests.HTTPError: if the server responds with an error status.
  """
  if os.path.exists(destination):
    logging.info('Found data at `%s`, skipping download.', destination)
    return

  logging.info('Downloading from `%s` to `%s`', url, destination)
  tmp_destination = destination + '.tmp'
  with requests.get(url, stream=True) as r, open(tmp_destination, 'wb') as w:
    # Fail loudly rather than silently writing an HTML error page to disk.
    r.raise_for_status()
    shutil.copyfileobj(r.raw, w)
  # Atomic on POSIX: the destination only appears once fully written.
  os.replace(tmp_destination, destination)
def _extract_rar(filename: str, work_dir: str) -> None:
  """Extract `filename` into `work_dir` with the configured unrar binary."""
  _check_unrar_found()
  command = [FLAGS.unrar_command, 'e', '-y', '-idq', filename]
  subprocess.call(command, cwd=work_dir)
def _check_unrar_found() -> None:
  """Check that the unrar binary can be invoked.

  Raises:
    RuntimeError: if calling the configured unrar command fails.
  """
  try:
    subprocess.call([FLAGS.unrar_command, '-idq'])
  except OSError as e:
    # Narrowed from a bare `except:` (which also swallowed KeyboardInterrupt)
    # and chained so the original failure stays visible. Also fixes the
    # "downlaoded" typo in the message.
    raise RuntimeError(
        f'Failed to call unrar using command `{FLAGS.unrar_command}`. '
        'Unrar can be downloaded at https://www.rarlab.com/download.htm.'
    ) from e
def _get_all_hmdb_videos(work_dir: str, video_dir: str) -> List[HMDBVideo]:
  """Extract splits data.

  Args:
    work_dir: The location containing the split txt files.
    video_dir: The location where the video data can be found.

  Returns:
    A list of HMDBVideo dataclasses containing information about each example
    in the dataset.

  Raises:
    ValueError: if a split file name or a line within one cannot be parsed.
  """
  result = []
  for path in glob.glob(os.path.join(work_dir, '*txt')):
    # Match on the basename: matching the full path made the action name
    # absorb the work_dir prefix. (Label indices were unaffected since the
    # prefix was common to all names, but the stored action_name was wrong.)
    match = re.search(r'^(.+)_test_split(\d)\.txt$', os.path.basename(path))
    if not match:
      raise ValueError(f'Failed to parse path name: `{path}`')
    action_name = match.group(1)
    split_index = int(match.group(2))

    with open(path, 'r') as f:
      for line in f:
        line = line.strip()
        try:
          video_path, test_train_index = line.split(' ')
        except ValueError as e:
          # Narrowed from a bare `except:` and chained for debuggability.
          raise ValueError(f'Failed to parse line `{line}`') from e

        if test_train_index == '0':
          split = Split.UNUSED
        elif test_train_index == '1':
          split = Split.TRAIN
        elif test_train_index == '2':
          split = Split.TEST
        else:
          raise ValueError(f'Unknown split `{test_train_index}`')

        result.append(
            HMDBVideo(
                hmdb_split_index=split_index,
                split=split,
                action_name=action_name,
                full_video_path=os.path.join(video_dir, video_path)))
  return result
def _create_sequence_example(
    video: HMDBVideo,
    label_to_label_index: Dict[str, int]) -> tf.train.SequenceExample:
  """Create a tf example using the conventions used by DMVR."""
  frames = _extract_jpeg_frames(video.full_video_path)

  # One bytes feature per encoded jpeg frame, stored under 'image/encoded'.
  jpeg_features = [
      tf.train.Feature(bytes_list=tf.train.BytesList(value=[frame]))
      for frame in frames
  ]
  feature_lists = tf.train.FeatureLists(
      feature_list={
          'image/encoded': tf.train.FeatureList(feature=jpeg_features),
      })

  # The integer class label lives in the example's context.
  label_index = label_to_label_index[video.action_name]
  label_feature = tf.train.Feature(
      int64_list=tf.train.Int64List(value=[label_index]))
  context = tf.train.Features(
      feature={'clip/label/index': label_feature})

  return tf.train.SequenceExample(
      context=context, feature_lists=feature_lists)
def _extract_jpeg_frames(
    video_path: str,
    *,
    max_length: float = DEFAULT_MAX_SAMPLE_LENGTH_SECONDS,
    fps: int = DEFAULT_VIDEO_FRAMERATE,
    min_resize: int = DEFAULT_VIDEO_MIN_RESIZE) -> List[bytes]:
  """Extract list of encoded jpegs from video_path using ffmpeg.

  Args:
    video_path: Path of the video to decode.
    max_length: Trim the video to at most this many seconds from the start.
    fps: Resample the video to this frame rate.
    min_resize: Scale the frames so the smallest side equals this value.

  Returns:
    A list of jpeg-encoded frames, in presentation order.
  """
  # Two-byte jpeg SOI marker; each image emitted by ffmpeg's image2pipe
  # starts with it, so it is used below to split the byte stream into frames.
  jpeg_header = b'\xff\xd8'

  new_width = '(iw/min(iw,ih))*{}'.format(min_resize)

  # Note: the qscale parameter here is important for achieving good performance.
  cmd = (
      ffmpeg.input(video_path).trim(start=0, end=max_length).filter(
          'fps', fps=fps).filter('scale', new_width, -1).output(
              'pipe:',
              format='image2pipe',
              **{
                  'qscale:v': DEFAULT_VIDEO_QUALITY,
                  'qmin': DEFAULT_VIDEO_QUALITY,
                  'qmax': DEFAULT_VIDEO_QUALITY
              }))

  jpeg_bytes, _ = cmd.run(capture_stdout=True, quiet=True)

  # `split` drops the marker, so skip the (empty) chunk before the first
  # marker and re-attach the marker to each frame.
  jpeg_bytes = jpeg_bytes.split(jpeg_header)[1:]
  jpeg_bytes = map(lambda x: jpeg_header + x, jpeg_bytes)

  return list(jpeg_bytes)
def _set_context_int(key: str, value: int,
                     sequence: tf.train.SequenceExample) -> None:
  """Overwrite context feature `key` of `sequence` with one int64 `value`."""
  feature = sequence.context.feature[key]
  feature.int64_list.value[:] = (value,)
def _set_context_bytes(key: str, value: bytes,
                       sequence: tf.train.SequenceExample):
  """Overwrite context feature `key` of `sequence` with one bytes `value`."""
  feature = sequence.context.feature[key]
  feature.bytes_list.value[:] = (value,)
def _add_bytes_list(key: str, values: Sequence[bytes],
                    sequence: tf.train.SequenceExample) -> None:
  """Append one step holding `values` to the feature list `key`."""
  feature_list = sequence.feature_lists.feature_list[key]
  step = feature_list.feature.add()
  step.bytes_list.value[:] = values
def md5(path: str) -> str:
  """Compute an MD5 hash of the file at a given path.

  The file is streamed in 4 KiB chunks so arbitrarily large files can be
  hashed without loading them fully into memory.

  Args:
    path: Path of the file to hash.

  Returns:
    The hex digest of the file contents.
  """
  digest = hashlib.md5()
  with open(path, 'rb') as f:
    while True:
      chunk = f.read(4096)
      if not chunk:
        break
      digest.update(chunk)
  return digest.hexdigest()
if __name__ == '__main__':
  # There is no sensible default output location, so require it explicitly.
  flags.mark_flag_as_required('output_dir')
  app.run(main)
| brave-main | brave/download_hmdb.py |
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for trainer."""
from absl.testing import absltest
from absl.testing import parameterized
import chex
import haiku as hk
import jax
import jax.numpy as jnp
import numpy as np
import optax
from brave.datasets import datasets
from brave.training import trainer
# Expose exactly DEVICE_COUNT CPU devices to jax so the pmapped code below
# runs the same way everywhere; the test asserts this took effect.
DEVICE_COUNT = 1
chex.set_n_cpu_devices(DEVICE_COUNT)
class TrainerTest(parameterized.TestCase):
  """Smoke tests for `trainer.build_update_fn` under `jax.pmap`."""

  def test_train_step(self):
    self.assertEqual(DEVICE_COUNT, jax.device_count())
    rng = jax.random.PRNGKey(0)

    def forward(x):
      return hk.nets.MLP([10, 10, 1])(x)

    def loss(batch):
      # Something that vaguely looks like a trainable embedding, but isn't.
      return jnp.mean(forward(batch.views['default'].video))

    transformed_loss_fn = hk.transform_with_state(loss)

    # Leading axis is the device axis consumed by pmap.
    fake_batch = datasets.MiniBatch(
        views={
            'default':
                datasets.View(
                    video=jnp.zeros([DEVICE_COUNT, 8, 32, 32, 3]),
                    audio=None,
                    labels=None)
        })

    # One PRNG key per device.
    keys = jnp.broadcast_to(rng, (DEVICE_COUNT,) + rng.shape)
    params, state = jax.pmap(transformed_loss_fn.init)(keys, fake_batch)
    optimizer = optax.sgd(1e-4)
    opt_state = jax.pmap(optimizer.init)(params)

    # Check that the parameters are initialized to the same value.
    # NOTE(review): with DEVICE_COUNT == 1 this check is vacuous — confirm it
    # still holds if the test is ever run with more devices.
    jax.tree_map(lambda x: np.testing.assert_allclose(x[0], x[1]), params)

    def loss_fn(params, state, rng, batch):
      loss, state = transformed_loss_fn.apply(params, state, rng, batch)
      scalars = {'loss': loss}
      return loss, (state, scalars)

    update_fn = jax.pmap(
        trainer.build_update_fn(optimizer, loss_fn), axis_name='i')

    # A smoke test to ensure that the updates are successfully computed.
    result = update_fn(keys, fake_batch, params, state, opt_state)

    # Check the parameters agree across the devices.
    jax.tree_map(lambda x: np.testing.assert_allclose(x[0], x[1]),
                 result.params)
    self.assertEqual((DEVICE_COUNT,), result.scalars['loss'].shape)

    # Due to pmean, all of the scalars should have the same value.
    loss_scalars = result.scalars['loss']
    self.assertTrue(np.all(loss_scalars == loss_scalars[0]))
if __name__ == '__main__':
  # Run the tests via absl's test runner.
  absltest.main()
| brave-main | brave/training/trainer_test.py |
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test the optimizer."""
from absl.testing import absltest
from absl.testing import parameterized
import haiku as hk
import jax
import jax.numpy as jnp
import numpy as np
from brave.training import optimizers
class OptimizersTest(parameterized.TestCase):
  """Tests for the per-module learning-rate rescaling transformation."""

  def test_scale_by_module_name(self):

    def forward(x):
      return hk.Sequential(
          [hk.Linear(10, name='linear_1'),
           hk.Linear(10, name='linear_2')])(
               x)

    forward_fn = hk.without_apply_rng(hk.transform(forward))
    params = forward_fn.init(jax.random.PRNGKey(0), jnp.zeros([1]))
    # Scale updates of any parameter whose path mentions `linear_1` by 10x.
    scaler = optimizers._scale_by_module_name([(r'^.*linear_1.*$', 10.0)])
    state = scaler.init(None)
    scaled_params, _ = scaler.update(params, state)
    # `linear_1` weights are rescaled; `linear_2` weights are untouched.
    np.testing.assert_allclose(10 * params['linear_1']['w'],
                               scaled_params['linear_1']['w'])
    np.testing.assert_allclose(params['linear_2']['w'],
                               scaled_params['linear_2']['w'])
if __name__ == '__main__':
  # Run the tests via absl's test runner.
  absltest.main()
| brave-main | brave/training/optimizers_test.py |
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
| brave-main | brave/training/__init__.py |
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Optimizers for the trainer."""
import re
from typing import Any, Callable, Dict, Sequence, Tuple
import chex
import jax.numpy as jnp
import optax
import tree
@chex.dataclass
class OptimizerConfig:
  """Shared config for optimizers.

  Attributes:
    optimizer_name: The name of the optimizer (for example `lars`, `adam`).
    scheduler_name: The name of the scheduler (for example `cosine_decay`).
    scheduler_kwargs: Kwargs to pass to the scheduler function.
    optimizer_kwargs: Kwargs to pass to the optimizer function.
    weight_decay: The constant to use for weight decay.
    scale_learning_rate_by_regex: A sequence of regular expressions to match
      module parameter paths, along with the rescaling to apply. This allows us
      to tune the learning rate for individual parameter blocks. For example
      `[(r'^.*predictor.*$', 10.0)]` would match all modules containing
      the name 'predictor' and rescale the learning rate by a factor of 10.0.
  """
  optimizer_name: str
  optimizer_kwargs: Dict[str, Any]
  scheduler_name: str
  scheduler_kwargs: Dict[str, Any]
  weight_decay: float
  scale_learning_rate_by_regex: Sequence[Tuple[str, float]]
def exclude_bias_and_normalizers(params):
  """Mask selecting bias and normalization parameters.

  Returns a pytree of booleans matching `params` that is True for biases
  (leaf key 'b') and for parameters whose parent module name contains 'norm'.
  Used to exclude those parameters from weight decay and LARS adaptation.
  """

  def predicate(path: Tuple[Any], value: jnp.ndarray) -> jnp.ndarray:
    del value
    # path[-1] is the parameter name, path[-2] the enclosing module name.
    return path[-1] == 'b' or 'norm' in path[-2]

  return tree.map_structure_with_path(predicate, params)
def get_optimizer(
    config: OptimizerConfig,
    *,
    weight_decay_mask: Callable[[optax.Params],
                                optax.Params] = exclude_bias_and_normalizers,
    trust_ratio_mask: Callable[[optax.Params],
                               optax.Params] = exclude_bias_and_normalizers
) -> Tuple[optax.GradientTransformation, optax.Schedule]:
  """Returns the optimizer.

  Builds the gradient transformation for the model: a per-module learning
  rate rescaling chained with the optimizer named in the config.

  Args:
    config: parameters for the optimizer and the scheduler.
    weight_decay_mask: A mask used to remove parameter blocks from weight decay.
    trust_ratio_mask: A mask used to remove parameter blocks from LARS trust
      ratio update.

  Returns:
    A tuple of the corresponding `GradientTransformation` and the learning
    rate schedule it uses.

  Raises:
    ValueError: If the optimizer name is not recognized.
  """
  learning_rate_schedule = _get_learning_rate_schedule(config)

  if config.optimizer_name == 'adam':
    base_optimizer = optax.adamw(
        learning_rate=learning_rate_schedule,
        weight_decay=config.weight_decay,
        mask=weight_decay_mask,
        **config.optimizer_kwargs)
  elif config.optimizer_name == 'lars':
    base_optimizer = optax.lars(
        learning_rate=learning_rate_schedule,
        weight_decay=config.weight_decay,
        weight_decay_mask=weight_decay_mask,
        trust_ratio_mask=trust_ratio_mask,
        **config.optimizer_kwargs)
  else:
    raise ValueError(f'Unknown optimizer: {config.optimizer_name}.')

  optimizer = optax.chain(
      _scale_by_module_name(config.scale_learning_rate_by_regex),
      base_optimizer)
  return optimizer, learning_rate_schedule
def _get_learning_rate_schedule(config: OptimizerConfig):
  """Build the learning-rate schedule named by `config.scheduler_name`."""
  schedule_builders = {
      'cosine_decay': optax.warmup_cosine_decay_schedule,
      'piecewise_constant': optax.piecewise_constant_schedule,
  }
  if config.scheduler_name not in schedule_builders:
    raise ValueError(f'Unknown scheduler: {config.scheduler_name}.')
  return schedule_builders[config.scheduler_name](**config.scheduler_kwargs)
class _ScaleByModuleNameState(optax.OptState):
  """Empty state: the module-name rescaling transformation is stateless."""
  ...
def _scale_by_module_name(
    module_regex_and_scale: Sequence[Tuple[str, float]]
) -> optax.GradientTransformation:
  """A transformation that rescales the updates of matching layers only.

  Args:
    module_regex_and_scale: Pairs of (regex pattern, scale). An update whose
      parameter path (path components joined by '/') matches a pattern is
      multiplied by the corresponding scale; multiple matches compound.

  Returns:
    An (init_fn, update_fn) tuple.
  """
  compiled_matchers = [
      (re.compile(pattern), scale) for pattern, scale in module_regex_and_scale
  ]

  def _rescale(path: Tuple[Any], update: jnp.ndarray) -> jnp.ndarray:
    joined_path = '/'.join(path)
    factor = 1.0
    for pattern, scale in compiled_matchers:
      if pattern.match(joined_path):
        factor *= scale
    return update * factor

  def init_fn(_):
    return _ScaleByModuleNameState()

  def update_fn(updates, state, params=None):
    del params
    return tree.map_structure_with_path(_rescale, updates), state

  return optax.GradientTransformation(init_fn, update_fn)
| brave-main | brave/training/optimizers.py |
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The functions for computing gradient updates."""
from typing import Callable, NamedTuple, Sequence
import chex
import haiku as hk
import jax
import optax
from brave.datasets import datasets
from brave.models import embedding_model
class ModelUpdates(NamedTuple):
  """The result of one optimization step.

  Attributes:
    params: The updated model parameters.
    state: The updated (non-trainable) model state.
    opt_state: The updated optimizer state.
    scalars: Scalar diagnostics from the loss function, averaged over devices.
  """
  params: hk.Params
  state: hk.State
  opt_state: optax.OptState
  scalars: embedding_model.Scalars


# Signature of the per-device update step:
# (rng, minibatch, params, state, opt_state) -> ModelUpdates.
UpdateFn = Callable[
    [chex.PRNGKey, datasets.MiniBatch, hk.Params, hk.State, optax.OptState],
    ModelUpdates]
def build_update_fn(optimizer: optax.GradientTransformation,
                    loss_fn: embedding_model.LossFn) -> UpdateFn:
  """Returns a function for computing model updates.

  Args:
    optimizer: The optimizer to use e.g. the result of optax.sgd(...).
    loss_fn: An instance of the loss function, pmapped across all devices.

  Returns:
    A callable that takes one step in the optimization problem using the
    gradients of the loss. It must run under `jax.pmap` with `axis_name='i'`,
    since gradients and scalars are averaged over that axis with `pmean`.
  """
  compute_grads = jax.grad(loss_fn, has_aux=True)

  def update_fn(rng: chex.PRNGKey, minibatch: datasets.MiniBatch,
                params: hk.Params, state: hk.State,
                opt_state: optax.OptState) -> ModelUpdates:
    grads, (new_state, scalars) = compute_grads(params, state, rng, minibatch)

    # Average gradients and diagnostics across the device axis.
    grads = jax.lax.pmean(grads, axis_name='i')
    scalars = jax.lax.pmean(scalars, axis_name='i')

    updates, new_opt_state = optimizer.update(grads, opt_state, params)
    new_params = optax.apply_updates(params, updates)
    return ModelUpdates(new_params, new_state, new_opt_state, scalars)

  return update_fn
def get_batch_dims(global_batch_size: int, device_count: int,
                   local_device_count: int) -> Sequence[int]:
  """Compute the batch dims for this host.

  The global_batch_size is the number of data samples that are optimized over
  in one step of the optimization. This value must be split up so that each
  individual device gets some share of the batch.

  When running with multiple devices, there may be multiple hosts, each
  with multiple local devices. Each host has a local copy of the program, and
  runs a local copy of the code. Each host must therefore use a batch size
  so that when all of the hosts run together, the total number of batched
  elements matches the global batch size. We do this by splitting up the global
  batch size evenly amongst all devices, and setting the batch size per host
  to the number of host devices times the device batch size.

  Args:
    global_batch_size: The target total batch size per optimization step.
    device_count: The total number of devices sharing computation per step.
    local_device_count: The number of devices available on the current host.

  Returns:
    The batch dimensions to use on the currently running host, as
    (local_device_count, per_device_batch_size).

  Raises:
    ValueError: If `global_batch_size` is not divisible by `device_count`.
  """
  per_device_batch_size, remainder = divmod(global_batch_size, device_count)

  if remainder:
    # Bug fix: report the *global* device count — that is the divisor used
    # above; the previous message interpolated `local_device_count`.
    raise ValueError(
        f'Cannot split batch of {global_batch_size} evenly across '
        f'{device_count} devices.')

  host_batch_dims = (local_device_count, per_device_batch_size)
  return host_batch_dims
| brave-main | brave/training/trainer.py |
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A package to sample from videos."""
from typing import Tuple, Optional, Union
import tensorflow as tf
# Defaults used by `random_sample_crop_window`: how many times the random
# sampler may retry, and the center-crop padding used when it gives up.
DEFAULT_MAX_RANDOM_SAMPLE_CROP_ATTEMPTS = 20
DEFAULT_PADDING_ON_FAIL = 16
def pad_and_center_crop_window(image_shape: tf.Tensor,
                               padding: int = 16) -> tf.Tensor:
  """Compute a crop window for a padded center crop of the given image shape.

  Args:
    image_shape: The shape of the jpeg [height, width, channels], or [height,
      width].
    padding: The distance between the input image and the output image at each
      edge, so 2 * padding pixels are removed along the smallest edge.

  Returns:
    A crop window [y, x, image_size, image_size], where
    image_size = min(height, width) - 2 * padding, and y and x center the
    resulting crop within the input image.
  """
  height_and_width = image_shape[:2]  # Drop the channels entry, if present.
  shortest_side = tf.math.reduce_min(height_and_width)

  # A padding of half the shortest side or more would leave no pixels.
  tf.debugging.assert_greater(shortest_side, 2 * padding)

  top = tf.cast((height_and_width[0] - shortest_side) / 2, dtype=tf.int32)
  left = tf.cast((height_and_width[1] - shortest_side) / 2, dtype=tf.int32)
  crop_size = tf.cast(shortest_side - 2 * padding, tf.int32)

  return tf.stack([top + padding, left + padding, crop_size, crop_size])
def random_sample_crop_window(image_shape: tf.Tensor,
                              min_area: float,
                              max_area: float,
                              min_aspect_ratio: float,
                              max_aspect_ratio: float,
                              padding_on_fail: int = DEFAULT_PADDING_ON_FAIL,
                              seed: Optional[int] = None) -> tf.Tensor:
  """Randomly sample a crop window, given an image size and config.

  It may be that the random sampler is unable to satisfy the constraints given
  (within an acceptable number of iterations). In this case, the sampler
  falls back to returning the result of `pad_and_center_crop_window`, with the
  default padding set.

  Args:
    image_shape: A tensor containing [image_height, image_width, channels].
    min_area: A float with the minimum area used per crop.
    max_area: A float with the maximum area used per crop.
    min_aspect_ratio: A float with the minimum aspect ratio distorsion for the
      crops.
    max_aspect_ratio: A float with the maximum aspect ratio distorsion for the
      crops.
    padding_on_fail: The padding to use if the sampler fails to return a valid
      sample.
    seed: The seed to pass to the random sampler.

  Returns:
    A bounding box tensor [min y, min x, height, width] in image coordinates.
  """
  crop_window = _sample_crop_window(
      image_shape=image_shape,
      min_object_covered=min_area,
      aspect_ratio_range=(min_aspect_ratio, max_aspect_ratio),
      area_range=(min_area, max_area),
      max_attempts=DEFAULT_MAX_RANDOM_SAMPLE_CROP_ATTEMPTS,
      seed=seed)

  # If the random crop failed, fall back to padded center crop.
  # NOTE(review): failure is detected by the sampler returning the full image
  # (it is configured with `use_image_if_no_bounding_boxes=True`); a
  # successful sample that exactly covers the whole image is therefore also
  # treated as a failure.
  return tf.cond(
      tf.reduce_all(tf.equal(image_shape[:2], crop_window[2:])),
      lambda: pad_and_center_crop_window(image_shape, padding=padding_on_fail),
      lambda: tf.identity(crop_window))
def decode_crop_resize_images(
    jpeg_encoded_images: tf.Tensor, crop_window: tf.Tensor,
    image_size: Union[tf.Tensor, Tuple[int, int]]) -> tf.Tensor:
  """Decode jpegs, crop to `crop_window`, then resize to `image_size`.

  Args:
    jpeg_encoded_images: A tensor containing a sequence of jpeg images.
    crop_window: The window to crop, as [y min, x min, height, width].
    image_size: The [height, width] to resize the cropped images to.

  Returns:
    Video encoded as [T, image_size, image_size, C], where the time is the
    leading dimension.
  """
  cropped_video = decode_crop_images(jpeg_encoded_images, crop_window)
  return tf.image.resize(cropped_video, image_size)
def decode_crop_images(jpeg_encoded_images: tf.Tensor,
                       crop_window: tf.Tensor) -> tf.Tensor:
  """Given a crop window, decode the input tensors.

  Args:
    jpeg_encoded_images: A tensor containing jpeg images.
    crop_window: The window to crop, as [y min, x min, height, width] — the
      format expected by `tf.image.decode_and_crop_jpeg` (the previous
      "[row min, col min, row max, col max]" description was inaccurate).

  Returns:
    Video encoded as [T, H, W, C], where the time is the leading dimension.
  """
  return tf.map_fn(
      lambda x: _decode_and_crop(x, crop_window),
      jpeg_encoded_images,
      fn_output_signature=tf.uint8)
def decode_resize_crop_images(jpeg_encoded_images: tf.Tensor, *,
                              initial_resize: int,
                              center_crop_size: int) -> tf.Tensor:
  """Decode, resize minimum and then center crop a sequence of images.

  Args:
    jpeg_encoded_images: A tensor containing jpeg images.
    initial_resize: First, resize the smallest edge of the images to be exactly
      this value.
    center_crop_size: Once the initial resize is complete, take a center crop
      of exactly this height and width from the resized frames.

  Returns:
    Video encoded as [T, H, W, C], where the time is the leading dimension.
  """
  video = tf.map_fn(
      tf.io.decode_jpeg, jpeg_encoded_images, fn_output_signature=tf.uint8)
  return _resize_min_and_crop(
      video, initial_resize=initial_resize, center_crop_size=center_crop_size)
def _resize_min_and_crop(video, *, initial_resize: int,
                         center_crop_size: int) -> tf.Tensor:
  """Resize the minimum side and center crop to given side.

  Args:
    video: The video to crop, as [T, H, W, C].
    initial_resize: First, resize the smallest edge of the images to be exactly
      this value.
    center_crop_size: Once the initial resize is complete, take a center crop
      of exactly this height and width from the resized frames.

  Returns:
    The cropped video.
  """
  video = resize_min(video, initial_resize)

  shape = tf.shape(video)
  height = shape[1]
  width = shape[2]

  # Offsets that center the crop within the resized frames.
  offset_h = tf.cast((height - center_crop_size) / 2, tf.int32)
  offset_w = tf.cast((width - center_crop_size) / 2, tf.int32)
  return video[:, offset_h:offset_h + center_crop_size,
               offset_w:offset_w + center_crop_size, :]
def resize_min(video: tf.Tensor, shortest_edge: int) -> tf.Tensor:
  """Given a video, resize the smallest side to a given value.

  Args:
    video: A video as [T, H, W, 3].
    shortest_edge: The result will be resized so that the shortest edge matches
      this value.

  Returns:
    A video [T, H', W', 3], where min(H', W') = shortest_edge.
  """
  shape = tf.shape(video)
  input_h = shape[1]
  input_w = shape[2]

  # Scale both sides by shortest_edge / min(H, W), keeping aspect ratio;
  # the max() guards against integer-division rounding below shortest_edge.
  output_h = tf.maximum(shortest_edge, (input_h * shortest_edge) // input_w)
  output_w = tf.maximum(shortest_edge, (input_w * shortest_edge) // input_h)

  def resize_fn():
    # Bilinear resize returns floats; cast back to the input dtype.
    result = tf.image.resize(
        video, (output_h, output_w), method=tf.image.ResizeMethod.BILINEAR)
    return tf.cast(result, video.dtype)

  # Skip the resize entirely when the video already has the target size.
  should_resize = tf.math.logical_or(
      tf.not_equal(input_w, output_w), tf.not_equal(input_h, output_h))
  return tf.cond(pred=should_resize, true_fn=resize_fn, false_fn=lambda: video)
def _decode_and_crop(jpeg_encoded_image: tf.Tensor,
                     crop_window: tf.Tensor) -> tf.Tensor:
  """Decode jpeg using to crop window.

  Args:
    jpeg_encoded_image: Tensor containing encoded jpeg.
    crop_window: The window to crop, as [y min, x min, height, width] — the
      format `tf.image.decode_and_crop_jpeg` expects (the previous
      "[row min, col min, row max, col max]" description was inaccurate).

  Returns:
    Decoded image [H, W, C]
  """
  return tf.image.decode_and_crop_jpeg(
      jpeg_encoded_image, crop_window, channels=3)
def _sample_crop_window(image_shape: tf.Tensor,
                        min_object_covered: float,
                        aspect_ratio_range: Tuple[float, float],
                        area_range: Tuple[float, float],
                        max_attempts: int,
                        seed: Optional[int] = None) -> tf.Tensor:
  """Sample a crop_window to be used for cropping.

  If the sampler fails to find a solution, the full image will be returned.

  Args:
    image_shape: The shape of the image to sample [height, width, channels].
    min_object_covered: The minimum amount of the image to cover.
    aspect_ratio_range: The range of aspect ratios of the result.
    area_range: The range of areas for the sampled boxes.
    max_attempts: The number of attempts to the sampler should make before
      failing.
    seed: The seed to feed to the random number generator.

  Returns:
    A crop window [min y, min x, height, width]. If the sampler fails,
    the resulting crop will be the full image.
  """
  # A single bounding box covering the entire image, in normalized coords.
  bbox = tf.constant([0.0, 0.0, 1.0, 1.0], dtype=tf.float32, shape=[1, 1, 4])

  # `use_image_if_no_bounding_boxes=True` makes the op return the whole image
  # when no valid sample is found within `max_attempts`.
  bbox_begin, bbox_size, _ = tf.image.sample_distorted_bounding_box(
      image_shape,
      bounding_boxes=bbox,
      min_object_covered=min_object_covered,
      aspect_ratio_range=aspect_ratio_range,
      area_range=area_range,
      max_attempts=max_attempts,
      use_image_if_no_bounding_boxes=True,
      seed=seed)

  offset_y, offset_x, _ = tf.unstack(bbox_begin)
  target_height, target_width, _ = tf.unstack(bbox_size)

  return tf.stack([offset_y, offset_x, target_height, target_width])
| brave-main | brave/datasets/video_sampling.py |
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for sampling."""
from absl.testing import absltest
from absl.testing import parameterized
from brave.datasets import sampling
class SamplingTest(parameterized.TestCase):
  """Tests for `sampling.compute_linearly_spaced_sample_indices`.

  Each case lists the expected (start, end, step) triples for the given
  sequence length, number of samples, frames per sample and step.
  """

  @parameterized.named_parameters([
      {
          'testcase_name': 'single_item',
          'sequence_length': 10,
          'num_samples': 1,
          'num_frames_per_sample': 1,
          'step': 1,
          'expected': [(0, 1, 1)]
      },
      {
          'testcase_name': 'two_items',
          'sequence_length': 10,
          'num_samples': 2,
          'num_frames_per_sample': 1,
          'step': 1,
          'expected': [(0, 1, 1), (9, 10, 1)]
      },
      {
          'testcase_name': 'five_items',
          'sequence_length': 5,
          'num_samples': 5,
          'num_frames_per_sample': 1,
          'step': 1,
          'expected': [(0, 1, 1), (1, 2, 1), (2, 3, 1), (3, 4, 1), (4, 5, 1)]
      },
      {
          'testcase_name': 'overlapping_items',
          'sequence_length': 5,
          'num_samples': 5,
          'num_frames_per_sample': 3,
          'step': 1,
          'expected': [(0, 3, 1), (0, 3, 1), (1, 4, 1), (1, 4, 1), (2, 5, 1)]
      },
      {
          'testcase_name': 'overlapping_with_step',
          'sequence_length': 12,
          'num_samples': 3,
          'num_frames_per_sample': 3,
          'step': 2,
          'expected': [(0, 5, 2), (3, 8, 2), (7, 12, 2)]
      },
      {
          'testcase_name': 'shortest_possible_sequence',
          'sequence_length': 12,
          'num_samples': 3,
          'num_frames_per_sample': 12,
          'step': 1,
          'expected': [(0, 12, 1), (0, 12, 1), (0, 12, 1)]
      },
      {
          'testcase_name': 'shortest_possible_sequence_with_step',
          'sequence_length': 4,
          'num_samples': 3,
          'num_frames_per_sample': 2,
          'step': 3,
          'expected': [(0, 4, 3), (0, 4, 3), (0, 4, 3)]
      },
  ])
  def test_compute_linearly_spaced_sample_indices(self, sequence_length,
                                                  num_samples,
                                                  num_frames_per_sample, step,
                                                  expected):
    result = sampling.compute_linearly_spaced_sample_indices(
        sequence_length, num_samples, num_frames_per_sample, step)
    # Normalize to plain tuples before comparing against the expected triples.
    result = [tuple(v) for v in result]
    self.assertEqual(expected, result)
if __name__ == '__main__':
  # Run the tests via absl's test runner.
  absltest.main()
| brave-main | brave/datasets/sampling_test.py |
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Datasets for multi-view multi-modal data."""
import functools
from typing import Callable, Dict, Optional, Sequence, Union
import chex
import tensorflow as tf
from brave.datasets import media_sequences
Array = Union[tf.Tensor, chex.Array]
@chex.dataclass
class View:
  """A view is a group of time-synchronized modes.

  Attributes:
    video: An optional tensor of shape [BATCH_DIMS..., T, H, W, C], where T is
      the time dimension and C=3 are the image channels as RGB.
    audio: An optional tensor of shape [BATCH_DIMS..., AUDIO...]. Depending on
      the representation, the audio may be rank-1 stored as a raw waveform of
      dimension (T,), or as a rank-2 spectrogram, (T, frequency_bins). The
      sample-rate is dependent on the dataset.
    labels: If available, contains the integer class labels for this view, of
      shape [BATCH_DIMS..., N], where N is the number of labels.
  """
  video: Optional[Array]
  audio: Optional[Array]
  labels: Optional[Array]

  def __repr__(self):
    # Workaround http://b/190464506
    if isinstance(self.video, (str, int)):
      return ''

    modes = []
    if self.video is not None:
      modes.append(f'video: {self.video.shape}')
    if self.audio is not None:
      modes.append(f'audio: {self.audio.shape}')
    if self.labels is not None:
      modes.append(f'labels: {self.labels.shape}')
    # Bug fix: close the angle bracket, matching `MiniBatch.__repr__`'s
    # `<Batch ...>` (the original f-string was missing the trailing '>').
    return f'<View {", ".join(modes)}>'
@chex.dataclass
class MiniBatch:
  """Minibatches contain multimodal multi-view data.

  Attributes:
    views: A mapping from view_name to view. Each view is a time-synchronized
      slice of the underlying raw data. Any batch dimensions are contained in
      the tensors of the view themselves.
  """
  views: Dict[str, View]

  def __repr__(self):
    # Workaround http://b/190464506
    if isinstance(self.views, (str, int)):
      return ''

    views = ', '.join(f'{name}: {view}' for name, view in self.views.items())
    return f'<Batch {views}>'
# Maps one raw `EncodedSequence` to a (still encoded) sub-sequence per view.
ViewSamplerFn = Callable[[media_sequences.EncodedSequence],
                         Dict[str, media_sequences.EncodedSequence]]

# Decodes the per-view encoded sequences into their `View` representations.
ViewDecoderFn = Callable[[Dict[str, media_sequences.EncodedSequence]],
                         Dict[str, View]]
def multi_view_dataset(
    shards: Sequence[str],
    features: Sequence[media_sequences.FeatureKind],
    view_sampler: ViewSamplerFn,
    view_decoder: ViewDecoderFn,
    *,
    shuffle: bool = False,
    shard_reader: media_sequences.ShardReaderFn = media_sequences
    .tf_record_shard_reader,
) -> tf.data.Dataset:
  """Construct a multiview multimodal dataset.

  The dataset is constructed in three stages,

    * The specified features are read into EncodedSequence objects.
    * The view_sampler function is used to filter out only the fields that
      are needed for each view.
    * The view_decoder is used to decode the EncodedSequences into views.

  Args:
    shards: The shard paths to read the dataset from.
    features: The features to deserialize from the table into the encoded media
      sequences object.
    view_sampler: A callable taking an encoded media sequence and returning a
      dictionary of sampled media sequences, one for each view.
    view_decoder: A callable taking a dictionary of encoded sequences per view
      and returning the decoded views.
    shuffle: Whether or not to shuffle the data.
    shard_reader: The callable used to decode shards from paths.

  Returns:
    A tfds dataset with underlying datatype `datasets.Minibatch`. Note that
    the returned dataset has no batch dimensions, so that every item in the
    dataset is a single example. Call `.batch()` on the result to group together
    examples.
  """
  ds = media_sequences.media_sequence_dataset(
      shards, features, shuffle=shuffle, shard_reader=shard_reader)

  # Shuffled (training) pipelines trade output determinism for throughput.
  return _multi_view_batches_from_sequences(
      ds, view_sampler, view_decoder, deterministic=not shuffle)
def _multi_view_batches_from_sequences(ds: tf.data.Dataset,
                                       view_sampler: ViewSamplerFn,
                                       view_decoder: ViewDecoderFn, *,
                                       deterministic: bool) -> tf.data.Dataset:
  """Construct batches using the view decoder."""

  def sequences_to_batch(sequences):
    return _make_batch(sequences, view_decoder=view_decoder)

  sampled = ds.map(view_sampler)
  return sampled.map(
      sequences_to_batch,
      num_parallel_calls=tf.data.AUTOTUNE,
      deterministic=deterministic)
def _make_batch(sequences: Dict[str, media_sequences.EncodedSequence],
                view_decoder: ViewDecoderFn) -> MiniBatch:
  """Decode the per-view sequences and wrap the result in a MiniBatch."""
  decoded_views = view_decoder(sequences)
  return MiniBatch(views=decoded_views)
| brave-main | brave/datasets/datasets.py |
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
| brave-main | brave/datasets/__init__.py |
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for datasets."""
import tempfile
from typing import Dict
from absl.testing import absltest
from absl.testing import parameterized
from brave.datasets import datasets
from brave.datasets import fixtures
from brave.datasets import media_sequences
from brave.datasets import time_sampling
from brave.datasets import video_sampling
class TFRecordDatasetsTest(parameterized.TestCase):
  """Integration test building a multi-view dataset from TFRecord shards."""

  def test_multi_view_dataset(self):
    # Write a small TFRecord fixture to disk so the pipeline reads real shards.
    with tempfile.TemporaryDirectory() as fixture_dir:
      self.shards = fixtures.write_tf_record_dataset_fixture(fixture_dir)

      # Create a video dataset with a single view called 'default' containing
      # the first 4 frames of each video, cropped to 128 X 128
      ds = datasets.multi_view_dataset(
          self.shards,
          features=[media_sequences.FeatureKind.VIDEO],
          view_sampler=_test_sampler,
          view_decoder=_test_decoder)

      for batch in ds:
        self.assertEqual(batch.views['default'].video.shape, (4, 128, 128, 3))
def _test_decoder(
    sequences: Dict[str, media_sequences.EncodedSequence]
) -> Dict[str, datasets.View]:
  """Decode each view's jpeg frames into a 128x128 video, no audio/labels."""

  def decode(sequence):
    frames = video_sampling.decode_resize_crop_images(
        sequence.jpeg_encoded_images, initial_resize=224, center_crop_size=128)
    return datasets.View(video=frames, audio=None, labels=None)

  return {name: decode(sequence) for name, sequence in sequences.items()}
def _test_sampler(
    sequence: media_sequences.EncodedSequence
) -> Dict[str, media_sequences.EncodedSequence]:
  """Randomly sample four consecutive frames into a single 'default' view."""
  sampled = time_sampling.random_sample_sequence_using_video(
      num_video_frames=4, video_frame_step=1, sequence=sequence)
  return dict(default=sampled.sequence)
# Standard absltest entry point.
if __name__ == '__main__':
  absltest.main()
| brave-main | brave/datasets/datasets_test.py |
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Video sampling tests."""
from absl.testing import absltest
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from brave.datasets import video_sampling
class VideoSamplingTest(parameterized.TestCase):
  """Tests for crop-window computation and decode/resize/crop helpers."""

  @parameterized.parameters([
      dict(image_shape=(224, 300), padding=0, expected=(0, 38, 224, 224)),
      dict(image_shape=(300, 224), padding=0, expected=(38, 0, 224, 224)),
      dict(image_shape=(224, 300), padding=16, expected=(16, 54, 192, 192)),
      dict(image_shape=(300, 224), padding=16, expected=(54, 16, 192, 192)),
      dict(image_shape=(32 + 1, 32 + 1), padding=16, expected=(16, 16, 1, 1)),
  ])
  def test_center_crop(self, image_shape, padding, expected):
    """Padded center-crop windows match the expected (y, x, h, w) boxes."""
    image_shape = tf.constant(image_shape, dtype=tf.int32)
    bbox = video_sampling.pad_and_center_crop_window(image_shape, padding)
    np.testing.assert_allclose(expected, bbox.numpy())

  @parameterized.parameters([
      dict(
          image_shape=(224, 300, 3),
          min_area=0.5,
          max_area=1.0,
          min_aspect_ratio=1.0,
          max_aspect_ratio=1.0),
      dict(
          image_shape=(224, 224, 3),
          min_area=0.5,
          max_area=1.0,
          min_aspect_ratio=0.3,
          max_aspect_ratio=2.0),
      dict(
          image_shape=(100, 10, 3),
          min_area=0.001,
          max_area=1.0,
          min_aspect_ratio=0.1,
          max_aspect_ratio=10.0),
  ])
  def test_random_sample_crop_window(self, image_shape, min_area, max_area,
                                     min_aspect_ratio, max_aspect_ratio):
    """Sampled crop windows are varied and respect area/aspect constraints."""
    windows = []
    for i in range(100):
      crop_window = video_sampling.random_sample_crop_window(
          tf.constant(image_shape),
          min_area=min_area,
          max_area=max_area,
          min_aspect_ratio=min_aspect_ratio,
          max_aspect_ratio=max_aspect_ratio,
          seed=i).numpy()
      windows.append(crop_window)

    # Test that we see plenty of variety in the samples.
    different_samples = set(tuple(window) for window in windows)
    assert len(different_samples) > 50

    image_area = image_shape[0] * image_shape[1]
    sampled_min_area = min(w[2] * w[3] for w in windows)
    sampled_max_area = max(w[2] * w[3] for w in windows)
    sampled_min_aspect_ratio = min(w[3] / w[2] for w in windows)
    # BUG FIX: this was previously computed with min(), so the
    # max-aspect-ratio assertion below was re-checking the *minimum* and the
    # upper aspect-ratio bound was never actually tested.
    sampled_max_aspect_ratio = max(w[3] / w[2] for w in windows)

    self.assertLess(sampled_max_area / image_area, max_area + 1e-4)
    self.assertGreater(sampled_min_area / image_area, min_area - 1e-4)
    self.assertLess(sampled_max_aspect_ratio, max_aspect_ratio + 1e-4)
    self.assertGreater(sampled_min_aspect_ratio, min_aspect_ratio - 1e-4)

  def test_random_sample_crop_window_fall_back(self):
    """Unsatisfiable constraints must fall back to the padded center crop."""
    # The sampler can't satisfy the given conditions, and will thus fallback
    # to a padded center crop. We check this by comparing that a padded
    # center crop gives the same result in this case.
    image_shape = tf.constant([224, 64, 3])
    crop_window = video_sampling.random_sample_crop_window(
        image_shape,
        min_area=0.5,
        max_area=1.0,
        min_aspect_ratio=100.0,
        max_aspect_ratio=200.0,
        seed=0)
    padded_center_crop = video_sampling.pad_and_center_crop_window(image_shape)
    np.testing.assert_allclose(padded_center_crop.numpy(), crop_window.numpy())

  def test_resize_min_and_crop(self):
    """Resize-then-center-crop returns the expected shape and content."""
    # NOTE(review): the "marked" region is set to 1.0 inside an all-ones
    # array, so the content assertion is trivially satisfied; presumably a
    # distinct marker value was intended -- confirm before changing, since a
    # non-trivial value would interact with resize interpolation.
    video = np.ones((3, 120, 240, 3))
    video[:, 50:70, 110:130, :] = 1.0

    cropped = video_sampling._resize_min_and_crop(
        video, initial_resize=60, center_crop_size=60)
    self.assertEqual(cropped.shape, (3, 60, 60, 3))

    expected = np.ones((3, 60, 60, 3))
    expected[:, 20:40, 20:40, :] = 1.0
    np.testing.assert_allclose(expected, cropped.numpy())
# Standard absltest entry point.
if __name__ == '__main__':
  absltest.main()
| brave-main | brave/datasets/video_sampling_test.py |
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for time sampling."""
from absl.testing import absltest
from absl.testing import parameterized
import tensorflow as tf
from brave.datasets import media_sequences
from brave.datasets import sampling
from brave.datasets import time_sampling
class TimeSamplingTest(parameterized.TestCase):
  """Tests for time_sampling: video-synced sub-sequence sampling."""

  def test_video_sampling_overruns_region(self):
    """With exactly one valid placement, the sampler must return it."""
    sequence = _sequence_fixture()

    # There is only one way to sample
    result = time_sampling.random_sample_sequence_using_video(
        num_video_frames=2, video_frame_step=3, sequence=sequence, seed=5)

    expected_images = tf.constant(['abc', 'jkl'])
    tf.assert_equal(expected_images, result.sequence.jpeg_encoded_images)
    tf.assert_equal(0, result.indices.start_index)
    tf.assert_equal(4, result.indices.end_index)

    # The full span of the video is sampled, so all audio is kept.
    expected_audio = tf.constant([0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0])
    tf.assert_equal(expected_audio, result.sequence.audio)

  def test_impossible_time_sampling(self):
    """Requesting more frames than the sequence holds must raise."""
    sequence = _sequence_fixture()
    with self.assertRaises(tf.errors.InvalidArgumentError):
      time_sampling.random_sample_sequence_using_video(
          num_video_frames=3, video_frame_step=3, sequence=sequence, seed=5)

  def test_all_values_are_sampled(self):
    """Repeated single-frame sampling should visit every frame."""
    sequence = _sequence_fixture()

    samples = set()
    for _ in range(100):
      result = time_sampling.random_sample_sequence_using_video(
          num_video_frames=1, video_frame_step=1, sequence=sequence)
      vs = tuple(result.sequence.jpeg_encoded_images.numpy().tolist())
      samples.add(vs)

    expected = set([(b'abc',), (b'def',), (b'ghi',), (b'jkl',)])
    self.assertEqual(expected, samples)

  def test_time_sample_length_1(self):
    """sample_start_index restricts which frames may be returned."""
    sequence = _sequence_fixture()

    samples = set()
    for _ in range(100):
      result = time_sampling.random_sample_sequence_using_video(
          num_video_frames=1,
          video_frame_step=1,
          sequence=sequence,
          sample_start_index=2)
      vs = tuple(result.sequence.jpeg_encoded_images.numpy().tolist())
      samples.add(vs)

    expected = set([(b'ghi',), (b'jkl',)])
    self.assertEqual(expected, samples)

  def test_constrained_sample_ranges(self):
    """A fully constrained window yields the unique satisfying sample."""
    sequence = _sequence_fixture()
    result = time_sampling.random_sample_sequence_using_video(
        num_video_frames=2,
        video_frame_step=1,
        sequence=sequence,
        sample_start_index=1,
        sample_end_index=3)

    # Only one sequence can satisfy these constraints.
    expected_images = tf.constant(['def', 'ghi'])
    tf.assert_equal(expected_images, result.sequence.jpeg_encoded_images)

    expected_audio = tf.constant([2.0, 3.0, 4.0, 5.0])
    tf.assert_equal(expected_audio, result.sequence.audio)

  @parameterized.parameters([
      {
          'indices': (0, 2, 1),
          'old_length': 4,
          'new_length': 4,
          'expected': (0, 2, 1),
      },
      {
          'indices': (0, 1, 1),
          'old_length': 4,
          'new_length': 8,
          'expected': (0, 2, 1),
      },
      {
          'indices': (0, 1, 1),
          'old_length': 4,
          'new_length': 8,
          'expected': (0, 2, 1),
      },
      {
          'indices': (0, 4, 3),
          'old_length': 4,
          'new_length': 4,
          'expected': (0, 4, 1),
      },
      {
          'indices': (0, 10, 4),
          'old_length': 10,
          'new_length': 5,
          'expected': (0, 5, 1),
      },
  ])
  def test_synced_indices(self, indices, old_length, new_length, expected):
    """Indices rescale proportionally onto a sequence of different length."""
    indices = sampling.Indices(*indices)
    result = time_sampling.synced_indices(indices, old_length, new_length)
    self.assertEqual(expected[0], result.start_index)
    self.assertEqual(expected[1], result.end_index)
    self.assertEqual(expected[2], result.step)

  def test_get_subsequence_by_video_indices(self):
    """Slicing by video indices slices the audio proportionally."""
    sequence = _sequence_fixture()
    result = time_sampling.get_subsequence_by_video_indices(
        sequence, sampling.Indices(1, 3, 1))
    expected_images = tf.constant(['def', 'ghi'])
    expected_audio = tf.constant([2.0, 3.0, 4.0, 5.0])
    tf.assert_equal(expected_audio, result.audio)
    tf.assert_equal(expected_images, result.jpeg_encoded_images)

    # With override_num_audio_samples the audio window is extended and
    # zero-padded past the end of the available samples.
    result = time_sampling.get_subsequence_by_video_indices(
        sequence, sampling.Indices(1, 3, 1), override_num_audio_samples=8)
    expected_images = tf.constant(['def', 'ghi'])
    expected_audio = tf.constant([2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 0.0, 0.0])
    tf.assert_equal(expected_audio, result.audio)
    tf.assert_equal(expected_images, result.jpeg_encoded_images)
def _sequence_fixture():
  """Return a 4-frame sequence with two audio samples per video frame."""
  images = tf.constant(['abc', 'def', 'ghi', 'jkl'])
  audio = tf.constant([0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0])
  return media_sequences.EncodedSequence(
      jpeg_encoded_images=images, audio=audio, labels=None)
# Standard absltest entry point.
if __name__ == '__main__':
  absltest.main()
| brave-main | brave/datasets/time_sampling_test.py |
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Logic for sampling in the temporal dimension."""
from typing import NamedTuple, Optional, Union
import tensorflow as tf
from brave.datasets import media_sequences
from brave.datasets import sampling
# A scalar that may be either a static Python int or a dynamic tf.Tensor.
Scalar = Union[int, tf.Tensor]


class SampleResult(NamedTuple):
  """A sampled sub-sequence plus the video indices that produced it."""
  # The sliced encoded sequence (video, audio, labels).
  sequence: media_sequences.EncodedSequence
  # (start, end, step) indices into the original video frames.
  indices: sampling.Indices
def random_sample_sequence_using_video(
    num_video_frames: Scalar,
    video_frame_step: Scalar,
    sequence: media_sequences.EncodedSequence,
    seed: Optional[int] = None,
    sample_start_index: Scalar = 0,
    sample_end_index: Scalar = -1,
    override_num_audio_samples: Optional[int] = None,
) -> SampleResult:
  """Randomly sample a sub-sequence using video to sync.

  Args:
    num_video_frames: The number of video frames to return.
    video_frame_step: The step between frames sampled from the sequence.
    sequence: The sequence to sample from.
    seed: A seed to set to introduce determinism.
    sample_start_index: The returned sample must start at or after this value.
    sample_end_index: The returned sample must end before this value. A
      negative value is interpreted relative to the end of the video. Thus the
      returned indices must fall within the range given by
      [sample_start_index, sample_end_index).
    override_num_audio_samples: If set, and audio is present, then the length of
      the sampled audio will be set to this value. This is useful to avoid
      rounding and sync. errors where we want the audio tensor to have a
      specific shape.

  Returns:
    A new sequence, where the video has been sampled to have the given
    number of frames and the given step. All other fields present in the
    sequence are also sampled proportionally to the same time range.

  Raises:
    tf.errors.InvalidArgumentError: If the requested number of frames at the
      given step cannot fit inside the allowed sampling range.
  """
  # Change negative values of sample_end_index into a true index value.
  total_frames = tf.shape(sequence.jpeg_encoded_images)[0]
  sample_end_index = tf.cond(
      tf.less(sample_end_index, 0), lambda: total_frames + sample_end_index + 1,
      lambda: sample_end_index)

  # Draw random (start, end, step) indices over the video frames.
  indices = sampling.random_sample(
      start_index=sample_start_index,
      end_index=sample_end_index,
      sample_length=num_video_frames,
      step=video_frame_step,
      seed=seed)

  # Slice every modality down to the chosen time range, using the video
  # frame indices as the time reference.
  new_sequence = get_subsequence_by_video_indices(
      sequence, indices, override_num_audio_samples=override_num_audio_samples)
  return SampleResult(new_sequence, indices)
def get_subsequence_by_video_indices(
    sequence: media_sequences.EncodedSequence,
    indices: sampling.Indices,
    override_num_audio_samples: Optional[int] = None
) -> media_sequences.EncodedSequence:
  """Return a new subsequence sliced down using the video to sync time.

  Args:
    sequence: The sequence to slice using indices.
    indices: The indices to use to slice the input.
    override_num_audio_samples: If set, and audio is present, then the length of
      the sampled audio will be set to this value. This is useful to avoid
      rounding and sync. errors where we want the tensors to have a specific
      shape.

  Returns:
    A new sequence, sliced using the given indices (which are specifically
    for the `jpeg_encoded_images` part of the input data.

    The other components of the input sequence will be sliced synchronously
    to the same sub-region (although with no 'step' applied).

    The labels are never sliced, and are always kept the same.
  """
  result = media_sequences.EncodedSequence(
      jpeg_encoded_images=None, audio=None, labels=sequence.labels)
  # Apply (start, end, step) directly to the video frames.
  result.jpeg_encoded_images = sequence.jpeg_encoded_images[
      indices.start_index:indices.end_index:indices.step]

  if sequence.labels is not None:
    result.labels = sequence.labels

  if sequence.audio is not None:
    audio = sequence.audio
    # Map the video-frame indices proportionally onto the audio samples.
    video_length = tf.shape(sequence.jpeg_encoded_images)[0]
    audio_length = tf.shape(sequence.audio)[0]
    audio_indices = synced_indices(indices, video_length, audio_length)

    if override_num_audio_samples is not None:
      # Force the audio window to a fixed length, zero-padding the waveform
      # when the sequence does not contain enough samples.
      audio_indices = sampling.Indices(
          audio_indices.start_index,
          audio_indices.start_index + override_num_audio_samples,
          audio_indices.step)

      audio_length = tf.shape(sequence.audio)[-1]
      if audio_length < audio_indices.end_index:
        padding = tf.zeros((audio_indices.end_index - audio_length,))
        audio = tf.concat([audio, padding], axis=-1)

    result.audio = audio[audio_indices.start_index:audio_indices
                         .end_index:audio_indices.step]

  return result
def synced_indices(indices: sampling.Indices,
                   old_length: Scalar,
                   new_length: Scalar,
                   new_step: Scalar = 1) -> sampling.Indices:
  """Map indices over one array onto proportional indices in another.

  Args:
    indices: The indices into the original array.
    old_length: The length of the array the indices refer to.
    new_length: The length of the array we would like to index into.
    new_step: The step to attach to the returned indices.

  Returns:
    Indices covering the proportionally equivalent region of the new array,
    clamped to [0, new_length].
  """
  scale = tf.cast(new_length / old_length, tf.float32)

  def rescale(index):
    # Scale an index by the length ratio, truncating back to int32.
    return tf.cast(tf.cast(index, tf.float32) * scale, tf.int32)

  start = tf.maximum(0, rescale(indices.start_index))
  end = tf.minimum(new_length, rescale(indices.end_index))

  return sampling.Indices(
      start_index=start,
      end_index=end,
      step=new_step,
  )
| brave-main | brave/datasets/time_sampling.py |
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A package for reading and materializing encoded data into memory."""
import enum
import functools
from typing import Any, Callable, Dict, Optional, Sequence
import chex
import tensorflow as tf
# Maximum number of shards fetched concurrently by Dataset.interleave.
MAXIMUM_DATA_FETCH_CONCURRENT_REQUESTS = 8
# Number of consecutive examples taken from each shard per interleave cycle.
DEFAULT_BLOCK_LENGTH = 8
# Size (in examples) of the within-shard shuffle buffer.
DEFAULT_SHUFFLE_BUFFER_SIZE = 32

# Note(rhemsley): We are fortunate that the datasets we currently use follow the
# same conventions for naming these features. We may have to support different
# feature names in future.
DEFAULT_LABEL_FEATURE_NAME = 'clip/label/index'
DEFAULT_IMAGE_FEATURE_NAME = 'image/encoded'
DEFAULT_AUDIO_FEATURE_NAME = 'WAVEFORM/feature/floats'

# A ShardReaderFn takes a path and returns a tf.data.Dataset. The dataset
# must be an iterable over serialized tf.train.SequenceExample protos in
# tensors.
ShardReaderFn = Callable[[str], tf.data.Dataset]
class FeatureKind(enum.Enum):
  """The kinds of feature that can be deserialized from a media sequence."""
  VIDEO = enum.auto()
  LABELS = enum.auto()
  AUDIO = enum.auto()
@chex.dataclass
class EncodedSequence:
  """Selected (still-encoded) fields read from a MediaSequence proto.

  We make use of an optimized proto reader to construct these objects, which
  is why we do not use the proto message directly.

  Attributes:
    jpeg_encoded_images: An optional tensor of shape (T,) containing jpeg
      encoded strings. Each entry in the tensor corresponds to an (ordered)
      frame in a video.
    audio: Raw audio encoded as a waveform, with rank-1 and of shape (T,). For
      example, for 10 seconds of audio sampled at 48Khz, the shape would be
      (480000,).
    labels: An optional tensor of integer class indices of shape (N,), where N
      is the number of labels associated with this sequence.
  """
  jpeg_encoded_images: Optional[tf.Tensor]
  audio: Optional[tf.Tensor]
  labels: Optional[tf.Tensor]

  def __repr__(self):
    # Describe only the fields that are present, in a fixed order.
    present = [(name, value)
               for name, value in (('jpeg_encoded_images',
                                    self.jpeg_encoded_images),
                                   ('labels', self.labels),
                                   ('audio', self.audio))
               if value is not None]
    described = [f'{name}: {value.shape}' for name, value in present]
    return '<EncodedSequence ' + ', '.join(described) + '>'
def tf_record_shard_reader(path: str) -> tf.data.Dataset:
  """By default, we assume that the data can be read as TFRecords."""
  return tf.data.TFRecordDataset(path)
def media_sequence_dataset(
    shards: Sequence[str],
    features: Sequence[FeatureKind],
    *,
    shuffle: bool = False,
    shard_reader: ShardReaderFn = tf.data.TFRecordDataset) -> tf.data.Dataset:
  """Returns a tensorflow dataset that iterates over encoded media sequences.

  NOTE(review): an earlier docstring claimed data is sharded across processes
  via jax.process_count()/jax.process_index(); no such logic appears in this
  function -- every caller-supplied shard is read. Confirm whether callers
  are expected to pre-shard the `shards` argument per process.

  Args:
    shards: The paths to the shards to read.
    features: The features to read from the encoded protobuf.
    shuffle: Whether or not to shuffle the data.
    shard_reader: A function mapping a path from the sequence of shards to a
      tf.data.Dataset over serialized tf.train.SequenceExample protos.
      Defaults to a reader for `TFRecordDataset`.

  Returns:
    A tf.data.Dataset containing objects of type EncodedSequence.
  """
  # Create a dataset that iterates over the shard paths.
  ds = tf.data.Dataset.from_tensor_slices(shards)

  # Shuffling the shards is an efficient way to shuffle the dataset at
  # a coarse level of granularity.
  if shuffle:
    ds = ds.shuffle(len(shards), seed=0)

  # We map the shard reader function across the shards and interleave the
  # results in parallel, resulting in parallel reads to the shards that are
  # combined into one sequential dataset.
  # According to the docs, the cycle_length becomes the maximum concurrent
  # fetches.
  ds = ds.interleave(
      shard_reader,
      cycle_length=MAXIMUM_DATA_FETCH_CONCURRENT_REQUESTS,
      block_length=DEFAULT_BLOCK_LENGTH,
      num_parallel_calls=tf.data.AUTOTUNE,
      deterministic=not shuffle)

  # Deserialize only the required features from the protobuf.
  ds = ds.map(functools.partial(_extract_features_from_pbf, features=features))

  # Despite shuffling the shards, we still need to shuffle within the shards.
  if shuffle:
    ds = ds.shuffle(buffer_size=DEFAULT_SHUFFLE_BUFFER_SIZE)

  return ds
def extend_sequence(sequence: EncodedSequence,
                    min_video_frames: int) -> EncodedSequence:
  """Extend a sequence until it contains at least the given number of frames.

  Args:
    sequence: The sequence under consideration.
    min_video_frames: The minimum number of video frames required.

  Returns:
    A sequence containing at least the minimum number of frames, produced by
    looping the input sequence as many times as necessary (or the input
    unchanged when it is already long enough).
  """
  num_frames = tf.shape(sequence.jpeg_encoded_images)[0]
  # BUG FIX: tf.math.ceil operates on (and returns) a float tensor, but the
  # result is used as the `multiples` argument of tf.tile inside `repeat`,
  # which requires an int32/int64 tensor. Cast explicitly before use.
  needed_repeats = tf.cast(
      tf.math.ceil(min_video_frames / num_frames), tf.int32)
  return tf.cond(
      tf.greater(needed_repeats, 1), lambda: repeat(sequence, needed_repeats),
      lambda: sequence)
def repeat(sequence: EncodedSequence, n: int) -> EncodedSequence:
  """Return a new sequence formed by looping the input n times.

  This is useful to extend unusually short sequences so that longer time
  ranges can be sampled from them.

  Args:
    sequence: The sequence to repeat.
    n: The number of times to loop the input sequence.

  Returns:
    A new sequence that is n times longer than the input sequence.
  """
  images = sequence.jpeg_encoded_images
  if images is not None:
    images = tf.tile(images, multiples=(n,))

  audio = sequence.audio
  if audio is not None:
    audio = tf.tile(audio, multiples=(n,))

  # Labels describe the whole clip, so they are carried over unchanged.
  return EncodedSequence(
      jpeg_encoded_images=images, audio=audio, labels=sequence.labels)
def _extract_features_from_pbf(
    buffer: tf.Tensor, features: Sequence[FeatureKind]) -> EncodedSequence:
  """Read specific features from a media sequence proto.

  Args:
    buffer: A serialized tf.train.SequenceExample proto.
    features: The features that should be read into the resulting object.

  Returns:
    An EncodedSequence object containing the requested features.
  """
  # Sequence (per-frame) features and context (per-clip) features are parsed
  # with separate specs.
  features_dct = _build_features_dct(features)
  context_features_dct = _build_context_features_dct(features)
  context_dct, dct = tf.io.parse_single_sequence_example(
      buffer, context_features_dct, features_dct)

  result = EncodedSequence(jpeg_encoded_images=None, audio=None, labels=None)

  if DEFAULT_IMAGE_FEATURE_NAME in dct:
    result.jpeg_encoded_images = tf.identity(dct[DEFAULT_IMAGE_FEATURE_NAME])

  if DEFAULT_AUDIO_FEATURE_NAME in dct:
    audio = tf.sparse.to_dense(dct[DEFAULT_AUDIO_FEATURE_NAME])
    # The audio is stored on the wire as (1, <n samples>).
    tf.assert_rank(audio, 2)
    result.audio = audio[0]

  if DEFAULT_LABEL_FEATURE_NAME in context_dct:
    result.labels = tf.sparse.to_dense(context_dct[DEFAULT_LABEL_FEATURE_NAME])

  return result
def _build_context_features_dct(
    features: Sequence[FeatureKind]) -> Dict[str, Any]:
  """Return the context-feature spec for parse_single_sequence_example."""
  # Labels are the only per-clip (context) feature we currently read.
  if FeatureKind.LABELS in features:
    return {DEFAULT_LABEL_FEATURE_NAME: tf.io.VarLenFeature(dtype=tf.int64)}
  return {}
def _build_features_dct(features: Sequence[FeatureKind]) -> Dict[str, Any]:
  """Return the sequence-feature spec for parse_single_sequence_example.

  Due to optimizations in parse_single_sequence_example, we need to specify
  additional information about the way the fields should be loaded.

  Args:
    features: The features to load.

  Returns:
    Type information used to construct the tensors for the encoded sequence
    objects.
  """
  dct = {}
  for kind in features:
    if kind is FeatureKind.VIDEO:
      dct[DEFAULT_IMAGE_FEATURE_NAME] = tf.io.FixedLenSequenceFeature(
          (), dtype=tf.string)
    elif kind is FeatureKind.AUDIO:
      dct[DEFAULT_AUDIO_FEATURE_NAME] = tf.io.VarLenFeature(dtype=tf.float32)
  return dct
| brave-main | brave/datasets/media_sequences.py |
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implement logic for sampling videos and audio."""
from typing import Sequence, NamedTuple, Union, Optional
import tensorflow as tf
# A scalar that may be either a static Python int or a dynamic tf.Tensor.
Scalar = Union[int, tf.Tensor]


class Indices(NamedTuple):
  """A slice over a sequence: [start_index, end_index) taken with `step`."""
  start_index: Scalar
  end_index: Scalar
  step: Scalar
def compute_linearly_spaced_sample_indices(sequence_length: int,
                                           num_samples: int,
                                           num_frames_per_sample: int,
                                           step: int) -> Sequence[Indices]:
  """Space out windows along a sequence.

  Args:
    sequence_length: The length of the sequence we are sampling from.
    num_samples: The number of samples to return from the input sequence.
    num_frames_per_sample: Each resulting sample must have this number of
      frames.
    step: The gap between frames sampled into the output.

  Returns:
    A sequence of slice indices referencing the input sequence. With one
    sample, the window starts at zero; with two, one window starts at zero
    and the other is the last window that fits; more samples are spread
    evenly between those two extremes. Windows may overlap or repeat.
  """
  # A window of num_frames_per_sample frames at the given step covers this
  # many frames of the underlying sequence.
  span = (num_frames_per_sample - 1) * step + 1

  # Linearly space the window start positions between the first and the last
  # valid placement, then truncate to integer indices.
  latest_start = tf.cast(sequence_length - span, tf.float32)
  starts = tf.cast(tf.linspace(0.0, latest_start, num_samples), tf.int32)

  return [
      Indices(starts[i], starts[i] + span, step) for i in range(num_samples)
  ]
def random_sample(start_index: Scalar,
                  end_index: Scalar,
                  sample_length: Scalar,
                  step: Scalar,
                  seed: Optional[int] = None) -> Indices:
  """Draw a random strided window from within [start_index, end_index).

  All arguments must be positive.

  Args:
    start_index: The returned sample must start at or after this index.
    end_index: The returned sample must end before this index.
    sample_length: The sample must contain this number of values.
    step: The sample must have this step in the original sequence.
    seed: A seed for the rng.

  Returns:
    Indices representing the start, end, and step in the original sequence.

  Raises:
    tf.errors.InvalidArgumentError: If the sample is not satisfiable, i.e.
      there are not enough elements in the range to place the window.
  """
  available = end_index - start_index
  # Number of underlying elements a strided window of this length covers.
  span = step * (sample_length - 1) + 1
  tf.debugging.assert_less_equal(span, available)

  # Uniformly choose one of the valid window offsets.
  offset = tf.random.uniform(
      (), maxval=available - span + 1, dtype=tf.int32, seed=seed)

  first = start_index + offset
  return Indices(first, first + span, step)
| brave-main | brave/datasets/sampling.py |
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Package to compute spectrograms from PCM encoded audio."""
import tensorflow as tf
# STFT window size (in samples) and matching FFT length.
DEFAULT_FRAME_LENGTH = 320
DEFAULT_FFT_LENGTH = 320
# Scale applied before the log, i.e. log(1 + factor * mel).
DEFAULT_LOG_FACTOR = 10_000
# Frequency range (in Hz) covered by the mel filterbank.
DEFAULT_LOWER_EDGE_FREQUENCY_HZ = 80.0
DEFAULT_UPPER_EDGE_FREQUENCY_HZ = 7600.0
def pcm_to_log_mel_spectrogram(pcm: tf.Tensor, input_sample_rate: int,
                               num_spectrogram_bins: int,
                               fft_step: int) -> tf.Tensor:
  """Compute log-mel spectrogram from raw audio.

  Args:
    pcm: The raw audio represented as PCM, with shape (BATCH_DIMS..., N) sampled
      at the sample rate `input_sample_rate`, and with zero or more batch
      dimensions.
    input_sample_rate: The samplerate of the input audio.
    num_spectrogram_bins: The number of bins in the output spectrogram.
    fft_step: The step size to use in the fft.

  Returns:
    The log-mel spectrogram of the raw audio, with shape
    (BATCH_DIMS... , N / `fft_step`, `num_bins`), where N is the number of
    samples in the input pcm.
  """
  # pad_end=True ensures one STFT frame per fft_step samples, so the output
  # time dimension is exactly ceil(N / fft_step).
  stfts = tf.signal.stft(
      pcm,
      frame_length=DEFAULT_FRAME_LENGTH,
      frame_step=fft_step,
      fft_length=DEFAULT_FFT_LENGTH,
      window_fn=tf.signal.hann_window,
      pad_end=True)
  spectrograms = tf.abs(stfts)

  # Warp the linear scale spectrograms into the mel-scale.
  linear_to_mel_weight_matrix = tf.signal.linear_to_mel_weight_matrix(
      num_spectrogram_bins, stfts.shape[-1], input_sample_rate,
      DEFAULT_LOWER_EDGE_FREQUENCY_HZ, DEFAULT_UPPER_EDGE_FREQUENCY_HZ)
  mel_spectrograms = tf.tensordot(spectrograms, linear_to_mel_weight_matrix, 1)
  # tensordot loses static shape information; restore it explicitly.
  mel_spectrograms.set_shape(spectrograms.shape[:-1].concatenate(
      linear_to_mel_weight_matrix.shape[-1:]))

  # Compute a stabilized log to get log-magnitude mel-scale spectrograms.
  # Using Sander's suggested alternative.
  log_mel_spectrograms = tf.math.log(1 + DEFAULT_LOG_FACTOR * mel_spectrograms)
  return log_mel_spectrograms
| brave-main | brave/datasets/spectrograms.py |
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for spectrograms."""
from absl.testing import absltest
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from brave.datasets import spectrograms
class AudioProcessingTest(parameterized.TestCase):
  """Checks the output shapes of pcm_to_log_mel_spectrogram."""

  def test_log_mel_spectrogram(self):
    num_samples = 48_000 * 13
    waveform = tf.constant(
        np.random.uniform(low=-1.0, high=1.0, size=(num_samples,)),
        dtype=tf.float32)
    log_mel = spectrograms.pcm_to_log_mel_spectrogram(
        waveform,
        input_sample_rate=48_000,
        num_spectrogram_bins=80,
        fft_step=160)
    # With pad_end, the number of frames is the sample count over the step.
    self.assertEqual((num_samples // 160, 80), log_mel.shape)

  def test_batched_spectrogram(self):
    waveform = tf.constant(
        np.random.uniform(low=-1.0, high=1.0, size=(3, 5, 48_000)),
        dtype=tf.float32)
    spectrogram = spectrograms.pcm_to_log_mel_spectrogram(
        waveform,
        input_sample_rate=48_000,
        num_spectrogram_bins=80,
        fft_step=160)
    # Leading batch dimensions are preserved.
    self.assertEqual((3, 5, 48_000 // 160, 80), spectrogram.shape)
# Script entry point: run the absltest test runner.
if __name__ == '__main__':
  absltest.main()
| brave-main | brave/datasets/spectrograms_test.py |
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for media sequences."""
import tempfile
from absl.testing import absltest
from absl.testing import parameterized
import tensorflow as tf
from brave.datasets import fixtures
from brave.datasets import media_sequences
class MediaSequencesTest(parameterized.TestCase):
  """Tests reading, repeating and extending encoded media sequences."""

  def test_read_hmdb_51(self):
    with tempfile.TemporaryDirectory() as fixture_dir:
      self.shards = fixtures.write_tf_record_dataset_fixture(fixture_dir)
      requested_features = [
          media_sequences.FeatureKind.VIDEO,
          media_sequences.FeatureKind.LABELS,
          media_sequences.FeatureKind.AUDIO,
      ]
      dataset = media_sequences.media_sequence_dataset(
          self.shards, requested_features)
      for sequence in dataset:
        self.assertIsInstance(sequence, media_sequences.EncodedSequence)
        self.assertIsNotNone(sequence.jpeg_encoded_images)
        self.assertIsNotNone(sequence.audio)
        self.assertIsNotNone(sequence.labels)

  def test_repeat(self):
    original = media_sequences.EncodedSequence(
        jpeg_encoded_images=tf.constant(['abc', 'def', 'ghi', 'jkl']),
        audio=tf.zeros((480376,)),
        labels=None)
    repeated = media_sequences.repeat(original, 2)
    tf.assert_equal(
        tf.constant(['abc', 'def', 'ghi', 'jkl', 'abc', 'def', 'ghi', 'jkl']),
        repeated.jpeg_encoded_images)
    # The audio must be repeated alongside the frames.
    self.assertEqual((2 * 480376,), tf.shape(repeated.audio))

  @parameterized.parameters([
      dict(min_num_frames=1, expected_length=4),
      dict(min_num_frames=3, expected_length=4),
      dict(min_num_frames=4, expected_length=4),
      dict(min_num_frames=5, expected_length=8),
      dict(min_num_frames=8, expected_length=8),
      dict(min_num_frames=9, expected_length=12),
      dict(min_num_frames=13, expected_length=16),
      dict(min_num_frames=16, expected_length=16),
  ])
  def test_extend_sequence(self, min_num_frames, expected_length):
    original = media_sequences.EncodedSequence(
        jpeg_encoded_images=tf.constant(['abc', 'def', 'ghi', 'jkl']),
        audio=tf.constant([0.1, -0.1, 0.0, 0.0]),
        labels=None)
    extended = media_sequences.extend_sequence(original, min_num_frames)
    # The sequence is padded up to a whole multiple of its original length.
    self.assertEqual(expected_length, extended.jpeg_encoded_images.shape[0])
    self.assertEqual(expected_length, extended.audio.shape[0])
# Script entry point: run the absltest test runner.
if __name__ == '__main__':
  absltest.main()
| brave-main | brave/datasets/media_sequences_test.py |
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for data augmentations."""
from absl.testing import absltest
from absl.testing import parameterized
import tensorflow as tf
from brave.datasets import augmentations
from brave.datasets import datasets
class AugmentationsTest(parameterized.TestCase):
  """Checks that every augmentation preserves the video's shape."""

  def test_normalize_video(self):
    normalized = augmentations.normalize_video(_view_fixture())
    self.assertEqual(normalized.video.shape, (4, 8, 8, 3))
    # A zero video stays zero after rescaling by 1/255.
    self.assertAlmostEqual(normalized.video[0, 0, 0, 0], 0.0)

  def test_random_color_augment_video(self):
    augmented = augmentations.random_color_augment_video(
        _view_fixture(), prob_color_augment=1.0, prob_color_drop=1.0)
    self.assertEqual(augmented.video.shape, (4, 8, 8, 3))

  def test_gaussian_blur(self):
    blurred = augmentations.random_gaussian_blur_video(
        _view_fixture(), kernel_size=3, sigma_range=(1.0, 1.0))
    self.assertEqual(blurred.video.shape, (4, 8, 8, 3))

  def test_random_horizontal_flip_video(self):
    flipped = augmentations.random_horizontal_flip_video(_view_fixture())
    self.assertEqual(flipped.video.shape, (4, 8, 8, 3))

  def test_random_convolve_video(self):
    convolved = augmentations.random_convolve_video(_view_fixture())
    self.assertEqual(convolved.video.shape, (4, 8, 8, 3))
def _view_fixture() -> datasets.View:
  """Builds an all-zero 4-frame, 8x8 RGB video view with no audio or labels."""
  return datasets.View(
      video=tf.zeros([4, 8, 8, 3]), audio=None, labels=None)
# Script entry point: run the absltest test runner.
if __name__ == '__main__':
  absltest.main()
| brave-main | brave/datasets/augmentations_test.py |
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Provide view augmentations for the dataset."""
import copy
import functools
from typing import Optional, Tuple
import tensorflow as tf
from brave.datasets import datasets
# Default upper bound (inclusive) on the odd kernel size sampled by
# random_convolve_video / _random_convolve.
DEFAULT_RANDOM_CONVOLVE_MAX_K = 11
def normalize_video(view: datasets.View) -> datasets.View:
  """Return a shallow copy of `view` with pixel values scaled into [0, 1]."""
  scaled = copy.copy(view)
  scaled.video = (1.0 / 255.0) * view.video
  return scaled
def random_color_augment_video(view: datasets.View, *,
                               prob_color_augment: float,
                               prob_color_drop: float) -> datasets.View:
  """Return a copy of `view` with random color jitter/drop applied to video."""
  augmented = copy.copy(view)
  augmented.video = _color_default_augm(
      view.video,
      zero_centering_image=False,
      prob_color_augment=prob_color_augment,
      prob_color_drop=prob_color_drop)
  return augmented
def random_gaussian_blur_video(
    view: datasets.View, *, kernel_size: int,
    sigma_range: Tuple[float, float]) -> datasets.View:
  """Apply a gaussian blur with a random sigma value in the range sigma_range.

  Args:
    view: The input view to augment.
    kernel_size: The kernel size of the blur kernel.
    sigma_range: A random value in this range is chosen as the sigma value for
      the gaussian blur.

  Returns:
    A new view where the video has a gaussian blur applied.
  """
  # One sigma is sampled per call, so every frame gets the same blur strength.
  sigma = tf.random.uniform((),
                            sigma_range[0],
                            sigma_range[1],
                            dtype=tf.float32)

  def blur(img):
    return _gaussian_blur(img, kernel_size=kernel_size, sigma=sigma)

  result = copy.copy(view)
  # Map the blur over the leading (time) dimension, one frame at a time.
  result.video = tf.map_fn(blur, view.video, fn_output_signature=tf.float32)
  return result
def random_horizontal_flip_video(view: datasets.View) -> datasets.View:
  """With probability 0.5, mirror every frame of the video left-right."""
  coin = tf.random.uniform((), minval=0, maxval=2, dtype=tf.int32)
  flipped_video = tf.cond(
      pred=tf.equal(coin, 1),
      true_fn=lambda: tf.image.flip_left_right(view.video),
      false_fn=lambda: view.video)
  flipped = copy.copy(view)
  flipped.video = flipped_video
  return flipped
def random_convolve_video(view: datasets.View,
                          *,
                          max_k=DEFAULT_RANDOM_CONVOLVE_MAX_K) -> datasets.View:
  """Return a copy of `view` whose video is passed through a random conv."""
  convolved = copy.copy(view)
  convolved.video = _random_convolve(view.video, max_k=max_k)
  return convolved
def _gaussian_blur(image: tf.Tensor,
                   kernel_size: int,
                   sigma: float,
                   padding='SAME'):
  """Blurs the given image with separable convolution.

  Args:
    image: Tensor of shape [height, width, channels] and dtype float to blur.
    kernel_size: Integer Tensor for the size of the blur kernel. This should
      be an odd number. If it is an even number, the actual kernel size will be
      size + 1.
    sigma: Sigma value for gaussian operator.
    padding: Padding to use for the convolution. Typically 'SAME' or 'VALID'.

  Returns:
    A Tensor representing the blurred image.
  """
  # Force an odd effective kernel size of 2 * radius + 1 taps.
  radius = tf.cast(kernel_size // 2, tf.int32)
  kernel_size = radius * 2 + 1
  x = tf.cast(tf.range(-radius, radius + 1), tf.float32)
  # Unnormalized 1D Gaussian, then normalized so the taps sum to 1.
  blur_filter = tf.exp(-tf.pow(x, 2.0) /
                       (2.0 * tf.pow(tf.cast(sigma, tf.float32), 2.0)))
  blur_filter /= tf.reduce_sum(blur_filter)
  # One vertical and one horizontal filter.
  blur_v = tf.reshape(blur_filter, [kernel_size, 1, 1, 1])
  blur_h = tf.reshape(blur_filter, [1, kernel_size, 1, 1])
  num_channels = tf.shape(image)[-1]
  # Depthwise convolution applies the same 1D kernel to every channel.
  blur_h = tf.tile(blur_h, [1, 1, num_channels, 1])
  blur_v = tf.tile(blur_v, [1, 1, num_channels, 1])
  expand_batch_dim = image.shape.ndims == 3
  if expand_batch_dim:
    # Tensorflow requires batched input to convolutions, which we can fake with
    # an extra dimension.
    image = tf.expand_dims(image, axis=0)
  # Separable blur: horizontal pass followed by vertical pass.
  blurred = tf.nn.depthwise_conv2d(
      image, blur_h, strides=[1, 1, 1, 1], padding=padding)
  blurred = tf.nn.depthwise_conv2d(
      blurred, blur_v, strides=[1, 1, 1, 1], padding=padding)
  if expand_batch_dim:
    blurred = tf.squeeze(blurred, axis=0)
  return blurred
def _color_default_augm(frames: tf.Tensor,
                        zero_centering_image: bool = False,
                        prob_color_augment: float = 0.8,
                        prob_color_drop: float = 0.0,
                        seed: Optional[int] = None):
  """Standard color augmentation for videos.

  Args:
    frames: A float32 tensor of shape [timesteps, input_h, input_w, channels].
    zero_centering_image: If `True`, results are in [-1, 1], if `False`, results
      are in [0, 1].
    prob_color_augment: Probability of applying color augmentation.
    prob_color_drop: Probability of dropping the colors to gray scale.
    seed: A seed to use for the random sampling.

  Returns:
    A tensor of same shape as the input with color eventually altered.
  """

  def color_augment(video: tf.Tensor) -> tf.Tensor:
    """Do standard color augmentations."""
    # Note the same augmentation will be applied to all frames of the video.
    if zero_centering_image:
      # Map [-1, 1] back into [0, 1] before jittering.
      video = 0.5 * (video + 1.0)
    video = tf.image.random_brightness(video, max_delta=32. / 255.)
    video = tf.image.random_saturation(video, lower=0.6, upper=1.4)
    video = tf.image.random_contrast(video, lower=0.6, upper=1.4)
    video = tf.image.random_hue(video, max_delta=0.2)
    video = tf.clip_by_value(video, 0.0, 1.0)
    if zero_centering_image:
      # Restore the [-1, 1] range.
      video = 2 * (video - 0.5)
    return video

  def color_drop(video: tf.Tensor) -> tf.Tensor:
    """Do color drop."""
    video = tf.image.rgb_to_grayscale(video)
    # Tile the gray channel back to three channels so the shape is unchanged.
    video = tf.tile(video, [1, 1, 1, 3])
    return video

  should_color_augment = tf.random.uniform([],
                                           minval=0,
                                           maxval=1,
                                           dtype=tf.float32,
                                           seed=seed)
  frames = tf.cond(
      pred=tf.less(should_color_augment, tf.cast(prob_color_augment,
                                                 tf.float32)),
      true_fn=lambda: color_augment(frames),
      false_fn=lambda: frames)
  should_color_drop = tf.random.uniform([],
                                        minval=0,
                                        maxval=1,
                                        dtype=tf.float32,
                                        seed=seed)
  frames = tf.cond(
      pred=tf.less(should_color_drop, tf.cast(prob_color_drop, tf.float32)),
      true_fn=lambda: color_drop(frames),
      false_fn=lambda: frames)
  return frames
def _random_convolve(x: tf.Tensor, max_k: int, init='he') -> tf.Tensor:
  """Applies a random convolution of random odd kernel size <= max_k.

  Args:
    x: A rank-4 tensor [batch, height, width, channels]; the channel count
      must be statically known, since the kernel shape depends on it.
    max_k: Upper bound (inclusive) on the sampled odd kernel size.
    init: Weight initializer name; only 'he' is implemented.

  Returns:
    `x` convolved with a freshly sampled random kernel ('SAME' padding, so
    the spatial shape is unchanged).

  Raises:
    NotImplementedError: If `init` is not 'he'.
  """
  if init == 'he':
    he_normal_init = tf.initializers.he_normal
    w_init = he_normal_init()
  else:
    raise NotImplementedError(f'Unknown init: {init} for RandConv.')
  _, _, _, ch = x.get_shape().as_list()
  # Prepare the switch case operation, depending on the dynamically sampled k.
  values_k = range(1, max_k + 1, 2)
  nb_values_k = len(values_k)
  random_conv_fns = {}

  def apply_conv2d_fn(x, k, ch, w_init):
    # Square k x k kernel mapping ch input channels to ch output channels.
    k_h, k_w, k_ic, k_oc = k, k, ch, ch
    w_shape = [k_h, k_w, k_ic, k_oc]
    strides = 1
    w = w_init(w_shape)
    return tf.nn.conv2d(x, w, strides, 'SAME', name='random_conv')

  # Branch index ind_k corresponds to kernel size k = 2 * ind_k + 1.
  for ind_k in range(nb_values_k):
    k = 2 * ind_k + 1
    apply_conv_k_fn = functools.partial(apply_conv2d_fn, x, k, ch, w_init)
    random_conv_fns[ind_k] = apply_conv_k_fn
  # Sample k uniformly in 1:max_k:2.
  ind_k = tf.cast(tf.floor(tf.random.uniform([], maxval=nb_values_k)), tf.int32)
  x = tf.switch_case(ind_k, random_conv_fns, name='sample_random_conv')
  return x
| brave-main | brave/datasets/augmentations.py |
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implement test fixtures for testing datasets."""
import os
from typing import Dict, List, Sequence
import numpy as np
import tensorflow as tf
# Number of audio samples in the fixture: ~10.376 seconds at 48 kHz.
DEFAULT_AUDIO_SAMPLES = int(10.376 * 48_000)
def write_tf_record_dataset_fixture(path: str) -> List[str]:
  """Write a small two-shard tfrecord dataset fixture under `path`.

  The first shard receives the first two fixture records, the second shard
  the remainder.

  Returns:
    The paths of the two shard files that were written.
  """
  sequences = [record for _, record in sorted(_make_fixture().items())]
  shard_paths = [
      os.path.join(path, 'shard1.tfrecords'),
      os.path.join(path, 'shard2.tfrecords'),
  ]
  for shard_path, shard_records in zip(
      shard_paths, (sequences[:2], sequences[2:])):
    with tf.io.TFRecordWriter(shard_path) as writer:
      for record in shard_records:
        writer.write(record)
  return shard_paths
def _make_fixture() -> Dict[str, bytes]:
  """Map sequence names to serialized SequenceExample protos, one per label."""
  names = ('sequence_1', 'sequence_2', 'sequence_3')
  return {
      name: _fake_sequence_1(label).SerializeToString()
      for label, name in enumerate(names)
  }
def _fake_sequence_1(label: int) -> tf.train.SequenceExample:
  """Build a test sequence: five black 224x300 frames, silent audio, `label`."""
  frame = _jpeg_feature(np.zeros((224, 300, 3)))
  feature_lists = tf.train.FeatureLists(
      feature_list={
          'image/encoded':
              tf.train.FeatureList(feature=[frame] * 5),
          'WAVEFORM/feature/floats':
              _audio_feature(np.zeros((DEFAULT_AUDIO_SAMPLES,))),
      })
  context = tf.train.Features(
      feature={'clip/label/index': _label_feature([label])})
  return tf.train.SequenceExample(
      context=context, feature_lists=feature_lists)
def _jpeg_feature(img: np.ndarray) -> tf.train.Feature:
  """Encode `img` as JPEG and wrap the bytes in a tf.train.Feature."""
  encoded = tf.io.encode_jpeg(img).numpy()
  return tf.train.Feature(
      bytes_list=tf.train.BytesList(value=[encoded]))
def _audio_feature(value: np.ndarray) -> tf.train.FeatureList:
  """Wrap a waveform in a single-element float-list FeatureList."""
  return tf.train.FeatureList(
      feature=[tf.train.Feature(float_list=tf.train.FloatList(value=value))])
def _label_feature(value: Sequence[int]) -> tf.train.Feature:
  """Wrap integer class labels in an int64-list tf.train.Feature."""
  int64_list = tf.train.Int64List(value=value)
  return tf.train.Feature(int64_list=int64_list)
| brave-main | brave/datasets/fixtures.py |
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Define a re-usable container for multimodal embedding models."""
from typing import Dict, Tuple, Callable
import chex
import haiku as hk
import tensorflow as tf
from brave.datasets import datasets
# Scalar training loss value.
Loss = chex.Array
# Mapping from metric name to scalar value, suitable for logging.
Scalars = Dict[str, chex.Array]
GlobalStep = int
EvalMode = str
# Initializes model parameters and state from a PRNG key.
InitFn = Callable[[chex.PRNGKey], Tuple[hk.Params, hk.State]]
# Computes the loss for a minibatch, returning (loss, (new_state, scalars)).
LossFn = Callable[[hk.Params, hk.State, chex.PRNGKey, datasets.MiniBatch],
                  Tuple[Loss, Tuple[hk.State, Scalars]]]
# Embeds a single view; the trailing bool is the is_training flag.
ForwardFn = Callable[[hk.Params, hk.State, chex.PRNGKey, datasets.View, bool],
                     Tuple[chex.Array, hk.State]]
# Builds the training dataset (unbatched; see MultimodalEmbeddingModel).
DatasetBuilderFn = Callable[[], tf.data.Dataset]
# Evaluates the model at a global step / eval mode, returning named metrics.
EvaluateFn = Callable[[GlobalStep, EvalMode, hk.Params, hk.State],
                      Dict[str, chex.Array]]
@chex.dataclass
class MultimodalEmbeddingModel:
  """A trainable multimodal embedding model.

  Attributes:
    init_fn: The init function may be called to initialize parameters and state
      for the model. The resulting parameters may be used for all other
      functions returned in this dataclass.
    forward_fns: A mapping giving the (named) embedding functions trained by the
      model.
    loss_fn: A function to compute training loss given a train batch.
    evaluate_fn: A function taking the global step, the "evaluation mode" (a
      user-defined string taken from the Jaxline mode, which has been added to
      support running multiple parallel Jaxline evaluation processes), and
      returning a dictionary of metrics that may be published directly.
    train_dataset_builder_fn: A callable returning the train dataset. The
      dataset must be an iterable over datasets.MiniBatch, each minibatch must
      be structured to support the specific embedding model being trained. Note
      that the batch dimension is added by the trainer, so the dataset should
      return single examples, rather than batches.
  """
  init_fn: InitFn
  forward_fns: Dict[str, ForwardFn]
  loss_fn: LossFn
  evaluate_fn: EvaluateFn
  train_dataset_builder_fn: DatasetBuilderFn
| brave-main | brave/models/embedding_model.py |
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
| brave-main | brave/models/__init__.py |
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Brave."""
import copy
import tempfile
from absl.testing import absltest
from absl.testing import parameterized
import chex
import jax
from jaxline import utils as jaxline_utils
import ml_collections
import numpy as np
import optax
import tensorflow_datasets as tfds
from brave.datasets import datasets
from brave.datasets import fixtures
from brave.models.brave import brave
from brave.training import trainer
ConfigDict = ml_collections.ConfigDict
# A deliberately tiny BraveConfig so the test model builds and runs quickly.
TEST_CONFIG = ConfigDict({
    'num_frames_broad': 2,
    'image_size_broad': 1,
    'step_broad': 1,
    'include_video_in_broad_view': True,
    'include_audio_in_broad_view': True,
    'num_frames_narrow': 1,
    'image_size_narrow': 2,
    'step_narrow': 1,
    'output_dims': 4,
    'tsm_resnet_width_multiplier': 1,
    'num_spectrogram_bins': 2,
    'fft_step': 160,
    'dataset_shards': None,  # Filled in per-test with tfrecord fixtures.
    'input_video_sample_rate': 25.0,
    'input_audio_sample_rate': 16_000,
})
# The number of dimensions the backbone emits.
BACKBONE_EMBEDDING_DIMS = 2048
DEVICE_COUNT = 1
# Pin the (fake) device count before JAX initializes its backends.
chex.set_n_cpu_devices(DEVICE_COUNT)
class BraveTest(parameterized.TestCase):
  """End-to-end smoke tests for the Brave model and its data pipeline."""

  def test_apply_embeddings_and_loss(self):
    """Test that parameters and loss can be computed for all embeddings."""
    # Using real pmap would make the test _much_ slower.
    with chex.fake_pmap(), tempfile.TemporaryDirectory() as fixture_dir:
      self.assertEqual(jax.local_device_count(), DEVICE_COUNT)
      shards = fixtures.write_tf_record_dataset_fixture(fixture_dir)
      cfg = copy.copy(TEST_CONFIG)
      cfg.unlock()
      cfg.dataset_shards = shards
      cfg.lock()
      cfg = brave.BraveConfig(**cfg)
      # A key with the right shape (note this should really be broadcasted
      # for init)
      broadcasted_key = jaxline_utils.bcast_local_devices(jax.random.PRNGKey(0))
      batch = brave.get_empty_minibatch(cfg)
      broad_view = batch.views['broad']
      narrow_view = batch.views['narrow']
      # Leading axes are [device, batch]; remaining dims come from TEST_CONFIG.
      broad_view.video = np.random.uniform(size=[DEVICE_COUNT, 1, 2, 2, 2, 3])
      broad_view.audio = np.random.uniform(size=[DEVICE_COUNT, 1, 4, 2])
      narrow_view.video = np.random.uniform(size=[DEVICE_COUNT, 1, 1, 2, 2, 3])
      model = brave.get_model(cfg)
      params, state = jax.pmap(model.init_fn, axis_name='i')(broadcasted_key)
      embeddings = model.forward_fns

      def broad(params, state, view):
        return embeddings['broad_video'](params, state, None, view, False)

      def narrow(params, state, view):
        return embeddings['narrow_video'](params, state, None, view, False)

      broad = jax.pmap(broad)
      narrow = jax.pmap(narrow)
      # Both backbones should emit fixed-width embeddings.
      f_b, _ = broad(params, state, broad_view)
      self.assertEqual(f_b.shape, (DEVICE_COUNT, 1, BACKBONE_EMBEDDING_DIMS))
      f_n, _ = narrow(params, state, narrow_view)
      self.assertEqual(f_n.shape, (DEVICE_COUNT, 1, BACKBONE_EMBEDDING_DIMS))
      loss_fn = model.loss_fn
      optimizer = optax.sgd(learning_rate=1e-3)
      opt_state = optimizer.init(jax.random.PRNGKey(0))
      update_fn = jax.pmap(
          trainer.build_update_fn(optimizer, loss_fn),
          axis_name='i',
          donate_argnums=(1, 2, 3, 4))
      key = jax.random.split(jax.random.PRNGKey(0), DEVICE_COUNT)
      updates = update_fn(key, batch, params, state, opt_state)
      metrics = updates.scalars
      self.assertIn('loss', metrics)
      loss = metrics['loss']
      self.assertEqual(loss.shape, (DEVICE_COUNT,))
      self.assertFalse(np.isnan(loss[0]))
      # The loss should be approximately equal to 1.0, due to the random
      # initialization of the network. Since the test is small, there may
      # be a large variance, so we set the tolerance to be high.
      # Note also that we take the mean over the loss, even though there
      # is a pmean. This is due to fake_pmap not currently implementing
      # lax.pmean.
      self.assertAlmostEqual(1.0, np.mean(loss), delta=5.0)

  def test_table_reader(self):
    """Test that the train dataset builder yields well-formed minibatches."""
    with tempfile.TemporaryDirectory() as fixture_dir:
      shards = fixtures.write_tf_record_dataset_fixture(fixture_dir)
      cfg = copy.copy(TEST_CONFIG)
      cfg.unlock()
      cfg.dataset_shards = shards
      cfg.lock()
      cfg = brave.BraveConfig(**cfg)
      dataset = brave._train_dataset_builder(cfg)
      ds = dataset()
      ds = ds.batch(2)
      ds = tfds.as_numpy(ds)
      # Only the first batch is checked; that is enough for a smoke test.
      for batch in ds:
        self.assertIsInstance(batch, datasets.MiniBatch)
        self.assertIn('narrow', batch.views)
        self.assertIn('broad', batch.views)
        break

  def test_avoid_nan_in_loss(self):
    """Test that degenerate points do not result in NaN loss values."""
    x = np.array([[1, 2, 3]])
    y = np.array([[0, 0, 0]])
    loss = brave._regression_loss(x, y, 1e-6)
    np.testing.assert_allclose(0.5, loss, rtol=1e-5)
# Script entry point: run the absltest test runner.
if __name__ == '__main__':
  absltest.main()
| brave-main | brave/models/brave/brave_test.py |
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Config for training Brave experiment."""
import ml_collections
def get_experiment_config() -> ml_collections.ConfigDict:
  """Build the default configuration for training the BraVe model."""
  model_config = {
      'image_size_broad': 112,
      'num_frames_broad': 64,
      'step_broad': 4,
      'include_video_in_broad_view': False,
      'include_audio_in_broad_view': True,
      'image_size_narrow': 224,
      'num_frames_narrow': 16,
      'step_narrow': 2,
      'output_dims': 128,
      'tsm_resnet_width_multiplier': 1,
      'num_spectrogram_bins': 80,
      'fft_step': 160,
      'dataset_shards': [],  # Set this to train the model.
      'input_audio_sample_rate': 48_000,
      'input_video_sample_rate': 25.0,
  }
  optimizer_config = {
      'optimizer_name': 'lars',
      'weight_decay': 1e-2,
      'optimizer_kwargs': {
          'momentum': 0.9,
          'trust_coefficient': 0.001,
          'nesterov': True,
      },
      'scheduler_name': 'cosine_decay',
      'scheduler_kwargs': {
          'init_value': 0.0,
          'peak_value': 1.0,
          'warmup_steps': 5000,
          'decay_steps': 300_000,
          'end_value': 0.0,
      },
      # Predictor parameters train with a 10x larger learning rate.
      'scale_learning_rate_by_regex': [(r'.*predictor.*', 10.0)],
  }
  return ml_collections.ConfigDict({
      'global_batch_size': 512,
      'model_name': 'brave',
      'eval_modes': ['eval'],
      'model': model_config,
      'optimizer': optimizer_config,
  })
| brave-main | brave/models/brave/config.py |
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implement the Brave model."""
import copy
import functools
from typing import Callable, Dict, Optional, Sequence, Tuple
import chex
import haiku as hk
import jax
import jax.numpy as jnp
import numpy as np
import tensorflow as tf
from brave.datasets import augmentations
from brave.datasets import datasets
from brave.datasets import media_sequences
from brave.datasets import sampling
from brave.datasets import spectrograms
from brave.datasets import time_sampling
from brave.datasets import video_sampling
from brave.models import embedding_model
from brave.models.brave import modules
# Placeholder spectrogram length used when building zero minibatches for
# parameter initialization; per the note in get_empty_minibatch, the exact
# length does not affect the parameter block (a different length at apply
# time only triggers a re-jit).
FAKE_AUDIO_LENGTH = 100
# Embeds a view: (params, state, rng, view, is_training) -> (features, state).
EmbeddingFn = Callable[[hk.Params, hk.State, chex.PRNGKey, datasets.View, bool],
                       Tuple[chex.Array, chex.Array]]
# Projects/predicts from features: (params, state, rng, feats, is_training).
PredictorFn = Callable[[hk.Params, hk.State, chex.PRNGKey, chex.Array, bool],
                       Tuple[chex.Array, chex.Array]]
@chex.dataclass
class ParameterizedFns:
  """Haiku apply functions for BraVe's embeddings and cross-view predictors.

  The `b_*_to_n_*` / `n_*_to_b_*` entries predict one view's projection from
  the other view's backbone features (see _build_parameterized_fns).
  """
  # Backbone embeddings, one per (view, modality) pair.
  broad_video_embedding: EmbeddingFn
  broad_audio_embedding: EmbeddingFn
  narrow_video_embedding: EmbeddingFn
  # Cross-view projector/predictor functions.
  b_video_to_n_video: PredictorFn
  n_video_to_b_video: PredictorFn
  b_audio_to_n_video: PredictorFn
  n_video_to_b_audio: PredictorFn
@chex.dataclass
class BraveConfig:
  """Specific configuration for the BraVe model."""

  # Modalities included in the broad view.
  include_video_in_broad_view: bool
  include_audio_in_broad_view: bool
  # Narrow view sampling: number of frames and (square) frame size.
  num_frames_narrow: int
  image_size_narrow: int
  # Temporal sampling step for the broad view.
  step_broad: int
  # Broad view sampling: number of frames and (square) frame size.
  num_frames_broad: int
  image_size_broad: int
  # Temporal sampling step for the narrow view.
  step_narrow: int
  # Predictors and projectors use output_dims dimensions.
  output_dims: int
  # Width multiplier for the TSM-ResNet video backbone.
  tsm_resnet_width_multiplier: int
  # spectrogram config (when using audio)
  num_spectrogram_bins: int
  fft_step: int
  # Dataset
  dataset_shards: Sequence[str]
  input_video_sample_rate: int
  input_audio_sample_rate: int
def get_model(config: BraveConfig) -> embedding_model.MultimodalEmbeddingModel:
  """Construct a model implementing BraVe.

  Args:
    config: Configuration for BraVe.

  Returns:
    A `MultimodalEmbeddingModel` to train BraVe.
  """
  init_fn, fns = _build_parameterized_fns(config)
  forward = {
      'broad_video': fns.broad_video_embedding,
      'broad_audio': fns.broad_audio_embedding,
      'narrow_video': fns.narrow_video_embedding,
  }
  return embedding_model.MultimodalEmbeddingModel(
      init_fn=init_fn,
      forward_fns=forward,
      loss_fn=_build_loss_fn(config, fns),
      evaluate_fn=_build_eval_fn(forward),
      train_dataset_builder_fn=_train_dataset_builder(config),
  )
def get_empty_minibatch(config: BraveConfig) -> datasets.MiniBatch:
  """Get a zero-initialized minibatch for initialization and testing."""
  narrow_video = np.zeros([
      1, config.num_frames_narrow, config.image_size_narrow,
      config.image_size_narrow, 3
  ])
  broad_video = None
  if config.include_video_in_broad_view:
    broad_video = np.zeros([
        1, config.num_frames_broad, config.image_size_broad,
        config.image_size_broad, 3
    ])
  broad_audio = None
  if config.include_audio_in_broad_view:
    # Computing the actual size of this tensor is surprisingly difficult.
    # But in practice it doesn't matter: the parameter block will be the same
    # in this case, one simply has to re-jit.
    broad_audio = np.zeros([1, FAKE_AUDIO_LENGTH, config.num_spectrogram_bins])
  broad_view = datasets.View(video=broad_video, audio=broad_audio, labels=None)
  narrow_view = datasets.View(video=narrow_video, audio=None, labels=None)
  return datasets.MiniBatch(views={'broad': broad_view, 'narrow': narrow_view})
def _build_parameterized_fns(
    config: BraveConfig) -> Tuple[embedding_model.InitFn, ParameterizedFns]:
  """Initialize Brave trainable embedding functions and predictors.

  Args:
    config: Configuration for the brave model.

  Returns:
    All parameterized trainable functions used by the BraVe model.
  """
  output_dims = config.output_dims

  def shared_project(feats, is_training):
    # All four predictors build this module under the same Haiku name, so
    # they presumably share a single set of parameters in the parameter
    # tree — NOTE(review): relies on Haiku name-based sharing; confirm.
    net = modules.ProjectAndPredict(output_dims, name='shared_project')
    return net(feats, is_training)

  def broad_video_embedding(view, is_training):
    net = modules.VideoEmbedding(
        width_multiplier=config.tsm_resnet_width_multiplier,
        name='broad_video_embedding')
    return net(view, is_training)

  def broad_audio_embedding(view, is_training):
    net = modules.AudioEmbedding(name='broad_audio_embedding')
    return net(view, is_training)

  def narrow_video_embedding(view, is_training):
    net = modules.VideoEmbedding(
        width_multiplier=config.tsm_resnet_width_multiplier,
        name='narrow_video_embedding')
    return net(view, is_training)

  # Cross-view predictors: each projects one view's features towards the
  # other view's projection, all through the shared projector above.
  def b_video_to_n_video(f_b_1, is_training):
    return shared_project(f_b_1, is_training)

  def n_video_to_b_video(f_n, is_training):
    return shared_project(f_n, is_training)

  def b_audio_to_n_video(f_b_2, is_training):
    return shared_project(f_b_2, is_training)

  def n_video_to_b_audio(f_n, is_training):
    return shared_project(f_n, is_training)

  def init():
    # Trace every network enabled by the config so a single
    # hk.transform_with_state(init).init call creates the full
    # parameter/state tree.
    batch = get_empty_minibatch(config)
    broad, narrow = batch.views['broad'], batch.views['narrow']
    f_n = narrow_video_embedding(narrow, is_training=True)
    if config.include_video_in_broad_view:
      f_b_1 = broad_video_embedding(broad, is_training=True)
      b_video_to_n_video(f_b_1, is_training=True)
      n_video_to_b_video(f_n, is_training=True)
    if config.include_audio_in_broad_view:
      f_b_2 = broad_audio_embedding(broad, is_training=True)
      b_audio_to_n_video(f_b_2, is_training=True)
      n_video_to_b_audio(f_n, is_training=True)

  return hk.transform_with_state(init).init, ParameterizedFns(
      broad_video_embedding=hk.transform_with_state(
          broad_video_embedding).apply,
      broad_audio_embedding=hk.transform_with_state(
          broad_audio_embedding).apply,
      narrow_video_embedding=hk.transform_with_state(
          narrow_video_embedding).apply,
      b_video_to_n_video=hk.transform_with_state(b_video_to_n_video).apply,
      n_video_to_b_video=hk.transform_with_state(n_video_to_b_video).apply,
      b_audio_to_n_video=hk.transform_with_state(b_audio_to_n_video).apply,
      n_video_to_b_audio=hk.transform_with_state(n_video_to_b_audio).apply,
  )
def _build_loss_fn(
    config: BraveConfig,
    paremeterized_fns: ParameterizedFns) -> embedding_model.LossFn:
  """Construct the loss function for BraVe.

  Takes as input the predictors across views, the predictors take as input
  a view and output the predicted value of a predictor computed from another
  view.

  Args:
    config: The config for BraVe. At least one of
      `include_video_in_broad_view` / `include_audio_in_broad_view` must be
      set; otherwise `losses` stays empty and `jnp.stack` below raises.
    paremeterized_fns: The cross-view predictor functions.

  Returns:
    A function for computing the loss between the predictors with respect to
    the data contained within a minibatch.
  """
  broad_video_embedding = paremeterized_fns.broad_video_embedding
  broad_audio_embedding = paremeterized_fns.broad_audio_embedding
  narrow_video_embedding = paremeterized_fns.narrow_video_embedding
  b_video_to_n_video = paremeterized_fns.b_video_to_n_video
  n_video_to_b_video = paremeterized_fns.n_video_to_b_video
  b_audio_to_n_video = paremeterized_fns.b_audio_to_n_video
  n_video_to_b_audio = paremeterized_fns.n_video_to_b_audio

  def loss_fn(
      params: hk.Params,
      state: hk.State,
      rng: chex.PRNGKey,
      batch: datasets.MiniBatch,
  ) -> Tuple[chex.Array, Tuple[hk.State, embedding_model.Scalars]]:
    """Computes the symmetric cross-view regression losses for one batch."""
    metrics = {}
    losses = []

    broad, narrow = batch.views['broad'], batch.views['narrow']
    k1, k2, k3, k4, k5, k6, k7 = jax.random.split(rng, 7)

    # The narrow-view video embedding is always required.
    f_n, state = narrow_video_embedding(params, state, k1, narrow, True)

    if config.include_video_in_broad_view:
      f_b_1, state = broad_video_embedding(params, state, k2, broad, True)
      # Each projector returns (projection z, prediction h): the prediction
      # from one view regresses the stop-gradient projection of the other.
      (z_b_1, h_b_1), state = b_video_to_n_video(params, state, k4, f_b_1, True)
      (z_n_1, h_n_1), state = n_video_to_b_video(params, state, k5, f_n, True)
      chex.assert_rank([z_b_1, h_b_1, z_n_1, h_n_1], 2)

      loss_b_to_n_1 = _regression_loss(h_b_1, jax.lax.stop_gradient(z_n_1))
      loss_n_to_b_1 = _regression_loss(h_n_1, jax.lax.stop_gradient(z_b_1))
      # Bug fix: previously asserted `loss_b_to_n_1` twice, leaving
      # `loss_n_to_b_1` unchecked.
      chex.assert_rank([loss_b_to_n_1, loss_n_to_b_1], 0)

      metrics['loss_b_to_n_1'] = loss_b_to_n_1
      metrics['loss_n_to_b_1'] = loss_n_to_b_1
      losses.extend([loss_b_to_n_1, loss_n_to_b_1])

    if config.include_audio_in_broad_view:
      f_b_2, state = broad_audio_embedding(params, state, k3, broad, True)
      (z_b_2, h_b_2), state = b_audio_to_n_video(params, state, k6, f_b_2, True)
      (z_n_2, h_n_2), state = n_video_to_b_audio(params, state, k7, f_n, True)
      chex.assert_rank([z_b_2, h_b_2, z_n_2, h_n_2], 2)

      loss_b_to_n_2 = _regression_loss(h_b_2, jax.lax.stop_gradient(z_n_2))
      loss_n_to_b_2 = _regression_loss(h_n_2, jax.lax.stop_gradient(z_b_2))
      # Bug fix: previously asserted `loss_b_to_n_2` twice, leaving
      # `loss_n_to_b_2` unchecked.
      chex.assert_rank([loss_b_to_n_2, loss_n_to_b_2], 0)

      metrics['loss_b_to_n_2'] = loss_b_to_n_2
      metrics['loss_n_to_b_2'] = loss_n_to_b_2
      losses.extend([loss_b_to_n_2, loss_n_to_b_2])

    # Average all enabled loss terms into a single scalar.
    loss = jnp.stack(losses).mean()
    chex.assert_rank(loss, 0)
    metrics['loss'] = loss

    return loss, (state, metrics)

  return loss_fn
def _regression_loss(x: chex.Array,
                     y: chex.Array,
                     epsilon: float = 1e-5) -> chex.Array:
  """Cosine-similarity based loss."""
  # Normalize each row to unit length (guarding tiny norms via _safe_norm),
  # then penalize half the squared distance between the unit vectors; for
  # unit vectors this equals 1 - cosine similarity.
  row_norms = jnp.vectorize(_safe_norm, signature='(k)->()', excluded={1})
  unit_x = x / row_norms(x, epsilon)[..., jnp.newaxis]
  unit_y = y / row_norms(y, epsilon)[..., jnp.newaxis]
  return jnp.mean(0.5 * jnp.sum(jnp.square(unit_x - unit_y), axis=-1))
def _safe_norm(x: chex.Array, min_norm: float) -> chex.Array:
"""Compute normalization, with correct gradients."""
norm = jnp.linalg.norm(x)
x = jnp.where(norm <= min_norm, jnp.ones_like(x), x)
return jnp.where(norm <= min_norm, min_norm, jnp.linalg.norm(x))
def _build_eval_fn(
forward_fns: Dict[str, embedding_model.ForwardFn]
) -> embedding_model.EvaluateFn:
"""Construct a function to use for evaluating BraVe."""
del forward_fns
def eval_fn(global_step: int, mode: str, params: hk.Params,
state: hk.State) -> Dict[str, chex.Array]:
del mode
del params
del state
# No online evaluation enabled in this release.
return {'multiple_of_saving_period': global_step // (50 * 1000)}
return eval_fn
def _train_dataset_builder(
    config: BraveConfig) -> embedding_model.DatasetBuilderFn:
  """Construct the train dataset for BraVe."""
  # Defer construction: the dataset (and config.dataset_shards lookup) is
  # only built when the returned thunk is invoked.
  return lambda: _train_dataset(config, config.dataset_shards, shuffle=True)
def _train_dataset(config: BraveConfig,
                   shards: Sequence[str],
                   *,
                   shuffle: bool = False) -> tf.data.Dataset:
  """Construct the train dataset for BraVe."""
  # Audio features are only decoded when the broad view consumes them.
  wanted_features = [media_sequences.FeatureKind.VIDEO]
  if config.include_audio_in_broad_view:
    wanted_features.append(media_sequences.FeatureKind.AUDIO)

  sampler = functools.partial(_brave_random_view_sampler, config)
  decoder = functools.partial(_brave_view_decoder, config)
  dataset = datasets.multi_view_dataset(
      shards=shards,
      features=wanted_features,
      view_sampler=sampler,
      view_decoder=decoder,
      shuffle=shuffle)

  # Augmentations may complete out of order; determinism is not needed here.
  return dataset.map(
      _transform_views,
      num_parallel_calls=tf.data.AUTOTUNE,
      deterministic=False)
def _transform_views(batch: datasets.MiniBatch) -> datasets.MiniBatch:
  """Apply data augmentations to the views in the batch."""

  def augment(view):
    # Shared pipeline: normalize, blur (kernel scaled to frame width),
    # random flip, then color augmentation / drop.
    width = view.video.shape[-2]
    view = augmentations.normalize_video(view)
    view = augmentations.random_gaussian_blur_video(
        view, kernel_size=width // 10, sigma_range=(0.1, 2.0))
    view = augmentations.random_horizontal_flip_video(view)
    return augmentations.random_color_augment_video(
        view, prob_color_augment=0.8, prob_color_drop=0.2)

  narrow = augment(batch.views['narrow'])

  broad = batch.views['broad']
  if broad.video is not None:
    # The broad view additionally gets a random convolution.
    broad = augmentations.random_convolve_video(view=augment(broad))

  result = copy.copy(batch)
  result.views = dict(broad=broad, narrow=narrow)
  return result
def _brave_random_view_sampler(
    config: BraveConfig, sequence: media_sequences.EncodedSequence
) -> Dict[str, media_sequences.EncodedSequence]:
  """Sample the data for BraVe.

  Samples a narrow view uniformly at random, then derives a broad view that
  starts at the same frame, extending the sequence if it is too short.

  Args:
    config: The BraVe config; frame counts, steps and sample rates are read
      from it.
    sequence: The encoded source sequence to sample from.

  Returns:
    A dict with the 'broad' and 'narrow' encoded sub-sequences.
  """
  # Audio length is fixed to span the full broad view's duration.
  num_audio_samples = _num_audio_samples(
      config.num_frames_broad * config.step_broad,
      config.input_video_sample_rate, config.input_audio_sample_rate)
  # A strided sample of N frames with step S spans (N - 1) * S + 1 frames.
  frames_required_narrow = (config.num_frames_narrow -
                            1) * config.step_narrow + 1
  narrow_indices = sampling.random_sample(
      start_index=0,
      end_index=tf.math.maximum(
          tf.shape(sequence.jpeg_encoded_images)[0], frames_required_narrow),
      sample_length=config.num_frames_narrow,
      step=config.step_narrow)
  # Extend the sequence so that there are enough frames
  # for the broad view.
  frames_required_broad = (config.num_frames_broad - 1) * config.step_broad + 1
  # Note: We align the narrow view to the start of the broad view.
  # For some configurations, it is better to randomly sample the start of the
  # narrow (for example when the broad view is a video).
  broad_indices = sampling.Indices(
      start_index=narrow_indices.start_index,
      end_index=narrow_indices.start_index + frames_required_broad,
      step=config.step_broad)
  # Technically an over-estimate, but ensures there are enough frames.
  min_frames_required = narrow_indices.start_index + (
      config.num_frames_broad * config.step_broad)
  sequence = media_sequences.extend_sequence(sequence, min_frames_required)
  narrow = time_sampling.get_subsequence_by_video_indices(
      sequence, narrow_indices)
  # The broad view's audio length is overridden so it always matches the
  # duration computed above, independent of the frame sampling.
  broad = time_sampling.get_subsequence_by_video_indices(
      sequence, broad_indices, override_num_audio_samples=num_audio_samples)
  return {
      'broad': broad,
      'narrow': narrow,
  }
def _brave_view_decoder(
    config: BraveConfig, sequences: Dict[str, media_sequences.EncodedSequence]
) -> Dict[str, datasets.View]:
  """Decode sequences to return the views for BraVe."""
  # Labels are never attached during self-supervised pretraining.
  return {
      name: datasets.View(
          video=_get_video_for_view(config, name, sequence),
          audio=_get_audio_for_view(config, name, sequence),
          labels=None) for name, sequence in sequences.items()
  }
def _get_video_for_view(
    config: BraveConfig, view_name: str,
    sequence: media_sequences.EncodedSequence) -> Optional[tf.Tensor]:
  """Sample and decode video."""
  # Narrow views always carry video at their own resolution.
  if view_name != 'broad':
    return _sample_video(config, sequence, config.image_size_narrow)
  # Broad views only carry video when configured to.
  if not config.include_video_in_broad_view:
    return None
  return _sample_video(config, sequence, config.image_size_broad)
def _get_audio_for_view(
    config: BraveConfig, view_name: str,
    sequence: media_sequences.EncodedSequence) -> Optional[tf.Tensor]:
  """Get the audio field for the view, if needed."""
  # Only the broad view may carry audio, and only when configured to.
  if view_name != 'broad' or not config.include_audio_in_broad_view:
    return None
  return spectrograms.pcm_to_log_mel_spectrogram(
      sequence.audio,
      input_sample_rate=config.input_audio_sample_rate,
      num_spectrogram_bins=config.num_spectrogram_bins,
      fft_step=config.fft_step)
def _sample_video(config: BraveConfig,
                  sequence: media_sequences.EncodedSequence,
                  image_size: int) -> tf.Tensor:
  """Randomly crop and decode videos to a given square image size."""
  del config  # Unused: crop parameters below are fixed.
  frames = sequence.jpeg_encoded_images
  # Reading the JPEG header is enough to get the dimensions; no full decode
  # happens here.
  frame_shape = tf.image.extract_jpeg_shape(frames[0])
  window = video_sampling.random_sample_crop_window(
      frame_shape,
      min_area=0.3,
      max_area=1.0,
      min_aspect_ratio=0.5,
      max_aspect_ratio=2.0)
  # The same crop window is applied to every frame of the clip.
  return video_sampling.decode_crop_resize_images(
      frames, window, image_size=(image_size, image_size))
def _num_audio_samples(num_video_frames: int, video_sample_rate: int,
audio_sample_rate: int) -> int:
return int((num_video_frames / video_sample_rate) * audio_sample_rate)
| brave-main | brave/models/brave/brave.py |
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Temporal Shift Module w/ ResNet-50 and ResNet-101.
Based on:
TSM: Temporal Shift Module for Efficient Video Understanding
Ji Lin, Chuang Gan, Song Han
https://arxiv.org/pdf/1811.08383.pdf.
"""
from typing import Callable, Optional, Tuple
from absl import logging
import chex
import haiku as hk
import jax
import jax.numpy as jnp
NormalizeFn = Callable[..., chex.Array]
class TSMResNetBlock(hk.Module):
  """A ResNet subblock with Temporal Channel Shifting.

  Combines a typical ResNetV2 block implementation
  (see https://arxiv.org/abs/1512.03385) with a pre-convolution Temporal
  Shift Module (see https://arxiv.org/pdf/1811.08383.pdf) in the residual.
  """

  def __init__(self,
               output_channels: int,
               stride: int,
               use_projection: bool,
               tsm_mode: str,
               normalize_fn: Optional[NormalizeFn] = None,
               channel_shift_fraction: float = 0.125,
               num_frames: int = 8,
               name: str = 'TSMResNetBlock'):
    """Initializes the TSMResNetBlock module.

    Args:
      output_channels: Number of output channels.
      stride: Stride used in convolutions.
      use_projection: Whether to use a projection for the shortcut.
      tsm_mode: Mode for TSM ('gpu' or 'tpu' or 'deflated_0.x').
      normalize_fn: Function used for normalization.
      channel_shift_fraction: The fraction of temporally shifted channels. If
        `channel_shift_fraction` is 0, the block is the same as a normal ResNet
        block.
      num_frames: Size of frame dimension in a single batch example
      name: The name of the module.
    """
    super().__init__(name=name)
    self._output_channels = output_channels
    # Standard ResNet bottleneck: inner convolutions run at a quarter of the
    # output width.
    self._bottleneck_channels = output_channels // 4
    self._stride = stride
    self._use_projection = use_projection
    self._normalize_fn = normalize_fn
    self._tsm_mode = tsm_mode
    self._channel_shift_fraction = channel_shift_fraction
    self._num_frames = num_frames

  def __call__(self,
               inputs: chex.Array,
               is_training: bool = True) -> jnp.ndarray:
    """Connects the ResNetBlock module into the graph.

    Args:
      inputs: A 4-D float array of shape `[B, H, W, C]`.
      is_training: Whether to use training mode.

    Returns:
      A 4-D float array of shape
      `[B * num_frames, new_h, new_w, output_channels]`.
    """
    # ResNet V2 uses pre-activation, where the batch norm and relu are before
    # convolutions, rather than after as in ResNet V1.
    preact = inputs
    if self._normalize_fn is not None:
      preact = self._normalize_fn(preact, is_training=is_training)
    preact = jax.nn.relu(preact)
    if self._use_projection:
      # 1x1 projection so the shortcut matches the residual's channel count
      # (and spatial stride).
      shortcut = hk.Conv2D(
          output_channels=self._output_channels,
          kernel_shape=1,
          stride=self._stride,
          with_bias=False,
          padding='SAME',
          name='shortcut_conv')(
              preact)
    else:
      shortcut = inputs
    # Eventually applies Temporal Shift Module. The shift happens before the
    # bottleneck convolutions, on the pre-activated features.
    if self._channel_shift_fraction != 0:
      preact = apply_temporal_shift(
          preact,
          tsm_mode=self._tsm_mode,
          num_frames=self._num_frames,
          channel_shift_fraction=self._channel_shift_fraction)
    # First convolution (1x1, reduce to bottleneck width).
    residual = hk.Conv2D(
        self._bottleneck_channels,
        kernel_shape=1,
        stride=1,
        with_bias=False,
        padding='SAME',
        name='conv_0')(
            preact)
    # Second convolution (3x3, carries the spatial stride).
    if self._normalize_fn is not None:
      residual = self._normalize_fn(residual, is_training=is_training)
    residual = jax.nn.relu(residual)
    residual = hk.Conv2D(
        output_channels=self._bottleneck_channels,
        kernel_shape=3,
        stride=self._stride,
        with_bias=False,
        padding='SAME',
        name='conv_1')(
            residual)
    # Third convolution (1x1, expand back to output width).
    if self._normalize_fn is not None:
      residual = self._normalize_fn(residual, is_training=is_training)
    residual = jax.nn.relu(residual)
    residual = hk.Conv2D(
        output_channels=self._output_channels,
        kernel_shape=1,
        stride=1,
        with_bias=False,
        padding='SAME',
        name='conv_2')(
            residual)
    # NOTE: we do not use block multiplier.
    output = shortcut + residual
    return output
class TSMResNetUnit(hk.Module):
  """Block group for TSM ResNet."""

  def __init__(self,
               output_channels: int,
               num_blocks: int,
               stride: int,
               tsm_mode: str,
               num_frames: int,
               normalize_fn: Optional[NormalizeFn] = None,
               channel_shift_fraction: float = 0.125,
               name: str = 'tsm_resnet_unit'):
    """Creates a TSMResNet Unit.

    Args:
      output_channels: Number of output channels.
      num_blocks: Number of ResNet blocks in the unit.
      stride: Stride of the unit.
      tsm_mode: Which temporal shift module to use.
      num_frames: Size of frame dimension in a single batch example.
      normalize_fn: Function used for normalization.
      channel_shift_fraction: The fraction of temporally shifted channels. If
        `channel_shift_fraction` is 0, the block is the same as a normal ResNet
        block.
      name: The name of the module.
    """
    super().__init__(name=name)
    self._output_channels = output_channels
    self._num_blocks = num_blocks
    self._normalize_fn = normalize_fn
    self._stride = stride
    self._tsm_mode = tsm_mode
    self._channel_shift_fraction = channel_shift_fraction
    self._num_frames = num_frames

  def __call__(self, inputs: chex.Array, is_training: bool) -> jnp.ndarray:
    """Connects the module to inputs.

    Args:
      inputs: A 4-D float array of shape `[B * num_frames, H, W, C]`.
      is_training: Whether to use training mode.

    Returns:
      A 4-D float array of shape
      `[B * num_frames, H // stride, W // stride, output_channels]`.
    """
    net = inputs
    for idx_block in range(self._num_blocks):
      # Only the first block in the unit downsamples (stride) and projects
      # the shortcut; the remaining blocks are stride-1 identity shortcuts.
      net = TSMResNetBlock(
          self._output_channels,
          stride=self._stride if idx_block == 0 else 1,
          use_projection=idx_block == 0,
          normalize_fn=self._normalize_fn,
          tsm_mode=self._tsm_mode,
          channel_shift_fraction=self._channel_shift_fraction,
          num_frames=self._num_frames,
          name=f'block_{idx_block}')(
              net, is_training=is_training)
    return net
class TSMResNetV2(hk.Module):
  """TSM based on ResNet V2 as described in https://arxiv.org/abs/1603.05027."""

  # Endpoints of the model in order.
  VALID_ENDPOINTS = (
      'tsm_resnet_stem',
      'tsm_resnet_unit_0',
      'tsm_resnet_unit_1',
      'tsm_resnet_unit_2',
      'tsm_resnet_unit_3',
      'last_conv',
      'Embeddings',
  )

  def __init__(self,
               normalize_fn: Optional[NormalizeFn] = None,
               depth: int = 50,
               num_frames: int = 16,
               channel_shift_fraction: float = 0.125,
               width_mult: int = 1,
               name: str = 'TSMResNetV2'):
    """Constructs a ResNet model.

    Args:
      normalize_fn: Function used for normalization.
      depth: Depth of the desired ResNet.
      num_frames: Number of frames (used in TPU mode).
      channel_shift_fraction: Fraction of channels that are temporally shifted,
        if `channel_shift_fraction` is 0, a regular ResNet is returned.
      width_mult: Whether or not to use a width multiplier.
      name: The name of the module.

    Raises:
      ValueError: If `channel_shift_fraction` or `depth` has invalid value.
    """
    super().__init__(name=name)
    if not 0. <= channel_shift_fraction <= 1.0:
      raise ValueError(f'channel_shift_fraction ({channel_shift_fraction})'
                       ' has to be in [0, 1].')
    self._num_frames = num_frames
    # Per-unit output channels and strides of the four residual stages.
    self._channels = (256, 512, 1024, 2048)
    self._strides = (1, 2, 2, 2)
    # Blocks per stage for the supported ResNet depths.
    num_blocks = {
        50: (3, 4, 6, 3),
        101: (3, 4, 23, 3),
        152: (3, 8, 36, 3),
        200: (3, 24, 36, 3),
    }
    if depth not in num_blocks:
      raise ValueError(
          f'`depth` should be in {list(num_blocks.keys())} ({depth} given).')
    self._num_blocks = num_blocks[depth]
    self._width_mult = width_mult
    self._channel_shift_fraction = channel_shift_fraction
    self._normalize_fn = normalize_fn

  def __call__(self,
               inputs: chex.Array,
               is_training: bool = True,
               final_endpoint: str = 'Embeddings',
               is_deflated: bool = False,
               alpha_deflation: float = 0.3) -> jnp.ndarray:
    """Connects the TSM ResNetV2 module into the graph.

    Args:
      inputs: The input may be in one of two shapes; if the shape is `[B, T, H,
        W, C]`, this module assumes the backend is a GPU (setting
        `tsm_mode='gpu'`) and `T` is treated the time dimension, with `B` being
        the batch dimension. This mode cannot be used when `is_deflated` is
        `true`. In this mode, the num_frames parameter passed to the constructor
        is ignored. If the shape is `[B, H, W, C]`, then the batch dimension is
        assumed to be of the form [B*T, H, W, C], where `T` is the number of
        frames in each video. This value may be set by passing `num_frames=n` to
        the constructor. The default value is `n=16` (beware this default is not
        the same as the default for the `TSMResNetBlock`, which has a default of
        8 frames). In this case, the module assumes it is being run on a TPU,
        and emits instructions that are more efficient for for that case,
        using`tsm_mode`='tpu'` for the downstream blocks.
      is_training: Whether to use training mode.
      final_endpoint: Up to which endpoint to run / return.
      is_deflated: Whether or not to use the deflated version of the network.
      alpha_deflation: Deflation parameter to use for dealing with the padding
        effect.

    Returns:
      Network output at location `final_endpoint`. A float array which shape
      depends on `final_endpoint`.

    Raises:
      ValueError: If `final_endpoint` is not recognized.
    """
    # Prepare inputs for TSM.
    if is_deflated:
      if len(inputs.shape) != 4:
        raise ValueError(
            'In deflated mode inputs should be given as [B, H, W, 3]')
      logging.warning(
          'Deflation is an experimental feature and the API might change.')
      tsm_mode = f'deflated_{alpha_deflation}'
      num_frames = 1
    else:
      inputs, tsm_mode, num_frames = prepare_inputs(inputs)
      # In TPU mode prepare_inputs returns None; fall back to the
      # constructor's frame count.
      num_frames = num_frames or self._num_frames

    self._final_endpoint = final_endpoint
    if self._final_endpoint not in self.VALID_ENDPOINTS:
      raise ValueError(f'Unknown final endpoint {self._final_endpoint}')

    # Stem convolution.
    end_point = 'tsm_resnet_stem'
    net = hk.Conv2D(
        output_channels=64 * self._width_mult,
        kernel_shape=7,
        stride=2,
        with_bias=False,
        name=end_point,
        padding='SAME')(
            inputs)
    net = hk.MaxPool(
        window_shape=(1, 3, 3, 1), strides=(1, 2, 2, 1), padding='SAME')(
            net)
    if self._final_endpoint == end_point:
      net = prepare_outputs(net, tsm_mode, num_frames, reduce_mean=False)
      return net

    # Residual block.
    for unit_id, (channels, num_blocks, stride) in enumerate(
        zip(self._channels, self._num_blocks, self._strides)):
      end_point = f'tsm_resnet_unit_{unit_id}'
      net = TSMResNetUnit(
          output_channels=channels * self._width_mult,
          num_blocks=num_blocks,
          stride=stride,
          normalize_fn=self._normalize_fn,
          channel_shift_fraction=self._channel_shift_fraction,
          num_frames=num_frames,
          tsm_mode=tsm_mode,
          name=end_point)(
              net, is_training=is_training)
      if self._final_endpoint == end_point:
        net = prepare_outputs(net, tsm_mode, num_frames, reduce_mean=False)
        return net

    # Final pre-activation (ResNet V2 has no norm/relu after the last block
    # otherwise).
    if self._normalize_fn is not None:
      net = self._normalize_fn(net, is_training=is_training)
    net = jax.nn.relu(net)

    end_point = 'last_conv'
    if self._final_endpoint == end_point:
      net = prepare_outputs(net, tsm_mode, num_frames, reduce_mean=False)
      return net
    # Global spatial average pooling.
    net = jnp.mean(net, axis=(1, 2))
    # Prepare embedding outputs for TSM (temporal average of features).
    net = prepare_outputs(net, tsm_mode, num_frames, reduce_mean=True)
    assert self._final_endpoint == 'Embeddings'
    return net
def prepare_inputs(inputs: 'chex.Array') -> 'Tuple[jnp.ndarray, str, int]':
  """Deduces input mode for TSM."""
  # Deduce if we run on TPU based on input shape.
  if inputs.ndim == 5:
    # [B, T, H, W, 3]: GPU layout; fold time into the batch dimension.
    frames = inputs.shape[1]
    flattened = jnp.reshape(inputs, (-1,) + tuple(inputs.shape[2:]))
    return flattened, 'gpu', frames
  # [T * B, H, W, 3]: TPU layout; time is already merged into the batch, and
  # the frame count must be supplied by the caller (hence None).
  return inputs, 'tpu', None
def prepare_outputs(outputs: 'chex.Array',
                    tsm_mode: str,
                    num_frames: int,
                    reduce_mean: bool = True) -> 'jnp.ndarray':
  """Processes output of TSM to undo the merging of batch and time."""
  # Feature dims: everything after the merged batch/time axis (batch and
  # time are fused in the leading dimension for TSM).
  feature_shape = list(outputs.shape[1:])
  n_feature_dims = len(feature_shape)
  if tsm_mode == 'tpu':
    # [num_frames * B, ...] -> [num_frames, B, ...]
    outputs = jnp.reshape(outputs, [num_frames, -1] + feature_shape)
    if reduce_mean:
      # Average over time (axis 0) and every spatial feature axis, keeping
      # only batch and channels.
      outputs = jnp.mean(
          outputs, axis=[0] + list(range(2, n_feature_dims + 1)))
    else:
      # Swap to batch-major: [num_frames, B, ...] -> [B, num_frames, ...].
      outputs = jnp.transpose(
          outputs, axes=[1, 0] + list(range(2, n_feature_dims + 2)))
  elif tsm_mode == 'gpu':
    # [B * num_frames, ...] -> [B, num_frames, ...]
    outputs = jnp.reshape(outputs, [-1, num_frames] + feature_shape)
    if reduce_mean:
      outputs = jnp.mean(
          outputs, axis=[1] + list(range(2, n_feature_dims + 1)))
  elif tsm_mode.startswith('deflated'):
    # Deflated outputs already have the desired layout.
    pass
  else:
    raise ValueError('`tsm_mode` should be \'tpu\' or \'gpu\' or '
                     f'\'deflated_0.x\' ({tsm_mode} given)')
  return outputs
def apply_temporal_shift(x: chex.Array,
                         tsm_mode: str,
                         num_frames: int,
                         channel_shift_fraction: float = 0.125) -> jnp.ndarray:
  """Performs a temporal shift: https://arxiv.org/abs/1811.08383 with mode."""
  if tsm_mode == 'tpu':
    return temporal_shift_tpu(x, num_frames, channel_shift_fraction)
  if tsm_mode == 'gpu':
    return temporal_shift_gpu(x, num_frames, channel_shift_fraction)
  if tsm_mode.startswith('deflated'):
    # The mode string encodes the deflation strength, e.g. 'deflated_0.3'.
    alpha = float(tsm_mode.split('_')[1])
    return temporal_shift_image_mode(x, channel_shift_fraction, alpha)
  raise ValueError('`tsm_mode` should be \'tpu\' or \'gpu\' or '
                   f'\'deflated_0.x\' ({tsm_mode} given)')
def temporal_shift_image_mode(x, channel_shift_fraction=0.125, alpha=0.3):
  """Temporal shift applied on single image (to emulate a fixed video)."""
  # Input is (B, H, W, C): a single frame standing in for a whole clip.
  num_channels = x.shape[-1]
  num_shifted = int(num_channels * channel_shift_fraction)
  # With one frame the shifted channels would normally come from zero
  # padding; alpha scales the frame's own channels to emulate that effect.
  from_future = alpha * x[..., -num_shifted:]
  from_past = alpha * x[..., :num_shifted]
  static = x[..., num_shifted:-num_shifted]
  return jnp.concatenate([from_future, static, from_past], axis=3)
def temporal_shift_gpu(x: 'chex.Array',
                       num_frames: int,
                       channel_shift_fraction: float = 0.125) -> 'jnp.ndarray':
  """Performs a temporal shift: https://arxiv.org/abs/1811.08383."""
  # Input is (B * T, H, W, C); recover an explicit time axis first.
  flat_shape = tuple(x.shape)
  videos = jnp.reshape(x, (-1, num_frames) + flat_shape[1:])
  num_channels = flat_shape[-1]
  num_shifted = int(num_channels * channel_shift_fraction)
  # Backward shift: frame t receives the tail channels of frame t + 1; the
  # final frame receives zeros from the time padding.
  backward = jnp.pad(videos[:, 1:, :, :, -num_shifted:],
                     ((0, 0), (0, 1), (0, 0), (0, 0), (0, 0)))
  # Forward shift: frame t receives the head channels of frame t - 1; the
  # first frame receives zeros from the time padding.
  forward = jnp.pad(videos[:, :-1, :, :, :num_shifted],
                    ((0, 0), (1, 0), (0, 0), (0, 0), (0, 0)))
  # The middle channels stay in place.
  middle = videos[:, :, :, :, num_shifted:-num_shifted]
  shifted = jnp.concatenate([backward, middle, forward], axis=4)
  # Merge batch and time back together.
  return jnp.reshape(shifted, (-1,) + flat_shape[1:])
def temporal_shift_tpu(x: chex.Array,
                       num_frames: int,
                       channel_shift_fraction: float = 0.125) -> jnp.ndarray:
  """Performs a temporal shift: https://arxiv.org/abs/1811.08383.

    TPU optimized version of TSM.

  Args:
    x: Input expected to be [T * B, H, W, C] (where the batch has been reshaped
      from a time major version of the input).
    num_frames: number of frames T per video.
    channel_shift_fraction: fraction of the channel to shift forward and
      backward.

  Returns:
    The temporal shifted version of x.
  """
  # B, T, H, W, C = batch_size, num_frames, im_height, im_width, channels
  # Input is (T * B, H, W, C)
  original_dtype = x.dtype
  original_shape = list(x.shape)
  # Time-major layout: moving by `batch_size` along axis 0 moves by one frame.
  batch_size = int(original_shape[0] / num_frames)
  n_channels = int(original_shape[-1])
  n_shift = int(n_channels * channel_shift_fraction)
  # Cast to bfloat16 (the original dtype is restored at the end); presumably
  # for TPU efficiency — confirm before changing.
  x = x.astype(jnp.bfloat16)
  # For the following, assume that x has 3 channels [x1, x2, x3] and n_shift=1.
  # Shift backward, we first pad by zeros [x1, x2, x3, 0, 0].
  orig_shp = list(x.shape)
  # NOTE: jax.lax.pad configs are (low, high, interior) triples per dimension.
  shifted_backward_padding = ((0, batch_size, 0), (0, 0, 0), (0, 0, 0),
                              (0, n_channels - n_shift, 0))
  x_backward_padding = jax.lax.pad(
      x,
      padding_value=jnp.bfloat16(0.),
      padding_config=shifted_backward_padding)
  # The following shift gets to [x3^+1, 0, 0] (where +1 means from the future).
  shifted_backward = jax.lax.slice(x_backward_padding,
                                   (batch_size, 0, 0, n_channels - n_shift),
                                   (orig_shp[0] + batch_size, orig_shp[1],
                                    orig_shp[2], 2 * n_channels - n_shift))
  # Shift forward, we first pad by zeros [0, 0, x1, x2, x3].
  shifted_forward_padding = ((batch_size, 0, 0), (0, 0, 0), (0, 0, 0),
                             (n_channels - n_shift, 0, 0))
  x_forward_padding = jax.lax.pad(
      x, padding_value=jnp.bfloat16(0.), padding_config=shifted_forward_padding)
  # The following shift gets to [0, 0, x1^-1] (where -1 means from the past).
  shifted_forward = jax.lax.slice(
      x_forward_padding, (0, 0, 0, 0),
      (orig_shp[0], orig_shp[1], orig_shp[2], n_channels))
  # No shift is in the middle, this gets [0, x2, 0].
  mask_noshift = (jnp.reshape((jnp.arange(n_channels) >= n_shift) &
                              (jnp.arange(n_channels) < n_channels - n_shift),
                              (1, 1, 1, -1))).astype(jnp.bfloat16)
  no_shift = mask_noshift * x
  # By summing everything together, we end up with [x3^+1, x2, x1^-1].
  # Note: channels have been reordered but that doesn't matter for the model.
  shifted_x = shifted_backward + shifted_forward + no_shift
  return shifted_x.astype(original_dtype)
| brave-main | brave/models/brave/tsm_resnet.py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.