text (string, 5-22M chars) | id (string, 12-177 chars) | metadata (dict) | __index_level_0__ (int64, 0-1.37k)
---|---|---|---|
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="perspective-more" format="2">
<advance width="1200"/>
<unicode hex="F0D24"/>
<note>
perspective-more
</note>
<outline>
<contour>
<point x="336" y="680" type="line"/>
<point x="864" y="680" type="line"/>
<point x="1019" y="214" type="line"/>
<point x="181" y="214" type="line"/>
</contour>
<contour>
<point x="891" y="1146" type="line"/>
<point x="891" y="1320" type="line"/>
<point x="657" y="1086" type="line"/>
<point x="891" y="854" type="line"/>
<point x="891" y="1029" type="line"/>
<point x="1123" y="1029" type="line"/>
<point x="1123" y="1146" type="line"/>
</contour>
<contour>
<point x="543" y="1086" type="line"/>
<point x="309" y="1320" type="line"/>
<point x="309" y="1146" type="line"/>
<point x="77" y="1146" type="line"/>
<point x="77" y="1029" type="line"/>
<point x="309" y="1029" type="line"/>
<point x="309" y="854" type="line"/>
</contour>
<contour>
<point x="1180" y="100" type="line"/>
<point x="949" y="797" type="line"/>
<point x="251" y="797" type="line"/>
<point x="20" y="100" type="line"/>
</contour>
</outline>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/perspective-more.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/perspective-more.glif",
"repo_id": "cascadia-code",
"token_count": 612
}
| 628 |
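Each record's `text` field is a UFO GLIF (format 2) XML document. Below is a minimal sketch of reading one record with Python's standard library; the variable `glif_xml` and the trimmed glyph copy are illustrative only, and fontTools' `glifLib` would be the fuller choice for real UFO work.

```python
import xml.etree.ElementTree as ET

# Trimmed copy of the perspective-more record above; any record's
# `text` field could be substituted here.
glif_xml = """<?xml version='1.0' encoding='UTF-8'?>
<glyph name="perspective-more" format="2">
  <advance width="1200"/>
  <unicode hex="F0D24"/>
  <outline>
    <contour>
      <point x="336" y="680" type="line"/>
      <point x="864" y="680" type="line"/>
      <point x="1019" y="214" type="line"/>
      <point x="181" y="214" type="line"/>
    </contour>
  </outline>
</glyph>"""

root = ET.fromstring(glif_xml)
name = root.get("name")
advance = int(root.find("advance").get("width"))
codepoint = int(root.find("unicode").get("hex"), 16)
xs = [int(p.get("x")) for p in root.iter("point")]
ys = [int(p.get("y")) for p in root.iter("point")]
# Glyph name, advance width, codepoint, and point bounding box.
print(name, advance, hex(codepoint), (min(xs), min(ys), max(xs), max(ys)))
# -> perspective-more 1200 0xf0d24 (181, 214, 1019, 680)
```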
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="pin-outline" format="2">
<advance width="1200"/>
<unicode hex="F0931"/>
<note>
pin-outline
</note>
<outline>
<contour>
<point x="1027" y="567" type="line"/>
<point x="883" y="710" type="line"/>
<point x="883" y="1280" type="line"/>
<point x="957" y="1280" type="line"/>
<point x="957" y="1420" type="line"/>
<point x="243" y="1420" type="line"/>
<point x="243" y="1280" type="line"/>
<point x="317" y="1280" type="line"/>
<point x="317" y="710" type="line"/>
<point x="173" y="567" type="line"/>
<point x="173" y="427" type="line"/>
<point x="543" y="427" type="line"/>
<point x="543" y="0" type="line"/>
<point x="657" y="0" type="line"/>
<point x="657" y="427" type="line"/>
<point x="1027" y="427" type="line"/>
</contour>
<contour>
<point x="827" y="567" type="line"/>
<point x="373" y="567" type="line"/>
<point x="457" y="653" type="line"/>
<point x="457" y="1280" type="line"/>
<point x="743" y="1280" type="line"/>
<point x="743" y="653" type="line"/>
</contour>
</outline>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/pin-outline.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/pin-outline.glif",
"repo_id": "cascadia-code",
"token_count": 573
}
| 629 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="relation-one-to-one-or-many" format="2">
<advance width="1200"/>
<unicode hex="F14A4"/>
<note>
relation-one-to-one-or-many
</note>
<outline>
<contour>
<point x="1123" y="653" type="line"/>
<point x="1006" y="536" type="line"/>
<point x="1006" y="653" type="line"/>
<point x="891" y="653" type="line"/>
<point x="891" y="536" type="line"/>
<point x="657" y="536" type="line"/>
<point x="657" y="1001" type="line"/>
<point x="309" y="1001" type="line"/>
<point x="309" y="1116" type="line"/>
<point x="194" y="1116" type="line"/>
<point x="194" y="1001" type="line"/>
<point x="20" y="1001" type="line"/>
<point x="20" y="884" type="line"/>
<point x="194" y="884" type="line"/>
<point x="194" y="767" type="line"/>
<point x="309" y="767" type="line"/>
<point x="309" y="884" type="line"/>
<point x="543" y="884" type="line"/>
<point x="543" y="419" type="line"/>
<point x="891" y="419" type="line"/>
<point x="891" y="304" type="line"/>
<point x="1006" y="304" type="line"/>
<point x="1006" y="419" type="line"/>
<point x="1123" y="304" type="line"/>
<point x="1180" y="304" type="line"/>
<point x="1180" y="653" type="line"/>
</contour>
</outline>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/relation-one-to-one-or-many.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/relation-one-to-one-or-many.glif",
"repo_id": "cascadia-code",
"token_count": 649
}
| 630 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="rename-box" format="2">
<advance width="1200"/>
<unicode hex="F0455"/>
<note>
rename-box
</note>
<outline>
<contour>
<point x="987" y="517" type="line"/>
<point x="987" y="387" type="line"/>
<point x="503" y="387" type="line"/>
<point x="633" y="517" type="line"/>
</contour>
<contour>
<point x="373" y="387" type="line"/>
<point x="213" y="387" type="line"/>
<point x="213" y="550" type="line"/>
<point x="721" y="1054" type="line" smooth="yes"/>
<point x="730" y="1063"/>
<point x="757" y="1063"/>
<point x="766" y="1054" type="qcurve" smooth="yes"/>
<point x="881" y="943" type="line"/>
<point x="890" y="931"/>
<point x="890" y="906"/>
<point x="881" y="894" type="qcurve"/>
</contour>
<contour>
<point x="1104" y="1290"/>
<point x="1050" y="1290" type="qcurve" smooth="yes"/>
<point x="150" y="1290" type="line" smooth="yes"/>
<point x="96" y="1290"/>
<point x="20" y="1214"/>
<point x="20" y="1160" type="qcurve" smooth="yes"/>
<point x="20" y="260" type="line" smooth="yes"/>
<point x="20" y="206"/>
<point x="96" y="130"/>
<point x="150" y="130" type="qcurve" smooth="yes"/>
<point x="1050" y="130" type="line" smooth="yes"/>
<point x="1104" y="130"/>
<point x="1180" y="206"/>
<point x="1180" y="260" type="qcurve" smooth="yes"/>
<point x="1180" y="1160" type="line" smooth="yes"/>
<point x="1180" y="1214"/>
</contour>
</outline>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/rename-box.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/rename-box.glif",
"repo_id": "cascadia-code",
"token_count": 785
}
| 631 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="resize-bottom-right" format="2">
<advance width="1200"/>
<unicode hex="F045D"/>
<note>
resize-bottom-right
</note>
<outline>
<contour>
<point x="1180" y="359" type="line"/>
<point x="951" y="359" type="line"/>
<point x="951" y="130" type="line"/>
<point x="1180" y="130" type="line"/>
</contour>
<contour>
<point x="1180" y="827" type="line"/>
<point x="951" y="827" type="line"/>
<point x="951" y="593" type="line"/>
<point x="1180" y="593" type="line"/>
</contour>
<contour>
<point x="717" y="359" type="line"/>
<point x="483" y="359" type="line"/>
<point x="483" y="130" type="line"/>
<point x="717" y="130" type="line"/>
</contour>
<contour>
<point x="717" y="827" type="line"/>
<point x="483" y="827" type="line"/>
<point x="483" y="593" type="line"/>
<point x="717" y="593" type="line"/>
</contour>
<contour>
<point x="254" y="359" type="line"/>
<point x="20" y="359" type="line"/>
<point x="20" y="130" type="line"/>
<point x="254" y="130" type="line"/>
</contour>
<contour>
<point x="1180" y="1290" type="line"/>
<point x="951" y="1290" type="line"/>
<point x="951" y="1056" type="line"/>
<point x="1180" y="1056" type="line"/>
</contour>
</outline>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/resize-bottom-right.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/resize-bottom-right.glif",
"repo_id": "cascadia-code",
"token_count": 680
}
| 632 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="rhombus-medium" format="2">
<advance width="1200"/>
<unicode hex="F0A10"/>
<note>
rhombus-medium
</note>
<outline>
<contour>
<point x="649" y="1290"/>
<point x="600" y="1290" type="qcurve" smooth="yes"/>
<point x="551" y="1290"/>
<point x="516" y="1256" type="qcurve" smooth="yes"/>
<point x="54" y="794" type="line" smooth="yes"/>
<point x="20" y="759"/>
<point x="20" y="661"/>
<point x="54" y="626" type="qcurve" smooth="yes"/>
<point x="516" y="164" type="line" smooth="yes"/>
<point x="551" y="130"/>
<point x="649" y="130"/>
<point x="684" y="164" type="qcurve" smooth="yes"/>
<point x="1146" y="626" type="line" smooth="yes"/>
<point x="1180" y="661"/>
<point x="1180" y="759"/>
<point x="1146" y="794" type="qcurve" smooth="yes"/>
<point x="684" y="1256" type="line" smooth="yes"/>
</contour>
</outline>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/rhombus-medium.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/rhombus-medium.glif",
"repo_id": "cascadia-code",
"token_count": 464
}
| 633 |
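In these outlines, a `<point>` with no `type` attribute is an off-curve quadratic control point, and `type="qcurve"` marks the on-curve point closing a quadratic segment. Two consecutive off-curve points, like (20, 759) and (20, 661) in the rhombus-medium contour above, imply an on-curve point at their midpoint, TrueType-style. A hedged sketch of making those implied points explicit; the tuple layout and the helper name `insert_implied_oncurve` are assumptions, not part of the GLIF spec.

```python
def insert_implied_oncurve(contour_points):
    """contour_points: list of (x, y, is_on_curve) for one closed contour."""
    out = []
    n = len(contour_points)
    for i, (x, y, on) in enumerate(contour_points):
        out.append((x, y, on))
        nx, ny, next_on = contour_points[(i + 1) % n]  # wrap: contour is closed
        if not on and not next_on:
            # Two off-curve points in a row: their midpoint is on-curve.
            out.append(((x + nx) / 2, (y + ny) / 2, True))
    return out

# E.g. the off-curve pair (20, 759) and (20, 661) yields an implied
# on-curve point at (20.0, 710.0).
```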
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="rolodex-outline" format="2">
<advance width="1200"/>
<unicode hex="F1ABA"/>
<note>
rolodex-outline
</note>
<outline>
<contour>
<point x="309" y="479" type="line"/>
<point x="194" y="479" type="line"/>
<point x="194" y="361" type="line"/>
<point x="309" y="361" type="line"/>
</contour>
<contour>
<point x="309" y="710" type="line"/>
<point x="194" y="710" type="line"/>
<point x="194" y="593" type="line"/>
<point x="309" y="593" type="line"/>
</contour>
<contour>
<point x="543" y="479" type="line"/>
<point x="426" y="479" type="line"/>
<point x="426" y="361" type="line"/>
<point x="543" y="361" type="line"/>
</contour>
<contour>
<point x="543" y="710" type="line"/>
<point x="426" y="710" type="line"/>
<point x="426" y="593" type="line"/>
<point x="543" y="593" type="line"/>
</contour>
<contour>
<point x="774" y="479" type="line"/>
<point x="657" y="479" type="line"/>
<point x="657" y="361" type="line"/>
<point x="774" y="361" type="line"/>
</contour>
<contour>
<point x="774" y="710" type="line"/>
<point x="657" y="710" type="line"/>
<point x="657" y="593" type="line"/>
<point x="774" y="593" type="line"/>
</contour>
<contour>
<point x="1006" y="479" type="line"/>
<point x="891" y="479" type="line"/>
<point x="891" y="361" type="line"/>
<point x="1006" y="361" type="line"/>
</contour>
<contour>
<point x="1006" y="710" type="line"/>
<point x="891" y="710" type="line"/>
<point x="891" y="593" type="line"/>
<point x="1006" y="593" type="line"/>
</contour>
<contour>
<point x="856" y="941"/>
<point x="891" y="977"/>
<point x="891" y="1001" type="qcurve" smooth="yes"/>
<point x="891" y="1176" type="line" smooth="yes"/>
<point x="891" y="1197"/>
<point x="856" y="1233"/>
<point x="810" y="1233"/>
<point x="774" y="1197"/>
<point x="774" y="1176" type="qcurve" smooth="yes"/>
<point x="774" y="1001" type="line" smooth="yes"/>
<point x="774" y="977"/>
<point x="810" y="941"/>
<point x="831" y="941" type="qcurve" smooth="yes"/>
</contour>
<contour>
<point x="390" y="941"/>
<point x="426" y="977"/>
<point x="426" y="1001" type="qcurve" smooth="yes"/>
<point x="426" y="1176" type="line" smooth="yes"/>
<point x="426" y="1197"/>
<point x="390" y="1233"/>
<point x="344" y="1233"/>
<point x="309" y="1197"/>
<point x="309" y="1176" type="qcurve" smooth="yes"/>
<point x="309" y="1001" type="line" smooth="yes"/>
<point x="309" y="977"/>
<point x="344" y="941"/>
<point x="369" y="941" type="qcurve" smooth="yes"/>
</contour>
<contour>
<point x="1106" y="1118"/>
<point x="1057" y="1116" type="qcurve" smooth="yes"/>
<point x="978" y="1116" type="line"/>
<point x="978" y="1001" type="line" smooth="yes"/>
<point x="978" y="941"/>
<point x="891" y="854"/>
<point x="772" y="854"/>
<point x="687" y="941"/>
<point x="687" y="1001" type="qcurve" smooth="yes"/>
<point x="687" y="1102" type="line" smooth="yes"/>
<point x="687" y="1108"/>
<point x="679" y="1116"/>
<point x="674" y="1116" type="qcurve" smooth="yes"/>
<point x="526" y="1116" type="line" smooth="yes"/>
<point x="521" y="1116"/>
<point x="513" y="1108"/>
<point x="513" y="1102" type="qcurve" smooth="yes"/>
<point x="513" y="1001" type="line" smooth="yes"/>
<point x="513" y="941"/>
<point x="428" y="854"/>
<point x="309" y="854"/>
<point x="222" y="941"/>
<point x="222" y="1001" type="qcurve" smooth="yes"/>
<point x="222" y="1116" type="line"/>
<point x="143" y="1116" type="line" smooth="yes"/>
<point x="94" y="1118"/>
<point x="20" y="1050"/>
<point x="20" y="1001" type="qcurve" smooth="yes"/>
<point x="20" y="304" type="line" smooth="yes"/>
<point x="20" y="255"/>
<point x="94" y="187"/>
<point x="143" y="187" type="qcurve" smooth="yes"/>
<point x="1057" y="187" type="line" smooth="yes"/>
<point x="1106" y="187"/>
<point x="1180" y="255"/>
<point x="1180" y="304" type="qcurve" smooth="yes"/>
<point x="1180" y="1001" type="line" smooth="yes"/>
<point x="1180" y="1050"/>
</contour>
<contour>
<point x="1066" y="767" type="line"/>
<point x="1066" y="304" type="line"/>
<point x="134" y="304" type="line"/>
<point x="134" y="767" type="line"/>
</contour>
</outline>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/rolodex-outline.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/rolodex-outline.glif",
"repo_id": "cascadia-code",
"token_count": 2344
}
| 634 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="sale" format="2">
<advance width="1200"/>
<unicode hex="F046F"/>
<note>
sale
</note>
<outline>
<contour>
<point x="749" y="1169" type="line"/>
<point x="600" y="1318" type="line"/>
<point x="451" y="1166" type="line"/>
<point x="242" y="1202" type="line"/>
<point x="209" y="995" type="line"/>
<point x="20" y="899" type="line"/>
<point x="116" y="713" type="line"/>
<point x="20" y="523" type="line"/>
<point x="207" y="428" type="line"/>
<point x="242" y="218" type="line"/>
<point x="449" y="254" type="line"/>
<point x="597" y="102" type="line"/>
<point x="746" y="251" type="line"/>
<point x="956" y="218" type="line"/>
<point x="988" y="425" type="line"/>
<point x="1180" y="521" type="line"/>
<point x="1084" y="710" type="line"/>
<point x="1180" y="899" type="line"/>
<point x="991" y="995" type="line"/>
<point x="958" y="1202" type="line"/>
</contour>
<contour>
<point x="499" y="980"/>
<point x="547" y="932"/>
<point x="547" y="864"/>
<point x="499" y="818"/>
<point x="431" y="818"/>
<point x="386" y="864"/>
<point x="386" y="932"/>
<point x="431" y="980"/>
<point x="466" y="980" type="qcurve" smooth="yes"/>
</contour>
<contour>
<point x="769" y="602"/>
<point x="814" y="556"/>
<point x="814" y="488"/>
<point x="769" y="440"/>
<point x="701" y="440"/>
<point x="653" y="488"/>
<point x="653" y="556"/>
<point x="701" y="602"/>
<point x="734" y="602" type="qcurve" smooth="yes"/>
</contour>
<contour>
<point x="330" y="516" type="line"/>
<point x="794" y="980" type="line"/>
<point x="870" y="904" type="line"/>
<point x="406" y="440" type="line"/>
</contour>
</outline>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/sale.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/sale.glif",
"repo_id": "cascadia-code",
"token_count": 954
}
| 635 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="set-top-box" format="2">
<advance width="1200"/>
<unicode hex="F099F"/>
<note>
set-top-box
</note>
<outline>
<contour>
<point x="232" y="604" type="line"/>
<point x="968" y="604" type="line"/>
<point x="968" y="577" type="line" smooth="yes"/>
<point x="968" y="567"/>
<point x="985" y="552"/>
<point x="995" y="552" type="qcurve" smooth="yes"/>
<point x="1047" y="552" type="line" smooth="yes"/>
<point x="1059" y="552"/>
<point x="1074" y="567"/>
<point x="1074" y="577" type="qcurve" smooth="yes"/>
<point x="1074" y="604" type="line"/>
<point x="1126" y="604" type="line" smooth="yes"/>
<point x="1148" y="604"/>
<point x="1180" y="633"/>
<point x="1180" y="656" type="qcurve" smooth="yes"/>
<point x="1180" y="814" type="line" smooth="yes"/>
<point x="1180" y="836"/>
<point x="1148" y="868"/>
<point x="1126" y="868" type="qcurve" smooth="yes"/>
<point x="74" y="868" type="line" smooth="yes"/>
<point x="52" y="868"/>
<point x="20" y="836"/>
<point x="20" y="814" type="qcurve" smooth="yes"/>
<point x="20" y="656" type="line" smooth="yes"/>
<point x="20" y="633"/>
<point x="52" y="604"/>
<point x="74" y="604" type="qcurve" smooth="yes"/>
<point x="126" y="604" type="line"/>
<point x="126" y="577" type="line" smooth="yes"/>
<point x="126" y="567"/>
<point x="141" y="552"/>
<point x="153" y="552" type="qcurve" smooth="yes"/>
<point x="205" y="552" type="line" smooth="yes"/>
<point x="215" y="552"/>
<point x="232" y="567"/>
<point x="232" y="577" type="qcurve" smooth="yes"/>
</contour>
<contour>
<point x="232" y="762" type="line"/>
<point x="232" y="710" type="line"/>
<point x="126" y="710" type="line"/>
<point x="126" y="762" type="line"/>
</contour>
<contour>
<point x="390" y="762" type="line"/>
<point x="390" y="710" type="line"/>
<point x="284" y="710" type="line"/>
<point x="284" y="762" type="line"/>
</contour>
<contour>
<point x="1069" y="789"/>
<point x="1101" y="757"/>
<point x="1101" y="712"/>
<point x="1069" y="683"/>
<point x="1025" y="683"/>
<point x="995" y="712"/>
<point x="995" y="757"/>
<point x="1025" y="789"/>
<point x="1047" y="789" type="qcurve" smooth="yes"/>
</contour>
</outline>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/set-top-box.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/set-top-box.glif",
"repo_id": "cascadia-code",
"token_count": 1233
}
| 636 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="shield-plus" format="2">
<advance width="1200"/>
<unicode hex="F0ADA"/>
<note>
shield-plus
</note>
<outline>
<contour>
<point x="1180" y="220" type="line"/>
<point x="1180" y="343" type="line"/>
<point x="997" y="343" type="line"/>
<point x="997" y="527" type="line"/>
<point x="876" y="527" type="line"/>
<point x="876" y="343" type="line"/>
<point x="693" y="343" type="line"/>
<point x="693" y="220" type="line"/>
<point x="876" y="220" type="line"/>
<point x="876" y="40" type="line"/>
<point x="997" y="40" type="line"/>
<point x="997" y="220" type="line"/>
</contour>
<contour>
<point x="20" y="1137" type="line"/>
<point x="20" y="770" type="line" smooth="yes"/>
<point x="20" y="604"/>
<point x="163" y="295"/>
<point x="415" y="77"/>
<point x="570" y="37" type="qcurve"/>
<point x="644" y="60" type="line"/>
<point x="570" y="157"/>
<point x="570" y="283" type="qcurve" smooth="yes"/>
<point x="570" y="381"/>
<point x="667" y="550"/>
<point x="836" y="650"/>
<point x="937" y="650" type="qcurve" smooth="yes"/>
<point x="1025" y="650"/>
<point x="1103" y="610" type="qcurve"/>
<point x="1120" y="690"/>
<point x="1120" y="770" type="qcurve" smooth="yes"/>
<point x="1120" y="1137" type="line"/>
<point x="570" y="1383" type="line"/>
</contour>
</outline>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/shield-plus.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/shield-plus.glif",
"repo_id": "cascadia-code",
"token_count": 740
}
| 637 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="signature-freehand" format="2">
<advance width="1200"/>
<unicode hex="F0DFC"/>
<note>
signature-freehand
</note>
<outline>
<contour>
<point x="1180" y="250" type="line"/>
<point x="20" y="250" type="line"/>
<point x="20" y="135" type="line"/>
<point x="1180" y="135" type="line"/>
</contour>
<contour>
<point x="181" y="489" type="line"/>
<point x="262" y="571" type="line"/>
<point x="222" y="612" type="line"/>
<point x="140" y="530" type="line"/>
<point x="61" y="612" type="line"/>
<point x="20" y="571" type="line"/>
<point x="102" y="489" type="line"/>
<point x="20" y="408" type="line"/>
<point x="61" y="367" type="line"/>
<point x="140" y="449" type="line"/>
<point x="222" y="367" type="line"/>
<point x="262" y="408" type="line"/>
</contour>
<contour>
<point x="823" y="533"/>
<point x="627" y="476"/>
<point x="540" y="476" type="qcurve"/>
<point x="456" y="489" type="line"/>
<point x="426" y="489"/>
<point x="369" y="462"/>
<point x="369" y="432" type="qcurve" smooth="yes"/>
<point x="369" y="348"/>
<point x="494" y="348" type="qcurve" smooth="yes"/>
<point x="644" y="348"/>
<point x="973" y="481"/>
<point x="973" y="598" type="qcurve" smooth="yes"/>
<point x="973" y="664"/>
<point x="867" y="759"/>
<point x="774" y="800" type="qcurve" smooth="yes"/>
<point x="663" y="852"/>
<point x="575" y="925"/>
<point x="575" y="969" type="qcurve" smooth="yes"/>
<point x="575" y="1042"/>
<point x="823" y="1159"/>
<point x="864" y="1159" type="qcurve"/>
<point x="897" y="1143" type="line" smooth="yes"/>
<point x="924" y="1127"/>
<point x="946" y="1127"/>
<point x="989" y="1162"/>
<point x="989" y="1187" type="qcurve" smooth="yes"/>
<point x="989" y="1241"/>
<point x="929" y="1285"/>
<point x="864" y="1285" type="qcurve" smooth="yes"/>
<point x="766" y="1285"/>
<point x="448" y="1080"/>
<point x="448" y="925" type="qcurve" smooth="yes"/>
<point x="448" y="865"/>
<point x="565" y="767"/>
<point x="720" y="688" type="qcurve" smooth="yes"/>
<point x="845" y="626"/>
<point x="845" y="574" type="qcurve"/>
</contour>
</outline>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/signature-freehand.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/signature-freehand.glif",
"repo_id": "cascadia-code",
"token_count": 1195
}
| 638 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="skip-previous-outline" format="2">
<advance width="1200"/>
<unicode hex="F0F28"/>
<note>
skip-previous-outline
</note>
<outline>
<contour>
<point x="20" y="130" type="line"/>
<point x="215" y="130" type="line"/>
<point x="215" y="1290" type="line"/>
<point x="20" y="1290" type="line"/>
</contour>
<contour>
<point x="1180" y="1290" type="line"/>
<point x="360" y="710" type="line"/>
<point x="1180" y="130" type="line"/>
</contour>
<contour>
<point x="985" y="918" type="line"/>
<point x="985" y="502" type="line"/>
<point x="695" y="710" type="line"/>
</contour>
</outline>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/skip-previous-outline.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/skip-previous-outline.glif",
"repo_id": "cascadia-code",
"token_count": 343
}
| 639 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="source-commit-local" format="2">
<advance width="1200"/>
<unicode hex="F071B"/>
<note>
source-commit-local
</note>
<outline>
<contour>
<point x="493" y="1106"/>
<point x="312" y="998"/>
<point x="204" y="817"/>
<point x="204" y="603"/>
<point x="312" y="422"/>
<point x="493" y="314"/>
<point x="707" y="314"/>
<point x="888" y="422"/>
<point x="996" y="603"/>
<point x="996" y="817"/>
<point x="888" y="998"/>
<point x="707" y="1106"/>
<point x="600" y="1106" type="qcurve" smooth="yes"/>
</contour>
<contour>
<point x="700" y="947"/>
<point x="600" y="947" type="qcurve" smooth="yes"/>
<point x="500" y="947"/>
<point x="363" y="810"/>
<point x="363" y="610"/>
<point x="500" y="473"/>
<point x="700" y="473"/>
<point x="837" y="610"/>
<point x="837" y="810"/>
</contour>
<contour>
<point x="678" y="1261" type="line"/>
<point x="678" y="1420" type="line"/>
<point x="522" y="1420" type="line"/>
<point x="522" y="1261" type="line"/>
</contour>
<contour>
<point x="678" y="0" type="line"/>
<point x="678" y="159" type="line"/>
<point x="522" y="159" type="line"/>
<point x="522" y="0" type="line"/>
</contour>
</outline>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/source-commit-local.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/source-commit-local.glif",
"repo_id": "cascadia-code",
"token_count": 695
}
| 640 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="square-off-outline" format="2">
<advance width="1200"/>
<unicode hex="F12EF"/>
<note>
square-off-outline
</note>
<outline>
<contour>
<point x="1110" y="130" type="line"/>
<point x="1180" y="200" type="line"/>
<point x="90" y="1290" type="line"/>
<point x="20" y="1220" type="line"/>
<point x="124" y="1117" type="line"/>
<point x="124" y="226" type="line"/>
<point x="1014" y="226" type="line"/>
</contour>
<contour>
<point x="903" y="337" type="line"/>
<point x="235" y="337" type="line"/>
<point x="235" y="1005" type="line"/>
</contour>
<contour>
<point x="1007" y="1109" type="line"/>
<point x="1007" y="513" type="line"/>
<point x="1118" y="402" type="line"/>
<point x="1118" y="1220" type="line"/>
<point x="300" y="1220" type="line"/>
<point x="411" y="1109" type="line"/>
</contour>
</outline>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/square-off-outline.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/square-off-outline.glif",
"repo_id": "cascadia-code",
"token_count": 468
}
| 641 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="stack-overflow" format="2">
<advance width="1200"/>
<unicode hex="F04CC"/>
<note>
stack-overflow
</note>
<outline>
<contour>
<point x="141" y="146" type="line"/>
<point x="141" y="513" type="line"/>
<point x="20" y="513" type="line"/>
<point x="20" y="25" type="line"/>
<point x="1122" y="25" type="line"/>
<point x="1122" y="513" type="line"/>
<point x="998" y="513" type="line"/>
<point x="998" y="146" type="line"/>
</contour>
<contour>
<point x="876" y="421" type="line"/>
<point x="902" y="542" type="line"/>
<point x="301" y="667" type="line"/>
<point x="276" y="549" type="line"/>
</contour>
<contour>
<point x="912" y="574" type="line"/>
<point x="963" y="686" type="line"/>
<point x="407" y="945" type="line"/>
<point x="356" y="833" type="line"/>
</contour>
<contour>
<point x="982" y="715" type="line"/>
<point x="1059" y="807" type="line"/>
<point x="589" y="1201" type="line"/>
<point x="509" y="1105" type="line"/>
</contour>
<contour>
<point x="717" y="1322" type="line"/>
<point x="1081" y="830" type="line"/>
<point x="1180" y="903" type="line"/>
<point x="813" y="1395" type="line"/>
</contour>
<contour>
<point x="876" y="267" type="line"/>
<point x="876" y="392" type="line"/>
<point x="266" y="392" type="line"/>
<point x="266" y="267" type="line"/>
</contour>
</outline>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/stack-overflow.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/stack-overflow.glif",
"repo_id": "cascadia-code",
"token_count": 760
}
| 642 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="star-circle-outline" format="2">
<advance width="1200"/>
<unicode hex="F09A4"/>
<note>
star-circle-outline
</note>
<outline>
<contour>
<point x="600" y="525" type="line"/>
<point x="799" y="405" type="line"/>
<point x="744" y="631" type="line"/>
<point x="919" y="781" type="line"/>
<point x="690" y="800" type="line"/>
<point x="600" y="1012" type="line"/>
<point x="510" y="803" type="line"/>
<point x="281" y="781" type="line"/>
<point x="456" y="631" type="line"/>
<point x="401" y="405" type="line"/>
</contour>
<contour>
<point x="486" y="1290"/>
<point x="271" y="1203"/>
<point x="107" y="1039"/>
<point x="20" y="824"/>
<point x="20" y="596"/>
<point x="107" y="381"/>
<point x="271" y="217"/>
<point x="486" y="130"/>
<point x="714" y="130"/>
<point x="929" y="217"/>
<point x="1093" y="381"/>
<point x="1180" y="596"/>
<point x="1180" y="824"/>
<point x="1093" y="1039"/>
<point x="929" y="1203"/>
<point x="714" y="1290"/>
<point x="600" y="1290" type="qcurve" smooth="yes"/>
</contour>
<contour>
<point x="725" y="1176"/>
<point x="940" y="1050"/>
<point x="1066" y="835"/>
<point x="1066" y="585"/>
<point x="940" y="370"/>
<point x="725" y="244"/>
<point x="475" y="244"/>
<point x="260" y="370"/>
<point x="134" y="585"/>
<point x="134" y="835"/>
<point x="260" y="1050"/>
<point x="475" y="1176"/>
<point x="600" y="1176" type="qcurve" smooth="yes"/>
</contour>
</outline>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/star-circle-outline.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/star-circle-outline.glif",
"repo_id": "cascadia-code",
"token_count": 864
}
| 643 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="subdirectory-arrow-left" format="2">
<advance width="1200"/>
<unicode hex="F060C"/>
<note>
subdirectory-arrow-left
</note>
<outline>
<contour>
<point x="20" y="516" type="line"/>
<point x="484" y="52" type="line"/>
<point x="593" y="161" type="line"/>
<point x="314" y="440" type="line"/>
<point x="1180" y="440" type="line"/>
<point x="1180" y="1368" type="line"/>
<point x="1024" y="1368" type="line"/>
<point x="1024" y="592" type="line"/>
<point x="314" y="592" type="line"/>
<point x="593" y="871" type="line"/>
<point x="484" y="980" type="line"/>
</contour>
</outline>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/subdirectory-arrow-left.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/subdirectory-arrow-left.glif",
"repo_id": "cascadia-code",
"token_count": 327
}
| 644 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="tea" format="2">
<advance width="1200"/>
<unicode hex="F0D9E"/>
<note>
tea
</note>
<outline>
<contour>
<point x="20" y="130" type="line"/>
<point x="1053" y="130" type="line"/>
<point x="1053" y="260" type="line"/>
<point x="20" y="260" type="line"/>
</contour>
<contour>
<point x="923" y="967" type="line"/>
<point x="923" y="1160" type="line"/>
<point x="1053" y="1160" type="line"/>
<point x="1053" y="967" type="line"/>
</contour>
<contour>
<point x="407" y="1290" type="line"/>
<point x="407" y="1136" type="line"/>
<point x="524" y="1045" type="line"/>
<point x="537" y="1033"/>
<point x="537" y="1018" type="qcurve" smooth="yes"/>
<point x="537" y="743" type="line" smooth="yes"/>
<point x="537" y="728"/>
<point x="518" y="710"/>
<point x="503" y="710" type="qcurve" smooth="yes"/>
<point x="247" y="710" type="line" smooth="yes"/>
<point x="231" y="710"/>
<point x="213" y="728"/>
<point x="213" y="743" type="qcurve" smooth="yes"/>
<point x="213" y="1018" type="line" smooth="yes"/>
<point x="213" y="1033"/>
<point x="228" y="1045" type="qcurve" smooth="yes"/>
<point x="343" y="1136" type="line"/>
<point x="343" y="1290" type="line"/>
<point x="20" y="1290" type="line"/>
<point x="20" y="647" type="line" smooth="yes"/>
<point x="20" y="544"/>
<point x="177" y="387"/>
<point x="280" y="387" type="qcurve" smooth="yes"/>
<point x="666" y="387" type="line" smooth="yes"/>
<point x="769" y="387"/>
<point x="923" y="544"/>
<point x="923" y="647" type="qcurve" smooth="yes"/>
<point x="923" y="840" type="line"/>
<point x="1053" y="840" type="line" smooth="yes"/>
<point x="1104" y="840"/>
<point x="1180" y="915"/>
<point x="1180" y="967" type="qcurve" smooth="yes"/>
<point x="1180" y="1160" type="line" smooth="yes"/>
<point x="1180" y="1211"/>
<point x="1104" y="1290"/>
<point x="1053" y="1290" type="qcurve" smooth="yes"/>
</contour>
</outline>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/tea.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/tea.glif",
"repo_id": "cascadia-code",
"token_count": 1068
}
| 645 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="temperature-fahrenheit" format="2">
<advance width="1200"/>
<unicode hex="F0505"/>
<note>
temperature-fahrenheit
</note>
<outline>
<contour>
<point x="771" y="130" type="line"/>
<point x="771" y="539" type="line"/>
<point x="1110" y="539" type="line"/>
<point x="1110" y="744" type="line"/>
<point x="771" y="744" type="line"/>
<point x="771" y="948" type="line"/>
<point x="1180" y="948" type="line"/>
<point x="1180" y="1153" type="line"/>
<point x="566" y="1153" type="line"/>
<point x="566" y="130" type="line"/>
</contour>
<contour>
<point x="138" y="1290"/>
<point x="20" y="1172"/>
<point x="20" y="999"/>
<point x="138" y="881"/>
<point x="311" y="881"/>
<point x="429" y="999"/>
<point x="429" y="1172"/>
<point x="311" y="1290"/>
<point x="225" y="1290" type="qcurve" smooth="yes"/>
</contour>
<contour>
<point x="253" y="1153"/>
<point x="292" y="1114"/>
<point x="292" y="1057"/>
<point x="253" y="1018"/>
<point x="196" y="1018"/>
<point x="157" y="1057"/>
<point x="157" y="1114"/>
<point x="196" y="1153"/>
<point x="225" y="1153" type="qcurve" smooth="yes"/>
</contour>
</outline>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/temperature-fahrenheit.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/temperature-fahrenheit.glif",
"repo_id": "cascadia-code",
"token_count": 672
}
| 646 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="timer-sand-full" format="2">
<advance width="1200"/>
<unicode hex="F078C"/>
<note>
timer-sand-full
</note>
<outline>
<contour>
<point x="1027" y="1420" type="line"/>
<point x="173" y="1420" type="line"/>
<point x="173" y="993" type="line"/>
<point x="457" y="710" type="line"/>
<point x="173" y="427" type="line"/>
<point x="173" y="0" type="line"/>
<point x="1027" y="0" type="line"/>
<point x="1027" y="427" type="line"/>
<point x="743" y="710" type="line"/>
<point x="1027" y="993" type="line"/>
</contour>
</outline>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/timer-sand-full.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/timer-sand-full.glif",
"repo_id": "cascadia-code",
"token_count": 304
}
| 647 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="toothbrush-paste" format="2">
<advance width="1200"/>
<unicode hex="F112A"/>
<note>
toothbrush-paste
</note>
<outline>
<contour>
<point x="1064" y="1189" type="line"/>
<point x="937" y="1313" type="line"/>
<point x="644" y="1023" type="line"/>
<point x="771" y="899" type="line"/>
<point x="832" y="960" type="line"/>
<point x="771" y="1023" type="line"/>
<point x="937" y="1189" type="line"/>
<point x="1000" y="1128" type="line"/>
</contour>
<contour>
<point x="1180" y="1156"/>
<point x="1147" y="1189" type="qcurve"/>
<point x="802" y="847" type="line"/>
<point x="677" y="803" type="line"/>
<point x="61" y="189" type="line"/>
<point x="147" y="107" type="line"/>
<point x="478" y="441" type="line"/>
<point x="730" y="189" type="line"/>
<point x="978" y="441" type="line"/>
<point x="730" y="689" type="line"/>
<point x="738" y="700" type="line"/>
<point x="865" y="742" type="line"/>
<point x="1147" y="1023" type="line" smooth="yes"/>
<point x="1180" y="1057"/>
</contour>
<contour>
<point x="561" y="855" type="line"/>
<point x="271" y="1148" type="line"/>
<point x="20" y="899" type="line"/>
<point x="313" y="606" type="line"/>
</contour>
<contour>
<point x="1042" y="253" type="line"/>
<point x="959" y="336" type="line"/>
<point x="832" y="211" type="line"/>
<point x="915" y="129" type="line"/>
</contour>
</outline>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/toothbrush-paste.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/toothbrush-paste.glif",
"repo_id": "cascadia-code",
"token_count": 772
}
| 648 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="triangle-small-up" format="2">
<advance width="1200"/>
<unicode hex="F1A0A"/>
<note>
triangle-small-up
</note>
<outline>
<contour>
<point x="600" y="1218" type="line"/>
<point x="20" y="202" type="line"/>
<point x="1180" y="202" type="line"/>
</contour>
</outline>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/triangle-small-up.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/triangle-small-up.glif",
"repo_id": "cascadia-code",
"token_count": 166
}
| 649 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="turnstile" format="2">
<advance width="1200"/>
<unicode hex="F0CD5"/>
<note>
turnstile
</note>
<outline>
<contour>
<point x="1180" y="1290" type="line"/>
<point x="483" y="1290" type="line"/>
<point x="483" y="1116" type="line"/>
<point x="831" y="767" type="line"/>
<point x="831" y="130" type="line"/>
<point x="1180" y="130" type="line"/>
</contour>
<contour>
<point x="763" y="721" type="qcurve"/>
<point x="437" y="1048" type="line"/>
<point x="369" y="980"/>
<point x="369" y="884" type="qcurve"/>
<point x="20" y="884" type="line"/>
<point x="20" y="767" type="line"/>
<point x="398" y="767" type="line"/>
<point x="431" y="715"/>
<point x="483" y="683" type="qcurve"/>
<point x="483" y="304" type="line"/>
<point x="600" y="304" type="line"/>
<point x="600" y="653" type="line"/>
<point x="695" y="653"/>
</contour>
</outline>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/turnstile.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/turnstile.glif",
"repo_id": "cascadia-code",
"token_count": 497
}
| 650 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="valve-closed" format="2">
<advance width="1200"/>
<unicode hex="F1067"/>
<note>
valve-closed
</note>
<outline>
<contour>
<point x="1066" y="1290" type="line"/>
<point x="1066" y="767" type="line"/>
<point x="763" y="767" type="line"/>
<point x="744" y="819"/>
<point x="654" y="882"/>
<point x="546" y="882"/>
<point x="456" y="819"/>
<point x="437" y="767" type="qcurve"/>
<point x="134" y="767" type="line"/>
<point x="134" y="1290" type="line"/>
<point x="20" y="1290" type="line"/>
<point x="20" y="130" type="line"/>
<point x="134" y="130" type="line"/>
<point x="134" y="653" type="line"/>
<point x="437" y="653" type="line"/>
<point x="456" y="601"/>
<point x="546" y="538"/>
<point x="654" y="538"/>
<point x="744" y="601"/>
<point x="763" y="653" type="qcurve"/>
<point x="1066" y="653" type="line"/>
<point x="1066" y="130" type="line"/>
<point x="1180" y="130" type="line"/>
<point x="1180" y="1290" type="line"/>
</contour>
</outline>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/valve-closed.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/valve-closed.glif",
"repo_id": "cascadia-code",
"token_count": 564
}
| 651 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="view-dashboard-variant" format="2">
<advance width="1200"/>
<unicode hex="F0843"/>
<note>
view-dashboard-variant
</note>
<outline>
<contour>
<point x="369" y="1116" type="line"/>
<point x="20" y="1116" type="line"/>
<point x="20" y="304" type="line"/>
<point x="369" y="304" type="line"/>
</contour>
<contour>
<point x="774" y="1116" type="line"/>
<point x="426" y="1116" type="line"/>
<point x="426" y="827" type="line"/>
<point x="774" y="827" type="line"/>
</contour>
<contour>
<point x="1180" y="1116" type="line"/>
<point x="831" y="1116" type="line"/>
<point x="831" y="593" type="line"/>
<point x="1180" y="593" type="line"/>
</contour>
<contour>
<point x="774" y="767" type="line"/>
<point x="426" y="767" type="line"/>
<point x="426" y="304" type="line"/>
<point x="774" y="304" type="line"/>
</contour>
<contour>
<point x="1180" y="536" type="line"/>
<point x="831" y="536" type="line"/>
<point x="831" y="304" type="line"/>
<point x="1180" y="304" type="line"/>
</contour>
</outline>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/view-dashboard-variant.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/view-dashboard-variant.glif",
"repo_id": "cascadia-code",
"token_count": 584
}
| 652 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="volume-vibrate" format="2">
<advance width="1200"/>
<unicode hex="F1121"/>
<note>
volume-vibrate
</note>
<outline>
<contour>
<point x="312" y="927" type="line"/>
<point x="20" y="927" type="line"/>
<point x="20" y="493" type="line"/>
<point x="312" y="493" type="line"/>
<point x="671" y="130" type="line"/>
<point x="671" y="1290" type="line"/>
</contour>
<contour>
<point x="1180" y="1144" type="line"/>
<point x="929" y="1399" type="line"/>
<point x="854" y="1324" type="line"/>
<point x="1031" y="1144" type="line"/>
<point x="817" y="927" type="line"/>
<point x="1031" y="710" type="line"/>
<point x="817" y="493" type="line"/>
<point x="1031" y="276" type="line"/>
<point x="854" y="96" type="line"/>
<point x="929" y="21" type="line"/>
<point x="1180" y="276" type="line"/>
<point x="966" y="493" type="line"/>
<point x="1180" y="710" type="line"/>
<point x="966" y="927" type="line"/>
</contour>
</outline>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/volume-vibrate.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/volume-vibrate.glif",
"repo_id": "cascadia-code",
"token_count": 539
}
| 653 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="water-check" format="2">
<advance width="1200"/>
<unicode hex="F1504"/>
<note>
water-check
</note>
<outline>
<contour>
<point x="1094" y="508" type="line"/>
<point x="831" y="246" type="line"/>
<point x="714" y="363" type="line"/>
<point x="628" y="277" type="line"/>
<point x="831" y="56" type="line"/>
<point x="1180" y="404" type="line"/>
</contour>
<contour>
<point x="762" y="712"/>
<point x="876" y="719" type="qcurve"/>
<point x="814" y="898"/>
<point x="631" y="1150" type="qcurve" smooth="yes"/>
<point x="541" y="1274"/>
<point x="462" y="1364" type="qcurve"/>
<point x="462" y="1364" type="line"/>
<point x="393" y="1281" type="line" smooth="yes"/>
<point x="310" y="1178"/>
<point x="241" y="1078" type="qcurve" smooth="yes"/>
<point x="144" y="936"/>
<point x="89" y="819" type="qcurve" smooth="yes"/>
<point x="20" y="674"/>
<point x="20" y="570" type="qcurve" smooth="yes"/>
<point x="20" y="453"/>
<point x="137" y="249"/>
<point x="341" y="128"/>
<point x="462" y="128" type="qcurve" smooth="yes"/>
<point x="486" y="128" type="line"/>
<point x="462" y="201"/>
<point x="462" y="277" type="qcurve" smooth="yes"/>
<point x="462" y="391"/>
<point x="572" y="587"/>
</contour>
</outline>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/water-check.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/water-check.glif",
"repo_id": "cascadia-code",
"token_count": 707
}
| 654 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="window-shutter-settings" format="2">
<advance width="1200"/>
<unicode hex="F1A8B"/>
<note>
window-shutter-settings
</note>
<outline>
<contour>
<point x="20" y="1095" type="line"/>
<point x="150" y="1095" type="line"/>
<point x="150" y="322" type="line"/>
<point x="277" y="322" type="line"/>
<point x="277" y="1095" type="line"/>
<point x="923" y="1095" type="line"/>
<point x="923" y="322" type="line"/>
<point x="1050" y="322" type="line"/>
<point x="1050" y="1095" type="line"/>
<point x="1180" y="1095" type="line"/>
<point x="1180" y="1355" type="line"/>
<point x="20" y="1355" type="line"/>
</contour>
<contour>
<point x="343" y="902" type="line"/>
<point x="857" y="902" type="line"/>
<point x="857" y="1032" type="line"/>
<point x="343" y="1032" type="line"/>
</contour>
<contour>
<point x="343" y="708" type="line"/>
<point x="857" y="708" type="line"/>
<point x="857" y="838" type="line"/>
<point x="343" y="838" type="line"/>
</contour>
<contour>
<point x="343" y="515" type="line"/>
<point x="857" y="515" type="line"/>
<point x="857" y="645" type="line"/>
<point x="343" y="645" type="line"/>
</contour>
<contour>
<point x="343" y="322" type="line"/>
<point x="857" y="322" type="line"/>
<point x="857" y="452" type="line"/>
<point x="343" y="452" type="line"/>
</contour>
<contour>
<point x="663" y="65" type="line"/>
<point x="663" y="195" type="line"/>
<point x="537" y="195" type="line"/>
<point x="537" y="65" type="line"/>
</contour>
<contour>
<point x="923" y="65" type="line"/>
<point x="923" y="195" type="line"/>
<point x="793" y="195" type="line"/>
<point x="793" y="65" type="line"/>
</contour>
<contour>
<point x="407" y="65" type="line"/>
<point x="407" y="195" type="line"/>
<point x="277" y="195" type="line"/>
<point x="277" y="65" type="line"/>
</contour>
</outline>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/window-shutter-settings.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/window-shutter-settings.glif",
"repo_id": "cascadia-code",
"token_count": 1046
}
| 655 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="yahoo" format="2">
<advance width="1200"/>
<unicode hex="F0B4F"/>
<note>
yahoo
</note>
<outline>
<contour>
<point x="731" y="966" type="line"/>
<point x="513" y="966" type="line"/>
<point x="377" y="639" type="line"/>
<point x="243" y="966" type="line"/>
<point x="20" y="966" type="line"/>
<point x="268" y="405" type="line"/>
<point x="178" y="204" type="line"/>
<point x="396" y="204" type="line"/>
</contour>
<contour>
<point x="834" y="666"/>
<point x="774" y="666" type="qcurve" smooth="yes"/>
<point x="714" y="666"/>
<point x="635" y="590"/>
<point x="635" y="484"/>
<point x="712" y="410"/>
<point x="829" y="410"/>
<point x="910" y="487"/>
<point x="910" y="596"/>
</contour>
<contour>
<point x="1180" y="1216" type="line"/>
<point x="932" y="1216" type="line"/>
<point x="709" y="715" type="line"/>
<point x="959" y="715" type="line"/>
</contour>
</outline>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/yahoo.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/yahoo.glif",
"repo_id": "cascadia-code",
"token_count": 535
}
| 656 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="zodiac-aquarius" format="2">
<advance width="1200"/>
<unicode hex="F0A7D"/>
<note>
zodiac-aquarius
</note>
<outline>
<contour>
<point x="959" y="895" type="line"/>
<point x="1096" y="758" type="line"/>
<point x="1180" y="842" type="line"/>
<point x="959" y="1063" type="line"/>
<point x="779" y="884" type="line"/>
<point x="600" y="1063" type="line"/>
<point x="421" y="884" type="line"/>
<point x="241" y="1063" type="line"/>
<point x="20" y="842" type="line"/>
<point x="104" y="758" type="line"/>
<point x="241" y="895" type="line"/>
<point x="421" y="716" type="line"/>
<point x="600" y="895" type="line"/>
<point x="779" y="716" type="line"/>
</contour>
<contour>
<point x="779" y="357" type="line"/>
<point x="959" y="536" type="line"/>
<point x="1096" y="399" type="line"/>
<point x="1180" y="483" type="line"/>
<point x="959" y="704" type="line"/>
<point x="779" y="525" type="line"/>
<point x="600" y="704" type="line"/>
<point x="421" y="525" type="line"/>
<point x="241" y="704" type="line"/>
<point x="20" y="483" type="line"/>
<point x="104" y="399" type="line"/>
<point x="241" y="536" type="line"/>
<point x="421" y="357" type="line"/>
<point x="600" y="536" type="line"/>
</contour>
</outline>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/zodiac-aquarius.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/MaterialDesignIconsDesktop.ufo/glyphs/zodiac-aquarius.glif",
"repo_id": "cascadia-code",
"token_count": 696
}
| 657 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name=".notdef" format="2">
<advance width="1200"/>
<unicode hex="E0C9"/>
<unicode hex="E0CB"/>
<unicode hex="E0D3"/>
<note>
.notdef
</note>
<outline>
<contour>
<point x="1378" y="2857" type="line"/>
<point x="-178" y="2857" type="line"/>
<point x="-178" y="151" type="line"/>
<point x="1378" y="151" type="line"/>
</contour>
<contour>
<point x="1186" y="343" type="line"/>
<point x="14" y="343" type="line"/>
<point x="14" y="2665" type="line"/>
<point x="1186" y="2665" type="line"/>
</contour>
</outline>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/PowerlineExtraSymbols.ufo/glyphs/_notdef.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/PowerlineExtraSymbols.ufo/glyphs/_notdef.glif",
"repo_id": "cascadia-code",
"token_count": 306
}
| 658 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="POWER SLEEP SYMBOL" format="2">
<advance width="1200"/>
<unicode hex="23FE"/>
<note>
POWER SLEEP SYMBOL
</note>
<outline>
<contour>
<point x="528" y="1420" type="curve"/>
<point x="153" y="1257"/>
<point x="-19" y="821"/>
<point x="144" y="446" type="curve" smooth="yes"/>
<point x="306" y="71"/>
<point x="742" y="-101"/>
<point x="1118" y="61" type="curve"/>
<point x="844" y="127"/>
<point x="604" y="316"/>
<point x="483" y="593" type="curve" smooth="yes"/>
<point x="363" y="871"/>
<point x="389" y="1176"/>
</contour>
</outline>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/Unicode_IEC_symbol_font.ufo/glyphs/P_O_W_E_R_ S_L_E_E_P_ S_Y_M_B_O_L_.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/Unicode_IEC_symbol_font.ufo/glyphs/P_O_W_E_R_ S_L_E_E_P_ S_Y_M_B_O_L_.glif",
"repo_id": "cascadia-code",
"token_count": 335
}
| 659 |
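Most glyphs in this dump use quadratic segments (`type="qcurve"`), but some source UFOs, such as the Unicode_IEC_symbol_font record above, use cubic `type="curve"` segments instead. A quick, hedged way to tell which kind a record holds is to tally its point types; `point_type_counts` is an illustrative helper, not a library API.

```python
from collections import Counter
import xml.etree.ElementTree as ET

def point_type_counts(glif_xml: str) -> Counter:
    """Count point types in one GLIF record ('offcurve' = no type attribute)."""
    root = ET.fromstring(glif_xml)
    return Counter(p.get("type", "offcurve") for p in root.iter("point"))

# A record with 'curve' entries is cubic; one with 'qcurve' is quadratic.
```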
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="arrow-small-down" format="2">
<advance width="1200"/>
<unicode hex="EA9D"/>
<note>
arrow-small-down
</note>
<outline>
<contour>
<point x="791" y="682" type="line"/>
<point x="645" y="536" type="line"/>
<point x="645" y="1026" type="line"/>
<point x="555" y="1026" type="line"/>
<point x="555" y="536" type="line"/>
<point x="409" y="677" type="line"/>
<point x="348" y="616" type="line"/>
<point x="569" y="394" type="line"/>
<point x="631" y="394" type="line"/>
<point x="852" y="616" type="line"/>
</contour>
</outline>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/codicon.ufo/glyphs/arrow-small-down.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/codicon.ufo/glyphs/arrow-small-down.glif",
"repo_id": "cascadia-code",
"token_count": 306
}
| 660 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="bracket-error" format="2">
<advance width="1200"/>
<unicode hex="EBE6"/>
<note>
bracket-error
</note>
<outline>
<contour>
<point x="378" y="1292" type="line"/>
<point x="368" y="1292" type="line" smooth="yes"/>
<point x="326" y="1292"/>
<point x="249" y="1259"/>
<point x="187" y="1197"/>
<point x="173" y="1159" type="qcurve"/>
<point x="173" y="1159" type="line"/>
<point x="158" y="1120"/>
<point x="154" y="1082" type="qcurve"/>
<point x="154" y="1082" type="line"/>
<point x="149" y="1044"/>
<point x="154" y="1006" type="qcurve" smooth="yes"/>
<point x="154" y="929" type="line" smooth="yes"/>
<point x="154" y="901"/>
<point x="144" y="877" type="qcurve"/>
<point x="144" y="877" type="line"/>
<point x="135" y="853"/>
<point x="101" y="820"/>
<point x="53" y="796"/>
<point x="25" y="796" type="qcurve" smooth="yes"/>
<point x="20" y="796" type="line"/>
<point x="20" y="710" type="line"/>
<point x="25" y="710" type="line" smooth="yes"/>
<point x="53" y="710"/>
<point x="77" y="696" type="qcurve"/>
<point x="77" y="696" type="line"/>
<point x="101" y="686"/>
<point x="115" y="672" type="qcurve"/>
<point x="115" y="667" type="line"/>
<point x="135" y="653"/>
<point x="144" y="629" type="qcurve"/>
<point x="144" y="629" type="line"/>
<point x="154" y="605"/>
<point x="154" y="576" type="qcurve" smooth="yes"/>
<point x="154" y="500" type="line" smooth="yes"/>
<point x="149" y="462"/>
<point x="154" y="423" type="qcurve" smooth="yes"/>
<point x="154" y="419" type="line" smooth="yes"/>
<point x="158" y="385"/>
<point x="173" y="347" type="qcurve"/>
<point x="173" y="347" type="line"/>
<point x="187" y="309"/>
<point x="249" y="247"/>
<point x="326" y="218"/>
<point x="368" y="218" type="qcurve" smooth="yes"/>
<point x="378" y="218" type="line"/>
<point x="378" y="304" type="line"/>
<point x="368" y="304" type="line" smooth="yes"/>
<point x="340" y="304"/>
<point x="292" y="323"/>
<point x="259" y="361"/>
<point x="240" y="409"/>
<point x="240" y="438" type="qcurve" smooth="yes"/>
<point x="240" y="557" type="line" smooth="yes"/>
<point x="240" y="586"/>
<point x="230" y="643"/>
<point x="211" y="696"/>
<point x="178" y="734"/>
<point x="154" y="753" type="qcurve"/>
<point x="178" y="772"/>
<point x="211" y="810"/>
<point x="230" y="863"/>
<point x="240" y="920"/>
<point x="240" y="949" type="qcurve" smooth="yes"/>
<point x="240" y="1068" type="line" smooth="yes"/>
<point x="240" y="1097"/>
<point x="259" y="1144"/>
<point x="292" y="1182"/>
<point x="340" y="1202"/>
<point x="368" y="1202" type="qcurve" smooth="yes"/>
<point x="378" y="1202" type="line"/>
<point x="378" y="1202" type="line"/>
</contour>
<contour>
<point x="1022" y="743"/>
<point x="1085" y="710" type="qcurve"/>
<point x="1094" y="710" type="line"/>
<point x="1094" y="796" type="line"/>
<point x="1085" y="796" type="line" smooth="yes"/>
<point x="1056" y="796"/>
<point x="1032" y="810" type="qcurve"/>
<point x="1032" y="810" type="line"/>
<point x="1008" y="820"/>
<point x="994" y="834" type="qcurve"/>
<point x="994" y="839" type="line"/>
<point x="975" y="853"/>
<point x="965" y="877" type="qcurve"/>
<point x="965" y="877" type="line"/>
<point x="956" y="901"/>
<point x="956" y="929" type="qcurve" smooth="yes"/>
<point x="956" y="1006" type="line" smooth="yes"/>
<point x="960" y="1044"/>
<point x="956" y="1082" type="qcurve" smooth="yes"/>
<point x="956" y="1087" type="line" smooth="yes"/>
<point x="951" y="1120"/>
<point x="937" y="1159" type="qcurve"/>
<point x="937" y="1159" type="line"/>
<point x="922" y="1197"/>
<point x="860" y="1259"/>
<point x="784" y="1292"/>
<point x="741" y="1292" type="qcurve" smooth="yes"/>
<point x="736" y="1292" type="line"/>
<point x="736" y="1202" type="line"/>
<point x="741" y="1202" type="line" smooth="yes"/>
<point x="769" y="1202"/>
<point x="817" y="1182"/>
<point x="851" y="1144"/>
<point x="870" y="1097"/>
<point x="870" y="1068" type="qcurve" smooth="yes"/>
<point x="870" y="949" type="line" smooth="yes"/>
<point x="870" y="920"/>
<point x="879" y="863"/>
<point x="898" y="810"/>
<point x="932" y="772"/>
<point x="956" y="753" type="qcurve"/>
<point x="951" y="753" type="line" smooth="yes"/>
</contour>
<contour>
<point x="698" y="571"/>
<point x="636" y="423"/>
<point x="664" y="261"/>
<point x="779" y="147"/>
<point x="941" y="118"/>
<point x="1089" y="180"/>
<point x="1180" y="314"/>
<point x="1180" y="395" type="qcurve" smooth="yes"/>
<point x="1180" y="505"/>
<point x="1022" y="662"/>
<point x="913" y="662" type="qcurve" smooth="yes"/>
<point x="832" y="662"/>
<point x="765" y="619" type="qcurve" smooth="yes"/>
</contour>
<contour>
<point x="1080" y="495" type="line"/>
<point x="975" y="395" type="line"/>
<point x="1080" y="295" type="line"/>
<point x="1013" y="228" type="line"/>
<point x="913" y="333" type="line"/>
<point x="812" y="228" type="line"/>
<point x="746" y="295" type="line"/>
<point x="851" y="395" type="line"/>
<point x="746" y="495" type="line"/>
<point x="812" y="562" type="line"/>
<point x="913" y="457" type="line"/>
<point x="1013" y="562" type="line"/>
</contour>
</outline>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/codicon.ufo/glyphs/bracket-error.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/codicon.ufo/glyphs/bracket-error.glif",
"repo_id": "cascadia-code",
"token_count": 2943
}
| 661 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="chrome-close" format="2">
<advance width="1200"/>
<unicode hex="EAB8"/>
<note>
chrome-close
</note>
<outline>
<contour>
<point x="20" y="1193" type="line"/>
<point x="503" y="710" type="line"/>
<point x="20" y="227" type="line"/>
<point x="117" y="130" type="line"/>
<point x="600" y="613" type="line"/>
<point x="1083" y="130" type="line"/>
<point x="1180" y="227" type="line"/>
<point x="697" y="710" type="line"/>
<point x="1180" y="1193" type="line"/>
<point x="1083" y="1290" type="line"/>
<point x="600" y="807" type="line"/>
<point x="117" y="1290" type="line"/>
</contour>
</outline>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/codicon.ufo/glyphs/chrome-close.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/codicon.ufo/glyphs/chrome-close.glif",
"repo_id": "cascadia-code",
"token_count": 344
}
| 662 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="file" format="2">
<advance width="1200"/>
<unicode hex="EA7B"/>
<note>
file
</note>
<outline>
<contour>
<point x="1161" y="1014" type="line"/>
<point x="1128" y="1089" type="line"/>
<point x="825" y="1393" type="line"/>
<point x="754" y="1420" type="line"/>
<point x="142" y="1420" type="line"/>
<point x="39" y="1322" type="line"/>
<point x="39" y="103" type="line"/>
<point x="142" y="0" type="line"/>
<point x="1058" y="0" type="line"/>
<point x="1161" y="103" type="line"/>
</contour>
<contour>
<point x="1058" y="916" type="line"/>
<point x="1058" y="103" type="line"/>
<point x="142" y="103" type="line"/>
<point x="142" y="1322" type="line"/>
<point x="651" y="1322" type="line"/>
<point x="651" y="916" type="line"/>
</contour>
<contour>
<point x="1058" y="1014" type="line"/>
<point x="754" y="1014" type="line"/>
<point x="754" y="1322" type="line"/>
</contour>
</outline>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/codicon.ufo/glyphs/file.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/codicon.ufo/glyphs/file.glif",
"repo_id": "cascadia-code",
"token_count": 522
}
| 663 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="graph" format="2">
<advance width="1200"/>
<unicode hex="EB03"/>
<note>
graph
</note>
<outline>
<contour>
<point x="20" y="170" type="line"/>
<point x="60" y="130" type="line"/>
<point x="1180" y="130" type="line"/>
<point x="1180" y="210" type="line"/>
<point x="104" y="210" type="line"/>
<point x="104" y="1290" type="line"/>
<point x="20" y="1290" type="line"/>
</contour>
<contour>
<point x="228" y="294" type="line"/>
<point x="392" y="294" type="line"/>
<point x="436" y="334" type="line"/>
<point x="436" y="998" type="line"/>
<point x="392" y="1042" type="line"/>
<point x="228" y="1042" type="line"/>
<point x="184" y="998" type="line"/>
<point x="184" y="334" type="line"/>
</contour>
<contour>
<point x="268" y="378" type="line"/>
<point x="268" y="958" type="line"/>
<point x="352" y="958" type="line"/>
<point x="352" y="378" type="line"/>
</contour>
<contour>
<point x="892" y="1206" type="line"/>
<point x="848" y="1166" type="line"/>
<point x="848" y="334" type="line"/>
<point x="892" y="294" type="line"/>
<point x="1056" y="294" type="line"/>
<point x="1100" y="334" type="line"/>
<point x="1100" y="1166" type="line"/>
<point x="1056" y="1206" type="line"/>
</contour>
<contour>
<point x="932" y="1126" type="line"/>
<point x="1016" y="1126" type="line"/>
<point x="1016" y="378" type="line"/>
<point x="932" y="378" type="line"/>
</contour>
<contour>
<point x="560" y="294" type="line"/>
<point x="724" y="294" type="line"/>
<point x="768" y="334" type="line"/>
<point x="768" y="834" type="line"/>
<point x="724" y="874" type="line"/>
<point x="560" y="874" type="line"/>
<point x="516" y="834" type="line"/>
<point x="516" y="334" type="line"/>
</contour>
<contour>
<point x="600" y="378" type="line"/>
<point x="600" y="794" type="line"/>
<point x="684" y="794" type="line"/>
<point x="684" y="378" type="line"/>
</contour>
</outline>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/codicon.ufo/glyphs/graph.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/codicon.ufo/glyphs/graph.glif",
"repo_id": "cascadia-code",
"token_count": 1075
}
| 664 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="milestone" format="2">
<advance width="1200"/>
<unicode hex="EB20"/>
<note>
milestone
</note>
<outline>
<contour>
<point x="600" y="1126" type="line"/>
<point x="600" y="1290" type="line"/>
<point x="516" y="1290" type="line"/>
<point x="516" y="1126" type="line"/>
<point x="60" y="1126" type="line"/>
<point x="20" y="1082" type="line"/>
<point x="20" y="750" type="line"/>
<point x="60" y="710" type="line"/>
<point x="516" y="710" type="line"/>
<point x="516" y="130" type="line"/>
<point x="600" y="130" type="line"/>
<point x="600" y="710" type="line"/>
<point x="972" y="710" type="line"/>
<point x="1003" y="719" type="line"/>
<point x="1180" y="887" type="line"/>
<point x="1180" y="949" type="line"/>
<point x="1003" y="1113" type="line"/>
<point x="972" y="1126" type="line"/>
</contour>
<contour>
<point x="1091" y="918" type="line"/>
<point x="954" y="794" type="line"/>
<point x="104" y="794" type="line"/>
<point x="104" y="1042" type="line"/>
<point x="954" y="1042" type="line"/>
</contour>
<contour>
<point x="352" y="878" type="line"/>
<point x="768" y="878" type="line"/>
<point x="768" y="958" type="line"/>
<point x="352" y="958" type="line"/>
</contour>
</outline>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/codicon.ufo/glyphs/milestone.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/codicon.ufo/glyphs/milestone.glif",
"repo_id": "cascadia-code",
"token_count": 688
}
| 665 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="output" format="2">
<advance width="1200"/>
<unicode hex="EB9D"/>
<note>
output
</note>
<outline>
<contour>
<point x="953" y="1420" type="line"/>
<point x="953" y="1330" type="line"/>
<point x="778" y="1330" type="line"/>
<point x="778" y="1420" type="line"/>
<point x="688" y="1420" type="line"/>
<point x="688" y="1330" type="line"/>
<point x="508" y="1330" type="line"/>
<point x="508" y="1420" type="line"/>
<point x="423" y="1420" type="line"/>
<point x="423" y="1330" type="line"/>
<point x="243" y="1330" type="line"/>
<point x="243" y="1420" type="line"/>
<point x="153" y="1420" type="line"/>
<point x="153" y="1330" type="line"/>
<point x="68" y="1245" type="line"/>
<point x="68" y="90" type="line"/>
<point x="153" y="0" type="line"/>
<point x="1043" y="0" type="line"/>
<point x="1133" y="90" type="line"/>
<point x="1133" y="1245" type="line"/>
<point x="1043" y="1330" type="line"/>
<point x="1043" y="1420" type="line"/>
</contour>
<contour>
<point x="153" y="1245" type="line"/>
<point x="1043" y="1245" type="line"/>
<point x="1043" y="90" type="line"/>
<point x="153" y="90" type="line"/>
</contour>
<contour>
<point x="333" y="975" type="line"/>
<point x="863" y="975" type="line"/>
<point x="863" y="1065" type="line"/>
<point x="333" y="1065" type="line"/>
</contour>
<contour>
<point x="863" y="620" type="line"/>
<point x="863" y="710" type="line"/>
<point x="333" y="710" type="line"/>
<point x="333" y="620" type="line"/>
</contour>
<contour>
<point x="333" y="265" type="line"/>
<point x="863" y="265" type="line"/>
<point x="863" y="355" type="line"/>
<point x="333" y="355" type="line"/>
</contour>
</outline>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/codicon.ufo/glyphs/output.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/codicon.ufo/glyphs/output.glif",
"repo_id": "cascadia-code",
"token_count": 951
}
| 666 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="run-above" format="2">
<advance width="1200"/>
<unicode hex="EBBD"/>
<note>
run-above
</note>
<outline>
<contour>
<point x="809" y="792" type="line"/>
<point x="80" y="1275" type="line"/>
<point x="20" y="1240" type="line"/>
<point x="20" y="270" type="line"/>
<point x="80" y="240" type="line"/>
<point x="809" y="723" type="line"/>
</contour>
<contour>
<point x="714" y="757" type="line"/>
<point x="102" y="348" type="line"/>
<point x="102" y="1167" type="line"/>
</contour>
<contour>
<point x="719" y="507" type="line"/>
<point x="775" y="451" type="line"/>
<point x="908" y="585" type="line"/>
<point x="908" y="145" type="line"/>
<point x="990" y="145" type="line"/>
<point x="990" y="585" type="line"/>
<point x="1124" y="451" type="line"/>
<point x="1180" y="507" type="line"/>
<point x="977" y="710" type="line"/>
<point x="921" y="710" type="line"/>
</contour>
</outline>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/codicon.ufo/glyphs/run-above.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/codicon.ufo/glyphs/run-above.glif",
"repo_id": "cascadia-code",
"token_count": 518
}
| 667 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="symbol-property" format="2">
<advance width="1200"/>
<unicode hex="EB65"/>
<note>
symbol-property
</note>
<outline>
<contour>
<point x="220" y="129"/>
<point x="259" y="165" type="qcurve" smooth="yes"/>
<point x="401" y="298"/>
<point x="702" y="603" type="qcurve"/>
<point x="782" y="572"/>
<point x="945" y="590"/>
<point x="1016" y="638" type="qcurve" smooth="yes"/>
<point x="1052" y="660"/>
<point x="1078" y="687" type="qcurve" smooth="yes"/>
<point x="1127" y="736"/>
<point x="1180" y="868"/>
<point x="1180" y="1010"/>
<point x="1153" y="1076" type="qcurve" smooth="yes"/>
<point x="1131" y="1130" type="line"/>
<point x="883" y="891" type="line"/>
<point x="782" y="992" type="line"/>
<point x="1021" y="1240" type="line"/>
<point x="968" y="1262" type="line" smooth="yes"/>
<point x="888" y="1298"/>
<point x="706" y="1280"/>
<point x="631" y="1231" type="qcurve" smooth="yes"/>
<point x="538" y="1174"/>
<point x="494" y="1068" type="qcurve" smooth="yes"/>
<point x="467" y="1006"/>
<point x="467" y="873"/>
<point x="490" y="811" type="qcurve"/>
<point x="206" y="537"/>
<point x="51" y="368" type="qcurve" smooth="yes"/>
<point x="16" y="324"/>
<point x="25" y="213"/>
<point x="109" y="129"/>
<point x="171" y="129" type="qcurve" smooth="yes"/>
</contour>
<contour>
<point x="857" y="1214"/>
<point x="883" y="1209" type="qcurve"/>
<point x="671" y="992" type="line"/>
<point x="883" y="780" type="line"/>
<point x="1100" y="992" type="line"/>
<point x="1105" y="966"/>
<point x="1105" y="939" type="qcurve" smooth="yes"/>
<point x="1105" y="882"/>
<point x="1065" y="780"/>
<point x="1025" y="740" type="qcurve" smooth="yes"/>
<point x="1003" y="718"/>
<point x="976" y="705" type="qcurve" smooth="yes"/>
<point x="914" y="665"/>
<point x="773" y="652"/>
<point x="706" y="683" type="qcurve" smooth="yes"/>
<point x="684" y="691" type="line"/>
<point x="662" y="674" type="line"/>
<point x="339" y="342"/>
<point x="206" y="222" type="qcurve" smooth="yes"/>
<point x="193" y="209"/>
<point x="158" y="209"/>
<point x="131" y="218"/>
<point x="100" y="249"/>
<point x="96" y="298"/>
<point x="109" y="320" type="qcurve"/>
<point x="264" y="483"/>
<point x="565" y="775" type="qcurve" smooth="yes"/>
<point x="583" y="793" type="line"/>
<point x="569" y="820" type="line" smooth="yes"/>
<point x="534" y="899"/>
<point x="565" y="1068"/>
<point x="627" y="1130" type="qcurve" smooth="yes"/>
<point x="649" y="1152"/>
<point x="675" y="1165" type="qcurve"/>
<point x="746" y="1214"/>
<point x="826" y="1214" type="qcurve" smooth="yes"/>
</contour>
</outline>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/codicon.ufo/glyphs/symbol-property.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/codicon.ufo/glyphs/symbol-property.glif",
"repo_id": "cascadia-code",
"token_count": 1496
}
| 668 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="terminal-powershell" format="2">
<advance width="1200"/>
<unicode hex="EBC7"/>
<note>
terminal-powershell
</note>
<outline>
<contour>
<point x="153" y="1188" type="line"/>
<point x="20" y="247" type="line"/>
<point x="51" y="208" type="line"/>
<point x="1004" y="196" type="line"/>
<point x="1047" y="232" type="line"/>
<point x="1180" y="1177" type="line"/>
<point x="1149" y="1212" type="line"/>
<point x="196" y="1224" type="line"/>
</contour>
<contour>
<point x="223" y="1149" type="line"/>
<point x="1102" y="1142" type="line"/>
<point x="977" y="271" type="line"/>
<point x="98" y="282" type="line"/>
</contour>
<contour>
<point x="887" y="532" type="line"/>
<point x="524" y="536" type="line"/>
<point x="512" y="466" type="line"/>
<point x="879" y="458" type="line"/>
</contour>
<contour>
<point x="321" y="509" type="line"/>
<point x="649" y="716" type="line"/>
<point x="379" y="931" type="line"/>
<point x="329" y="872" type="line"/>
<point x="524" y="720" type="line"/>
<point x="286" y="567" type="line"/>
</contour>
</outline>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/codicon.ufo/glyphs/terminal-powershell.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/codicon.ufo/glyphs/terminal-powershell.glif",
"repo_id": "cascadia-code",
"token_count": 606
}
| 669 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="type-hierarchy-super" format="2">
<advance width="1200"/>
<unicode hex="EBBB"/>
<note>
type-hierarchy-super
</note>
<outline>
<contour>
<point x="1180" y="1290" type="line"/>
<point x="1132" y="1333" type="line"/>
<point x="866" y="1333" type="line"/>
<point x="823" y="1290" type="line"/>
<point x="823" y="1024" type="line"/>
<point x="866" y="976" type="line"/>
<point x="871" y="976" type="line"/>
<point x="600" y="567" type="line"/>
<point x="324" y="976" type="line"/>
<point x="329" y="976" type="line"/>
<point x="377" y="1024" type="line"/>
<point x="377" y="1290" type="line"/>
<point x="329" y="1333" type="line"/>
<point x="63" y="1333" type="line"/>
<point x="20" y="1290" type="line"/>
<point x="20" y="1024" type="line"/>
<point x="63" y="976" type="line"/>
<point x="220" y="976" type="line"/>
<point x="514" y="534" type="line"/>
<point x="419" y="534" type="line"/>
<point x="377" y="487" type="line"/>
<point x="377" y="130" type="line"/>
<point x="419" y="87" type="line"/>
<point x="776" y="87" type="line"/>
<point x="823" y="130" type="line"/>
<point x="823" y="487" type="line"/>
<point x="776" y="534" type="line"/>
<point x="681" y="534" type="line"/>
<point x="980" y="976" type="line"/>
<point x="1132" y="976" type="line"/>
<point x="1180" y="1024" type="line"/>
</contour>
<contour>
<point x="286" y="1067" type="line"/>
<point x="110" y="1067" type="line"/>
<point x="110" y="1247" type="line"/>
<point x="286" y="1247" type="line"/>
</contour>
<contour>
<point x="467" y="444" type="line"/>
<point x="733" y="444" type="line"/>
<point x="733" y="178" type="line"/>
<point x="467" y="178" type="line"/>
</contour>
<contour>
<point x="1090" y="1247" type="line"/>
<point x="1090" y="1067" type="line"/>
<point x="909" y="1067" type="line"/>
<point x="909" y="1247" type="line"/>
</contour>
</outline>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/codicon.ufo/glyphs/type-hierarchy-super.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/codicon.ufo/glyphs/type-hierarchy-super.glif",
"repo_id": "cascadia-code",
"token_count": 1052
}
| 670 |
<?xml version='1.0' encoding='UTF-8'?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
<key>uniE600</key>
<string>uniE_600.glif</string>
<key>uniE601</key>
<string>uniE_601.glif</string>
<key>uniE602</key>
<string>uniE_602.glif</string>
<key>uniE603</key>
<string>uniE_603.glif</string>
<key>uniE604</key>
<string>uniE_604.glif</string>
<key>uniE605</key>
<string>uniE_605.glif</string>
<key>uniE606</key>
<string>uniE_606.glif</string>
<key>uniE607</key>
<string>uniE_607.glif</string>
<key>uniE608</key>
<string>uniE_608.glif</string>
<key>uniE609</key>
<string>uniE_609.glif</string>
<key>uniE60A</key>
<string>uniE_60A_.glif</string>
<key>uniE60B</key>
<string>uniE_60B_.glif</string>
<key>uniE60C</key>
<string>uniE_60C_.glif</string>
<key>uniE60D</key>
<string>uniE_60D_.glif</string>
<key>uniE60E</key>
<string>uniE_60E_.glif</string>
<key>uniE60F</key>
<string>uniE_60F_.glif</string>
<key>uniE610</key>
<string>uniE_610.glif</string>
<key>uniE611</key>
<string>uniE_611.glif</string>
<key>uniE612</key>
<string>uniE_612.glif</string>
<key>uniE613</key>
<string>uniE_613.glif</string>
<key>uniE614</key>
<string>uniE_614.glif</string>
<key>uniE615</key>
<string>uniE_615.glif</string>
<key>uniE616</key>
<string>uniE_616.glif</string>
<key>uniE617</key>
<string>uniE_617.glif</string>
<key>uniE618</key>
<string>uniE_618.glif</string>
<key>uniE619</key>
<string>uniE_619.glif</string>
<key>uniE61A</key>
<string>uniE_61A_.glif</string>
<key>uniE61B</key>
<string>uniE_61B_.glif</string>
<key>uniE61C</key>
<string>uniE_61C_.glif</string>
<key>uniE61D</key>
<string>uniE_61D_.glif</string>
<key>uniE61E</key>
<string>uniE_61E_.glif</string>
<key>uniE61F</key>
<string>uniE_61F_.glif</string>
<key>uniE620</key>
<string>uniE_620.glif</string>
<key>uniE621</key>
<string>uniE_621.glif</string>
<key>uniE622</key>
<string>uniE_622.glif</string>
<key>uniE623</key>
<string>uniE_623.glif</string>
<key>uniE624</key>
<string>uniE_624.glif</string>
<key>uniE625</key>
<string>uniE_625.glif</string>
<key>uniE626</key>
<string>uniE_626.glif</string>
<key>uniE627</key>
<string>uniE_627.glif</string>
<key>uniE628</key>
<string>uniE_628.glif</string>
<key>uniE629</key>
<string>uniE_629.glif</string>
<key>uniE62A</key>
<string>uniE_62A_.glif</string>
<key>uniE62B</key>
<string>uniE_62B_.glif</string>
<key>uniE62C</key>
<string>uniE_62C_.glif</string>
<key>uniE62D</key>
<string>uniE_62D_.glif</string>
<key>uniE62E</key>
<string>uniE_62E_.glif</string>
<key>uniE62F</key>
<string>uniE_62F_.glif</string>
<key>uniE630</key>
<string>uniE_630.glif</string>
<key>uniE631</key>
<string>uniE_631.glif</string>
<key>uniE632</key>
<string>uniE_632.glif</string>
<key>uniE633</key>
<string>uniE_633.glif</string>
<key>uniE634</key>
<string>uniE_634.glif</string>
<key>uniE635</key>
<string>uniE_635.glif</string>
<key>uniE636</key>
<string>uniE_636.glif</string>
<key>uniE637</key>
<string>uniE_637.glif</string>
<key>uniE638</key>
<string>uniE_638.glif</string>
<key>uniE639</key>
<string>uniE_639.glif</string>
<key>uniE63A</key>
<string>uniE_63A_.glif</string>
<key>uniE63B</key>
<string>uniE_63B_.glif</string>
<key>uniE63C</key>
<string>uniE_63C_.glif</string>
<key>uniE63D</key>
<string>uniE_63D_.glif</string>
<key>uniE63E</key>
<string>uniE_63E_.glif</string>
<key>uniE63F</key>
<string>uniE_63F_.glif</string>
<key>uniE640</key>
<string>uniE_640.glif</string>
<key>uniE641</key>
<string>uniE_641.glif</string>
<key>uniE642</key>
<string>uniE_642.glif</string>
<key>uniE643</key>
<string>uniE_643.glif</string>
<key>uniE644</key>
<string>uniE_644.glif</string>
<key>uniE645</key>
<string>uniE_645.glif</string>
<key>uniE646</key>
<string>uniE_646.glif</string>
<key>uniE647</key>
<string>uniE_647.glif</string>
<key>uniE648</key>
<string>uniE_648.glif</string>
<key>uniE649</key>
<string>uniE_649.glif</string>
<key>uniE64A</key>
<string>uniE_64A_.glif</string>
<key>uniE64B</key>
<string>uniE_64B_.glif</string>
<key>uniE64C</key>
<string>uniE_64C_.glif</string>
<key>uniE64D</key>
<string>uniE_64D_.glif</string>
<key>uniE64E</key>
<string>uniE_64E_.glif</string>
<key>uniE64F</key>
<string>uniE_64F_.glif</string>
<key>uniE650</key>
<string>uniE_650.glif</string>
<key>uniE651</key>
<string>uniE_651.glif</string>
<key>uniE652</key>
<string>uniE_652.glif</string>
<key>uniE653</key>
<string>uniE_653.glif</string>
<key>uniE654</key>
<string>uniE_654.glif</string>
<key>uniE655</key>
<string>uniE_655.glif</string>
<key>uniE656</key>
<string>uniE_656.glif</string>
<key>uniE657</key>
<string>uniE_657.glif</string>
<key>uniE658</key>
<string>uniE_658.glif</string>
<key>uniE659</key>
<string>uniE_659.glif</string>
<key>uniE65A</key>
<string>uniE_65A_.glif</string>
<key>uniE65B</key>
<string>uniE_65B_.glif</string>
<key>uniE65C</key>
<string>uniE_65C_.glif</string>
<key>uniE65D</key>
<string>uniE_65D_.glif</string>
<key>uniE65E</key>
<string>uniE_65E_.glif</string>
<key>uniE65F</key>
<string>uniE_65F_.glif</string>
<key>uniE660</key>
<string>uniE_660.glif</string>
<key>uniE661</key>
<string>uniE_661.glif</string>
<key>uniE662</key>
<string>uniE_662.glif</string>
<key>uniE663</key>
<string>uniE_663.glif</string>
<key>uniE664</key>
<string>uniE_664.glif</string>
<key>uniE665</key>
<string>uniE_665.glif</string>
<key>uniE666</key>
<string>uniE_666.glif</string>
<key>uniE667</key>
<string>uniE_667.glif</string>
<key>uniE668</key>
<string>uniE_668.glif</string>
<key>uniE669</key>
<string>uniE_669.glif</string>
<key>uniE66A</key>
<string>uniE_66A_.glif</string>
<key>uniE66B</key>
<string>uniE_66B_.glif</string>
<key>uniE66C</key>
<string>uniE_66C_.glif</string>
<key>uniE66D</key>
<string>uniE_66D_.glif</string>
<key>uniE66E</key>
<string>uniE_66E_.glif</string>
<key>uniE66F</key>
<string>uniE_66F_.glif</string>
<key>uniE670</key>
<string>uniE_670.glif</string>
<key>uniE671</key>
<string>uniE_671.glif</string>
<key>uniE672</key>
<string>uniE_672.glif</string>
<key>uniE673</key>
<string>uniE_673.glif</string>
<key>uniE674</key>
<string>uniE_674.glif</string>
<key>uniE675</key>
<string>uniE_675.glif</string>
<key>uniE676</key>
<string>uniE_676.glif</string>
<key>uniE677</key>
<string>uniE_677.glif</string>
<key>uniE678</key>
<string>uniE_678.glif</string>
<key>uniE679</key>
<string>uniE_679.glif</string>
<key>uniE67A</key>
<string>uniE_67A_.glif</string>
<key>uniE67B</key>
<string>uniE_67B_.glif</string>
<key>uniE67C</key>
<string>uniE_67C_.glif</string>
<key>uniE67D</key>
<string>uniE_67D_.glif</string>
<key>uniE67E</key>
<string>uniE_67E_.glif</string>
<key>uniE67F</key>
<string>uniE_67F_.glif</string>
<key>uniE680</key>
<string>uniE_680.glif</string>
<key>uniE681</key>
<string>uniE_681.glif</string>
<key>uniE682</key>
<string>uniE_682.glif</string>
<key>uniE683</key>
<string>uniE_683.glif</string>
<key>uniE684</key>
<string>uniE_684.glif</string>
<key>uniE685</key>
<string>uniE_685.glif</string>
<key>uniE686</key>
<string>uniE_686.glif</string>
<key>uniE687</key>
<string>uniE_687.glif</string>
<key>uniE688</key>
<string>uniE_688.glif</string>
<key>uniE689</key>
<string>uniE_689.glif</string>
<key>uniE68A</key>
<string>uniE_68A_.glif</string>
<key>uniE68B</key>
<string>uniE_68B_.glif</string>
<key>uniE68C</key>
<string>uniE_68C_.glif</string>
<key>uniE68D</key>
<string>uniE_68D_.glif</string>
<key>uniE68E</key>
<string>uniE_68E_.glif</string>
<key>uniE68F</key>
<string>uniE_68F_.glif</string>
<key>uniE690</key>
<string>uniE_690.glif</string>
<key>uniE691</key>
<string>uniE_691.glif</string>
<key>uniE692</key>
<string>uniE_692.glif</string>
<key>uniE693</key>
<string>uniE_693.glif</string>
<key>uniE694</key>
<string>uniE_694.glif</string>
<key>uniE695</key>
<string>uniE_695.glif</string>
<key>uniE696</key>
<string>uniE_696.glif</string>
<key>uniE697</key>
<string>uniE_697.glif</string>
<key>uniE698</key>
<string>uniE_698.glif</string>
<key>uniE699</key>
<string>uniE_699.glif</string>
<key>uniE69A</key>
<string>uniE_69A_.glif</string>
<key>uniE69B</key>
<string>uniE_69B_.glif</string>
<key>uniE69C</key>
<string>uniE_69C_.glif</string>
<key>uniE69D</key>
<string>uniE_69D_.glif</string>
<key>uniE69E</key>
<string>uniE_69E_.glif</string>
<key>uniE69F</key>
<string>uniE_69F_.glif</string>
<key>uniE6A0</key>
<string>uniE_6A_0.glif</string>
<key>uniE6A1</key>
<string>uniE_6A_1.glif</string>
<key>uniE6A2</key>
<string>uniE_6A_2.glif</string>
<key>uniE6A3</key>
<string>uniE_6A_3.glif</string>
<key>uniE6A4</key>
<string>uniE_6A_4.glif</string>
<key>uniE6A5</key>
<string>uniE_6A_5.glif</string>
<key>uniE6A6</key>
<string>uniE_6A_6.glif</string>
<key>uniE6A7</key>
<string>uniE_6A_7.glif</string>
<key>uniE6A8</key>
<string>uniE_6A_8.glif</string>
<key>uniE6A9</key>
<string>uniE_6A_9.glif</string>
<key>uniE6AA</key>
<string>uniE_6A_A_.glif</string>
<key>uniE6AB</key>
<string>uniE_6A_B_.glif</string>
<key>uniE6AC</key>
<string>uniE_6A_C_.glif</string>
<key>uniE6AD</key>
<string>uniE_6A_D_.glif</string>
<key>uniE6AE</key>
<string>uniE_6A_E_.glif</string>
<key>uniE6AF</key>
<string>uniE_6A_F_.glif</string>
<key>uniE6B0</key>
<string>uniE_6B_0.glif</string>
<key>uniE6B1</key>
<string>uniE_6B_1.glif</string>
<key>uniE6B2</key>
<string>uniE_6B_2.glif</string>
<key>uniE6B3</key>
<string>uniE_6B_3.glif</string>
<key>uniE6B4</key>
<string>uniE_6B_4.glif</string>
<key>uniE6B5</key>
<string>uniE_6B_5.glif</string>
<key>uniE6B6</key>
<string>uniE_6B_6.glif</string>
<key>uniE6B7</key>
<string>uniE_6B_7.glif</string>
<key>uniE6B8</key>
<string>uniE_6B_8.glif</string>
<key>uniE6B9</key>
<string>uniE_6B_9.glif</string>
<key>uniE6BA</key>
<string>uniE_6B_A_.glif</string>
<key>uniE6BB</key>
<string>uniE_6B_B_.glif</string>
<key>uniE6BC</key>
<string>uniE_6B_C_.glif</string>
<key>uniE6BD</key>
<string>uniE_6B_D_.glif</string>
<key>uniE6BE</key>
<string>uniE_6B_E_.glif</string>
<key>uniE6BF</key>
<string>uniE_6B_F_.glif</string>
<key>uniE6C0</key>
<string>uniE_6C_0.glif</string>
<key>uniE6C1</key>
<string>uniE_6C_1.glif</string>
<key>uniE6C2</key>
<string>uniE_6C_2.glif</string>
<key>uniE6C3</key>
<string>uniE_6C_3.glif</string>
<key>uniE6C4</key>
<string>uniE_6C_4.glif</string>
<key>uniE6C5</key>
<string>uniE_6C_5.glif</string>
</dict>
</plist>
|
cascadia-code/sources/nerdfonts/full/processed/devicons.ufo/glyphs/contents.plist/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/devicons.ufo/glyphs/contents.plist",
"repo_id": "cascadia-code",
"token_count": 6354
}
| 671 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="virus" format="2">
<advance width="1200"/>
<unicode hex="E214"/>
<note>
virus
</note>
<outline>
<contour>
<point x="1195" y="710"/>
<point x="1165" y="729" type="qcurve" smooth="yes"/>
<point x="1117" y="755" type="line"/>
<point x="1067" y="784" type="line" smooth="yes"/>
<point x="1036" y="805"/>
<point x="1029" y="834" type="qcurve" smooth="yes"/>
<point x="1014" y="879"/>
<point x="988" y="927" type="qcurve" smooth="yes"/>
<point x="974" y="960"/>
<point x="981" y="991" type="qcurve" smooth="yes"/>
<point x="995" y="1043" type="line"/>
<point x="1010" y="1098" type="line" smooth="yes"/>
<point x="1014" y="1110"/>
<point x="1000" y="1124"/>
<point x="986" y="1122" type="qcurve" smooth="yes"/>
<point x="931" y="1108" type="line"/>
<point x="879" y="1093" type="line" smooth="yes"/>
<point x="843" y="1084"/>
<point x="814" y="1101" type="qcurve" smooth="yes"/>
<point x="769" y="1124"/>
<point x="721" y="1139" type="qcurve" smooth="yes"/>
<point x="688" y="1146"/>
<point x="671" y="1177" type="qcurve" smooth="yes"/>
<point x="643" y="1227" type="line"/>
<point x="617" y="1275" type="line" smooth="yes"/>
<point x="600" y="1305"/>
<point x="581" y="1275" type="qcurve" smooth="yes"/>
<point x="552" y="1224" type="line"/>
<point x="524" y="1177" type="line"/>
<point x="509" y="1146"/>
<point x="476" y="1136" type="qcurve" smooth="yes"/>
<point x="426" y="1122"/>
<point x="386" y="1098" type="qcurve" smooth="yes"/>
<point x="357" y="1082"/>
<point x="321" y="1091" type="qcurve" smooth="yes"/>
<point x="267" y="1105" type="line"/>
<point x="212" y="1120" type="line" smooth="yes"/>
<point x="200" y="1124"/>
<point x="186" y="1110"/>
<point x="188" y="1096" type="qcurve" smooth="yes"/>
<point x="202" y="1041" type="line"/>
<point x="219" y="984" type="line" smooth="yes"/>
<point x="228" y="953"/>
<point x="209" y="922" type="qcurve"/>
<point x="209" y="920" type="line"/>
<point x="188" y="874"/>
<point x="174" y="831" type="qcurve" smooth="yes"/>
<point x="164" y="798"/>
<point x="136" y="784" type="qcurve" smooth="yes"/>
<point x="86" y="755" type="line"/>
<point x="35" y="727" type="line" smooth="yes"/>
<point x="5" y="710"/>
<point x="35" y="691" type="qcurve" smooth="yes"/>
<point x="86" y="662" type="line"/>
<point x="136" y="634" type="line" smooth="yes"/>
<point x="169" y="615"/>
<point x="176" y="584" type="qcurve"/>
<point x="212" y="498" type="line"/>
<point x="228" y="469"/>
<point x="219" y="434" type="qcurve" smooth="yes"/>
<point x="205" y="379" type="line"/>
<point x="190" y="322" type="line"/>
<point x="186" y="310"/>
<point x="200" y="296"/>
<point x="214" y="298" type="qcurve" smooth="yes"/>
<point x="269" y="312" type="line"/>
<point x="326" y="329" type="line" smooth="yes"/>
<point x="357" y="336"/>
<point x="390" y="322" type="qcurve"/>
<point x="390" y="319" type="line"/>
<point x="436" y="298"/>
<point x="479" y="284" type="qcurve" smooth="yes"/>
<point x="509" y="274"/>
<point x="529" y="246" type="qcurve" smooth="yes"/>
<point x="555" y="196" type="line"/>
<point x="583" y="145" type="line" smooth="yes"/>
<point x="600" y="115"/>
<point x="619" y="145" type="qcurve" smooth="yes"/>
<point x="648" y="196" type="line"/>
<point x="676" y="246" type="line" smooth="yes"/>
<point x="698" y="276"/>
<point x="726" y="284" type="qcurve" smooth="yes"/>
<point x="774" y="298"/>
<point x="814" y="322" type="qcurve" smooth="yes"/>
<point x="845" y="338"/>
<point x="879" y="329" type="qcurve" smooth="yes"/>
<point x="933" y="315" type="line"/>
<point x="988" y="300" type="line" smooth="yes"/>
<point x="1019" y="293"/>
<point x="1012" y="324" type="qcurve" smooth="yes"/>
<point x="998" y="379" type="line"/>
<point x="983" y="434" type="line" smooth="yes"/>
<point x="974" y="467"/>
<point x="991" y="496" type="qcurve"/>
<point x="991" y="498" type="line"/>
<point x="1012" y="536"/>
<point x="1029" y="589" type="qcurve" smooth="yes"/>
<point x="1036" y="619"/>
<point x="1069" y="639" type="qcurve" smooth="yes"/>
<point x="1117" y="667" type="line"/>
<point x="1165" y="693" type="line" smooth="yes"/>
</contour>
</outline>
<lib>
<dict>
<key>com.schriftgestaltung.Glyphs.lastChange</key>
<string>2024-02-27 18:45:38 +0000</string>
</dict>
</lib>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/font-awesome-extension.ufo/glyphs/virus.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/font-awesome-extension.ufo/glyphs/virus.glif",
"repo_id": "cascadia-code",
"token_count": 2358
}
| 672 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="Arch Linux" format="2">
<advance width="1200"/>
<unicode hex="F303"/>
<note>
Arch Linux
</note>
<outline>
<contour>
<point x="473" y="1020"/>
<point x="437" y="946" type="qcurve"/>
<point x="505" y="873"/>
<point x="580" y="821" type="qcurve"/>
<point x="480" y="862"/>
<point x="421" y="912" type="qcurve"/>
<point x="315" y="687"/>
<point x="20" y="159" type="qcurve"/>
<point x="290" y="314"/>
<point x="455" y="345" type="qcurve"/>
<point x="448" y="384"/>
<point x="448" y="420" type="qcurve" smooth="yes"/>
<point x="448" y="427" type="line" smooth="yes"/>
<point x="450" y="511"/>
<point x="525" y="622"/>
<point x="629" y="615"/>
<point x="700" y="488"/>
<point x="697" y="404" type="qcurve" smooth="yes"/>
<point x="697" y="372"/>
<point x="691" y="345" type="qcurve"/>
<point x="856" y="314"/>
<point x="1121" y="159" type="qcurve"/>
<point x="1098" y="203"/>
<point x="1028" y="332" type="qcurve"/>
<point x="1021" y="338"/>
<point x="967" y="381"/>
<point x="935" y="404"/>
<point x="872" y="445"/>
<point x="838" y="463" type="qcurve"/>
<point x="931" y="438"/>
<point x="990" y="404" type="qcurve"/>
<point x="935" y="504"/>
<point x="847" y="674"/>
<point x="784" y="794"/>
<point x="722" y="916"/>
<point x="693" y="977"/>
<point x="645" y="1086"/>
<point x="632" y="1120"/>
<point x="580" y="1240"/>
<point x="571" y="1261" type="qcurve"/>
</contour>
<contour>
<point x="1126" y="257" type="line"/>
<point x="1130" y="257" type="line"/>
<point x="1130" y="289" type="line"/>
<point x="1141" y="289" type="line"/>
<point x="1141" y="291" type="line"/>
<point x="1117" y="291" type="line"/>
<point x="1117" y="289" type="line"/>
<point x="1126" y="289" type="line"/>
<point x="1126" y="257" type="line"/>
</contour>
<contour>
<point x="1148" y="257" type="line"/>
<point x="1153" y="257" type="line"/>
<point x="1153" y="286" type="line"/>
<point x="1162" y="257" type="line"/>
<point x="1166" y="257" type="line"/>
<point x="1175" y="286" type="line"/>
<point x="1175" y="257" type="line"/>
<point x="1180" y="257" type="line"/>
<point x="1180" y="291" type="line"/>
<point x="1173" y="291" type="line"/>
<point x="1166" y="268" type="line" smooth="yes"/>
<point x="1164" y="264"/>
<point x="1164" y="261" type="qcurve"/>
<point x="1164" y="264"/>
<point x="1162" y="268" type="qcurve" smooth="yes"/>
<point x="1155" y="291" type="line"/>
<point x="1148" y="291" type="line"/>
<point x="1148" y="257" type="line"/>
</contour>
</outline>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/font-logos.ufo/glyphs/A_rch L_inux.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/font-logos.ufo/glyphs/A_rch L_inux.glif",
"repo_id": "cascadia-code",
"token_count": 1456
}
| 673 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="clock-fill" format="2">
<advance width="1200"/>
<unicode hex="F4AB"/>
<note>
clock-fill
</note>
<outline>
<contour>
<point x="112" y="378"/>
<point x="268" y="222"/>
<point x="482" y="130"/>
<point x="718" y="130"/>
<point x="932" y="222"/>
<point x="1088" y="378"/>
<point x="1180" y="592"/>
<point x="1180" y="828"/>
<point x="1088" y="1042"/>
<point x="932" y="1198"/>
<point x="718" y="1290"/>
<point x="482" y="1290"/>
<point x="268" y="1198"/>
<point x="112" y="1042"/>
<point x="20" y="828"/>
<point x="20" y="592"/>
</contour>
<contour>
<point x="642" y="733" type="line"/>
<point x="785" y="675" type="line" smooth="yes"/>
<point x="802" y="668"/>
<point x="822" y="638"/>
<point x="822" y="620" type="qcurve" smooth="yes"/>
<point x="822" y="595"/>
<point x="787" y="560"/>
<point x="762" y="560" type="qcurve" smooth="yes"/>
<point x="751" y="560"/>
<point x="741" y="564" type="qcurve" smooth="yes"/>
<point x="560" y="636" type="line" smooth="yes"/>
<point x="543" y="643"/>
<point x="522" y="674"/>
<point x="522" y="692" type="qcurve" smooth="yes"/>
<point x="522" y="946" type="line" smooth="yes"/>
<point x="522" y="971"/>
<point x="557" y="1006"/>
<point x="607" y="1006"/>
<point x="642" y="971"/>
<point x="642" y="946" type="qcurve" smooth="yes"/>
</contour>
</outline>
<lib>
<dict>
<key>com.schriftgestaltung.Glyphs.lastChange</key>
<string>2024-02-27 18:49:53 +0000</string>
</dict>
</lib>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/octicons.ufo/glyphs/clock-fill.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/octicons.ufo/glyphs/clock-fill.glif",
"repo_id": "cascadia-code",
"token_count": 870
}
| 674 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="device-mobile" format="2">
<advance width="1200"/>
<unicode hex="F42C"/>
<note>
device-mobile
</note>
<outline>
<contour>
<point x="158" y="1420"/>
<point x="68" y="1329"/>
<point x="68" y="1265" type="qcurve" smooth="yes"/>
<point x="68" y="155" type="line" smooth="yes"/>
<point x="68" y="91"/>
<point x="158" y="0"/>
<point x="223" y="0" type="qcurve" smooth="yes"/>
<point x="977" y="0" type="line" smooth="yes"/>
<point x="1042" y="0"/>
<point x="1133" y="91"/>
<point x="1133" y="155" type="qcurve" smooth="yes"/>
<point x="1133" y="1265" type="line" smooth="yes"/>
<point x="1133" y="1329"/>
<point x="1042" y="1420"/>
<point x="977" y="1420" type="qcurve" smooth="yes"/>
<point x="223" y="1420" type="line" smooth="yes"/>
</contour>
<contour>
<point x="201" y="1274"/>
<point x="214" y="1287"/>
<point x="223" y="1287" type="qcurve" smooth="yes"/>
<point x="977" y="1287" type="line" smooth="yes"/>
<point x="986" y="1287"/>
<point x="999" y="1274"/>
<point x="999" y="1265" type="qcurve" smooth="yes"/>
<point x="999" y="155" type="line" smooth="yes"/>
<point x="999" y="146"/>
<point x="986" y="133"/>
<point x="977" y="133" type="qcurve" smooth="yes"/>
<point x="223" y="133" type="line" smooth="yes"/>
<point x="214" y="133"/>
<point x="201" y="146"/>
<point x="201" y="155" type="qcurve" smooth="yes"/>
<point x="201" y="1265" type="line" smooth="yes"/>
</contour>
<contour>
<point x="689" y="318"/>
<point x="689" y="392"/>
<point x="637" y="444"/>
<point x="563" y="444"/>
<point x="511" y="392"/>
<point x="511" y="318"/>
<point x="563" y="266"/>
<point x="637" y="266"/>
</contour>
</outline>
<lib>
<dict>
<key>com.schriftgestaltung.Glyphs.lastChange</key>
<string>2024-02-27 18:49:52 +0000</string>
</dict>
</lib>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/octicons.ufo/glyphs/device-mobile.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/octicons.ufo/glyphs/device-mobile.glif",
"repo_id": "cascadia-code",
"token_count": 1016
}
| 675 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="move-to-end" format="2">
<advance width="1200"/>
<unicode hex="F4F0"/>
<note>
move-to-end
</note>
<outline>
<contour>
<point x="928" y="678"/>
<point x="928" y="742"/>
<point x="906" y="764" type="qcurve" smooth="yes"/>
<point x="528" y="1142" type="line" smooth="yes"/>
<point x="505" y="1164"/>
<point x="442" y="1164"/>
<point x="398" y="1120"/>
<point x="398" y="1057"/>
<point x="420" y="1035" type="qcurve" smooth="yes"/>
<point x="669" y="786" type="line"/>
<point x="-5" y="786" type="line" smooth="yes"/>
<point x="-37" y="786"/>
<point x="-81" y="742"/>
<point x="-81" y="678"/>
<point x="-37" y="634"/>
<point x="-5" y="634" type="qcurve" smooth="yes"/>
<point x="669" y="634" type="line"/>
<point x="607" y="572" type="line" smooth="yes"/>
<point x="545" y="510"/>
<point x="420" y="385"/>
<point x="420" y="385" type="qcurve"/>
<point x="398" y="363"/>
<point x="398" y="300"/>
<point x="442" y="256"/>
<point x="505" y="256"/>
<point x="528" y="278" type="qcurve" smooth="yes"/>
<point x="906" y="656" type="line" smooth="yes"/>
</contour>
<contour>
<point x="1130" y="250"/>
<point x="1174" y="206"/>
<point x="1237" y="206"/>
<point x="1281" y="250"/>
<point x="1281" y="281" type="qcurve" smooth="yes"/>
<point x="1281" y="1139" type="line" smooth="yes"/>
<point x="1281" y="1170"/>
<point x="1237" y="1214"/>
<point x="1174" y="1214"/>
<point x="1130" y="1170"/>
<point x="1130" y="1139" type="qcurve" smooth="yes"/>
<point x="1130" y="281" type="line" smooth="yes"/>
</contour>
</outline>
<lib>
<dict>
<key>com.schriftgestaltung.Glyphs.lastChange</key>
<string>2024-02-27 18:49:53 +0000</string>
</dict>
</lib>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/octicons.ufo/glyphs/move-to-end.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/octicons.ufo/glyphs/move-to-end.glif",
"repo_id": "cascadia-code",
"token_count": 979
}
| 676 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="i_seti_ignored" format="2">
<advance width="1200"/>
<unicode hex="E668"/>
<note>
i_seti_ignored
</note>
<outline>
<contour>
<point x="1037" y="1072" type="line"/>
<point x="1044" y="1078" type="line"/>
<point x="1050" y="1091"/>
<point x="1056" y="1104"/>
<point x="1056" y="1117" type="curve"/>
<point x="1050" y="1142"/>
<point x="1037" y="1155"/>
<point x="1018" y="1161" type="curve" smooth="yes"/>
<point x="999" y="1168"/>
<point x="979" y="1161"/>
<point x="960" y="1149" type="curve"/>
<point x="832" y="1020" type="line" smooth="yes"/>
<point x="825" y="1014"/>
<point x="819" y="1014"/>
<point x="806" y="1014" type="curve"/>
<point x="658" y="1072"/>
<point x="517" y="1072"/>
<point x="369" y="1007" type="curve" smooth="yes"/>
<point x="222" y="943"/>
<point x="112" y="834"/>
<point x="29" y="706" type="curve"/>
<point x="16" y="680"/>
<point x="16" y="661"/>
<point x="35" y="641" type="curve"/>
<point x="67" y="603"/>
<point x="106" y="564"/>
<point x="145" y="532" type="curve" smooth="yes"/>
<point x="183" y="500"/>
<point x="222" y="474"/>
<point x="260" y="449" type="curve"/>
<point x="254" y="449"/>
<point x="254" y="442"/>
<point x="247" y="442" type="curve"/>
<point x="215" y="410"/>
<point x="183" y="384"/>
<point x="157" y="352" type="curve"/>
<point x="145" y="339"/>
<point x="138" y="320"/>
<point x="145" y="295" type="curve" smooth="yes"/>
<point x="151" y="275"/>
<point x="164" y="262"/>
<point x="189" y="256" type="curve"/>
<point x="209" y="256"/>
<point x="228" y="262"/>
<point x="241" y="275" type="curve" smooth="yes"/>
</contour>
<contour>
<point x="395" y="584" type="curve"/>
<point x="350" y="539" type="line"/>
<point x="337" y="539" type="line"/>
<point x="267" y="577"/>
<point x="202" y="622"/>
<point x="145" y="686" type="curve"/>
<point x="209" y="770"/>
<point x="286" y="847"/>
<point x="382" y="898" type="curve"/>
<point x="318" y="789"/>
<point x="324" y="686"/>
</contour>
<contour>
<point x="639" y="892" type="curve" smooth="yes"/>
<point x="633" y="872"/>
<point x="620" y="860"/>
<point x="594" y="860" type="curve" smooth="yes"/>
<point x="536" y="860"/>
<point x="491" y="821"/>
<point x="485" y="763" type="curve"/>
<point x="485" y="744" type="line" smooth="yes"/>
<point x="485" y="718"/>
<point x="466" y="706"/>
<point x="446" y="706" type="curve" smooth="yes"/>
<point x="421" y="706"/>
<point x="408" y="725"/>
<point x="408" y="750" type="curve" smooth="yes"/>
<point x="408" y="853"/>
<point x="491" y="943"/>
<point x="594" y="943" type="curve" smooth="yes"/>
<point x="626" y="943"/>
<point x="645" y="917"/>
</contour>
<contour>
<point x="1166" y="712" type="curve" smooth="yes"/>
<point x="1133" y="750"/>
<point x="1101" y="795"/>
<point x="1069" y="834" type="curve" smooth="yes"/>
<point x="1044" y="866"/>
<point x="1005" y="892"/>
<point x="979" y="924" type="curve"/>
<point x="902" y="847" type="line"/>
<point x="960" y="802"/>
<point x="1011" y="751"/>
<point x="1056" y="686" type="curve"/>
<point x="1050" y="680"/>
<point x="1044" y="673"/>
<point x="1037" y="673" type="curve"/>
<point x="1018" y="654"/>
<point x="999" y="635"/>
<point x="979" y="622" type="curve"/>
<point x="851" y="519"/>
<point x="710" y="468"/>
<point x="549" y="487" type="curve"/>
<point x="536" y="487"/>
<point x="530" y="487"/>
<point x="523" y="481" type="curve" smooth="yes"/>
<point x="511" y="468"/>
<point x="498" y="455"/>
<point x="485" y="436" type="curve"/>
<point x="446" y="397" type="line"/>
<point x="453" y="397" type="line"/>
<point x="543" y="378"/>
<point x="639" y="372"/>
<point x="729" y="391" type="curve" smooth="yes"/>
<point x="909" y="423"/>
<point x="1050" y="519"/>
<point x="1166" y="654" type="curve"/>
<point x="1185" y="667"/>
<point x="1185" y="686"/>
</contour>
</outline>
<lib>
<dict>
<key>com.schriftgestaltung.Glyphs.lastChange</key>
<string>2024-02-27 18:42:08 +0000</string>
</dict>
</lib>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/original-source.ufo/glyphs/i_seti_ignored.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/original-source.ufo/glyphs/i_seti_ignored.glif",
"repo_id": "cascadia-code",
"token_count": 2348
}
| 677 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="i_seti_reasonml" format="2">
<advance width="1200"/>
<unicode hex="E687"/>
<note>
i_seti_reasonml
</note>
<outline>
<contour>
<point x="1180" y="1290" type="line"/>
<point x="20" y="1290" type="line"/>
<point x="20" y="130" type="line"/>
<point x="1180" y="130" type="line"/>
</contour>
<contour>
<point x="426" y="447" type="line"/>
<point x="510" y="447" type="line" smooth="yes"/>
<point x="557" y="447"/>
<point x="585" y="472"/>
<point x="585" y="517" type="curve" smooth="yes"/>
<point x="585" y="560"/>
<point x="557" y="584"/>
<point x="510" y="584" type="curve" smooth="yes"/>
<point x="426" y="584" type="line"/>
</contour>
<contour>
<point x="769" y="232" type="line"/>
<point x="769" y="676" type="line"/>
<point x="1119" y="676" type="line"/>
<point x="1119" y="584" type="line"/>
<point x="885" y="584" type="line"/>
<point x="885" y="501" type="line"/>
<point x="1097" y="501" type="line"/>
<point x="1097" y="409" type="line"/>
<point x="885" y="408" type="line"/>
<point x="885" y="325" type="line"/>
<point x="1126" y="325" type="line"/>
<point x="1126" y="232" type="line"/>
</contour>
<contour>
<point x="616" y="377" type="curve"/>
<point x="708" y="232" type="line"/>
<point x="577" y="232" type="line"/>
<point x="512" y="355" type="line"/>
<point x="426" y="355" type="line"/>
<point x="426" y="232" type="line"/>
<point x="310" y="232" type="line"/>
<point x="310" y="676" type="line"/>
<point x="510" y="676" type="line" smooth="yes"/>
<point x="628" y="676"/>
<point x="695" y="619"/>
<point x="695" y="520" type="curve" smooth="yes"/>
<point x="695" y="453"/>
<point x="667" y="404"/>
</contour>
</outline>
<lib>
<dict>
<key>com.schriftgestaltung.Glyphs.lastChange</key>
<string>2024-02-27 18:42:08 +0000</string>
</dict>
</lib>
</glyph>
|
cascadia-code/sources/nerdfonts/full/processed/original-source.ufo/glyphs/i_seti_reasonml.glif/0
|
{
"file_path": "cascadia-code/sources/nerdfonts/full/processed/original-source.ufo/glyphs/i_seti_reasonml.glif",
"repo_id": "cascadia-code",
"token_count": 1012
}
| 678 |
# Documentation
This documentation relies on `sphinx` and `autoapi` to generate API documentation from the source code.
It can be built using the following command:
```console
make html
```
This generates the documentation in the `build/html` directory.
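The `autoapi` extension is enabled in Sphinx's `conf.py`. A minimal sketch (the extension entry point is sphinx-autoapi's documented name; the file location and source path are assumptions about this repo's layout):
```python
# docs/source/conf.py (sketch)
extensions = ["autoapi.extension"]  # sphinx-autoapi
autoapi_dirs = ["../../src"]  # assumed location of the package sources
```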
# Helpful Resources
- https://docs.readthedocs.io/en/stable/guides/jupyter.html
- https://coderefinery.github.io/documentation/gh_workflow/
- https://www.sphinx-doc.org/en/master/tutorial/deploying.html#publishing-sources
- https://coderefinery.github.io/documentation/sphinx/
- https://github.com/brechtm/rinohtype/blob/master/.github/workflows/tests.yml (for an example of versioned documentation)
- https://stackoverflow.com/questions/72089650/how-to-host-multiple-version-of-a-sphinx-based-documentation-on-github
|
causica/docs/README.md/0
|
{
"file_path": "causica/docs/README.md",
"repo_id": "causica",
"token_count": 251
}
| 679 |
"""This scripts is for Lorenz experiments."""
import argparse
import os
import pytorch_lightning as pl
import torch
from pytorch_lightning import seed_everything
from pytorch_lightning.callbacks import ModelCheckpoint, TQDMProgressBar
from pytorch_lightning.loggers import MLFlowLogger
from scotch.latent_learning.scotch_data_module import SCOTCHDataModule
from scotch.latent_learning.scotch_module import SCOTCHModule
from tensordict import TensorDict
from causica.datasets.causica_dataset_format import Variable, VariablesMetadata
from causica.datasets.variable_types import VariableTypeEnum
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Run Lorenz experiments.")
parser.add_argument("-e", "--epoch", type=int, help="max number of epochs", default=5000)
parser.add_argument("-l", "--lr", type=float, help="learning rate", default=3e-3)
parser.add_argument("-s", "--sparsity", type=float, help="sparsity penalty", default=500)
parser.add_argument("-t", "--dt", type=float, help="dt", default=1)
parser.add_argument("-nor", "--normalize", action="store_true", help="whether to normalize")
parser.add_argument("-sd", "--seed", type=int, help="random seed", required=True)
parser.add_argument("-en", "--experiment_name", type=str, help="experiment name", required=True)
parser.add_argument("-res", "--res_connection", action="store_true", help="whether to use res_connection")
parser.add_argument("-ln", "--layer_norm", action="store_true", help="whether to use layer_norm")
parser.add_argument("-warm", "--lr_warmup", type=int, default=100, help="warmup epochs")
parser.add_argument("-deci", "--deci_diffusion", action="store_true", help="whether to use deci diffusion function")
parser.add_argument(
"-sig",
"--sigmoid_output",
action="store_true",
help="whether to use sigmoid output for deci diffusion function",
)
    # Missing-data setting
parser.add_argument("-p", "--missing_prob", type=float, help="missing probability", default=0.3)
    # Specific to the Lorenz dataset
parser.add_argument("-d", "--dimension", type=int, help="dimension of lorenz dataset", default=10)
parser.add_argument(
"-Nt", "--num_time_points", type=int, help="number of time points, must be 100, 200, 500, or 1000", default=100
)
parser.add_argument("-tmax", "--t_max", type=float, help="max time", default=100)
parser.add_argument(
"-Ns", "--train_size", type=int, help="number of training time series, must be <= 100", default=10
)
args = parser.parse_args()
seed_everything(args.seed)
# HParams
experiment_name = args.experiment_name
max_epochs = args.epoch
default_lr = args.lr
res_connection = args.res_connection
layer_norm = args.layer_norm
deci_diffusion = args.deci_diffusion
sigmoid_output = args.sigmoid_output
missing_prob = args.missing_prob
num_time_points = args.num_time_points
t_max = args.t_max
train_size = args.train_size
lrs = {
"graph": default_lr, # changed from 1e-2
"qz0_mean_net": default_lr,
"qz0_logstd_net": default_lr,
"pz0_mean": default_lr,
"pz0_logstd": default_lr,
"prior_drift_fn": default_lr,
"diffusion_fn": default_lr,
"posterior_drift_fn": default_lr,
"trajectory_encoder": default_lr,
}
prior_sparsity_lambda = args.sparsity
val_size = -1
dt = args.dt # sde solver dt = observation interval
normalize = args.normalize
lr_warmup_iters = args.lr_warmup
hparams = {
"seed": args.seed,
"epoch": args.epoch,
"dt": args.dt,
"default_lr": default_lr,
"train_size": train_size,
"val_size": val_size,
"prior_sparsity_lambda": prior_sparsity_lambda,
"t_max": t_max,
"num_time_points": num_time_points,
"normalize": normalize,
"lr_warmup_iters": lr_warmup_iters,
"res_connection": res_connection,
"layer_rnorm": layer_norm,
"deci_diffusion": deci_diffusion,
"sigmoid_output": sigmoid_output,
"missing_prob": missing_prob,
"dimension": args.dimension,
}
# lorenz
state_size = args.dimension
variables_metadata = VariablesMetadata(
[Variable(name=f"x{i}", type=VariableTypeEnum.CONTINUOUS, group_name=f"x{i}") for i in range(state_size)]
)
subf = "norm" if args.normalize else "unnorm"
ts = torch.load(
f"data/lorenz96_processed/{args.dimension}/{subf}/times_{args.num_time_points}_{str(args.missing_prob)}_{args.seed}.pt"
)
training_data = torch.load(
f"data/lorenz96_processed/{args.dimension}/{subf}/data_{args.num_time_points}_{str(args.missing_prob)}_{args.seed}.pt"
)
true_graph = torch.load(f"data/lorenz96_processed/{args.dimension}/{subf}/true_graph.pt").to("cuda")
training_data = training_data[: args.train_size, :, :] # reduce training data.
training_data = TensorDict(
{f"x{i}": training_data[:, :, i].unsqueeze(dim=2) for i in range(state_size)},
batch_size=[train_size],
)
validation_data = training_data
scotch_data = SCOTCHDataModule(
ts=ts,
training_data=training_data,
validation_data=validation_data,
true_graph=true_graph,
variables_metadata=variables_metadata,
batch_size=1024,
)
# SCOTCH Module
scotch = SCOTCHModule(
learning_rates=lrs,
prior_sparsity_lambda=prior_sparsity_lambda,
dt=dt,
layer_norm=layer_norm,
res_connections=res_connection,
        deci_diffusion=deci_diffusion,
add_diffusion_self_connections=True,
sigmoid_output=sigmoid_output,
)
mlf_logger = MLFlowLogger(
experiment_name=experiment_name,
tracking_uri="file:./mlflow_logs/mlruns",
)
mlf_logger.log_hyperparams(hparams)
trainer = pl.Trainer(
accelerator="auto",
max_epochs=max_epochs,
fast_dev_run=False,
callbacks=[
TQDMProgressBar(refresh_rate=19),
ModelCheckpoint(every_n_epochs=50),
],
check_val_every_n_epoch=50,
logger=mlf_logger,
)
trainer.fit(scotch, datamodule=scotch_data)
|
causica/research_experiments/scotch/src/scotch/experiments/lorenz.py/0
|
{
"file_path": "causica/research_experiments/scotch/src/scotch/experiments/lorenz.py",
"repo_id": "causica",
"token_count": 2654
}
| 680 |
seed_everything: 234
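# Lightning-CLI-style training configuration (inferred from the layout): `model`
# instantiates DECIModule with auglag optimization settings, and the checkpoint
# callbacks keep the best model by `batch_log_prob` plus the last epoch.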
model:
class_path: causica.lightning.modules.deci_module.DECIModule
init_args:
noise_dist: "GAUSSIAN"
embedding_size: 32
out_dim_g: 32
num_layers_g: 2
num_layers_zeta: 2
init_alpha: 0.0
init_rho: 1.0
prior_sparsity_lambda: 5.0
gumbel_temp: 0.25
auglag_config:
class_path: causica.training.auglag.AugLagLRConfig
init_args:
lr_update_lag: 500
lr_update_lag_best: 250
lr_init_dict:
vardist: 0.01
functional_relationships: 0.01
noise_dist: 5e-3
aggregation_period: 20
lr_factor: 0.1
penalty_progress_rate: 0.65
safety_rho: 1e13
safety_alpha: 1e13
max_lr_down: 3
inner_early_stopping_patience: 1500
max_outer_steps: 100
patience_penalty_reached: 5
patience_max_rho: 3
penalty_tolerance: 1e-5
max_inner_steps: 6000
trainer:
max_epochs: 2000
best_checkpoint_callback:
dirpath: "./outputs"
filename: "best_model"
save_top_k: 1
mode: "max"
monitor: "batch_log_prob"
every_n_epochs: 1
last_checkpoint_callback:
save_last: true
filename: "last_model"
save_top_k: 0 # only the last checkpoint is saved
|
causica/src/causica/config/lightning/default_gaussian.yaml/0
|
{
"file_path": "causica/src/causica/config/lightning/default_gaussian.yaml",
"repo_id": "causica",
"token_count": 604
}
| 681 |
"""Module that provides data normalization functionality."""
from typing import Any, Callable, Optional
import torch
import torch.distributions as td
from tensordict import TensorDictBase
from torch import nn
from torch.distributions import constraints
from causica.distributions.transforms import JointTransformModule, SequentialTransformModule, TransformModule
Normalizer = TransformModule[TensorDictBase, TensorDictBase]
FitNormalizerType = Callable[[TensorDictBase], Normalizer]
class LoadNoneTensorMixin(nn.Module):
def __init__(self, *args, **kwargs) -> None:
super().__init__(*args, **kwargs)
self._register_load_state_dict_pre_hook(self._update_tensor_size_on_load)
def _update_tensor_size_on_load(self, state_dict: dict[str, torch.Tensor], prefix: str, *args, **kwargs) -> None:
_ = args, kwargs
for key, value in state_dict.items():
local_key = key.removeprefix(prefix)
if hasattr(self, local_key) and getattr(self, local_key) is None:
setattr(self, local_key, torch.empty_like(value))
class LogTransform(TransformModule[torch.Tensor, torch.Tensor], td.Transform, LoadNoneTensorMixin):
"""
A transform to apply the log function to a single tensor plus an offset.
"""
bijective = True
domain = constraints.greater_than_eq(0)
codomain = constraints.real
arg_constraints = {"offset": constraints.greater_than_eq(0)}
def __init__(self, offset: Optional[torch.Tensor]) -> None:
"""
Args:
offset: the offset added to the single tensor
"""
super().__init__()
self.offset: torch.Tensor
self.register_buffer("offset", offset)
def _call(self, x: torch.Tensor) -> torch.Tensor:
return torch.log(x + self.offset)
def _inverse(self, y: torch.Tensor) -> torch.Tensor:
return torch.exp(y) - self.offset
def log_abs_det_jacobian(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
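        # d/dx log(x + offset) = 1 / (x + offset), hence log|det J| = log|1 / (x + offset)|.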
return torch.log(torch.abs(1.0 / (x + self.offset)))
class Standardizer(TransformModule[torch.Tensor, torch.Tensor], td.AffineTransform, LoadNoneTensorMixin):
"""Standardizer module for a single variable, ie a single tensor."""
def __init__(self, mean: Optional[torch.Tensor], std: Optional[torch.Tensor], *args, **kwargs) -> None:
"""
Args:
mean: Mean of the variable
std: Standard deviation of the variable
*args, **kwargs: Passed to the AffineTransform
"""
loc = scale = None
if mean is not None and std is not None:
loc = -mean / std
scale = 1 / std
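            # AffineTransform then maps x -> loc + scale * x = (x - mean) / std.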
super().__init__(loc, scale, *args, **kwargs)
del self.loc, self.scale # Unset these temporarily to allow registering as buffers
self.loc: torch.Tensor
self.scale: torch.Tensor
self.register_buffer("loc", loc)
self.register_buffer("scale", scale)
def fit_log_normalizer(
data: TensorDictBase, default_offset: float = 1.0, min_margin: float = 0.0, keys: Optional[list[str]] = None
) -> Normalizer:
"""Fits a log standardizer to the tensordict.
    The fitted log normalizer computes log(x + offset), where
    offset = default_offset + abs(min(data)) * (min(data) < 0) + (max(data) - min(data)) * min_margin.
Args:
data: The data to fit the standardizer to.
default_offset: An additional offset to use. The offset is the min value + default_offset. Must be positive.
min_margin: Adds a fraction of the range of data to the minimum offset to avoid log(x<=0) on unseen data.
keys: Limit the set of keys to log transform if set.
Returns:
The log standardizer.
"""
assert default_offset > 0, "default_offset must be positive"
    # For min_value >= 0, offset = default_offset; for min_value < 0, offset = abs(min_value) + default_offset.
data = data.select(*keys) if keys else data
min_values = data.apply(lambda x: torch.min(x, dim=0, keepdim=False).values, batch_size=torch.Size())
max_values = data.apply(lambda x: torch.max(x, dim=0, keepdim=False).values, batch_size=torch.Size())
offsets = min_values.apply(
lambda min_, max_: torch.where(
min_ >= 0, default_offset * torch.ones_like(min_), torch.abs(min_) + default_offset
)
+ (max_ - min_) * min_margin,
max_values,
)
return JointTransformModule({key: LogTransform(offset) for key, offset in offsets.items()})
def fit_standardizer(data: TensorDictBase, keys: Optional[list[str]] = None) -> Normalizer:
"""Return a standardizer that updates data to zero mean and unit standard deviation."""
data = data.select(*keys) if keys else data
means = data.apply(
lambda x: torch.mean(x, dim=0, keepdim=False),
batch_size=torch.Size(),
)
    # Replace std == 0 with 1 to avoid division by zero.
stds = data.apply(
lambda x: torch.std(x, dim=0, keepdim=False),
batch_size=torch.Size(),
).apply(lambda x: torch.where(x == 0, torch.ones_like(x), x))
return JointTransformModule({key: Standardizer(means.get(key), stds.get(key)) for key in means.keys()})
def chain_normalizers(*fit_functions: FitNormalizerType) -> FitNormalizerType:
"""Chain a number of normalizers together.
Args:
*fit_functions: Functions that produce normalizers.
Returns:
A function that fits the sequence of normalizers.
"""
def sequential_fitting(X: TensorDictBase) -> Normalizer:
transform_modules = []
for fit_function in fit_functions:
transform_module = fit_function(X)
X = transform_module(X)
transform_modules.append(transform_module)
return SequentialTransformModule[TensorDictBase, TensorDictBase](*transform_modules)
return sequential_fitting
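# Illustrative composition: log-normalize first, then standardize the result.
#   fit = chain_normalizers(fit_log_normalizer, fit_standardizer)
#   normalizer = fit(train_data)        # a SequentialTransformModule
#   normalized = normalizer(train_data)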
def infer_compatible_log_normalizer_from_checkpoint(state_dict: dict[str, Any]) -> Normalizer:
"""Infers a normalizer compatible with a model checkpoint.
Assumes that `normalizer` is stored at the top level of the checkpoint, that it is a `SequentialTransformModule`,
and that it may contain a `LogTransform` and a `Standardizer`. If both are present, they always appear in that order.
Args:
state_dict: The state dict of the checkpoint to load.
Returns:
An uninitialized normalizer.
"""
# Infer normalizers per variable from the checkpoint
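# Illustrative key pattern (the exact prefixes are an assumption; only the
# last two dot-separated components are parsed below):
#   "normalizer.0.x1.offset"          -> LogTransform for variable "x1"
#   "normalizer.1.x1.loc" / ".scale"  -> Standardizer for variable "x1"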
normalizers_dicts: dict[int, dict[str, TransformModule[torch.Tensor, torch.Tensor]]] = {0: {}, 1: {}}
for key in state_dict.keys():
if key.startswith("normalizer."):
*_, variable_name, state_name = key.split(".")
if state_name == "offset":
normalizers_dicts[0][variable_name] = LogTransform(None)
elif state_name in {"loc", "scale"} and variable_name not in normalizers_dicts[1]:
normalizers_dicts[1][variable_name] = Standardizer(None, None)
# Construct the full empty normalizer ready to be initialized from the checkpoint
joint_transforms = [JointTransformModule(normalizer) for normalizer in normalizers_dicts.values() if normalizer]
if not joint_transforms:
return JointTransformModule({})  # No normalizer, return a passthrough normalizer
return SequentialTransformModule[TensorDictBase, TensorDictBase](*joint_transforms)
|
causica/src/causica/datasets/normalization.py/0
|
{
"file_path": "causica/src/causica/datasets/normalization.py",
"repo_id": "causica",
"token_count": 2748
}
| 682 |
from causica.distributions.noise.bernoulli import BernoulliNoise, BernoulliNoiseModule
from causica.distributions.noise.categorical import CategoricalNoise, CategoricalNoiseModule
from causica.distributions.noise.joint import ContinuousNoiseDist, JointNoise, JointNoiseModule, create_noise_modules
from causica.distributions.noise.noise import IndependentNoise, Noise, NoiseModule
from causica.distributions.noise.spline import SplineNoise, SplineNoiseModule, create_spline_dist_params
from causica.distributions.noise.univariate_cauchy import UnivariateCauchyNoise, UnivariateCauchyNoiseModule
from causica.distributions.noise.univariate_laplace import UnivariateLaplaceNoise, UnivariateLaplaceNoiseModule
from causica.distributions.noise.univariate_normal import UnivariateNormalNoise, UnivariateNormalNoiseModule
|
causica/src/causica/distributions/noise/__init__.py/0
|
{
"file_path": "causica/src/causica/distributions/noise/__init__.py",
"repo_id": "causica",
"token_count": 244
}
| 683 |
from causica.functional_relationships.deci_functional_relationships import DECIEmbedFunctionalRelationships
from causica.functional_relationships.do_functional_relationships import (
DoFunctionalRelationships,
create_do_functional_relationship,
)
from causica.functional_relationships.functional_relationships import FunctionalRelationships
from causica.functional_relationships.linear_functional_relationships import LinearFunctionalRelationships
from causica.functional_relationships.rff_functional_relationships import RFFFunctionalRelationships
|
causica/src/causica/functional_relationships/__init__.py/0
|
{
"file_path": "causica/src/causica/functional_relationships/__init__.py",
"repo_id": "causica",
"token_count": 133
}
| 684 |
"""Lightning Classes for loading data in the default format used in Azure Blob Storage."""
import functools
import os
from collections import defaultdict
from functools import partial
from typing import Any, Iterable, Optional, Union
import torch
from tensordict import TensorDict, TensorDictBase
from torch.utils.data import DataLoader
from causica.datasets.causica_dataset_format import CAUSICA_DATASETS_PATH, DataEnum, VariablesMetadata, load_data
from causica.datasets.interventional_data import CounterfactualData, InterventionData
from causica.datasets.normalization import (
FitNormalizerType,
Normalizer,
chain_normalizers,
fit_log_normalizer,
fit_standardizer,
)
from causica.datasets.tensordict_utils import identity, tensordict_shapes
from causica.datasets.variable_types import VariableTypeEnum
from causica.distributions.transforms import JointTransformModule
from causica.lightning.data_modules.deci_data_module import DECIDataModule
class VariableSpecDataModule(DECIDataModule):
"""
Loads training and test data from fully specified paths for `variables.json` formatted data.
This format assumes the `variables.json` specifies all metadata, and that the corresponding CSV files do not have
any header rows.
Note:
Uses `fsspec` to load the data in the paths. To load from a cloud storage location, make sure the relevant
`fsspec` plugin is available and provide its corresponding scheme. Provide **storage_options for authentication.
E.g., to load from an Azure storage account, install `adlfs` and use `az://container@storage_account/path`.
"""
def __init__(
self,
root_path: str,
batch_size: int = 128,
dataset_name: str = "anonymous_dataset",
standardize: Union[bool, Iterable[str]] = False,
log_normalize: Union[bool, Iterable[str]] = False,
exclude_standardization: Iterable[str] = tuple(),
exclude_log_normalization: Iterable[str] = tuple(),
default_offset: float = 1.0,
log_normalize_min_margin: float = 0.0,
load_counterfactual: bool = False,
load_interventional: bool = False,
load_validation: bool = False,
**storage_options: Any,
):
"""
Args:
root_path: Path to directory with causal data
batch_size: Batch size for training and test data.
dataset_name: A name for the dataset
standardize: Whether to standardize the data or not. If True, it is applied to all continuous variables;
if an iterable, it is applied to the variables listed in `standardize` except those specified in
`exclude_standardization`. The standardizer is column-wise: (x_i - mean_i) / std_i for the i-th column.
If both standardize and log_normalize are True, log_normalize will be applied first.
log_normalize: Whether to log normalize the data. If True, it is applied to all continuous variables;
if an iterable, it is applied to the variables listed in `log_normalize` except those specified in
`exclude_log_normalization`. The operation is
log(x_i - min_i * (min_i < 0) + (max_i - min_i) * min_margin + offset). Also see the reference
in datasets.normalization.LogTransform. If both standardize and log_normalize are True,
log_normalize will be applied first.
exclude_standardization: Which variables to exclude from standardization
exclude_log_normalization: Which variables to exclude from log normalization
default_offset: Default offset for log normalization.
log_normalize_min_margin: Minimum margin for log normalization.
load_counterfactual: Whether counterfactual data should be loaded
load_interventional: Whether interventional data should be loaded
load_validation: Whether to load the validation dataset
**storage_options: Storage options forwarded to `fsspec` when loading files.
"""
super().__init__()
self.batch_size = batch_size
self._dataset_name = dataset_name
self.root_path = root_path
self.storage_options = storage_options
self.standardize = standardize
self.log_normalize = log_normalize
self.exclude_standardization = set(exclude_standardization)
self.exclude_log_normalization = set(exclude_log_normalization)
self.load_counterfactual = load_counterfactual
self.load_interventional = load_interventional
self.load_validation = load_validation
self.default_offset = default_offset
self.log_normalize_min_margin = log_normalize_min_margin
self.use_normalizer = standardize or log_normalize
self.normalizer: Optional[Normalizer] = None
self._dataset_train: TensorDictBase
self._dataset_test: TensorDictBase
self._dataset_valid: TensorDictBase
self.true_adj: torch.Tensor
self.save_hyperparameters()
@property
def variable_shapes(self) -> dict[str, torch.Size]:
return _check_exists(self, "_variable_shapes")
@property
def variable_types(self) -> dict[str, VariableTypeEnum]:
return _check_exists(self, "_variable_types")
@property
def column_names(self) -> dict[str, list[str]]:
return _check_exists(self, "_column_names")
@property
def dataset_train(self) -> TensorDict:
return _check_exists(self, "_dataset_train")
@property
def dataset_test(self) -> TensorDict:
return _check_exists(self, "_dataset_test")
@property
def dataset_valid(self) -> TensorDict:
return _check_exists(self, "_dataset_valid")
@property
def dataset_name(self) -> str:
return self._dataset_name
def create_normalizer(self, normalization_variables: set[str]) -> FitNormalizerType:
"""Return a fitting method for a sequence of normalizers.
This function returns a fitting method for a sequence of normalizers (e.g. log_normalize and standardize).
The variables for each normalizer are normalization_variables - exclude_normalizer, where normalization_variables is
a larger set of variables (e.g. all continuous variables) and exclude_normalizer is the set of variables to exclude
for that specific normalizer (e.g. exclude_log_normalization or exclude_standardization).
Args:
normalization_variables: A larger set of variables to be normalized. It should at least contain the union of variables from all normalizers.
Returns:
Return a fitting method for a sequence of normalizers.
"""
preprocessing: list[FitNormalizerType] = []
# Setup different separate normalizers
if self.log_normalize:
log_normalize_keys = normalization_variables - self.exclude_log_normalization
if isinstance(self.log_normalize, Iterable):
log_normalize_keys = set(self.log_normalize) - self.exclude_log_normalization
fit_log_normalizer_with_key = functools.partial(
fit_log_normalizer,
default_offset=self.default_offset,
min_margin=self.log_normalize_min_margin,
keys=log_normalize_keys,
)
preprocessing.append(fit_log_normalizer_with_key)
if self.standardize:
standardize_keys = normalization_variables - self.exclude_standardization
if isinstance(self.standardize, Iterable):
standardize_keys = set(self.standardize) - self.exclude_standardization
fit_standardizer_with_key = functools.partial(
fit_standardizer,
keys=standardize_keys,
)
preprocessing.append(fit_standardizer_with_key)
return chain_normalizers(*preprocessing)
def _load_all_data(self, variables_metadata: VariablesMetadata):
_load_data = partial(
load_data, root_path=self.root_path, variables_metadata=variables_metadata, **self.storage_options
)
dataset_train = _load_data(data_enum=DataEnum.TRAIN)
dataset_test = _load_data(data_enum=DataEnum.TEST)
if self.load_validation:
dataset_valid = _load_data(data_enum=DataEnum.VALIDATION)
assert isinstance(dataset_valid, TensorDict)
self._dataset_valid = dataset_valid
true_adj = _load_data(data_enum=DataEnum.TRUE_ADJACENCY)
assert isinstance(dataset_train, TensorDict)
assert isinstance(dataset_test, TensorDict)
assert isinstance(true_adj, torch.Tensor)
self._dataset_train = dataset_train
self._dataset_test = dataset_test
self.true_adj = true_adj
self.interventions = []
if self.load_interventional:
self.interventions = _load_data(data_enum=DataEnum.INTERVENTIONS)
self.counterfactuals = []
if self.load_counterfactual:
self.counterfactuals = _load_data(data_enum=DataEnum.COUNTERFACTUALS)
def prepare_data(self):
# WARNING: Do not remove partial here. For some reason, if `load_data` is called directly from inside this data
# module without being wrapped in partial, its first optional `variables_metadata` argument is added to the
# config values of this class. Upon init through the CLI, this argument then becomes interpreted as a value in
# `storage_options`, whereby other calls to `load_data` will fail due to having two keyword arguments named
# `variables_metadata`.
#
# I.e. if `load_data` is called directly from here, the lightning command line with this class as a data module
# and `--print_config` produces:
# ...
# data:
# class_path: causica.lightning.data_modules.VariableSpecDataModule
# init_args:
# ...
# variables_metadata: null
_load_data = partial(load_data, root_path=self.root_path, **self.storage_options) # type: ignore
variables_metadata: VariablesMetadata = _load_data(data_enum=DataEnum.VARIABLES_JSON) # type: ignore
self._load_all_data(variables_metadata)
train_keys = set(self._dataset_train.keys())
test_keys = set(self._dataset_test.keys())
assert (
train_keys == test_keys
), f"node_names for the training and test data must match. Diff: {train_keys.symmetric_difference(test_keys)}"
self._variable_shapes = tensordict_shapes(self._dataset_train)
self._variable_types = {var.group_name: var.type for var in variables_metadata.variables}
self._column_names = defaultdict(list)
for variable in variables_metadata.variables:
self._column_names[variable.group_name].append(variable.name)
if self.use_normalizer:
# Only applied to continuous variables
normalization_variables = {k for k, v in self._variable_types.items() if v == VariableTypeEnum.CONTINUOUS}
self.normalizer = self.create_normalizer(normalization_variables)(
self._dataset_train.select(*normalization_variables)
)
self.normalize_data()
else:
self.normalizer = JointTransformModule({})
def normalize_data(self):
self._dataset_train = self.normalizer(self._dataset_train)
self._dataset_test = self.normalizer(self._dataset_test)
if self.load_validation:
self._dataset_valid = self.normalizer(self._dataset_valid)
if self.load_interventional:
for intervention in self.interventions:
for i in intervention:
if isinstance(i, InterventionData):
i.intervention_data = self.normalizer(i.intervention_data)
i.intervention_values = self.normalizer(i.intervention_values)
i.condition_values = self.normalizer(i.condition_values)
if self.load_counterfactual:
for cf in self.counterfactuals:
for c in cf:
if isinstance(c, CounterfactualData):
c.counterfactual_data = self.normalizer(c.counterfactual_data)
c.factual_data = self.normalizer(c.factual_data)
c.intervention_values = self.normalizer(c.intervention_values)
def train_dataloader(self):
return DataLoader(
dataset=self.dataset_train,
collate_fn=identity,
batch_size=self.batch_size,
shuffle=True,
drop_last=True,
)
def test_dataloader(self):
test_dataloader = DataLoader(dataset=self.dataset_test, collate_fn=identity, batch_size=self.batch_size)
dataloader_list = [
test_dataloader,
DataLoader(dataset=self.true_adj[None, ...]),
DataLoader(dataset=self.interventions, collate_fn=identity, batch_size=None),
DataLoader(dataset=self.counterfactuals, collate_fn=identity, batch_size=None),
]
return dataloader_list
class CSuiteDataModule(VariableSpecDataModule):
"""CSuite data module for loading by the datasets name.
Args:
dataset_name: Name of the dataset to load.
batch_size: Batch size for all datasets.
dataset_path: Path to CSuite dataset mirror.
"""
def __init__(
self,
dataset_name: str,
batch_size: int = 128,
dataset_path: str = CAUSICA_DATASETS_PATH,
load_counterfactual: bool = False,
load_interventional: bool = False,
standardize: Union[bool, Iterable[str]] = False,
):
super().__init__(
root_path=os.path.join(dataset_path, dataset_name),
batch_size=batch_size,
dataset_name=dataset_name,
load_counterfactual=load_counterfactual,
load_interventional=load_interventional,
standardize=standardize,
)
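# Illustrative instantiation (the dataset name below is a hypothetical example):
#   dm = CSuiteDataModule(dataset_name="csuite_example", batch_size=64)
#   dm.prepare_data()               # loads (and optionally normalizes) the data
#   train_loader = dm.train_dataloader()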
def _check_exists(obj: Any, attribute_name: str):
"""Check if an attribute exists otherwise print a message."""
try:
return getattr(obj, attribute_name)
except AttributeError as exc:
display_string = attribute_name.replace("_", " ").strip()
raise ValueError(f"Tried to get {display_string} before data was downloaded.") from exc
|
causica/src/causica/lightning/data_modules/variable_spec_data.py/0
|
{
"file_path": "causica/src/causica/lightning/data_modules/variable_spec_data.py",
"repo_id": "causica",
"token_count": 5859
}
| 685 |
import math
from collections import defaultdict
from typing import Iterable
import torch
from tensordict import TensorDict
from causica.datasets.causica_dataset_format import CounterfactualWithEffects, InterventionWithEffects
from causica.sem.structural_equation_model import SEM, ate, ite
def eval_intervention_likelihoods(sems: list[SEM], intervention_with_effects: InterventionWithEffects) -> torch.Tensor:
"""
Calculate the average log-prob of interventional data.
Specifically we calculate 𝔼_sample[log(𝔼_G[p(sample | G)])]
Args:
sems: A list of SEMs to evaluate the interventional log prob of
intervention_with_effects: True interventional data to use for evaluation.
Returns:
Log-likelihood of the interventional data for each interventional datapoint
"""
total_log_sum_exp_per_int = [] # this will end up being length number of interventions
for intervention in intervention_with_effects[:2]:
inner_log_probs = [
sem.do(interventions=intervention.intervention_values).log_prob(intervention.intervention_data)
for sem in sems
]
# calculate log(𝔼_G[p(sample | G)]) for each sample
log_prob = list_logsumexp(inner_log_probs) - math.log(len(sems)) # batch_size
assert len(log_prob.shape) == 1
total_log_sum_exp_per_int.append(log_prob)
# log(𝔼_G[p(sample | G)]) for each sample in both interventions, shape 2 * batch_size
return torch.cat(total_log_sum_exp_per_int, dim=-1)
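# Numerical note (illustrative): with M graphs, log(E_G[p(x | G)]) is computed
# stably as logsumexp_m(log p(x | G_m)) - log(M); e.g. for two graphs:
#   list_logsumexp([lp_g1, lp_g2]) - math.log(2)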
def eval_ate_rmse(
sems: Iterable[SEM], intervention: InterventionWithEffects, samples_per_graph: int = 1000
) -> TensorDict:
"""Evaluate the ATEs of a model
Args:
sems: An iterable of structural equation models to evaluate the ATE RMSE of
intervention: True interventional data to use for evaluation.
samples_per_graph: Number of samples to draw per graph to calculate the ATE.
Returns:
Dict of the RMSE of the ATE for each node we're interested in
"""
intervention_a, intervention_b, effects = intervention
# each node has shape [batch_size, node_dim]
true_ates = {
effect: intervention_a.intervention_data[effect].mean() - intervention_b.intervention_data[effect].mean()
for effect in effects
}
# generate samples from the intervened distribution and the base distribution
ates_per_graph: dict[str, list[torch.Tensor]] = defaultdict(list)
for sem in sems:
graph_ates = ate(
sem, intervention_a.intervention_values, intervention_b.intervention_values, effects, samples_per_graph
)
for key in effects:
ates_per_graph[key].append(graph_ates[key].detach())
# each node has shape [node_dim]
generated_ates = {k: list_mean(v) for k, v in ates_per_graph.items()}
return TensorDict(
{key: torch.sqrt(torch.mean(torch.sum((generated_ates[key] - true_ates[key]) ** 2, -1))) for key in effects},
batch_size=torch.Size([]),
)
def eval_ite_rmse(sems: Iterable[SEM], counterfactual_data: CounterfactualWithEffects) -> TensorDict:
"""Evaluate the ITEs of a model.
Args:
sems: An iterable of structural equation models to evaluate the ITE RMSE of
counterfactual_data: Data of true counterfactuals to use for evaluation.
Returns:
Dict of RMSEs for each effect variable we're interested in
"""
intervention_a, intervention_b, effects = counterfactual_data
if intervention_b is None:
raise ValueError("ITE evaluation must have reference counterfactuals")
for key, a_val in intervention_a.factual_data.items():
b_val = intervention_b.factual_data[key]
assert torch.allclose(a_val, b_val), "Base data must be the same for ITEs"
# each node has shape [batch_size, node_dim]
true_ites = {
effect: intervention_a.counterfactual_data[effect] - intervention_b.counterfactual_data[effect]
for effect in effects
}
# generate samples from the intervened distribution and the base distribution
per_graph_ites: dict[str, list[torch.Tensor]] = defaultdict(list)
for sem in sems:
sem_ites = ite(
sem,
intervention_a.factual_data,
intervention_a.intervention_values,
intervention_b.intervention_values,
effects,
)
for key in effects:
per_graph_ites[key].append(sem_ites[key].detach())
# average the treatment value over all graphs, each node has shape [batch_size, node_dim]
generated_ites = {k: list_mean(v) for k, v in per_graph_ites.items()}
return TensorDict(
{key: torch.sqrt(torch.mean(torch.sum((generated_ites[key] - true_ites[key]) ** 2, -1))) for key in effects},
batch_size=torch.Size(),
)
def list_mean(list_of_tensors: list[torch.Tensor]) -> torch.Tensor:
"""Take the mean of a list of torch tensors, they must all have the same shape"""
return torch.stack(list_of_tensors, dim=0).mean(dim=0)
def list_logsumexp(list_of_tensors: list[torch.Tensor]) -> torch.Tensor:
"""Take the logsumexp of a list of torch tensors, they must all have the same shape"""
return torch.logsumexp(torch.stack(list_of_tensors, dim=0), dim=0)
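# Quick sanity check (illustrative): logsumexp([log 1, log 3]) == log 4.
#   list_logsumexp([torch.log(torch.tensor(1.0)), torch.log(torch.tensor(3.0))])
#   # -> tensor(1.3863) ~= log(4)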
|
causica/src/causica/training/evaluation.py/0
|
{
"file_path": "causica/src/causica/training/evaluation.py",
"repo_id": "causica",
"token_count": 1988
}
| 686 |
import pytest
import torch
from causica.distributions.adjacency.gibbs_dag_prior import ExpertGraphContainer, GibbsDAGPrior
def test_expert_graph_dataclass():
mask = torch.Tensor([[0, 1, 1], [0, 0, 0], [0, 0, 0]])
dag = torch.Tensor([[0, 0, 1], [0, 0, 0], [0, 0, 0]])
confidence = 0.8
scale = 10
expert_graph_container = ExpertGraphContainer(dag, mask, confidence, scale)
assert expert_graph_container.mask.shape[0] == 3
assert expert_graph_container.mask.shape[1] == 3
assert expert_graph_container.dag.shape[0] == 3
assert expert_graph_container.dag.shape[1] == 3
assert expert_graph_container.confidence == 0.8
def test_get_sparsity_term():
gibbs_dag_prior = GibbsDAGPrior(num_nodes=2, sparsity_lambda=torch.tensor(1))
dag = torch.Tensor([[0, 0], [0, 0]])
assert gibbs_dag_prior.get_sparsity_term(dag) == 0
dense_dag = torch.Tensor([[1, 1], [1, 1]])
sparse_dag = torch.Tensor([[0, 1], [0, 1]])
assert gibbs_dag_prior.get_sparsity_term(dense_dag) > gibbs_dag_prior.get_sparsity_term(sparse_dag)
def test_get_expert_graph_term():
mask = torch.Tensor([[0, 0, 0], [0, 0, 0], [0, 0, 0]])
dag = torch.Tensor([[0, 0, 1], [0, 0, 0], [0, 0, 0]])
confidence = 0.8
scale = 10
expert_graph_container = ExpertGraphContainer(dag, mask, confidence, scale)
gibbs_dag_prior = GibbsDAGPrior(
num_nodes=3,
sparsity_lambda=torch.tensor(1),
expert_graph_container=expert_graph_container,
)
A = torch.Tensor([[0, 0, 1], [0, 1, 1], [1, 0, 1]])
assert gibbs_dag_prior.get_expert_graph_term(A) == torch.tensor(0)
mask = torch.Tensor([[0, 0, 1], [0, 0, 0], [0, 0, 0]])
expert_graph_container = ExpertGraphContainer(dag, mask, confidence, scale)
gibbs_dag_prior = GibbsDAGPrior(
num_nodes=3,
sparsity_lambda=torch.tensor(1),
expert_graph_container=expert_graph_container,
)
torch.testing.assert_close(gibbs_dag_prior.get_expert_graph_term(A), torch.tensor(0.2))
def test_log_prob():
gibbs_dag_prior = GibbsDAGPrior(num_nodes=123, sparsity_lambda=torch.tensor(1))
A = torch.Tensor([[0, 0, 1], [0, 0, 1], [0, 0, 0]])
with pytest.raises(AssertionError):
gibbs_dag_prior.log_prob(A)
gibbs_dag_prior = GibbsDAGPrior(num_nodes=2, sparsity_lambda=torch.tensor(1))
A = torch.Tensor([[1, 1], [0, 1]])
torch.testing.assert_close(
gibbs_dag_prior.log_prob(A),
torch.tensor(-3.0),
)
A = torch.Tensor([[0, 1], [0, 0]])
torch.testing.assert_close(gibbs_dag_prior.log_prob(A), torch.tensor(-1.0))
|
causica/test/distributions/adjacency/test_gibbs_dag_prior.py/0
|
{
"file_path": "causica/test/distributions/adjacency/test_gibbs_dag_prior.py",
"repo_id": "causica",
"token_count": 1192
}
| 687 |
import pytest
import torch
from tensordict import TensorDict
from causica.functional_relationships import LinearFunctionalRelationships, create_do_functional_relationship
@pytest.fixture(name="two_variable_dict")
def fixture_two_variable_dict():
return {"x1": torch.Size([1]), "x2": torch.Size([2])}
@pytest.fixture(name="three_variable_dict")
def fixture_three_variable_dict():
return {"x1": torch.Size([2]), "x2": torch.Size([2]), "x3": torch.Size([1])}
def test_do_linear_graph_stack(two_variable_dict):
graph = torch.tensor([[[0, 1], [0, 0.0]], [[0, 0], [1, 0.0]]])
coef_matrix = torch.randn((3, 3))
interventions = TensorDict({"x2": torch.tensor([1.42, 0.42])}, batch_size=torch.Size())
func = LinearFunctionalRelationships(two_variable_dict, coef_matrix)
do_func, do_graph = create_do_functional_relationship(interventions, func, graph)
array = torch.linspace(-2, 2, 100)[..., None, None].expand(-1, 2, 1)
prediction = do_func.forward(TensorDict({"x1": array}, batch_size=array.shape[:-1]), graphs=do_graph)
assert prediction["x1"].shape == (100, 2, 1)
assert torch.allclose(prediction["x1"][:, 0, :], torch.tensor(0.0))
true_pred = torch.matmul(torch.tensor([1.42, 0.42]).unsqueeze(-2), coef_matrix[1:, :1]).squeeze()
assert torch.allclose(prediction["x1"][:, 1, :], true_pred)
def test_linear_3d_graph_do_1_node(three_variable_dict):
coef_matrix = torch.rand((5, 5))
graph = torch.zeros(3, 3)
graph[0, 1] = graph[0, 2] = 1
func = LinearFunctionalRelationships(three_variable_dict, coef_matrix)
interventions = TensorDict({"x1": torch.tensor([1.42, 0.42])}, batch_size=torch.Size())
do_func, do_graph = create_do_functional_relationship(interventions, func, graph)
test_val = torch.rand(100, 3)
input_noise = TensorDict({"x2": test_val[:, 0:2], "x3": test_val[:, 2:]}, batch_size=test_val.shape[:-1])
prediction = do_func.forward(input_noise, graphs=do_graph)
assert "x1" not in input_noise.keys()
assert prediction["x2"].shape == (100, 2)
assert prediction["x3"].shape == (100, 1)
true_pred = torch.matmul(torch.tensor([1.42, 0.42]).unsqueeze(-2), coef_matrix[:2, 2:]).squeeze()
assert torch.allclose(prediction["x2"], true_pred[:2])
assert torch.allclose(prediction["x3"], true_pred[2:])
def test_linear_3d_graph_do_2_nodes(three_variable_dict):
coef_matrix = torch.rand((5, 5))
graph = torch.zeros(3, 3)
graph[0, 1] = graph[0, 2] = graph[1, 2] = 1
func = LinearFunctionalRelationships(three_variable_dict, coef_matrix)
interventions = TensorDict(
{"x1": torch.tensor([1.42, 0.42]), "x2": torch.tensor([-0.42, 0.402])}, batch_size=torch.Size()
)
do_func, do_graph = create_do_functional_relationship(interventions, func, graph)
test_val = torch.linspace(-2, 2, 100).unsqueeze(-1)
input_noise = TensorDict({"x3": test_val}, batch_size=test_val.shape[:-1])
prediction = do_func(input_noise, do_graph)
assert "x1" not in input_noise.keys() and "x2" not in input_noise.keys()
assert prediction["x3"].shape == (100, 1)
true_pred = torch.matmul(torch.tensor([1.42, 0.42, -0.42, 0.402]).unsqueeze(-2), coef_matrix[:4, 4:]).squeeze()
assert torch.allclose(true_pred, prediction["x3"])
|
causica/test/functional_relationships/test_do_functional_relationships.py/0
|
{
"file_path": "causica/test/functional_relationships/test_do_functional_relationships.py",
"repo_id": "causica",
"token_count": 1345
}
| 688 |
import pytest
import torch
import torch.distributions as td
from tensordict import TensorDict
from causica.datasets.variable_types import VariableTypeEnum
from causica.distributions import JointNoiseModule, create_noise_modules
from causica.distributions.noise.joint import ContinuousNoiseDist
from causica.functional_relationships import LinearFunctionalRelationships
from causica.sem.distribution_parameters_sem import DistributionParametersSEM
from . import create_lingauss_sem, create_rffgauss_sem
@pytest.fixture(name="two_variable_dict")
def fixture_two_variable_dict():
return {"x1": torch.Size([1]), "x2": torch.Size([2])}
@pytest.fixture(name="three_variable_dict")
def fixture_three_variable_dict():
return {"x1": torch.Size([2]), "x2": torch.Size([2]), "x3": torch.Size([1])}
@pytest.mark.parametrize("graph", [torch.tensor([[0, 0], [1, 0.0]]), torch.tensor([[0, 1], [0, 0.0]])])
def test_do_linear_sem(graph, two_variable_dict):
coef_matrix = torch.rand((3, 3))
sem = create_lingauss_sem(two_variable_dict, coef_matrix, graph)
intervention_value = torch.tensor([1.42, 0.42])
do_sem = sem.do(TensorDict({"x2": intervention_value}, batch_size=tuple()))
array = torch.linspace(-2, 2, 100).unsqueeze(-1)
log_probs = do_sem.log_prob(TensorDict({"x1": array}, batch_size=[100]))
if graph[1, 0] == 1.0:
expected_mean = torch.einsum("i,ij->j", intervention_value, coef_matrix[1:, :1])
else:
expected_mean = torch.tensor([0.0])
expected_log_probs = td.Independent(td.Normal(expected_mean, 1.0), 1).log_prob(array)
torch.testing.assert_close(log_probs, expected_log_probs)
noise = sem.sample_noise((10,))
do_sample = do_sem.noise_to_sample(noise)
sample = sem.noise_to_sample(noise)
if graph[1, 0] == 1.0:
torch.testing.assert_close(do_sample["x1"], expected_mean + noise["x1"])
else:
torch.testing.assert_close(do_sample["x1"], sample["x1"])
# Test multi-dim interventions
do_sem = sem.do(
TensorDict(
{"x2": intervention_value.expand(3, 2)},
batch_size=[
3,
],
)
)
noise = sem.sample_noise((10, 3, 1))
do_sample = do_sem.noise_to_sample(noise)
sample = sem.noise_to_sample(noise)
if graph[1, 0] == 1.0:
torch.testing.assert_close(do_sample["x1"], expected_mean + noise["x1"])
else:
torch.testing.assert_close(do_sample["x1"], sample["x1"])
@pytest.mark.parametrize("graph", [torch.tensor([[0, 0], [1, 0.0]]), torch.tensor([[0, 1], [0, 0.0]])])
@pytest.mark.parametrize(
"intervention_variable,intervention_value",
[("x1", [[1.42], [0.42], [1.12]]), ("x2", [[1.42, 0.42], [0.42, 1.42], [0.42, 0.42]])],
)
def test_batched_intervention_2d_graph(graph, intervention_variable, intervention_value, two_variable_dict):
rff_features = torch.rand((10, 3))
coef_matrix = torch.rand((10,))
sem = create_rffgauss_sem(two_variable_dict, rff_features, coef_matrix, graph)
variable_names = set(two_variable_dict.keys())
sampled_variables = variable_names - {intervention_variable}
assert len(sampled_variables) == 1
sampled_variable = sampled_variables.pop()
intervention_value = torch.tensor(intervention_value)
batched_do_sem = sem.do(
TensorDict(
{intervention_variable: intervention_value},
batch_size=[
3,
],
)
)
noise = batched_do_sem.sample_noise((10,))
do_sample = batched_do_sem.noise_to_sample(noise)
non_batch_sample_list = []
for i, intervention in enumerate(intervention_value):
non_batch_sample_list.append(
sem.do(TensorDict({intervention_variable: intervention}, batch_size=tuple()))
.noise_to_sample(noise[:, i, None])
.squeeze(1)
)
non_batch_sample = torch.stack(non_batch_sample_list, dim=1)
torch.testing.assert_close(do_sample[sampled_variable], non_batch_sample[sampled_variable], atol=1e-5, rtol=1e-4)
def test_batched_intervention_3d_graph(three_variable_dict):
graph = torch.zeros(3, 3)
graph[0, 1] = graph[0, 2] = graph[1, 2] = 1
rff_features = torch.rand((10, 5))
coef_matrix = torch.rand((10,))
sem = create_rffgauss_sem(three_variable_dict, rff_features, coef_matrix, graph)
intervention_value = torch.tensor([[1.42, 0.42], [0.42, 1.42], [0.42, 0.42]])
batched_do_sem = sem.do(
TensorDict(
{"x2": intervention_value},
batch_size=[
3,
],
)
)
noise = batched_do_sem.sample_noise((10,))
do_sample = batched_do_sem.noise_to_sample(noise)
inferred_noise = batched_do_sem.sample_to_noise(do_sample)
torch.testing.assert_close(noise["x1"], inferred_noise["x1"], atol=1e-5, rtol=1e-4)
torch.testing.assert_close(noise["x3"], inferred_noise["x3"], atol=1e-5, rtol=1e-4)
non_batch_sample_list = []
for i, intervention in enumerate(intervention_value):
non_batch_sample_list.append(
sem.do(TensorDict({"x2": intervention}, batch_size=tuple())).noise_to_sample(noise[:, i, None]).squeeze(1)
)
non_batch_sample = torch.stack(non_batch_sample_list, dim=1)
torch.testing.assert_close(do_sample["x1"], non_batch_sample["x1"], atol=1e-5, rtol=1e-4)
torch.testing.assert_close(do_sample["x3"], non_batch_sample["x3"], atol=1e-5, rtol=1e-4)
def test_intervention_batched_3d_graph(three_variable_dict):
graph = torch.zeros(3, 3)
graph[0, 1] = graph[0, 2] = graph[1, 2] = 1
graphs = torch.stack([graph, graph], dim=0)
rff_features = torch.rand((10, 5))
coef_matrix = torch.rand((10,))
sem = create_rffgauss_sem(three_variable_dict, rff_features, coef_matrix, graphs)
regular_noise = sem.sample_noise(torch.Size([10]))
samples = sem.noise_to_sample(regular_noise)
inferred_noise = sem.sample_to_noise(samples)
torch.testing.assert_close(regular_noise, inferred_noise, atol=1e-5, rtol=1e-4)
intervention_value = torch.tensor([1.42, 0.42])
do_sem = sem.do(
TensorDict(
{"x2": intervention_value},
batch_size=torch.Size([]),
)
)
noise = do_sem.sample_noise(torch.Size([10]))
do_sample = do_sem.noise_to_sample(noise)
inferred_noise = do_sem.sample_to_noise(do_sample)
torch.testing.assert_close(noise["x1"], inferred_noise["x1"], atol=1e-5, rtol=1e-4)
torch.testing.assert_close(noise["x3"], inferred_noise["x3"], atol=1e-5, rtol=1e-4)
def test_batched_intervention_batched_3d_graph(three_variable_dict):
graph = torch.zeros(3, 3)
graph[0, 1] = graph[0, 2] = graph[1, 2] = 1
graphs = torch.stack([graph, graph], dim=0)
rff_features = torch.rand((10, 5))
coef_matrix = torch.rand((10,))
sem = create_rffgauss_sem(three_variable_dict, rff_features, coef_matrix, graphs)
intervention_value = torch.tensor([[1.42, 0.42], [0.42, 1.42], [0.42, 0.42]])
batched_do_sem = sem.do(
TensorDict(
{"x2": intervention_value},
batch_size=[
3,
],
)
)
noise = batched_do_sem.sample_noise((10,))
do_sample = batched_do_sem.noise_to_sample(noise)
assert do_sample.batch_size == torch.Size([10, 3, 2]) # 10 samples, 3 interventions, 2 graphs
inferred_noise = batched_do_sem.sample_to_noise(do_sample)
torch.testing.assert_close(noise["x1"], inferred_noise["x1"], atol=1e-5, rtol=1e-4)
torch.testing.assert_close(noise["x3"], inferred_noise["x3"], atol=1e-5, rtol=1e-4)
non_batch_sample_list = []
for i, intervention in enumerate(intervention_value):
non_batch_sample_list.append(
sem.do(TensorDict({"x2": intervention}, batch_size=tuple())).noise_to_sample(noise[:, i, None]).squeeze(1)
)
non_batch_sample = torch.stack(non_batch_sample_list, dim=1)
torch.testing.assert_close(do_sample["x1"], non_batch_sample["x1"], atol=1e-5, rtol=1e-4)
torch.testing.assert_close(do_sample["x3"], non_batch_sample["x3"], atol=1e-5, rtol=1e-4)
def test_linear_sem_3d_graph_do_1_node(three_variable_dict):
coef_matrix = torch.rand((5, 5))
graph = torch.zeros(3, 3)
graph[0, 1] = graph[0, 2] = 1
sem = create_lingauss_sem(three_variable_dict, coef_matrix, graph)
intervention_value = torch.tensor([1.42, 0.42])
do_sem = sem.do(TensorDict({"x1": intervention_value}, batch_size=tuple()))
test_val = torch.rand(100, 3)
log_probs = do_sem.log_prob(TensorDict({"x2": test_val[:, 0:2], "x3": test_val[:, 2:]}, batch_size=[100]))
expected_mean = torch.einsum("i,ij->j", intervention_value, coef_matrix[:2, 2:])
expected_log_probs = td.Independent(td.Normal(expected_mean, 1.0), 1).log_prob(test_val)
torch.testing.assert_close(log_probs, expected_log_probs)
def test_linear_sem_3d_graph_condition_1_node(three_variable_dict):
coef_matrix = torch.rand((5, 5))
graph = torch.zeros(3, 3)
graph[0, 1] = graph[0, 2] = graph[1, 2] = 1
sem = create_lingauss_sem(three_variable_dict, coef_matrix, graph)
condition_value = torch.tensor([1.42, 0.42])
condition_sem = sem.condition(TensorDict({"x1": condition_value}, batch_size=tuple()))
test_val = torch.rand(100, 3)
log_probs = condition_sem.log_prob(TensorDict({"x2": test_val[:, 0:2], "x3": test_val[:, 2:]}, batch_size=[100]))
with pytest.raises(NotImplementedError, match=r"on x2"):
sem.condition(TensorDict({"x2": condition_value}, batch_size=tuple()))
condition_sem = sem.condition(TensorDict({"x1": condition_value, "x2": condition_value}, batch_size=tuple()))
test_val = torch.rand(100, 1)
log_probs = condition_sem.log_prob(TensorDict({"x3": test_val}, batch_size=[100]))
expected_mean = torch.einsum("i,ij->j", torch.cat([condition_value, condition_value]), coef_matrix[:4, 4:])
expected_log_probs = td.Independent(td.Normal(expected_mean, 1.0), 1).log_prob(test_val)
torch.testing.assert_close(log_probs, expected_log_probs)
def test_linear_sem_3d_graph_do_2_nodes(three_variable_dict):
coef_matrix = torch.rand((5, 5))
graph = torch.triu(torch.ones(3, 3), diagonal=1)
sem = create_lingauss_sem(three_variable_dict, coef_matrix, graph)
int_data = TensorDict({"x1": torch.tensor([1.42, 0.42]), "x2": torch.tensor([-0.42, 0.402])}, batch_size=tuple())
do_sem = sem.do(int_data)
test_val = torch.linspace(-2, 2, 100).unsqueeze(-1)
log_probs = do_sem.log_prob(TensorDict({"x3": test_val}, batch_size=[100]))
expected_mean = torch.einsum("i,ij->j", torch.cat((int_data["x1"], int_data["x2"])), coef_matrix[:4, 4:])
expected_log_probs = td.Independent(td.Normal(expected_mean, 1.0), 1).log_prob(test_val)
torch.testing.assert_close(log_probs, expected_log_probs)
@pytest.mark.parametrize("graph", [torch.tensor([[0, 0], [1, 0.0]]), torch.tensor([[0, 1], [0, 0.0]])])
def test_do_linear_sem_bernoulli(graph, two_variable_dict):
coef_matrix = torch.rand((3, 3))
func = LinearFunctionalRelationships(two_variable_dict, initial_linear_coefficient_matrix=coef_matrix)
noise_modules = create_noise_modules(
shapes=two_variable_dict,
types=dict.fromkeys(two_variable_dict, VariableTypeEnum.BINARY),
continuous_noise_dist=ContinuousNoiseDist.GAUSSIAN,
)
noise_dist = JointNoiseModule(noise_modules)
sem = DistributionParametersSEM(graph=graph, noise_dist=noise_dist, func=func)
intervention_value = torch.tensor([1.42, 0.42])
do_sem = sem.do(TensorDict({"x2": intervention_value}, batch_size=tuple()))
array = torch.bernoulli(0.5 * torch.ones(100, 1))
log_probs = do_sem.log_prob(TensorDict({"x1": array}, batch_size=array.shape[:-1]))
if graph[1, 0] == 1.0:
expected_logits = torch.einsum("i,ij->j", intervention_value, coef_matrix[1:, :1])
else:
expected_logits = torch.tensor([0.0])
expected_log_probs = td.Independent(td.Bernoulli(logits=expected_logits), 1).log_prob(array)
torch.testing.assert_close(log_probs, expected_log_probs)
noise = sem.sample_noise((10,))
do_sample = do_sem.noise_to_sample(noise)
sample = sem.noise_to_sample(noise)
if graph[1, 0] == 1.0:
torch.testing.assert_close(do_sample["x1"], ((expected_logits + noise["x1"]) > 0).float())
else:
torch.testing.assert_close(do_sample["x1"], sample["x1"])
def test_linear_sem_3d_graph_do_1_node_bernoulli(three_variable_dict):
coef_matrix = torch.rand((5, 5))
graph = torch.zeros(3, 3)
graph[0, 1] = graph[0, 2] = 1
func = LinearFunctionalRelationships(three_variable_dict, coef_matrix)
noise_modules = create_noise_modules(
shapes=three_variable_dict,
types=dict.fromkeys(three_variable_dict, VariableTypeEnum.BINARY),
continuous_noise_dist=ContinuousNoiseDist.GAUSSIAN,
)
noise_dist = JointNoiseModule(noise_modules)
sem = DistributionParametersSEM(graph=graph, noise_dist=noise_dist, func=func)
intervention_value = torch.tensor([1.42, 0.42])
do_sem = sem.do(TensorDict({"x1": intervention_value}, batch_size=tuple()))
array = torch.bernoulli(0.5 * torch.ones(100, 3))
log_probs = do_sem.log_prob(TensorDict({"x2": array[:, :2], "x3": array[:, 2:]}, batch_size=array.shape[:-1]))
expected_logits = torch.einsum("i,ij->j", intervention_value, coef_matrix[:2, 2:])
expected_log_probs = td.Independent(td.Bernoulli(logits=expected_logits), 1).log_prob(array)
torch.testing.assert_close(log_probs, expected_log_probs)
def test_linear_sem_3d_graph_do_2_nodes_bernoulli(three_variable_dict):
coef_matrix = torch.rand((5, 5))
graph = torch.triu(torch.ones(3, 3), diagonal=1)
func = LinearFunctionalRelationships(three_variable_dict, coef_matrix)
noise_modules = create_noise_modules(
shapes=three_variable_dict,
types=dict.fromkeys(three_variable_dict, VariableTypeEnum.BINARY),
continuous_noise_dist=ContinuousNoiseDist.GAUSSIAN,
)
noise_dist = JointNoiseModule(noise_modules)
sem = DistributionParametersSEM(graph=graph, noise_dist=noise_dist, func=func)
int_data = TensorDict({"x1": torch.tensor([1.42, 0.42]), "x2": torch.tensor([-0.42, 0.402])}, batch_size=tuple())
do_sem = sem.do(int_data)
array = torch.bernoulli(0.5 * torch.ones(100, 1))
log_probs = do_sem.log_prob(TensorDict({"x3": array}, batch_size=array.shape[:-1]))
expected_logits = torch.einsum("i,ij->j", torch.cat((int_data["x1"], int_data["x2"])), coef_matrix[:4, 4:])
expected_log_probs = td.Independent(td.Bernoulli(logits=expected_logits), 1).log_prob(array)
torch.testing.assert_close(log_probs, expected_log_probs)
|
causica/test/sem/test_sem.py/0
|
{
"file_path": "causica/test/sem/test_sem.py",
"repo_id": "causica",
"token_count": 6448
}
| 689 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
###########################################################################################
# CUSTOMIZED ENCODING/DECODING LAYERS AS USED IN THE PAPER: #
# Clifford Neural Layers for PDE Modeling #
###########################################################################################
import torch
import torch.nn.functional as F
from typing import Union
from cliffordlayers.nn.modules.cliffordconv import (
CliffordConv2d,
CliffordConv3d,
)
from cliffordlayers.models.basic.custom_kernels import (
get_2d_scalar_vector_encoding_kernel,
get_2d_scalar_vector_decoding_kernel,
get_2d_rotation_scalar_vector_encoding_kernel,
get_2d_rotation_scalar_vector_decoding_kernel,
get_3d_maxwell_encoding_kernel,
get_3d_maxwell_decoding_kernel,
)
class CliffordConv2dScalarVectorEncoder(CliffordConv2d):
"""2d Clifford convolution encoder for scalar+vector input fields which inherits from CliffordConv2d."""
def __init__(
self,
g: Union[tuple, list, torch.Tensor],
in_channels: int,
out_channels: int,
kernel_size: int = 1,
stride: int = 1,
padding: int = 0,
dilation: int = 1,
groups: int = 1,
bias: bool = True,
padding_mode: str = "zeros",
rotation: bool = False,
):
super().__init__(
g,
in_channels,
out_channels,
kernel_size,
stride,
padding,
dilation,
groups,
bias,
padding_mode,
rotation,
)
if rotation:
self._get_kernel = get_2d_rotation_scalar_vector_encoding_kernel
else:
self._get_kernel = get_2d_scalar_vector_encoding_kernel
def forward(self, x: torch.Tensor) -> torch.Tensor:
return super(CliffordConv2d, self).forward(x, F.conv2d)
class CliffordConv2dScalarVectorDecoder(CliffordConv2d):
"""2d Clifford convolution decoder for scalar+vector output fields which inherits from CliffordConv2d."""
def __init__(
self,
g: Union[tuple, list, torch.Tensor],
in_channels: int,
out_channels: int,
kernel_size: int = 1,
stride: int = 1,
padding: int = 0,
dilation: int = 1,
groups: int = 1,
bias: bool = False,
padding_mode: str = "zeros",
rotation: bool = False,
):
super().__init__(
g,
in_channels,
out_channels,
kernel_size,
stride,
padding,
dilation,
groups,
bias,
padding_mode,
rotation,
)
if rotation:
self._get_kernel = get_2d_rotation_scalar_vector_decoding_kernel
else:
self._get_kernel = get_2d_scalar_vector_decoding_kernel
def forward(self, x: torch.Tensor) -> torch.Tensor:
if self.bias is True:
raise ValueError("Bias needs to be set to False for 2d Clifford decoding layers.")
return super(CliffordConv2d, self).forward(x, F.conv2d)
class CliffordConv3dMaxwellEncoder(CliffordConv3d):
"""3d Clifford convolution encoder for vector+bivector inputs which inherits from CliffordConv3d."""
def __init__(
self,
g: Union[tuple, list, torch.Tensor],
in_channels: int,
out_channels: int,
kernel_size: int = 1,
stride: int = 1,
padding: int = 0,
dilation: int = 1,
groups: int = 1,
bias: bool = True,
padding_mode: str = "zeros",
):
super().__init__(
g,
in_channels,
out_channels,
kernel_size,
stride,
padding,
dilation,
groups,
bias,
padding_mode,
)
self._get_kernel = get_3d_maxwell_encoding_kernel
def forward(self, x: torch.Tensor) -> torch.Tensor:
return super(CliffordConv3d, self).forward(x, F.conv3d)
class CliffordConv3dMaxwellDecoder(CliffordConv3d):
"""3d Clifford convolution decoder for vector+bivector inputs which inherits from CliffordConv3d."""
def __init__(
self,
g: Union[tuple, list, torch.Tensor],
in_channels: int,
out_channels: int,
kernel_size: int = 1,
stride: int = 1,
padding: int = 0,
dilation: int = 1,
groups: int = 1,
bias: bool = False,
padding_mode: str = "zeros",
):
super().__init__(
g,
in_channels,
out_channels,
kernel_size,
stride,
padding,
dilation,
groups,
bias,
padding_mode,
)
self._get_kernel = get_3d_maxwell_decoding_kernel
def forward(self, x: torch.Tensor) -> torch.Tensor:
if self.bias is True:
raise ValueError("Bias needs to be set to False for 3d Clifford decoding layers.")
return super(CliffordConv3d, self).forward(x, F.conv3d)
|
cliffordlayers/cliffordlayers/models/basic/custom_layers.py/0
|
{
"file_path": "cliffordlayers/cliffordlayers/models/basic/custom_layers.py",
"repo_id": "cliffordlayers",
"token_count": 2561
}
| 690 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
###########################################################################################
# THIS IS AN OLD IMPLEMENTATION OF THE CLIFFORD FOURIER TRANSFORM LAYERS. #
# WE KEEP IT FOR UNIT TESTING FOR THE TIME BEING. #
###########################################################################################
import torch
from torch import nn
def batchmul2d(input, weights):
# (batch, in_channel, x,y ), (in_channel, out_channel, x,y) -> (batch, out_channel, x,y)
return torch.einsum("bixy,ioxy->boxy", input, weights)
def batchmul3d(input, weights):
# (batch, in_channel, x,y,t ), (in_channel, out_channel, x,y,t) -> (batch, out_channel, x,y,t)
return torch.einsum("bixyz,ioxyz->boxyz", input, weights)
def get_clifford_linear_kernel_2d(weights):
assert len(weights) == 4 or weights.size(0) == 4
kernel1 = torch.cat([weights[0], weights[1], weights[2], -weights[3]], dim=0)
kernel2 = torch.cat([weights[1], weights[0], -weights[3], weights[2]], dim=0)
kernel3 = torch.cat([weights[2], weights[3], weights[0], -weights[1]], dim=0)
kernel4 = torch.cat([weights[3], weights[2], -weights[1], weights[0]], dim=0)
kernel = torch.cat([kernel1, kernel2, kernel3, kernel4], dim=1)
return kernel
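# Shape sketch (illustrative): each kernel row concatenates four weight blocks
# along dim 0 and the four rows are stacked along dim 1, so weights of shape
# (4, a, b) yield a kernel of shape (4*a, 4*b):
#   w = torch.randn(4, 2, 3)
#   get_clifford_linear_kernel_2d(w).shape  # torch.Size([8, 12])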
def get_clifford_linear_kernel_3d(weights):
kernel1 = torch.cat(
[
weights[0],
weights[1],
weights[2],
weights[3],
-weights[4],
-weights[5],
-weights[6],
-weights[7],
],
dim=0,
)
kernel2 = torch.cat(
[
weights[1],
weights[0],
-weights[4],
-weights[5],
weights[2],
weights[3],
-weights[7],
-weights[6],
],
dim=0,
)
kernel3 = torch.cat(
[
weights[2],
weights[4],
weights[0],
-weights[6],
-weights[1],
weights[7],
weights[3],
weights[5],
],
dim=0,
)
kernel4 = torch.cat(
[
weights[3],
weights[5],
weights[6],
weights[0],
-weights[7],
-weights[1],
-weights[2],
-weights[4],
],
dim=0,
)
kernel5 = torch.cat(
[
weights[4],
weights[2],
-weights[1],
weights[7],
weights[0],
-weights[6],
weights[5],
weights[3],
],
dim=0,
)
kernel6 = torch.cat(
[
weights[5],
weights[3],
-weights[7],
-weights[1],
weights[6],
weights[0],
-weights[4],
-weights[2],
],
dim=0,
)
kernel7 = torch.cat(
[
weights[6],
weights[7],
weights[3],
-weights[2],
-weights[5],
weights[4],
weights[0],
weights[1],
],
dim=0,
)
kernel8 = torch.cat(
[
weights[7],
weights[6],
-weights[5],
weights[4],
weights[3],
-weights[2],
weights[1],
weights[0],
],
dim=0,
)
kernel = torch.cat([kernel1, kernel2, kernel3, kernel4, kernel5, kernel6, kernel7, kernel8], dim=1)
return kernel
class CliffordSpectralConv2d_deprecated(nn.Module):
"""2d Clifford Fourier transform.
Performs (i) Clifford Fourier transform over the multivector of 2d Clifford algebras,
(ii) weight multiplication in the Clifford Fourier space using the geometric product,
(iii) inverse Clifford Fourier transform.
Args:
in_channels (int): Number of input channels.
out_channels (int): Number of output channels.
modes1 (int): Number of Fourier modes to use in the first dimension.
modes2 (int): Number of Fourier modes to use in the second dimension.
"""
def __init__(
self,
in_channels: int,
out_channels: int,
modes1: int,
modes2: int,
) -> None:
super().__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.modes1 = modes1
self.modes2 = modes2
scale = 1 / (in_channels * out_channels)
self.weights = nn.Parameter(
scale * torch.rand(4, in_channels, out_channels, self.modes1 * 2, self.modes2 * 2, dtype=torch.float32)
)
def forward(self, vector: torch.Tensor, spinor: torch.Tensor) -> torch.Tensor:
# TODO: should the inputs and outputs be Multivectors?
# Compute Fourier coefficients up to factor of e^(- something constant)
vector_ft = torch.fft.fft2(vector)
spinor_ft = torch.fft.fft2(spinor)
multivector_ft = torch.cat(
(
spinor_ft.real,
vector_ft.real,
vector_ft.imag,
spinor_ft.imag,
),
dim=1,
)
# Clifford Fourier modes
out_ft = torch.zeros_like(
multivector_ft,
dtype=torch.float,
device=multivector_ft.device,
)
input = torch.cat(
(
torch.cat(
(
multivector_ft[:, :, : self.modes1, : self.modes2],
multivector_ft[:, :, : self.modes1, -self.modes2 :],
),
-1,
),
torch.cat(
(
multivector_ft[:, :, -self.modes1 :, : self.modes2],
multivector_ft[:, :, -self.modes1 :, -self.modes2 :],
),
-1,
),
),
-2,
)
# TODO: refactor
# This is a bit ugly; it likely shouldn't need this helper and should use something from `cliffordkernels`.
kernel = get_clifford_linear_kernel_2d(self.weights)
output = batchmul2d(input, kernel)
out_ft[:, :, : self.modes1, : self.modes2] = output[:, :, : self.modes1, : self.modes2]
out_ft[:, :, -self.modes1 :, : self.modes2] = output[:, :, -self.modes1 :, : self.modes2]
out_ft[:, :, : self.modes1, -self.modes2 :] = output[:, :, : self.modes1, -self.modes2 :]
out_ft[:, :, -self.modes1 :, -self.modes2 :] = output[:, :, -self.modes1 :, -self.modes2 :]
out_ft = out_ft.reshape(out_ft.size(0), 4, -1, *out_ft.shape[-2:])
out_vector_ft = torch.complex(out_ft[:, 1], out_ft[:, 2])
out_spinor_ft = torch.complex(out_ft[:, 0], out_ft[:, 3])
# Return to physical space
vector = torch.fft.ifft2(out_vector_ft, s=(vector.size(-2), vector.size(-1)))
spinor = torch.fft.ifft2(out_spinor_ft, s=(spinor.size(-2), spinor.size(-1)))
return vector, spinor
class CliffordSpectralConv3d_deprecated(nn.Module):
"""3d Clifford Fourier transform.
Performs (i) Clifford Fourier transform over the multivector of 3d Clifford algebras,
(ii) weight multiplication in the Clifford Fourier space using the geometric product,
(iii) inverse Clifford Fourier transform.
Args:
in_channels (int): Number of input channels.
out_channels (int): Number of output channels.
modes1 (int): Number of Fourier modes to use in the first dimension.
modes2 (int): Number of Fourier modes to use in the second dimension.
modes3 (int): Number of Fourier modes to use in the third dimension.
"""
def __init__(self, in_channels: int, out_channels: int, modes1: int, modes2: int, modes3: int):
super().__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.modes1 = modes1
self.modes2 = modes2
self.modes3 = modes3
scale = 1 / (in_channels * out_channels)
self.weights = nn.Parameter(
scale
* torch.rand(
8,
in_channels,
out_channels,
self.modes1 * 2,
self.modes2 * 2,
self.modes3 * 2,
dtype=torch.float32,
)
)
def forward(
self, dual_pair_1: torch.Tensor, dual_pair_2: torch.Tensor, dual_pair_3: torch.Tensor, dual_pair_4: torch.Tensor
) -> torch.Tensor:
# TODO: should the inputs and outputs be Multivectors?
x_dual_pair_1_ft = torch.fft.fftn(dual_pair_1, dim=[-3, -2, -1])
x_dual_pair_2_ft = torch.fft.fftn(dual_pair_2, dim=[-3, -2, -1])
x_dual_pair_3_ft = torch.fft.fftn(dual_pair_3, dim=[-3, -2, -1])
x_dual_pair_4_ft = torch.fft.fftn(dual_pair_4, dim=[-3, -2, -1])
multivector_ft = torch.stack(
(
x_dual_pair_1_ft.real,
x_dual_pair_2_ft.real,
x_dual_pair_3_ft.real,
x_dual_pair_4_ft.real,
x_dual_pair_4_ft.imag,
x_dual_pair_3_ft.imag,
x_dual_pair_2_ft.imag,
x_dual_pair_1_ft.imag,
),
dim=1,
)
# Clifford Fourier modes
out_ft = torch.zeros_like(
multivector_ft,
dtype=torch.float,
device=multivector_ft.device,
)
input = torch.cat(
(
torch.cat(
(
torch.cat(
(
multivector_ft[:, :, :, : self.modes1, : self.modes2, : self.modes3],
multivector_ft[:, :, :, : self.modes1, : self.modes2, -self.modes3 :],
),
-1,
),
torch.cat(
(
multivector_ft[:, :, :, : self.modes1, -self.modes2 :, : self.modes3],
multivector_ft[:, :, :, : self.modes1, -self.modes2 :, -self.modes3 :],
),
-1,
),
),
-2,
),
torch.cat(
(
torch.cat(
(
multivector_ft[:, :, :, -self.modes1 :, : self.modes2, : self.modes3],
multivector_ft[:, :, :, -self.modes1 :, : self.modes2, -self.modes3 :],
),
-1,
),
torch.cat(
(
multivector_ft[:, :, :, -self.modes1 :, -self.modes2 :, : self.modes3],
multivector_ft[:, :, :, -self.modes1 :, -self.modes2 :, -self.modes3 :],
),
-1,
),
),
-2,
),
),
-3,
)
kernel = get_clifford_linear_kernel_3d(self.weights)
bs = input.size(0)
out = batchmul3d(input.reshape(bs, -1, *input.size()[3:]), kernel)
output = out.reshape(bs, 8, -1, *out.shape[-3:])
out_ft[:, :, :, : self.modes1, : self.modes2, : self.modes3] = output[
:, :, :, : self.modes1, : self.modes2, : self.modes3
]
out_ft[:, :, :, : self.modes1, : self.modes2, -self.modes3 :] = output[
:, :, :, : self.modes1, : self.modes2, -self.modes3 :
]
out_ft[:, :, :, : self.modes1, -self.modes2 :, : self.modes3] = output[
:, :, :, : self.modes1, -self.modes2 :, : self.modes3
]
out_ft[:, :, :, : self.modes1, -self.modes2 :, -self.modes3 :] = output[
:, :, :, : self.modes1, -self.modes2 :, -self.modes3 :
]
out_ft[:, :, :, -self.modes1 :, : self.modes2, : self.modes3] = output[
:, :, :, -self.modes1 :, : self.modes2, : self.modes3
]
out_ft[:, :, :, -self.modes1 :, : self.modes2, -self.modes3 :] = output[
:, :, :, -self.modes1 :, : self.modes2, -self.modes3 :
]
out_ft[:, :, :, -self.modes1 :, -self.modes2 :, : self.modes3] = output[
:, :, :, -self.modes1 :, -self.modes2 :, : self.modes3
]
out_ft[:, :, :, -self.modes1 :, -self.modes2 :, -self.modes3 :] = output[
:, :, :, -self.modes1 :, -self.modes2 :, -self.modes3 :
]
out_x_dual_pair_1_ft = torch.complex(out_ft[:, 0], out_ft[:, 7])
out_x_dual_pair_2_ft = torch.complex(out_ft[:, 1], out_ft[:, 6])
out_x_dual_pair_3_ft = torch.complex(out_ft[:, 2], out_ft[:, 5])
out_x_dual_pair_4_ft = torch.complex(out_ft[:, 3], out_ft[:, 4])
# Return to physical space
out_x_dual_pair_1 = torch.fft.ifftn(
out_x_dual_pair_1_ft,
s=(dual_pair_1.size(-3), dual_pair_1.size(-2), dual_pair_1.size(-1)),
)
out_x_dual_pair_2 = torch.fft.ifftn(
out_x_dual_pair_2_ft,
s=(dual_pair_2.size(-3), dual_pair_2.size(-2), dual_pair_2.size(-1)),
)
out_x_dual_pair_3 = torch.fft.ifftn(
out_x_dual_pair_3_ft,
s=(dual_pair_3.size(-3), dual_pair_3.size(-2), dual_pair_3.size(-1)),
)
out_x_dual_pair_4 = torch.fft.ifftn(
out_x_dual_pair_4_ft,
s=(dual_pair_4.size(-3), dual_pair_4.size(-2), dual_pair_4.size(-1)),
)
return out_x_dual_pair_1, out_x_dual_pair_2, out_x_dual_pair_3, out_x_dual_pair_4
|
cliffordlayers/cliffordlayers/nn/modules/cliffordfourier_deprecated.py/0
|
{
"file_path": "cliffordlayers/cliffordlayers/nn/modules/cliffordfourier_deprecated.py",
"repo_id": "cliffordlayers",
"token_count": 7953
}
| 691 |
document$.subscribe(function() {
var tables = document.querySelectorAll("article table:not([class])")
tables.forEach(function(table) {
new Tablesort(table)
})
})
|
cliffordlayers/docs/javascripts/tablesort.js/0
|
{
"file_path": "cliffordlayers/docs/javascripts/tablesort.js",
"repo_id": "cliffordlayers",
"token_count": 64
}
| 692 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import torch
from cliffordlayers.models.basic.custom_layers import (
CliffordConv2dScalarVectorEncoder,
CliffordConv2dScalarVectorDecoder,
CliffordConv3dMaxwellEncoder,
CliffordConv3dMaxwellDecoder,
)
def test_CliffordFluidNet2d_conv_encoding():
"""Test shapes of custom CliffordFluidNet2d encoding modules."""
in_channels = 8
out_channels = 16
x = torch.randn(1, in_channels, 128, 128, 3)
clifford_conv = CliffordConv2dScalarVectorEncoder(
g=[1, 1], in_channels=in_channels, out_channels=out_channels, kernel_size=3, padding=1
)
x_out = clifford_conv(x)
clifford_conv_rotation = CliffordConv2dScalarVectorEncoder(
g=[-1, -1], in_channels=in_channels, out_channels=out_channels, kernel_size=3, padding=1, rotation=True
)
x_out_rot = clifford_conv_rotation(x)
assert x_out.shape == (1, out_channels, 128, 128, 4)
assert x_out_rot.shape == (1, out_channels, 128, 128, 4)
def test_Clifford2dFluidNet_conv_decoding():
"""Test shapes of custom CliffordFluidNet2d decoding modules."""
in_channels = 8
out_channels = 16
x = torch.randn(1, in_channels, 128, 128, 4)
clifford_conv = CliffordConv2dScalarVectorDecoder(
g=[1, 1],
in_channels=in_channels,
out_channels=out_channels,
kernel_size=3,
padding=1,
)
x_out = clifford_conv(x)
clifford_conv_rotation = CliffordConv2dScalarVectorDecoder(
g=[-1, -1], in_channels=in_channels, out_channels=out_channels, kernel_size=3, padding=1, rotation=True
)
x_out_rot = clifford_conv_rotation(x)
assert x_out.shape == (1, out_channels, 128, 128, 3)
assert x_out_rot.shape == (1, out_channels, 128, 128, 3)
def test_Clifford3dMaxwell_conv_encoding():
"""Test shapes of custom CliffordMaxwell3d encoding modules."""
in_channels = 8
out_channels = 16
x = torch.randn(1, in_channels, 64, 64, 64, 6)
clifford_conv = CliffordConv3dMaxwellEncoder(
g=[1, 1, 1], in_channels=in_channels, out_channels=out_channels, kernel_size=1, padding=0
)
x_out = clifford_conv(x)
assert x_out.shape == (1, out_channels, 64, 64, 64, 8)
def test_Clifford3dMaxwell_conv_decoding():
"""Test shapes of custom CliffordMaxwell3d decoding modules."""
in_channels = 8
out_channels = 16
x = torch.randn(1, in_channels, 64, 64, 64, 8)
clifford_conv = CliffordConv3dMaxwellDecoder(
g=[1, 1, 1], in_channels=in_channels, out_channels=out_channels, kernel_size=1, padding=0
)
x_out = clifford_conv(x)
assert x_out.shape == (1, out_channels, 64, 64, 64, 6)
|
cliffordlayers/tests/test_clifford_convolution_custom.py/0
|
{
"file_path": "cliffordlayers/tests/test_clifford_convolution_custom.py",
"repo_id": "cliffordlayers",
"token_count": 1155
}
| 693 |
#!/usr/bin/env/python
import numpy as np
import tensorflow as tf
import queue
import threading
import pickle
from rdkit.Chem import AllChem
from rdkit.Chem import Draw
from rdkit import Chem
from rdkit.Chem import rdmolops
from collections import defaultdict, deque
import os
import heapq
import planarity
import sascorer
from rdkit.Chem import Crippen
from rdkit.Chem import QED
SMALL_NUMBER = 1e-7
LARGE_NUMBER = 1e10
geometry_numbers=[3, 4, 5, 6] # triangle, square, pentagon, hexagon
# bond mapping
bond_dict = {'SINGLE': 0, 'DOUBLE': 1, 'TRIPLE': 2, "AROMATIC": 3}
number_to_bond= {0: Chem.rdchem.BondType.SINGLE, 1:Chem.rdchem.BondType.DOUBLE,
2: Chem.rdchem.BondType.TRIPLE, 3:Chem.rdchem.BondType.AROMATIC}
def dataset_info(dataset): #qm9, zinc, cep
if dataset=='qm9':
return { 'atom_types': ["H", "C", "N", "O", "F"],
'maximum_valence': {0: 1, 1: 4, 2: 3, 3: 2, 4: 1},
'number_to_atom': {0: "H", 1: "C", 2: "N", 3: "O", 4: "F"},
'bucket_sizes': np.array(list(range(4, 28, 2)) + [29])
}
elif dataset=='zinc':
return { 'atom_types': ['Br1(0)', 'C4(0)', 'Cl1(0)', 'F1(0)', 'H1(0)', 'I1(0)',
'N2(-1)', 'N3(0)', 'N4(1)', 'O1(-1)', 'O2(0)', 'S2(0)','S4(0)', 'S6(0)'],
        'maximum_valence': {0: 1, 1: 4, 2: 1, 3: 1, 4: 1, 5:1, 6:2, 7:3, 8:4, 9:1, 10:2, 11:2, 12:4, 13:6, 14:3},  # index 14 has no matching entry in atom_types/number_to_atom above and appears unused
'number_to_atom': {0: 'Br', 1: 'C', 2: 'Cl', 3: 'F', 4: 'H', 5:'I', 6:'N', 7:'N', 8:'N', 9:'O', 10:'O', 11:'S', 12:'S', 13:'S'},
'bucket_sizes': np.array([28,31,33,35,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,53,55,58,84])
}
elif dataset=="cep":
return { 'atom_types': ["C", "S", "N", "O", "Se", "Si"],
'maximum_valence': {0: 4, 1: 2, 2: 3, 3: 2, 4: 2, 5: 4},
'number_to_atom': {0: "C", 1: "S", 2: "N", 3: "O", 4: "Se", 5: "Si"},
'bucket_sizes': np.array([25,28,29,30, 32, 33,34,35,36,37,38,39,43,46])
}
else:
print("the datasets in use are qm9|zinc|cep")
exit(1)
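# Example (a sketch): looking up the qm9 vocabulary defined above.
#   info = dataset_info('qm9')
#   assert info['atom_types'][1] == 'C' and info['maximum_valence'][1] == 4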
# add one edge to adj matrix
def add_edge_mat(amat, src, dest, e, considering_edge_type=True):
if considering_edge_type:
amat[e, dest, src] = 1
amat[e, src, dest] = 1
else:
amat[src, dest] = 1
amat[dest, src] = 1
def graph_to_adj_mat(graph, max_n_vertices, num_edge_types, tie_fwd_bkwd=True, considering_edge_type=True):
if considering_edge_type:
amat = np.zeros((num_edge_types, max_n_vertices, max_n_vertices))
for src, e, dest in graph:
add_edge_mat(amat, src, dest, e)
else:
amat = np.zeros((max_n_vertices, max_n_vertices))
for src, e, dest in graph:
add_edge_mat(amat, src, dest, e, considering_edge_type=False)
return amat
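# Example (a sketch): a single bond between vertices 0 and 1 becomes a symmetric
# entry in the edge-typed adjacency tensor.
#   amat = graph_to_adj_mat([(0, bond_dict['SINGLE'], 1)], max_n_vertices=2, num_edge_types=3)
#   assert amat[0, 0, 1] == amat[0, 1, 0] == 1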
def check_edge_prob(dataset):
with open('intermediate_results_%s' % dataset, 'rb') as f:
adjacency_matrix, edge_type_prob, edge_type_label, node_symbol_prob, node_symbol, edge_prob, edge_prob_label, qed_prediction, qed_labels,mean, logvariance=pickle.load(f)
for ep, epl in zip(edge_prob, edge_prob_label):
print("prediction")
print(ep)
print("label")
print(epl)
# check whether a graph is planar or not
def is_planar(location, adj_list, is_dense=False):
if is_dense:
new_adj_list=defaultdict(list)
for x in range(len(adj_list)):
for y in range(len(adj_list)):
if adj_list[x][y]==1:
new_adj_list[x].append((y,1))
adj_list=new_adj_list
edges=[]
seen=set()
for src, l in adj_list.items():
for dst, e in l:
if (dst, src) not in seen:
edges.append((src,dst))
seen.add((src,dst))
edges+=[location, (location[1], location[0])]
return planarity.is_planar(edges)
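# Example (a sketch): a triangle plus the probed edge (0, 3) remains planar.
#   adj = {0: [(1, 0), (2, 0)], 1: [(0, 0), (2, 0)], 2: [(0, 0), (1, 0)], 3: []}
#   is_planar((0, 3), adj)  # -> True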
def check_edge_type_prob(dataset, filter=None):
with open('intermediate_results_%s' % dataset, 'rb') as f:
adjacency_matrix, edge_type_prob, edge_type_label, node_symbol_prob, node_symbol, edge_prob, edge_prob_label, qed_prediction, qed_labels,mean, logvariance=pickle.load(f)
for ep, epl in zip(edge_type_prob, edge_type_label):
print("prediction")
print(ep)
print("label")
print(epl)
def check_mean(dataset, filter=None):
with open('intermediate_results_%s' % dataset, 'rb') as f:
adjacency_matrix, edge_type_prob, edge_type_label, node_symbol_prob, node_symbol, edge_prob, edge_prob_label, qed_prediction, qed_labels,mean, logvariance=pickle.load(f)
print(mean.tolist()[:40])
def check_variance(dataset, filter=None):
with open('intermediate_results_%s' % dataset, 'rb') as f:
adjacency_matrix, edge_type_prob, edge_type_label, node_symbol_prob, node_symbol, edge_prob, edge_prob_label, qed_prediction, qed_labels,mean, logvariance=pickle.load(f)
print(np.exp(logvariance).tolist()[:40])
def check_node_prob(dataset, filter=None):
print(dataset)
with open('intermediate_results_%s' % dataset, 'rb') as f:
adjacency_matrix, edge_type_prob, edge_type_label, node_symbol_prob, node_symbol, edge_prob, edge_prob_label, qed_prediction, qed_labels,mean, logvariance=pickle.load(f)
print(node_symbol_prob[0])
print(node_symbol[0])
print(node_symbol_prob.shape)
def check_qed_predictions(dataset, filter=None):  # renamed from check_qed: a different check_qed(dataset) is defined later in this module
with open('intermediate_results_%s' % dataset, 'rb') as f:
adjacency_matrix, edge_type_prob, edge_type_label, node_symbol_prob, node_symbol, edge_prob, edge_prob_label, qed_prediction, qed_labels,mean, logvariance=pickle.load(f)
print(qed_prediction)
print(qed_labels[0])
print(np.mean(np.abs(qed_prediction-qed_labels[0])))
def onehot(idx, length):
    z = [0 for _ in range(length)]
    z[idx] = 1
    return z
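# Example: onehot(2, 4) -> [0, 0, 1, 0]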
def generate_empty_adj_matrix(maximum_vertice_num):
return np.zeros((1, 3, maximum_vertice_num, maximum_vertice_num))
# standard normal with shape [a1, a2, a3]
def generate_std_normal(a1, a2, a3):
return np.random.normal(0, 1, [a1, a2, a3])
def check_validity(dataset):
with open('generated_smiles_%s' % dataset, 'rb') as f:
all_smiles=set(pickle.load(f))
count=0
for smiles in all_smiles:
mol = Chem.MolFromSmiles(smiles)
if mol is not None:
count+=1
return len(all_smiles), count
# Get length for each graph based on node masks
def get_graph_length(all_node_mask):
all_lengths=[]
for graph in all_node_mask:
if 0 in graph:
length=np.argmin(graph)
else:
length=len(graph)
all_lengths.append(length)
return all_lengths
def make_dir(path):
if not os.path.exists(path):
os.mkdir(path)
print('made directory %s' % path)
# sample node symbols based on node predictions
def sample_node_symbol(all_node_symbol_prob, all_lengths, dataset):
all_node_symbol=[]
for graph_idx, graph_prob in enumerate(all_node_symbol_prob):
node_symbol=[]
for node_idx in range(all_lengths[graph_idx]):
symbol=np.random.choice(np.arange(len(dataset_info(dataset)['atom_types'])), p=graph_prob[node_idx])
node_symbol.append(symbol)
all_node_symbol.append(node_symbol)
return all_node_symbol
def dump(file_name, content):
with open(file_name, 'wb') as out_file:
pickle.dump(content, out_file, pickle.HIGHEST_PROTOCOL)
def load(file_name):
with open(file_name, 'rb') as f:
return pickle.load(f)
# generate a feature marking edges whose addition would create two rings sharing more than two edges
def get_overlapped_edge_feature(edge_mask, color, new_mol):
overlapped_edge_feature=[]
for node_in_focus, neighbor in edge_mask:
if color[neighbor] == 1:
# attempt to add the edge
new_mol.AddBond(int(node_in_focus), int(neighbor), number_to_bond[0])
# Check whether there are two cycles having more than two overlap edges
try:
ssr = Chem.GetSymmSSSR(new_mol)
            except Exception:
ssr = []
overlap_flag = False
for idx1 in range(len(ssr)):
for idx2 in range(idx1+1, len(ssr)):
if len(set(ssr[idx1]) & set(ssr[idx2])) > 2:
overlap_flag=True
# remove that edge
new_mol.RemoveBond(int(node_in_focus), int(neighbor))
if overlap_flag:
overlapped_edge_feature.append((node_in_focus, neighbor))
return overlapped_edge_feature
# adj_list is a [3, v, v] array or a defaultdict of adjacency lists. Computes BFS distances from `start` on a graph.
def bfs_distance(start, adj_list, is_dense=False):
distances={}
visited=set()
queue=deque([(start, 0)])
visited.add(start)
while len(queue) != 0:
current, d=queue.popleft()
for neighbor, edge_type in adj_list[current]:
if neighbor not in visited:
distances[neighbor]=d+1
visited.add(neighbor)
queue.append((neighbor, d+1))
return [(start, node, d) for node, d in distances.items()]
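# Example (a sketch): BFS distances from node 0 on the path graph 0-1-2.
#   adj = {0: [(1, 0)], 1: [(0, 0), (2, 0)], 2: [(1, 0)]}
#   bfs_distance(0, adj)  # -> [(0, 1, 1), (0, 2, 2)]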
def get_initial_valence(node_symbol, dataset):
return [dataset_info(dataset)['maximum_valence'][s] for s in node_symbol]
def add_atoms(new_mol, node_symbol, dataset):
for number in node_symbol:
if dataset=='qm9' or dataset=='cep':
idx=new_mol.AddAtom(Chem.Atom(dataset_info(dataset)['number_to_atom'][number]))
elif dataset=='zinc':
new_atom = Chem.Atom(dataset_info(dataset)['number_to_atom'][number])
charge_num=int(dataset_info(dataset)['atom_types'][number].split('(')[1].strip(')'))
new_atom.SetFormalCharge(charge_num)
new_mol.AddAtom(new_atom)
def visualize_mol(path, new_mol):
AllChem.Compute2DCoords(new_mol)
print(path)
Draw.MolToFile(new_mol,path)
def get_idx_of_largest_frag(frags):
return np.argmax([len(frag) for frag in frags])
def remove_extra_nodes(new_mol):
frags=Chem.rdmolops.GetMolFrags(new_mol)
while len(frags) > 1:
# Get the idx of the frag with largest length
largest_idx = get_idx_of_largest_frag(frags)
for idx in range(len(frags)):
if idx != largest_idx:
# Remove one atom that is not in the largest frag
new_mol.RemoveAtom(frags[idx][0])
break
frags=Chem.rdmolops.GetMolFrags(new_mol)
def novelty_metric(dataset):
with open('all_smiles_%s.pkl' % dataset, 'rb') as f:
all_smiles=set(pickle.load(f))
with open('generated_smiles_%s' % dataset, 'rb') as f:
generated_all_smiles=set(pickle.load(f))
total_new_molecules=0
for generated_smiles in generated_all_smiles:
if generated_smiles not in all_smiles:
total_new_molecules+=1
return float(total_new_molecules)/len(generated_all_smiles)
def count_edge_type(dataset, generated=True):
if generated:
filename='generated_smiles_%s' % dataset
else:
filename='all_smiles_%s.pkl' % dataset
with open(filename, 'rb') as f:
all_smiles=set(pickle.load(f))
counter=defaultdict(int)
edge_type_per_molecule=[]
for smiles in all_smiles:
nodes, edges=to_graph(smiles, dataset)
edge_type_this_molecule=[0]* len(bond_dict)
for edge in edges:
edge_type=edge[1]
edge_type_this_molecule[edge_type]+=1
counter[edge_type]+=1
edge_type_per_molecule.append(edge_type_this_molecule)
return len(all_smiles), counter, edge_type_per_molecule
def need_kekulize(mol):
for bond in mol.GetBonds():
if bond_dict[str(bond.GetBondType())] >= 3:
return True
return False
def check_planar(dataset):
with open("generated_smiles_%s" % dataset, 'rb') as f:
all_smiles=set(pickle.load(f))
total_non_planar=0
for smiles in all_smiles:
try:
nodes, edges=to_graph(smiles, dataset)
        except Exception:
continue
edges=[(src, dst) for src, e, dst in edges]
if edges==[]:
continue
if not planarity.is_planar(edges):
total_non_planar+=1
return len(all_smiles), total_non_planar
def count_atoms(dataset):
with open("generated_smiles_%s" % dataset, 'rb') as f:
all_smiles=set(pickle.load(f))
counter=defaultdict(int)
atom_count_per_molecule=[] # record the counts for each molecule
for smiles in all_smiles:
try:
nodes, edges=to_graph(smiles, dataset)
        except Exception:
continue
atom_count_this_molecule=[0]*len(dataset_info(dataset)['atom_types'])
for node in nodes:
atom_type=np.argmax(node)
atom_count_this_molecule[atom_type]+=1
counter[atom_type]+=1
atom_count_per_molecule.append(atom_count_this_molecule)
return len(all_smiles), counter, atom_count_per_molecule
def to_graph(smiles, dataset):
mol = Chem.MolFromSmiles(smiles)
if mol is None:
return [], []
# Kekulize it
if need_kekulize(mol):
rdmolops.Kekulize(mol)
if mol is None:
return None, None
# remove stereo information, such as inward and outward edges
Chem.RemoveStereochemistry(mol)
edges = []
nodes = []
for bond in mol.GetBonds():
edges.append((bond.GetBeginAtomIdx(), bond_dict[str(bond.GetBondType())], bond.GetEndAtomIdx()))
assert bond_dict[str(bond.GetBondType())] != 3
for atom in mol.GetAtoms():
if dataset=='qm9' or dataset=="cep":
nodes.append(onehot(dataset_info(dataset)['atom_types'].index(atom.GetSymbol()), len(dataset_info(dataset)['atom_types'])))
elif dataset=='zinc': # transform using "<atom_symbol><valence>(<charge>)" notation
symbol = atom.GetSymbol()
valence = atom.GetTotalValence()
charge = atom.GetFormalCharge()
atom_str = "%s%i(%i)" % (symbol, valence, charge)
if atom_str not in dataset_info(dataset)['atom_types']:
print('unrecognized atom type %s' % atom_str)
return [], []
nodes.append(onehot(dataset_info(dataset)['atom_types'].index(atom_str), len(dataset_info(dataset)['atom_types'])))
return nodes, edges
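# Example (a sketch): ethanol on qm9; hydrogens are implicit in the SMILES, so only
# the three heavy atoms appear as nodes.
#   nodes, edges = to_graph('CCO', 'qm9')
#   edges  # -> [(0, 0, 1), (1, 0, 2)], i.e., two single bonds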
def check_uniqueness(dataset):
with open('generated_smiles_%s' % dataset, 'rb') as f:
all_smiles=pickle.load(f)
original_num = len(all_smiles)
all_smiles=set(all_smiles)
new_num = len(all_smiles)
return new_num/original_num
def shape_count(dataset, remove_print=False, all_smiles=None):
    if all_smiles is None:
with open('generated_smiles_%s' % dataset, 'rb') as f:
all_smiles=set(pickle.load(f))
geometry_counts=[0]*len(geometry_numbers)
geometry_counts_per_molecule=[] # record the geometry counts for each molecule
for smiles in all_smiles:
nodes, edges = to_graph(smiles, dataset)
if len(edges)<=0:
continue
new_mol=Chem.MolFromSmiles(smiles)
ssr = Chem.GetSymmSSSR(new_mol)
counts_for_molecule=[0] * len(geometry_numbers)
for idx in range(len(ssr)):
ring_len=len(list(ssr[idx]))
if ring_len in geometry_numbers:
geometry_counts[geometry_numbers.index(ring_len)]+=1
counts_for_molecule[geometry_numbers.index(ring_len)]+=1
geometry_counts_per_molecule.append(counts_for_molecule)
return len(all_smiles), geometry_counts, geometry_counts_per_molecule
def check_adjacent_sparse(adj_list, node, neighbor_in_doubt):
for neighbor, edge_type in adj_list[node]:
if neighbor == neighbor_in_doubt:
return True, edge_type
return False, None
def glorot_init(shape):
initialization_range = np.sqrt(6.0 / (shape[-2] + shape[-1]))
return np.random.uniform(low=-initialization_range, high=initialization_range, size=shape).astype(np.float32)
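# Glorot/Xavier uniform: samples from U(-r, r) with r = sqrt(6 / (fan_in + fan_out)),
# which keeps activation variances roughly constant across layers. For example,
# glorot_init((100, 100)) has a standard deviation of about sqrt(2 / 200) ~= 0.1.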
class ThreadedIterator:
"""An iterator object that computes its elements in a parallel thread to be ready to be consumed.
The iterator should *not* return None"""
def __init__(self, original_iterator, max_queue_size: int=2):
self.__queue = queue.Queue(maxsize=max_queue_size)
self.__thread = threading.Thread(target=lambda: self.worker(original_iterator))
self.__thread.start()
def worker(self, original_iterator):
for element in original_iterator:
            assert element is not None, 'By convention, iterator elements must not be None'
self.__queue.put(element, block=True)
self.__queue.put(None, block=True)
def __iter__(self):
next_element = self.__queue.get(block=True)
while next_element is not None:
yield next_element
next_element = self.__queue.get(block=True)
self.__thread.join()
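# Example (a sketch): prepare minibatches on a background thread while training.
# `minibatch_generator` and `train_step` are illustrative names, not defined here.
#   for batch in ThreadedIterator(minibatch_generator(), max_queue_size=4):
#       train_step(batch)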
# Implements multilayer perceptron
class MLP(object):
def __init__(self, in_size, out_size, hid_sizes, dropout_keep_prob):
self.in_size = in_size
self.out_size = out_size
self.hid_sizes = hid_sizes
self.dropout_keep_prob = dropout_keep_prob
self.params = self.make_network_params()
def make_network_params(self):
dims = [self.in_size] + self.hid_sizes + [self.out_size]
weight_sizes = list(zip(dims[:-1], dims[1:]))
weights = [tf.Variable(self.init_weights(s), name='MLP_W_layer%i' % i)
for (i, s) in enumerate(weight_sizes)]
biases = [tf.Variable(np.zeros(s[-1]).astype(np.float32), name='MLP_b_layer%i' % i)
for (i, s) in enumerate(weight_sizes)]
network_params = {
"weights": weights,
"biases": biases,
}
return network_params
def init_weights(self, shape):
return np.sqrt(6.0 / (shape[-2] + shape[-1])) * (2 * np.random.rand(*shape).astype(np.float32) - 1)
def __call__(self, inputs):
acts = inputs
for W, b in zip(self.params["weights"], self.params["biases"]):
hid = tf.matmul(acts, tf.nn.dropout(W, self.dropout_keep_prob)) + b
acts = tf.nn.relu(hid)
last_hidden = hid
return last_hidden
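# Example (a sketch, TF1 graph mode): a 16 -> 32 -> 4 MLP; note the last layer is
# returned without the ReLU, so the output can be used directly as logits.
#   mlp = MLP(in_size=16, out_size=4, hid_sizes=[32], dropout_keep_prob=1.0)
#   logits = mlp(tf.zeros([8, 16]))  # shape [8, 4]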
class Graph:
    def __init__(self, V, g):
        self.V = V
        self.graph = g
    def addEdge(self, v, w):
        # Add w to v's list.
        self.graph[v].append(w)
        # Add v to w's list.
        self.graph[w].append(v)
# A recursive function that uses visited[]
# and parent to detect cycle in subgraph
# reachable from vertex v.
def isCyclicUtil(self, v, visited, parent):
# Mark current node as visited
visited[v] = True
        # Recur for all the vertices adjacent
        # to this vertex
for i in self.graph[v]:
            # If an adjacent vertex is not visited,
            # then recur for it
            if not visited[i]:
                if self.isCyclicUtil(i, visited, v):
return True
            # If an adjacent vertex is visited and is not
            # the parent of the current vertex, then
            # there is a cycle.
elif i != parent:
return True
return False
# Returns true if the graph is a tree,
# else false.
def isTree(self):
# Mark all the vertices as not visited
# and not part of recursion stack
visited = [False] * self.V
        # The call to isCyclicUtil serves multiple
        # purposes. It returns true if the graph reachable
        # from vertex 0 is cyclic. It also marks
        # all vertices reachable from 0.
        if self.isCyclicUtil(0, visited, -1):
return False
        # If we find a vertex that is not reachable
        # from 0 (not marked by isCyclicUtil),
        # then we return false
for i in range(self.V):
            if not visited[i]:
return False
return True
# check how many of the generated graphs are trees, i.e. have no cycles
def check_cyclic(dataset, generated=True):
if generated:
with open("generated_smiles_%s" % dataset, 'rb') as f:
all_smiles=set(pickle.load(f))
else:
with open("all_smiles_%s.pkl" % dataset, 'rb') as f:
all_smiles=set(pickle.load(f))
tree_count=0
for smiles in all_smiles:
nodes, edges=to_graph(smiles, dataset)
edges=[(src, dst) for src, e, dst in edges]
if edges==[]:
continue
new_adj_list=defaultdict(list)
for src, dst in edges:
new_adj_list[src].append(dst)
new_adj_list[dst].append(src)
graph=Graph(len(nodes), new_adj_list)
if graph.isTree():
tree_count+=1
return len(all_smiles), tree_count
def check_sascorer(dataset):
with open('generated_smiles_%s' % dataset, 'rb') as f:
all_smiles=set(pickle.load(f))
sa_sum=0
total=0
sa_score_per_molecule=[]
for smiles in all_smiles:
new_mol=Chem.MolFromSmiles(smiles)
try:
val = sascorer.calculateScore(new_mol)
        except Exception:
continue
sa_sum+=val
sa_score_per_molecule.append(val)
total+=1
return sa_sum/total, sa_score_per_molecule
def check_logp(dataset):
with open('generated_smiles_%s' % dataset, 'rb') as f:
all_smiles=set(pickle.load(f))
logp_sum=0
total=0
logp_score_per_molecule=[]
for smiles in all_smiles:
new_mol=Chem.MolFromSmiles(smiles)
try:
val = Crippen.MolLogP(new_mol)
        except Exception:
continue
logp_sum+=val
logp_score_per_molecule.append(val)
total+=1
return logp_sum/total, logp_score_per_molecule
def check_qed(dataset):
with open('generated_smiles_%s' % dataset, 'rb') as f:
all_smiles=set(pickle.load(f))
qed_sum=0
total=0
qed_score_per_molecule=[]
for smiles in all_smiles:
new_mol=Chem.MolFromSmiles(smiles)
try:
val = QED.qed(new_mol)
        except Exception:
continue
qed_sum+=val
qed_score_per_molecule.append(val)
total+=1
return qed_sum/total, qed_score_per_molecule
def sssr_metric(dataset):
with open('generated_smiles_%s' % dataset, 'rb') as f:
all_smiles=set(pickle.load(f))
overlapped_molecule=0
for smiles in all_smiles:
new_mol=Chem.MolFromSmiles(smiles)
ssr = Chem.GetSymmSSSR(new_mol)
overlap_flag=False
for idx1 in range(len(ssr)):
for idx2 in range(idx1+1, len(ssr)):
if len(set(ssr[idx1]) & set(ssr[idx2])) > 2:
overlap_flag=True
if overlap_flag:
overlapped_molecule+=1
return overlapped_molecule/len(all_smiles)
# select the best molecule from a list of (shape score, probability, molecule) tuples
def select_best(all_mol):
# sort by shape
all_mol=sorted(all_mol)
best_shape=all_mol[-1][0]
all_mol=[(p, m) for s, p, m in all_mol if s==best_shape]
# sort by probs
all_mol=sorted(all_mol)
return all_mol[-1][1]
# a series of util functions converting sparse matrix representations to dense ones
def incre_adj_mat_to_dense(incre_adj_mat, num_edge_types, maximum_vertice_num):
new_incre_adj_mat=[]
for sparse_incre_adj_mat in incre_adj_mat:
dense_incre_adj_mat=np.zeros((num_edge_types, maximum_vertice_num,maximum_vertice_num))
for current, adj_list in sparse_incre_adj_mat.items():
for neighbor, edge_type in adj_list:
dense_incre_adj_mat[edge_type][current][neighbor]=1
new_incre_adj_mat.append(dense_incre_adj_mat)
return new_incre_adj_mat # [number_iteration,num_edge_types,maximum_vertice_num, maximum_vertice_num]
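# Example (a sketch): one incremental step containing a single type-0 edge 0 -> 1.
#   step = {0: [(1, 0)]}
#   dense = incre_adj_mat_to_dense([step], num_edge_types=3, maximum_vertice_num=2)
#   assert dense[0][0, 0, 1] == 1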
def distance_to_others_dense(distance_to_others, maximum_vertice_num):
new_all_distance=[]
for sparse_distances in distance_to_others:
dense_distances=np.zeros((maximum_vertice_num), dtype=int)
for x, y, d in sparse_distances:
dense_distances[y]=d
new_all_distance.append(dense_distances)
return new_all_distance # [number_iteration, maximum_vertice_num]
def overlapped_edge_features_to_dense(overlapped_edge_features, maximum_vertice_num):
new_overlapped_edge_features=[]
for sparse_overlapped_edge_features in overlapped_edge_features:
dense_overlapped_edge_features=np.zeros((maximum_vertice_num), dtype=int)
for node_in_focus, neighbor in sparse_overlapped_edge_features:
dense_overlapped_edge_features[neighbor]=1
new_overlapped_edge_features.append(dense_overlapped_edge_features)
return new_overlapped_edge_features # [number_iteration, maximum_vertice_num]
def node_sequence_to_dense(node_sequence,maximum_vertice_num):
new_node_sequence=[]
for node in node_sequence:
s=[0]*maximum_vertice_num
s[node]=1
new_node_sequence.append(s)
return new_node_sequence # [number_iteration, maximum_vertice_num]
def edge_type_masks_to_dense(edge_type_masks, maximum_vertice_num, num_edge_types):
new_edge_type_masks=[]
for mask_sparse in edge_type_masks:
mask_dense=np.zeros([num_edge_types, maximum_vertice_num])
for node_in_focus, neighbor, bond in mask_sparse:
mask_dense[bond][neighbor]=1
new_edge_type_masks.append(mask_dense)
return new_edge_type_masks #[number_iteration, 3, maximum_vertice_num]
def edge_type_labels_to_dense(edge_type_labels, maximum_vertice_num,num_edge_types):
new_edge_type_labels=[]
for labels_sparse in edge_type_labels:
labels_dense=np.zeros([num_edge_types, maximum_vertice_num])
for node_in_focus, neighbor, bond in labels_sparse:
        labels_dense[bond][neighbor]= 1/float(len(labels_sparse)) # normalize so the labels form a probability distribution
new_edge_type_labels.append(labels_dense)
return new_edge_type_labels #[number_iteration, 3, maximum_vertice_num]
def edge_masks_to_dense(edge_masks, maximum_vertice_num):
new_edge_masks=[]
for mask_sparse in edge_masks:
mask_dense=[0] * maximum_vertice_num
for node_in_focus, neighbor in mask_sparse:
mask_dense[neighbor]=1
new_edge_masks.append(mask_dense)
return new_edge_masks # [number_iteration, maximum_vertice_num]
def edge_labels_to_dense(edge_labels, maximum_vertice_num):
new_edge_labels=[]
for label_sparse in edge_labels:
label_dense=[0] * maximum_vertice_num
for node_in_focus, neighbor in label_sparse:
label_dense[neighbor]=1/float(len(label_sparse))
new_edge_labels.append(label_dense)
return new_edge_labels # [number_iteration, maximum_vertice_num]
|
constrained-graph-variational-autoencoder/utils.py/0
|
{
"file_path": "constrained-graph-variational-autoencoder/utils.py",
"repo_id": "constrained-graph-variational-autoencoder",
"token_count": 12760
}
| 694 |
MIT License
Copyright (c) Microsoft Corporation. All rights reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
|
0xDeCA10B/LICENSE/0
|
{
"file_path": "0xDeCA10B/LICENSE",
"repo_id": "0xDeCA10B",
"token_count": 316
}
| 0 |
# See descriptions in `.env`.
# Use the data storage because it is helpful in development.
REACT_APP_ENABLE_SERVICE_DATA_STORE=true
# Disable online safety because it makes development more difficult.
REACT_APP_ENABLE_ONLINE_SAFETY=false
|
0xDeCA10B/demo/client/.env.development/0
|
{
"file_path": "0xDeCA10B/demo/client/.env.development",
"repo_id": "0xDeCA10B",
"token_count": 72
}
| 1 |
import Button from '@material-ui/core/Button'
import CircularProgress from '@material-ui/core/CircularProgress'
import green from '@material-ui/core/colors/green'
import Container from '@material-ui/core/Container'
import Grid from '@material-ui/core/Grid'
import InputLabel from '@material-ui/core/InputLabel'
import MenuItem from '@material-ui/core/MenuItem'
import Paper from '@material-ui/core/Paper'
import Select from '@material-ui/core/Select'
import { withStyles } from '@material-ui/core/styles'
import TextField from '@material-ui/core/TextField'
import Typography from '@material-ui/core/Typography'
import CheckIcon from '@material-ui/icons/Check'
import ClearIcon from '@material-ui/icons/Clear'
import { withSnackbar } from 'notistack'
import PropTypes from 'prop-types'
import React from 'react'
import { ContractLoader } from '../contracts/loader'
import { Encoder } from '../encoding/encoder'
import { getNetworkType, getWeb3 } from '../getWeb3'
import { OnlineSafetyValidator } from '../safety/validator'
import { ModelInformation } from '../storage/data-store'
import { DataStoreFactory } from '../storage/data-store-factory'
import { BASE_TITLE } from '../title'
import { checkStorages, renderStorageSelector } from './storageSelector'
const styles = theme => ({
root: {
...theme.mixins.gutters(),
paddingTop: theme.spacing(2),
paddingBottom: theme.spacing(2),
marginTop: theme.spacing(2),
marginBottom: theme.spacing(2),
},
form: {
paddingTop: 20,
display: 'flex',
flex: 1,
flexDirection: 'column',
},
contractStatus: {
marginTop: 30,
},
addressInput: {
maxWidth: 400,
},
input: {
},
button: {
marginTop: 20,
},
selectorLabel: {
marginTop: 8,
},
selector: {
paddingTop: theme.spacing(1),
marginBottom: 8,
},
detailsDivider: {
paddingTop: 20,
},
})
class AddDeployedModel extends React.Component {
constructor(props) {
super(props)
this.classes = props.classes
this.validator = new OnlineSafetyValidator()
this.web3 = null
// Default to local storage for storing original data.
const storageType = localStorage.getItem('storageType') || 'local'
this.storages = DataStoreFactory.getAll()
this.state = {
// The contract at the specific address is valid.
isValid: undefined,
validatingContract: false,
address: undefined,
name: undefined,
description: undefined,
modelType: 'Classifier64',
encoder: undefined,
storageType,
permittedStorageTypes: [],
}
this.save = this.save.bind(this)
this.handleInputChange = this.handleInputChange.bind(this)
}
componentDidMount = async () => {
document.title = `Add Deployed Model - ${BASE_TITLE}`
checkStorages(this.storages).then(permittedStorageTypes => {
this.setState({ permittedStorageTypes })
})
try {
this.web3 = await getWeb3()
this.contractLoader = new ContractLoader(this.web3)
} catch (error) {
this.notify("Failed to load web3, accounts, or contract. Check console for details.", { variant: 'error' })
console.error(error)
return
}
const currentUrlParams = new URLSearchParams(window.location.search)
const address = currentUrlParams.get('address')
if (address) {
this.setState({ address }, this.validateContract)
}
}
notify(...args) {
return this.props.enqueueSnackbar(...args)
}
dismissNotification(...args) {
return this.props.closeSnackbar(...args)
}
handleInputChange(event) {
const target = event.target
const value = target.type === "checkbox" ? target.checked : target.value
const name = target.name
this.setState({
[name]: value
    }, () => {
if (name === 'storageType') {
localStorage.setItem(name, value)
} else if (name === 'address') {
this.validateContract()
}
})
}
validateContract() {
this.setState({
restrictModelInfo: undefined,
isValid: undefined,
validatingContract: true,
invalidReason: undefined,
}, async () => {
const { address } = this.state
if (!address || address.length === 0) {
this.setState({
isValid: undefined,
validatingContract: false,
invalidReason: "No address was given",
})
return
}
// Make sure not already stored.
const storage = this.storages[this.state.storageType]
try {
await storage.getModel(null, address)
this.setState({
isValid: false,
validatingContract: false,
invalidReason: "A model at this address has already been recorded",
})
this.notify("A model at this address has already been recorded", { variant: 'error' })
return
} catch (_) {
// Nothing was found.
}
this.contractLoader.load(address).then(async collabTrainer => {
const restrictModelInfo = !this.validator.isPermitted(await getNetworkType(), address)
// Be careful that the name and description are not shown if content is restricted.
let name = undefined
let description = undefined
if (!restrictModelInfo) {
[name, description] = await Promise.all([
collabTrainer.name(),
collabTrainer.description()
])
}
const encoder = await collabTrainer.encoder()
this.setState({
name, description, encoder,
restrictModelInfo,
isValid: true,
validatingContract: false,
})
}).catch(err => {
console.error(err)
this.setState({
isValid: false,
validatingContract: false,
invalidReason: err.toString(),
})
this.notify(`The contract is not valid: ${err}`, { variant: 'error' })
return
})
})
}
renderContractStatus() {
let status, detailedStatus
if (this.state.validatingContract) {
status = <CircularProgress size={25} />
detailedStatus = "Checking"
} else if (this.state.isValid) {
status = <CheckIcon style={{ color: green[500] }} />
detailedStatus = "The contract is likely valid"
} else if (this.state.isValid === false) {
status = <ClearIcon color="error" />
detailedStatus = `The contract is likely invalid${this.state.invalidReason !== undefined ? `. ${this.state.invalidReason}.` : ""}`
} else {
detailedStatus = "enter a contract address"
}
return (<Grid container spacing={2}>
<Grid item>
<Typography component="p">
Contract Status:
</Typography>
</Grid>
<Grid item xs={1}>
{status}
</Grid>
<Grid item>
<Typography component="p">
{detailedStatus}
</Typography>
</Grid>
</Grid>)
}
render() {
const disableSave = !this.state.isValid
return (
<Container>
<Paper className={this.classes.root} elevation={1}>
<Typography variant="h5" component="h3">
List a deployed model
</Typography>
<Typography component="p">
Provide the address for the entry point contract that has already been deployed to a blockchain.
Then you will be prompted for other information about the contract.
</Typography>
<form className={this.classes.container} noValidate autoComplete="off">
<div className={this.classes.form} >
{this.renderContractStatus()}
<TextField
name="address"
label="Entry point address"
value={this.state.address || ""}
inputProps={{ 'aria-label': "Entry point address" }}
className={this.classes.addressInput}
margin="normal"
onChange={this.handleInputChange}
/>
<div className={this.classes.selector}>
{renderStorageSelector("Where to store the supplied meta-data about this model",
this.state.storageType, this.handleInputChange, this.state.permittedStorageTypes)}
</div>
<div className={this.classes.detailsDivider}></div>
<Typography component="p">
Provide a valid contract address before filling out the rest of the fields.
</Typography>
<TextField
name="name"
label="Model name"
value={this.state.name || ""}
inputProps={{ 'aria-label': "Model name" }}
margin="normal"
onChange={this.handleInputChange}
disabled={!this.state.isValid}
/>
<TextField
name="description"
label="Model description"
value={this.state.description || ""}
inputProps={{ 'aria-label': "Model description" }}
margin="normal"
onChange={this.handleInputChange}
disabled={!this.state.isValid}
/>
<InputLabel className={this.classes.selectorLabel} htmlFor="model-type">Model type</InputLabel>
<Select className={this.classes.selector}
onChange={this.handleInputChange}
value={this.state.modelType}
inputProps={{
name: 'modelType',
}}
disabled={!this.state.isValid}
>
<MenuItem value={"Classifier64"}>Classifier64</MenuItem>
</Select>
{(this.state.restrictModelInfo === false || Object.values(Encoder).indexOf(this.state.encoder) > -1) && <div>
<Typography variant="h6" component="h6">
Encoder: {this.state.encoder}
</Typography>
<Typography component="p">
An encoder is the method that is used to convert the input (text, image, etc.) into a machine readable format.
</Typography>
</div>}
</div>
</form>
<Button className={this.classes.button} variant="outlined" color="primary" onClick={this.save}
disabled={disableSave}
>
Save
</Button>
</Paper>
</Container>
)
}
async save() {
const { address, name, description, modelType } = this.state
// Validate
if (!name) {
this.notify("Please provide a name", { variant: 'error' })
return
}
if (modelType === undefined) {
this.notify("You must select model type", { variant: 'error' })
return
}
const modelInfo = new ModelInformation({ name, address, description, modelType })
// Save to a database.
const storage = this.storages[this.state.storageType]
storage.saveModelInformation(modelInfo).then(() => {
// Redirect
const redirectWaitS = 5
this.notify(`Saved. Will redirect in ${redirectWaitS} seconds.`, { variant: 'success' })
setTimeout(_ => {
this.props.history.push(`/model?address=${address}&metaDataLocation=${this.state.storageType}`)
}, redirectWaitS * 1000)
}).catch(err => {
console.error(err)
this.notify("There was an error saving the model information. Check the console for details.",
{ variant: 'error' })
})
}
}
AddDeployedModel.propTypes = {
classes: PropTypes.object.isRequired,
}
export default withSnackbar(withStyles(styles)(AddDeployedModel))
|
0xDeCA10B/demo/client/src/components/addDeployedModel.js/0
|
{
"file_path": "0xDeCA10B/demo/client/src/components/addDeployedModel.js",
"repo_id": "0xDeCA10B",
"token_count": 4289
}
| 2 |
pragma solidity ^0.6;
import "../../../lib/Math.sol";
import "../../../lib/SafeMath.sol";
import "../../../lib/SignedSafeMath.sol";
import {IncentiveMechanism, IncentiveMechanism64} from "./IncentiveMechanism.sol";
import {Ownable} from "../ownership/Ownable.sol";
/**
* A base class for contracts that want to reward contributions with points and no financial rewards.
*/
contract Points is Ownable, IncentiveMechanism {
using SafeMath for uint256;
/**
* A data contribution has been validated.
*/
event Refund(
/**
* The recipient of the refund which is the one who originally submitted the data contribution.
*/
address recipient
);
/**
* An award for reporting data has been issued.
*/
event Report(
/**
* The one who submitted the report.
*/
address recipient
);
constructor(
uint32 _refundWaitTimeS,
uint32 _ownerClaimWaitTimeS,
uint32 _anyAddressClaimWaitTimeS
) Ownable() IncentiveMechanism(_refundWaitTimeS, _ownerClaimWaitTimeS, _anyAddressClaimWaitTimeS) public {
}
function getNextAddDataCost() public override view returns (uint) {
return 0;
}
function getNextAddDataCost(uint /* currentTimeS */) public override view returns (uint) {
return 0;
}
}
contract Points64 is IncentiveMechanism64, Points {
using SafeMath for uint256;
using SignedSafeMath for int256;
constructor(
uint32 _refundWaitTimeS,
uint32 _ownerClaimWaitTimeS,
uint32 _anyAddressClaimWaitTimeS
) Points(_refundWaitTimeS, _ownerClaimWaitTimeS, _anyAddressClaimWaitTimeS) public {
}
function getNextAddDataCost(int64[] memory /* data */, uint64 /* classification */)
public override view
returns (uint) {
return 0;
}
function handleAddData(uint /* msgValue */, int64[] memory /* data */, uint64 /* classification */)
public override onlyOwner returns (uint cost) {
cost = 0;
totalSubmitted = totalSubmitted.add(1);
}
function handleRefund(
address submitter,
int64[] memory /* data */, uint64 classification,
uint addedTime,
uint claimableAmount, bool claimedBySubmitter,
uint64 prediction,
uint numClaims)
public override onlyOwner
returns (uint refundAmount) {
// `claimableAmount` should be 0.
refundAmount = claimableAmount;
require(numClaims == 0, "Already claimed.");
require(!claimedBySubmitter, "Already claimed by submitter.");
require(now - addedTime >= refundWaitTimeS, "Not enough time has passed."); // solium-disable-line security/no-block-members
require(prediction == classification, "The model doesn't agree with your contribution.");
addressStats[submitter].numValid += 1;
totalGoodDataCount = totalGoodDataCount.add(1);
emit Refund(submitter);
}
function handleReport(
address reporter,
int64[] memory /* data */, uint64 classification,
uint addedTime, address originalAuthor,
uint /* initialDeposit */, uint claimableAmount, bool claimedByReporter,
uint64 prediction,
uint numClaims)
public override onlyOwner
returns (uint rewardAmount) {
// `claimableAmount` should be 0.
rewardAmount = claimableAmount;
uint timeSinceAddedS = now - addedTime; // solium-disable-line security/no-block-members
require(
timeSinceAddedS >= refundWaitTimeS ||
timeSinceAddedS >= anyAddressClaimWaitTimeS ||
(timeSinceAddedS >= ownerClaimWaitTimeS && reporter == owner),
"Cannot be claimed yet.");
require(numClaims == 0, "Already claimed.");
require(reporter != originalAuthor, "Cannot report yourself.");
require(!claimedByReporter, "Already claimed by reporter.");
require(prediction != classification, "The model should not agree with the contribution.");
emit Report(reporter);
}
}
|
0xDeCA10B/demo/client/src/contracts/incentive/Points.sol/0
|
{
"file_path": "0xDeCA10B/demo/client/src/contracts/incentive/Points.sol",
"repo_id": "0xDeCA10B",
"token_count": 1526
}
| 3 |
const fs = require('fs')
const DensePerceptron = artifacts.require("./classification/DensePerceptron")
const NaiveBayesClassifier = artifacts.require("./classification/NaiveBayesClassifier")
const NearestCentroidClassifier = artifacts.require("./classification/NearestCentroidClassifier")
const SparseNearestCentroidClassifier = artifacts.require("./classification/SparseNearestCentroidClassifier")
const SparsePerceptron = artifacts.require("./classification/SparsePerceptron")
const { convertData, convertNum } = require('../float-utils-node')
const _toFloat = 1E9
async function deployDensePerceptron(model, web3, options) {
const { toFloat, initialChunkSize = 450, chunkSize = 450 } = options
let gasUsed = 0
const { classifications } = model
const weights = convertData(model.weights, web3, toFloat)
const intercept = convertNum(model.intercept || model.bias, web3, toFloat)
const learningRate = convertNum(model.learningRate || 1, web3, toFloat)
// TODO Handle feature indices.
if (model.featureIndices) {
throw new Error("featureIndices are not supported yet.")
}
console.log(` Deploying Dense Perceptron classifier with first ${Math.min(weights.length, initialChunkSize)} weights.`)
const classifierContract = await DensePerceptron.new(classifications, weights.slice(0, initialChunkSize), intercept, learningRate)
gasUsed += (await web3.eth.getTransactionReceipt(classifierContract.transactionHash)).gasUsed
// Add remaining weights.
for (let i = initialChunkSize; i < weights.length; i += chunkSize) {
// Do not parallelize so that weights are set in order.
const r = await classifierContract.initializeWeights(weights.slice(i, i + chunkSize))
console.debug(` Added classifier weights [${i}, ${Math.min(i + chunkSize, weights.length)}). gasUsed: ${r.receipt.gasUsed}`)
gasUsed += r.receipt.gasUsed
}
console.log(` Deployed Dense Perceptron classifier to ${classifierContract.address}. gasUsed: ${gasUsed}`)
return {
classifierContract,
gasUsed,
}
}
async function deploySparsePerceptron(model, web3, options) {
const { toFloat, initialChunkSize = 300, chunkSize = 300 } = options
const { classifications } = model
const weights = convertData(model.weights, web3, toFloat)
const intercept = convertNum(model.intercept || model.bias, web3, toFloat)
const learningRate = convertNum(model.learningRate || 1, web3, toFloat)
const sparseWeights = []
// TODO Handle feature indices.
if (model.featureIndices) {
throw new Error("featureIndices are not supported yet.")
}
if (typeof model.sparseWeights === 'object') {
for (let [featureIndexKey, weight] of Object.entries(model.sparseWeights)) {
const featureIndex = parseInt(featureIndexKey, 10)
sparseWeights.push([featureIndex, convertNum(weight, web3, toFloat)])
}
}
console.log(` Deploying Sparse Perceptron classifier with first ${Math.min(weights.length, initialChunkSize)} weights...`)
const classifierContract = await SparsePerceptron.new(classifications, weights.slice(0, initialChunkSize), intercept, learningRate)
let gasUsed = (await web3.eth.getTransactionReceipt(classifierContract.transactionHash)).gasUsed
console.log(` Deployed Sparse Perceptron classifier with first ${Math.min(weights.length, initialChunkSize)} weights. gasUsed: ${gasUsed}`)
// Add remaining weights.
for (let i = initialChunkSize; i < weights.length; i += chunkSize) {
const r = await classifierContract.initializeWeights(i, weights.slice(i, i + chunkSize))
console.debug(` Added classifier weights [${i}, ${Math.min(i + chunkSize, weights.length)}) gasUsed: ${r.receipt.gasUsed}`)
gasUsed += r.receipt.gasUsed
}
const sparseWeightsChunkSize = Math.round(chunkSize / 2)
for (let i = 0; i < sparseWeights.length; i += sparseWeightsChunkSize) {
const r = await classifierContract.initializeSparseWeights(
sparseWeights.slice(i, i + sparseWeightsChunkSize))
console.debug(` Added sparse classifier weights [${i},${Math.min(i + sparseWeightsChunkSize, sparseWeights.length)}) out of ${sparseWeights.length}. gasUsed: ${r.receipt.gasUsed}`)
gasUsed += r.receipt.gasUsed
}
console.log(` Deployed Sparse Perceptron classifier to ${classifierContract.address}. gasUsed: ${gasUsed}`)
return {
classifierContract,
gasUsed,
}
}
async function deployNearestCentroidClassifier(model, web3, options) {
const { toFloat } = options
let gasUsed = 0
const classifications = []
const centroids = []
const dataCounts = []
// TODO Allow chunking centroids.
console.log(" Deploying Dense Nearest Centroid Classifier model.")
let numDimensions = null
for (let [classification, centroidInfo] of Object.entries(model.centroids || model.intents)) {
classifications.push(classification)
centroids.push(convertData(centroidInfo.centroid, web3, toFloat))
dataCounts.push(centroidInfo.dataCount)
if (numDimensions === null) {
numDimensions = centroidInfo.centroid.length
} else {
if (centroidInfo.centroid.length !== numDimensions) {
throw new Error(`Found a centroid with ${centroidInfo.centroid.length} dimensions. Expected: ${numDimensions}.`)
}
}
}
const classifierContract = await NearestCentroidClassifier.new(
[classifications[0]], [centroids[0]], [dataCounts[0]]
)
gasUsed += (await web3.eth.getTransactionReceipt(classifierContract.transactionHash)).gasUsed
console.log(` Deployed classifier to ${classifierContract.address}. gasUsed: ${gasUsed}`)
// Add classes separately to avoid hitting gasLimit.
const addClassPromises = []
for (let i = 1; i < classifications.length; ++i) {
addClassPromises.push(classifierContract.addClass(centroids[i], classifications[i], dataCounts[i]))
}
return Promise.all(addClassPromises).then(responses => {
console.debug(" All classes added.")
for (const r of responses) {
gasUsed += r.receipt.gasUsed
}
return {
classifierContract,
gasUsed,
}
})
}
exports.deploySparseNearestCentroidClassifier = async function (model, web3, options) {
const { toFloat, initialChunkSize = 200, chunkSize = 250 } = options
let gasUsed = 0
const classifications = []
const centroids = []
const dataCounts = []
console.log(" Deploying Sparse Nearest Centroid Classifier model.")
for (let [classification, centroidInfo] of Object.entries(model.centroids || model.intents)) {
classifications.push(classification)
const centroid = Object.entries(centroidInfo.centroid).map(([featureIndex, value]) => [parseInt(featureIndex, 10), convertNum(value, web3, toFloat)])
centroids.push(centroid)
dataCounts.push(centroidInfo.dataCount)
}
const classifierContract = await SparseNearestCentroidClassifier.new(
[classifications[0]], [centroids[0].slice(0, initialChunkSize)], [dataCounts[0]]
)
gasUsed += (await web3.eth.getTransactionReceipt(classifierContract.transactionHash)).gasUsed
console.log(` Deployed classifier to ${classifierContract.address}. gasUsed: ${gasUsed}`)
// Add classes separately to avoid hitting gasLimit.
const addClassPromises = []
for (let i = 1; i < classifications.length; ++i) {
addClassPromises.push(classifierContract.addClass(
centroids[i].slice(0, initialChunkSize), classifications[i], dataCounts[i]
).then(r => {
console.debug(` Added class ${i}. gasUsed: ${r.receipt.gasUsed}`)
return r
}))
}
return Promise.all(addClassPromises).then(async responses => {
console.debug(" All classes added.")
for (const r of responses) {
gasUsed += r.receipt.gasUsed
}
console.debug(" Adding remaining dimensions.")
for (let classification = 0; classification < classifications.length; ++classification) {
for (let j = initialChunkSize; j < centroids[classification].length; j += chunkSize) {
// Not parallel since order matters within each classification.
const r = await classifierContract.extendCentroid(
centroids[classification].slice(j, j + chunkSize), classification)
console.debug(` Added dimensions [${j}, ${Math.min(j + chunkSize, centroids[classification].length)}) for class ${classification}. gasUsed: ${r.receipt.gasUsed}`)
gasUsed += r.receipt.gasUsed
}
}
console.log(` Set all centroids. gasUsed: ${gasUsed}.`)
return {
classifierContract,
gasUsed,
}
})
}
async function deployNaiveBayes(model, web3, options) {
const { toFloat, initialChunkSize = 150, chunkSize = 350 } = options
let gasUsed = 0
const { classifications, classCounts, featureCounts, totalNumFeatures } = model
const smoothingFactor = convertNum(model.smoothingFactor, web3, toFloat)
console.log(` Deploying Naive Bayes classifier.`)
const classifierContract = await NaiveBayesClassifier.new([classifications[0]], [classCounts[0]], [featureCounts[0].slice(0, initialChunkSize)], totalNumFeatures, smoothingFactor)
gasUsed += (await web3.eth.getTransactionReceipt(classifierContract.transactionHash)).gasUsed
const addClassPromises = []
for (let i = 1; i < classifications.length; ++i) {
addClassPromises.push(classifierContract.addClass(
classCounts[i], featureCounts[i].slice(0, initialChunkSize), classifications[i]
).then(r => {
console.debug(` Added class ${i}. gasUsed: ${r.receipt.gasUsed}`)
return r
}))
}
return Promise.all(addClassPromises).then(async responses => {
for (const r of responses) {
gasUsed += r.receipt.gasUsed
}
        // Add remaining feature counts.
        // Done sequentially: running these in parallel produced unhelpful Truffle errors (e.g., network timeouts).
for (let classification = 0; classification < classifications.length; ++classification) {
for (let j = initialChunkSize; j < featureCounts[classification].length; j += chunkSize) {
const r = await classifierContract.initializeCounts(
featureCounts[classification].slice(j, j + chunkSize), classification
)
console.debug(` Added features [${j}, ${Math.min(j + chunkSize, featureCounts[classification].length)}) for class ${classification}. gasUsed: ${r.receipt.gasUsed}`)
gasUsed += r.receipt.gasUsed
}
}
console.debug(` Deployed all Naive Bayes classifier classes. gasUsed: ${gasUsed}.`)
return {
classifierContract,
gasUsed,
}
})
}
/**
* @param model A model object or a string for the path to a JSON model file.
 * @returns The contract for the model, an instance of `Classifier64`,
 * along with the total amount of gas used to deploy the model.
*/
exports.deployModel = async function (model, web3, options) {
if (typeof model === 'string') {
model = JSON.parse(fs.readFileSync(model, 'utf8'))
}
if (options.toFloat === undefined) {
options.toFloat = _toFloat
}
switch (model.type) {
case 'dense perceptron':
return deployDensePerceptron(model, web3, options)
case 'naive bayes':
return deployNaiveBayes(model, web3, options)
case 'dense nearest centroid classifier':
case 'nearest centroid classifier':
return deployNearestCentroidClassifier(model, web3, options)
case 'sparse nearest centroid classifier':
return exports.deploySparseNearestCentroidClassifier(model, web3, options)
case 'sparse perceptron':
return deploySparsePerceptron(model, web3, options)
default:
// Should not happen.
throw new Error(`Unrecognized model type: "${model.type}"`)
}
}
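// Example usage (a sketch; the model path and options shown are illustrative):
//   const { deployModel } = require('./deploy-model-node')
//   const { classifierContract, gasUsed } = await deployModel('./model.json', web3, { toFloat: 1E9 })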
|
0xDeCA10B/demo/client/src/ml-models/deploy-model-node.js/0
|
{
"file_path": "0xDeCA10B/demo/client/src/ml-models/deploy-model-node.js",
"repo_id": "0xDeCA10B",
"token_count": 3743
}
| 4 |
export default {
verified:
[
{
// Example
network: 'private',
address: '0x1b88938102bE9ED97a0e9b8Cb321dD89C60e86Ab'
},
]
}
|
0xDeCA10B/demo/client/src/safety/config.ts/0
|
{
"file_path": "0xDeCA10B/demo/client/src/safety/config.ts",
"repo_id": "0xDeCA10B",
"token_count": 83
}
| 5 |
const { convertNum } = require('../../../src/float-utils-node')
const { deploySparseNearestCentroidClassifier } = require('../../../src/ml-models/deploy-model-node')
const { assertCloseToNumbers, assertEqualNumbers } = require('../../float-test-utils-node')
contract('SparseNearestCentroidClassifier', function (accounts) {
const toFloat = 1E9
let classifier
function parseBN(num) {
if (web3.utils.isBN(num)) {
return num.toNumber()
} else {
assert.typeOf(num, 'number')
return num
}
}
function parseFloatBN(bn) {
assert(web3.utils.isBN(bn), `${bn} is not a BN`)
// Can't divide first since a BN can only be an integer.
try {
return bn.toNumber() / toFloat
} catch (err) {
console.error("Error converting %s", bn)
throw err
}
}
before("deploy classifier", async function () {
const model = {
intents: {
ALARM: {
centroid: { 0: +1 },
dataCount: 2,
},
WEATHER: {
centroid: { 1: +1 },
dataCount: 2
}
}
}
classifier = (await deploySparseNearestCentroidClassifier(model, web3, { toFloat })).classifierContract
})
it("...should get the classifications", function () {
const expectedClassifications = ["ALARM", "WEATHER"]
return classifier.getNumClassifications().then(parseBN).then(numClassifications => {
assert.equal(numClassifications, expectedClassifications.length, "Number of classifications is wrong")
let promises = expectedClassifications.map((_, i) => {
return classifier.classifications(i)
})
return Promise.all(promises).then(results => {
assert.deepEqual(results, expectedClassifications, "Wrong classifications")
})
})
})
it("...should get the squared magnitudes", async function () {
const squaredMagnitude0 = await classifier.getSquaredMagnitude(0)
let expected = web3.utils.toBN(toFloat).mul(web3.utils.toBN(toFloat))
assert(squaredMagnitude0.eq(expected), `${squaredMagnitude0} != ${expected}`)
const squaredMagnitude1 = await classifier.getSquaredMagnitude(1)
expected = web3.utils.toBN(toFloat).mul(web3.utils.toBN(toFloat))
assert(squaredMagnitude1.eq(expected), `${squaredMagnitude1} != ${expected}`)
})
it("...should predict the classification", async function () {
const data = [0]
const prediction = await classifier.predict(data)
assert.equal(prediction, 0, "Wrong classification")
})
it("...should predict the classification", async function () {
const data = [1]
const prediction = await classifier.predict(data)
assert.equal(prediction, 1, "Wrong classification")
})
it("...should train", async function () {
const data = [1, 2]
const classification = 1
const numDimensions = 3
const promises = []
for (let dimension = 0; dimension < numDimensions; ++dimension) {
promises.push(classifier.getCentroidValue(classification, dimension).then(parseFloatBN))
}
const originalCentroidValues = await Promise.all(promises)
const originalSquaredMagnitude = originalCentroidValues.reduce((prev, current) => {
return prev + current * current
}, 0)
assertEqualNumbers(await classifier.getSquaredMagnitude(classification), web3.utils.toBN(originalSquaredMagnitude).mul(web3.utils.toBN(toFloat)).mul(web3.utils.toBN(toFloat)), web3, "original squared magnitude")
let expectedUpdatedSquaredMagnitude = 0
const originalDataCount = await classifier.getNumSamples(classification).then(parseBN)
await classifier.update(data, classification)
return classifier.getNumSamples(classification).then(parseBN).then(async dataCount => {
assert.equal(dataCount, originalDataCount + 1, "Wrong data count.")
for (let dimension = 0; dimension < numDimensions; ++dimension) {
const v = await classifier.getCentroidValue(classification, dimension).then(parseFloatBN)
expectedUpdatedSquaredMagnitude += v * v
const update = data.indexOf(dimension) >= 0 ? 1 : 0
assert.closeTo(v, (originalCentroidValues[dimension] * originalDataCount + update) / dataCount, 1 / toFloat,
`value for centroid[${dimension}]`)
}
const updatedSquaredMagnitude = await classifier.getSquaredMagnitude(classification)
assertCloseToNumbers(updatedSquaredMagnitude, expectedUpdatedSquaredMagnitude * toFloat * toFloat, toFloat, web3, "updated squared magnitude")
})
})
it("...should train with updating non-zero centroid value", async function () {
const data = [1, 2]
const classification = 0
const numDimensions = 3
const promises = []
for (let dimension = 0; dimension < numDimensions; ++dimension) {
promises.push(classifier.getCentroidValue(classification, dimension).then(parseFloatBN))
}
const originalCentroidValues = await Promise.all(promises)
const originalSquaredMagnitude = originalCentroidValues.reduce((prev, current) => {
return prev + current * current
}, 0)
assertEqualNumbers(await classifier.getSquaredMagnitude(classification), web3.utils.toBN(originalSquaredMagnitude).mul(web3.utils.toBN(toFloat)).mul(web3.utils.toBN(toFloat)), web3, "original squared magnitude")
const originalDataCount = await classifier.getNumSamples(classification).then(parseBN)
await classifier.update(data, classification)
let expectedUpdatedSquaredMagnitude = 0
return classifier.getNumSamples(classification).then(parseBN).then(async dataCount => {
assert.equal(dataCount, originalDataCount + 1, "Wrong data count.")
for (let dimension = 0; dimension < numDimensions; ++dimension) {
const v = await classifier.getCentroidValue(classification, dimension).then(parseFloatBN)
expectedUpdatedSquaredMagnitude += v * v
const update = data.indexOf(dimension) >= 0 ? 1 : 0
assert.closeTo(v, (originalCentroidValues[dimension] * originalDataCount + update) / dataCount, 1 / toFloat,
`value for centroid[${dimension}]`)
}
assertCloseToNumbers(await classifier.getSquaredMagnitude(classification), expectedUpdatedSquaredMagnitude * toFloat * toFloat, toFloat, web3, "updated squared magnitude")
})
})
it("...should train with new feature", async function () {
const data = [4]
const classification = 1
const numDimensions = 5
const promises = []
for (let dimension = 0; dimension < numDimensions; ++dimension) {
promises.push(classifier.getCentroidValue(classification, dimension).then(parseFloatBN))
}
const originalCentroidValues = await Promise.all(promises)
const originalSquaredMagnitude = originalCentroidValues.reduce((prev, current) => {
return prev + current * current
}, 0)
assertCloseToNumbers(await classifier.getSquaredMagnitude(classification), web3.utils.toBN(originalSquaredMagnitude * toFloat * toFloat),
toFloat, web3, "original squared magnitude")
const originalDataCount = await classifier.getNumSamples(classification).then(parseBN)
await classifier.update(data, classification)
let expectedUpdatedSquaredMagnitude = 0
return classifier.getNumSamples(classification).then(parseBN).then(async dataCount => {
assert.equal(dataCount, originalDataCount + 1, "Wrong data count.")
for (let dimension = 0; dimension < numDimensions; ++dimension) {
const v = await classifier.getCentroidValue(classification, dimension).then(parseFloatBN)
expectedUpdatedSquaredMagnitude += v * v
const update = data.indexOf(dimension) >= 0 ? 1 : 0
assert.closeTo(v, (originalCentroidValues[dimension] * originalDataCount + update) / dataCount, 1 / toFloat,
`value for centroid[${dimension}]`)
}
assertCloseToNumbers(await classifier.getSquaredMagnitude(classification), expectedUpdatedSquaredMagnitude * toFloat * toFloat, toFloat, web3, "updated squared magnitude")
})
})
it("...should add class", async function () {
const centroid = [[0, 0], [1, 0], [2, +1]]
const newClassificationName = "NEW"
const dataCount = 2
const originalNumClassifications = await classifier.getNumClassifications().then(parseBN)
const info = await classifier.addClass(centroid.map(f => [f[0], convertNum(f[1], web3, toFloat)]), newClassificationName, dataCount)
const events = info.logs.filter(l => l.event == 'AddClass')
assert.lengthOf(events, 1)
const event = events[0]
assert.equal(event.args.name, newClassificationName)
const newClassificationIndex = parseBN(event.args.index)
assert.equal(newClassificationIndex, originalNumClassifications)
const newNumClassifications = await classifier.getNumClassifications().then(parseBN)
assert.equal(newNumClassifications, originalNumClassifications + 1)
const className = await classifier.classifications(newClassificationIndex)
assert.equal(className, newClassificationName)
const foundDataCount = await classifier.getNumSamples(newClassificationIndex).then(parseBN)
assert.equal(foundDataCount, dataCount)
})
it("...should extend centroid", async function () {
const classification = 0
const extension = [[5, 1.5], [7, 2.5]]
const originalCentroidValues = await Promise.all([...Array(3).keys()].map(dimension => {
return classifier.getCentroidValue(classification, dimension).then(parseFloatBN)
}))
const expectedCentroidValues = Array.prototype.concat(originalCentroidValues, [0, 0, 1.5, 0, 2.5])
await classifier.extendCentroid(extension.map(f => [f[0], convertNum(f[1], web3, toFloat)]), classification)
for (let dimension = 0; dimension < expectedCentroidValues.length; ++dimension) {
const v = await classifier.getCentroidValue(classification, dimension).then(parseFloatBN)
assert.closeTo(v, expectedCentroidValues[dimension], 1 / toFloat, `value for centroid[${dimension}]`)
}
})
})
|
0xDeCA10B/demo/client/test/contracts/classification/sparsenearestcentroidclassifier.js/0
|
{
"file_path": "0xDeCA10B/demo/client/test/contracts/classification/sparsenearestcentroidclassifier.js",
"repo_id": "0xDeCA10B",
"token_count": 3112
}
| 6 |
from decai.simulation.contract.classification.ncc import NearestCentroidClassifier
from decai.simulation.contract.classification.scikit_classifier import SciKitClassifierModule
class NearestCentroidClassifierModule(SciKitClassifierModule):
def __init__(self):
super().__init__(
_model_initializer=NearestCentroidClassifier)
|
0xDeCA10B/simulation/decai/simulation/contract/classification/ncc_module.py/0
|
{
"file_path": "0xDeCA10B/simulation/decai/simulation/contract/classification/ncc_module.py",
"repo_id": "0xDeCA10B",
"token_count": 114
}
| 7 |
from dataclasses import dataclass
from logging import Logger
from typing import List
import numpy as np
from injector import Binder, inject, Module
from decai.simulation.data.data_loader import DataLoader
@inject
@dataclass
class SimpleDataLoader(DataLoader):
"""
Load simple data for testing.
"""
_logger: Logger
def classifications(self) -> List[str]:
return ["0", "1"]
def load_data(self, train_size: int = None, test_size: int = None) -> (tuple, tuple):
def _ground_truth(data):
if data[0] * data[2] > 0:
return 1
else:
return 0
x_train = np.array([
[0, 0, 0],
[1, 1, 1],
[0, 0, 1],
[0, 1, 0],
[0, 1, 1],
[1, 0, 0],
[1, 0, 1],
[1, 1, 0],
[0, 0, 2],
[0, 2, 0],
[2, 0, 0],
[2, 0, 2],
[0, 0, -3],
[0, 3, 0],
[0, 3, -3],
[0, -3, 3],
[0, 0, 4],
[0, 4, 4],
[4, 0, 0],
[-6, 0, 0],
])
x_test = np.array([
[0, 2, 2],
[0, 1, -1],
[-1, 0, 0],
[0, -1, 0],
[1, -1, 2],
[0, 0, 3],
[0, -2, 0],
[0, 2, -2],
[3, 0, 0],
[-2, 0, 2],
[2, -2, 0],
])
if train_size is not None:
x_train = x_train[:train_size]
if test_size is not None:
x_test = x_test[:test_size]
y_train = [_ground_truth(x) for x in x_train]
y_test = [_ground_truth(x) for x in x_test]
return (x_train, y_train), (x_test, y_test)
class SimpleDataModule(Module):
"""
Set up a `DataLoader` mainly for testing.
"""
def configure(self, binder: Binder):
binder.bind(DataLoader, to=SimpleDataLoader)
|
0xDeCA10B/simulation/decai/simulation/data/simple_data_loader.py/0
|
{
"file_path": "0xDeCA10B/simulation/decai/simulation/data/simple_data_loader.py",
"repo_id": "0xDeCA10B",
"token_count": 1141
}
| 8 |
import json
import os
import random
import sys
from collections import Counter
from typing import cast
import math
import numpy as np
from injector import inject, Injector
from decai.simulation.contract.classification.classifier import Classifier
from decai.simulation.contract.classification.decision_tree import DecisionTreeModule
from decai.simulation.contract.collab_trainer import DefaultCollaborativeTrainerModule
from decai.simulation.contract.incentive.stakeable import StakeableImModule
from decai.simulation.data.data_loader import DataLoader
from decai.simulation.data.ttt_data_loader import TicTacToeDataModule, TicTacToeDataLoader
from decai.simulation.logging_module import LoggingModule
from decai.simulation.simulate import Agent, Simulator
# For `bokeh serve`.
sys.path.append(os.path.join(os.path.dirname(__file__), '../..'))
class Runner(object):
@inject
def __init__(self,
data: DataLoader,
simulator: Simulator,
):
self._data = data
self._s = simulator
def run(self):
init_train_data_portion = 0.10
# Set up the agents that will act in the simulation.
agents = [
# Good
Agent(address="Good",
start_balance=10_000,
mean_deposit=5,
stdev_deposit=1,
mean_update_wait_s=10 * 60,
),
# Malicious: determined with the goal of disrupting others.
Agent(address="Bad",
start_balance=10_000,
mean_deposit=10,
stdev_deposit=3,
mean_update_wait_s=1 * 60 * 60,
good=False,
),
]
# Start the simulation.
self._s.simulate(agents,
baseline_accuracy=0.44,
init_train_data_portion=init_train_data_portion,
accuracy_plot_wait_s=math.inf,
)
# Run with `bokeh serve PATH`.
if __name__.startswith('bk_script_'):
# Set up the data, model, and incentive mechanism.
inj = Injector([
DecisionTreeModule,
DefaultCollaborativeTrainerModule,
LoggingModule,
StakeableImModule,
TicTacToeDataModule,
])
inj.get(Runner).run()
def _map_pos(tic_tac_toe, board, pos):
assert 0 <= pos < board.size
return pos // tic_tac_toe.width, pos % tic_tac_toe.width
def play_game(classifier, tic_tac_toe):
board = np.zeros((tic_tac_toe.width, tic_tac_toe.length), dtype=np.int8)
if random.random() < 0.5:
# Machine is playing.
pos = classifier.predict(board.flatten())
board[_map_pos(tic_tac_toe, board, pos)] = 1
m = {0: '#', 1: 'O', -1: 'X'}
map_symbols = np.vectorize(lambda x: m[x])
def print_board(b):
print(np.array2string(map_symbols(b), formatter={'str_kind': lambda x: x}))
print(f"The machine is O. You are X.\nPositions:\n{np.arange(board.size).reshape(board.shape)}")
while True:
if np.count_nonzero(board) == board.size:
print("TIE")
break
# Person's turn.
print_board(board)
while True:
pos = input("Where would you like to go?")
pos = _map_pos(tic_tac_toe, board, int(pos.strip()))
if board[pos] == 0:
board[pos] = -1
break
else:
print("There is already a value there.")
winner = tic_tac_toe.get_winner(board)
if winner is not None:
print("You WIN!")
break
# Machine's turn.
original_pos = classifier.predict(board.flatten())
pos = _map_pos(tic_tac_toe, board, original_pos)
if board[pos] != 0:
print(f"Machine picked a spot that already has a marker ({original_pos}). This probably means a draw.")
print_board(board)
break
board[pos] = 1
winner = tic_tac_toe.get_winner(board)
if winner is not None:
print("You lose :(")
break
print_board(board)
def evaluate_on_self(classifier, tic_tac_toe):
print("Evaluating by playing against itself.")
def _run_game(board, next_player):
if next_player == -1:
# Flip the board since the bot always thinks it is 1.
board_for_prediction = -board
else:
board_for_prediction = board
pos = classifier.predict(board_for_prediction.flatten())
pos = _map_pos(tic_tac_toe, board, pos)
if board[pos] != 0:
return "TIE", np.count_nonzero(board == next_player)
board[pos] = next_player
if tic_tac_toe.get_winner(board):
return next_player, np.count_nonzero(board == next_player)
else:
return _run_game(board, -1 if next_player == 1 else 1)
# Start with empty board and let the model pick where to start.
board = np.zeros((tic_tac_toe.width, tic_tac_toe.length), dtype=np.int8)
winner, num_moves = _run_game(board, 1)
if winner == 1:
print(f"When model starts: WINS in {num_moves} moves.")
elif isinstance(winner, str):
print(f"When model starts: {winner} in {num_moves} moves.")
else:
print(f"When model starts: LOSES. Winner has {num_moves} moves.")
winners = Counter()
winner_move_counts = []
for start_pos in range(board.size):
board = np.zeros((tic_tac_toe.width, tic_tac_toe.length), dtype=np.int8)
board[_map_pos(tic_tac_toe, board, start_pos)] = -1
winner, num_moves = _run_game(board, 1)
winners[winner] += 1
winner_move_counts.append(num_moves)
print("Winners when -1 starts in each position:")
print(json.dumps(winners, indent=2))
print(f"Winner move counts:\n{winner_move_counts}")
print(f"Avg # winner moves: {np.average(winner_move_counts)}")
if __name__ == '__main__':
# Play the game.
inj = Injector([
DecisionTreeModule,
DefaultCollaborativeTrainerModule,
LoggingModule,
StakeableImModule,
TicTacToeDataModule,
])
ttt = inj.get(DataLoader)
assert isinstance(ttt, TicTacToeDataLoader)
ttt = cast(TicTacToeDataLoader, ttt)
# To train on all data.
# ttt._train_split = 1
(x_train, y_train), (x_test, y_test) = ttt.load_data()
c = inj.get(Classifier)
c.init_model(x_train, y_train)
score = c.evaluate(x_train, y_train)
print(f"Evaluation on training data: {score}")
if len(x_test) > 0:
score = c.evaluate(x_test, y_test)
print(f"Evaluation on test data: {score}")
evaluate_on_self(c, ttt)
while True:
play_game(c, ttt)
|
0xDeCA10B/simulation/decai/simulation/simulate_ttt_dt.py/0
|
{
"file_path": "0xDeCA10B/simulation/decai/simulation/simulate_ttt_dt.py",
"repo_id": "0xDeCA10B",
"token_count": 3151
}
| 9 |
Attribution 4.0 International
=======================================================================
Creative Commons Corporation ("Creative Commons") is not a law firm and
does not provide legal services or legal advice. Distribution of
Creative Commons public licenses does not create a lawyer-client or
other relationship. Creative Commons makes its licenses and related
information available on an "as-is" basis. Creative Commons gives no
warranties regarding its licenses, any material licensed under their
terms and conditions, or any related information. Creative Commons
disclaims all liability for damages resulting from their use to the
fullest extent possible.
Using Creative Commons Public Licenses
Creative Commons public licenses provide a standard set of terms and
conditions that creators and other rights holders may use to share
original works of authorship and other material subject to copyright
and certain other rights specified in the public license below. The
following considerations are for informational purposes only, are not
exhaustive, and do not form part of our licenses.
Considerations for licensors: Our public licenses are
intended for use by those authorized to give the public
permission to use material in ways otherwise restricted by
copyright and certain other rights. Our licenses are
irrevocable. Licensors should read and understand the terms
and conditions of the license they choose before applying it.
Licensors should also secure all rights necessary before
applying our licenses so that the public can reuse the
material as expected. Licensors should clearly mark any
material not subject to the license. This includes other CC-
licensed material, or material used under an exception or
limitation to copyright. More considerations for licensors:
wiki.creativecommons.org/Considerations_for_licensors
Considerations for the public: By using one of our public
licenses, a licensor grants the public permission to use the
licensed material under specified terms and conditions. If
the licensor's permission is not necessary for any reason--for
example, because of any applicable exception or limitation to
copyright--then that use is not regulated by the license. Our
licenses grant only permissions under copyright and certain
other rights that a licensor has authority to grant. Use of
the licensed material may still be restricted for other
reasons, including because others have copyright or other
rights in the material. A licensor may make special requests,
such as asking that all changes be marked or described.
Although not required by our licenses, you are encouraged to
respect those requests where reasonable. More_considerations
for the public:
wiki.creativecommons.org/Considerations_for_licensees
=======================================================================
Creative Commons Attribution 4.0 International Public License
By exercising the Licensed Rights (defined below), You accept and agree
to be bound by the terms and conditions of this Creative Commons
Attribution 4.0 International Public License ("Public License"). To the
extent this Public License may be interpreted as a contract, You are
granted the Licensed Rights in consideration of Your acceptance of
these terms and conditions, and the Licensor grants You such rights in
consideration of benefits the Licensor receives from making the
Licensed Material available under these terms and conditions.
Section 1 -- Definitions.
a. Adapted Material means material subject to Copyright and Similar
Rights that is derived from or based upon the Licensed Material
and in which the Licensed Material is translated, altered,
arranged, transformed, or otherwise modified in a manner requiring
permission under the Copyright and Similar Rights held by the
Licensor. For purposes of this Public License, where the Licensed
Material is a musical work, performance, or sound recording,
Adapted Material is always produced where the Licensed Material is
synched in timed relation with a moving image.
b. Adapter's License means the license You apply to Your Copyright
and Similar Rights in Your contributions to Adapted Material in
accordance with the terms and conditions of this Public License.
c. Copyright and Similar Rights means copyright and/or similar rights
closely related to copyright including, without limitation,
performance, broadcast, sound recording, and Sui Generis Database
Rights, without regard to how the rights are labeled or
categorized. For purposes of this Public License, the rights
specified in Section 2(b)(1)-(2) are not Copyright and Similar
Rights.
d. Effective Technological Measures means those measures that, in the
absence of proper authority, may not be circumvented under laws
fulfilling obligations under Article 11 of the WIPO Copyright
Treaty adopted on December 20, 1996, and/or similar international
agreements.
e. Exceptions and Limitations means fair use, fair dealing, and/or
any other exception or limitation to Copyright and Similar Rights
that applies to Your use of the Licensed Material.
f. Licensed Material means the artistic or literary work, database,
or other material to which the Licensor applied this Public
License.
g. Licensed Rights means the rights granted to You subject to the
terms and conditions of this Public License, which are limited to
all Copyright and Similar Rights that apply to Your use of the
Licensed Material and that the Licensor has authority to license.
h. Licensor means the individual(s) or entity(ies) granting rights
under this Public License.
i. Share means to provide material to the public by any means or
process that requires permission under the Licensed Rights, such
as reproduction, public display, public performance, distribution,
dissemination, communication, or importation, and to make material
available to the public including in ways that members of the
public may access the material from a place and at a time
individually chosen by them.
j. Sui Generis Database Rights means rights other than copyright
resulting from Directive 96/9/EC of the European Parliament and of
the Council of 11 March 1996 on the legal protection of databases,
as amended and/or succeeded, as well as other essentially
equivalent rights anywhere in the world.
k. You means the individual or entity exercising the Licensed Rights
under this Public License. Your has a corresponding meaning.
Section 2 -- Scope.
a. License grant.
1. Subject to the terms and conditions of this Public License,
the Licensor hereby grants You a worldwide, royalty-free,
non-sublicensable, non-exclusive, irrevocable license to
exercise the Licensed Rights in the Licensed Material to:
a. reproduce and Share the Licensed Material, in whole or
in part; and
b. produce, reproduce, and Share Adapted Material.
2. Exceptions and Limitations. For the avoidance of doubt, where
Exceptions and Limitations apply to Your use, this Public
License does not apply, and You do not need to comply with
its terms and conditions.
3. Term. The term of this Public License is specified in Section
6(a).
4. Media and formats; technical modifications allowed. The
Licensor authorizes You to exercise the Licensed Rights in
all media and formats whether now known or hereafter created,
and to make technical modifications necessary to do so. The
Licensor waives and/or agrees not to assert any right or
authority to forbid You from making technical modifications
necessary to exercise the Licensed Rights, including
technical modifications necessary to circumvent Effective
Technological Measures. For purposes of this Public License,
simply making modifications authorized by this Section 2(a)
(4) never produces Adapted Material.
5. Downstream recipients.
a. Offer from the Licensor -- Licensed Material. Every
recipient of the Licensed Material automatically
receives an offer from the Licensor to exercise the
Licensed Rights under the terms and conditions of this
Public License.
b. No downstream restrictions. You may not offer or impose
any additional or different terms or conditions on, or
apply any Effective Technological Measures to, the
Licensed Material if doing so restricts exercise of the
Licensed Rights by any recipient of the Licensed
Material.
6. No endorsement. Nothing in this Public License constitutes or
may be construed as permission to assert or imply that You
are, or that Your use of the Licensed Material is, connected
with, or sponsored, endorsed, or granted official status by,
the Licensor or others designated to receive attribution as
provided in Section 3(a)(1)(A)(i).
b. Other rights.
1. Moral rights, such as the right of integrity, are not
licensed under this Public License, nor are publicity,
privacy, and/or other similar personality rights; however, to
the extent possible, the Licensor waives and/or agrees not to
assert any such rights held by the Licensor to the limited
extent necessary to allow You to exercise the Licensed
Rights, but not otherwise.
2. Patent and trademark rights are not licensed under this
Public License.
3. To the extent possible, the Licensor waives any right to
collect royalties from You for the exercise of the Licensed
Rights, whether directly or through a collecting society
under any voluntary or waivable statutory or compulsory
licensing scheme. In all other cases the Licensor expressly
reserves any right to collect such royalties.
Section 3 -- License Conditions.
Your exercise of the Licensed Rights is expressly made subject to the
following conditions.
a. Attribution.
1. If You Share the Licensed Material (including in modified
form), You must:
a. retain the following if it is supplied by the Licensor
with the Licensed Material:
i. identification of the creator(s) of the Licensed
Material and any others designated to receive
attribution, in any reasonable manner requested by
the Licensor (including by pseudonym if
designated);
ii. a copyright notice;
iii. a notice that refers to this Public License;
iv. a notice that refers to the disclaimer of
warranties;
v. a URI or hyperlink to the Licensed Material to the
extent reasonably practicable;
b. indicate if You modified the Licensed Material and
retain an indication of any previous modifications; and
c. indicate the Licensed Material is licensed under this
Public License, and include the text of, or the URI or
hyperlink to, this Public License.
2. You may satisfy the conditions in Section 3(a)(1) in any
reasonable manner based on the medium, means, and context in
which You Share the Licensed Material. For example, it may be
reasonable to satisfy the conditions by providing a URI or
hyperlink to a resource that includes the required
information.
3. If requested by the Licensor, You must remove any of the
information required by Section 3(a)(1)(A) to the extent
reasonably practicable.
4. If You Share Adapted Material You produce, the Adapter's
License You apply must not prevent recipients of the Adapted
Material from complying with this Public License.
Section 4 -- Sui Generis Database Rights.
Where the Licensed Rights include Sui Generis Database Rights that
apply to Your use of the Licensed Material:
a. for the avoidance of doubt, Section 2(a)(1) grants You the right
to extract, reuse, reproduce, and Share all or a substantial
portion of the contents of the database;
b. if You include all or a substantial portion of the database
contents in a database in which You have Sui Generis Database
Rights, then the database in which You have Sui Generis Database
Rights (but not its individual contents) is Adapted Material; and
c. You must comply with the conditions in Section 3(a) if You Share
all or a substantial portion of the contents of the database.
For the avoidance of doubt, this Section 4 supplements and does not
replace Your obligations under this Public License where the Licensed
Rights include other Copyright and Similar Rights.
Section 5 -- Disclaimer of Warranties and Limitation of Liability.
a. UNLESS OTHERWISE SEPARATELY UNDERTAKEN BY THE LICENSOR, TO THE
EXTENT POSSIBLE, THE LICENSOR OFFERS THE LICENSED MATERIAL AS-IS
AND AS-AVAILABLE, AND MAKES NO REPRESENTATIONS OR WARRANTIES OF
ANY KIND CONCERNING THE LICENSED MATERIAL, WHETHER EXPRESS,
IMPLIED, STATUTORY, OR OTHER. THIS INCLUDES, WITHOUT LIMITATION,
WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR
PURPOSE, NON-INFRINGEMENT, ABSENCE OF LATENT OR OTHER DEFECTS,
ACCURACY, OR THE PRESENCE OR ABSENCE OF ERRORS, WHETHER OR NOT
KNOWN OR DISCOVERABLE. WHERE DISCLAIMERS OF WARRANTIES ARE NOT
ALLOWED IN FULL OR IN PART, THIS DISCLAIMER MAY NOT APPLY TO YOU.
b. TO THE EXTENT POSSIBLE, IN NO EVENT WILL THE LICENSOR BE LIABLE
TO YOU ON ANY LEGAL THEORY (INCLUDING, WITHOUT LIMITATION,
NEGLIGENCE) OR OTHERWISE FOR ANY DIRECT, SPECIAL, INDIRECT,
INCIDENTAL, CONSEQUENTIAL, PUNITIVE, EXEMPLARY, OR OTHER LOSSES,
COSTS, EXPENSES, OR DAMAGES ARISING OUT OF THIS PUBLIC LICENSE OR
USE OF THE LICENSED MATERIAL, EVEN IF THE LICENSOR HAS BEEN
ADVISED OF THE POSSIBILITY OF SUCH LOSSES, COSTS, EXPENSES, OR
DAMAGES. WHERE A LIMITATION OF LIABILITY IS NOT ALLOWED IN FULL OR
IN PART, THIS LIMITATION MAY NOT APPLY TO YOU.
c. The disclaimer of warranties and limitation of liability provided
above shall be interpreted in a manner that, to the extent
possible, most closely approximates an absolute disclaimer and
waiver of all liability.
Section 6 -- Term and Termination.
a. This Public License applies for the term of the Copyright and
Similar Rights licensed here. However, if You fail to comply with
this Public License, then Your rights under this Public License
terminate automatically.
b. Where Your right to use the Licensed Material has terminated under
Section 6(a), it reinstates:
1. automatically as of the date the violation is cured, provided
it is cured within 30 days of Your discovery of the
violation; or
2. upon express reinstatement by the Licensor.
For the avoidance of doubt, this Section 6(b) does not affect any
right the Licensor may have to seek remedies for Your violations
of this Public License.
c. For the avoidance of doubt, the Licensor may also offer the
Licensed Material under separate terms or conditions or stop
distributing the Licensed Material at any time; however, doing so
will not terminate this Public License.
d. Sections 1, 5, 6, 7, and 8 survive termination of this Public
License.
Section 7 -- Other Terms and Conditions.
a. The Licensor shall not be bound by any additional or different
terms or conditions communicated by You unless expressly agreed.
b. Any arrangements, understandings, or agreements regarding the
Licensed Material not stated herein are separate from and
independent of the terms and conditions of this Public License.
Section 8 -- Interpretation.
a. For the avoidance of doubt, this Public License does not, and
shall not be interpreted to, reduce, limit, restrict, or impose
conditions on any use of the Licensed Material that could lawfully
be made without permission under this Public License.
b. To the extent possible, if any provision of this Public License is
deemed unenforceable, it shall be automatically reformed to the
minimum extent necessary to make it enforceable. If the provision
cannot be reformed, it shall be severed from this Public License
without affecting the enforceability of the remaining terms and
conditions.
c. No term or condition of this Public License will be waived and no
failure to comply consented to unless expressly agreed to by the
Licensor.
d. Nothing in this Public License constitutes or may be interpreted
as a limitation upon, or waiver of, any privileges and immunities
that apply to the Licensor or You, including from the legal
processes of any jurisdiction or authority.
=======================================================================
Creative Commons is not a party to its public
licenses. Notwithstanding, Creative Commons may elect to apply one of
its public licenses to material it publishes and in those instances
will be considered the “Licensor.” The text of the Creative Commons
public licenses is dedicated to the public domain under the CC0 Public
Domain Dedication. Except for the limited purpose of indicating that
material is shared under a Creative Commons public license or as
otherwise permitted by the Creative Commons policies published at
creativecommons.org/policies, Creative Commons does not authorize the
use of the trademark "Creative Commons" or any other trademark or logo
of Creative Commons without its prior written consent including,
without limitation, in connection with any unauthorized modifications
to any of its public licenses or any other arrangements,
understandings, or agreements concerning use of licensed material. For
the avoidance of doubt, this paragraph does not form part of the
public licenses.
Creative Commons may be contacted at creativecommons.org.
|
AI-System/LICENSE/0
|
{
"file_path": "AI-System/LICENSE",
"repo_id": "AI-System",
"token_count": 5187
}
| 10 |
{
"initial_lr": {
"_type": "loguniform",
"_value": [1e-4, 0.1]
},
"weight_decay": {
"_type": "loguniform",
"_value": [1e-6, 1e-3]
},
"cutout": {
"_type": "choice",
"_value": [0, 8, 16]
},
"batch_size": {
"_type": "choice",
"_value": [64, 128, 256]
},
"epochs": {
"_type": "choice",
"_value": [100, 300, 600]
},
"optimizer": {
"_type": "choice",
"_value": ["adam", "rmsprop", "sgd"]
},
"model": {
"_type": "choice",
"_value": ["resnet18", "resnet50", "vgg16", "vgg16_bn", "densenet121", "squeezenet1_1",
"shufflenet_v2_x1_0", "mobilenet_v2", "resnext50_32x4d", "mnasnet1_0"]
},
"grad_clip": {
"_type": "loguniform",
"_value": [0.0, 5.0]
}
}
|
AI-System/Labs/AdvancedLabs/Lab8/hpo-answer/search_space.json/0
|
{
"file_path": "AI-System/Labs/AdvancedLabs/Lab8/hpo-answer/search_space.json",
"repo_id": "AI-System",
"token_count": 488
}
| 11 |
# BSD 3-Clause License
# Copyright (c) 2017,
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# This file has been changed for education and teaching purpose
from __future__ import print_function
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision
from torchvision import datasets, transforms
from torch.optim.lr_scheduler import StepLR
import numpy as np
import torchvision.models as models
from torch.utils.tensorboard import SummaryWriter
# default `log_dir` is "logs" - we'll be more specific here
writer = SummaryWriter('logs/mnist_experiment_1')
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 32, 3, 1)
self.conv2 = nn.Conv2d(32, 64, 3, 1)
self.dropout1 = nn.Dropout2d(0.25)
self.dropout2 = nn.Dropout2d(0.5)
self.fc1 = nn.Linear(9216, 128)
self.fc2 = nn.Linear(128, 10)
def forward(self, x):
x = self.conv1(x)
x = F.relu(x)
x = self.conv2(x)
x = F.relu(x)
x = F.max_pool2d(x, 2)
x = self.dropout1(x)
x = torch.flatten(x, 1)
x = self.fc1(x)
x = F.relu(x)
x = self.dropout2(x)
x = self.fc2(x)
output = F.log_softmax(x, dim=1)
return output
def train(args, model, device, train_loader, optimizer, epoch):
model.train()
# initialize the running loss for visualization
running_loss = 0.0
correct = 0.0
for batch_idx, (data, target) in enumerate(train_loader):
data, target = data.to(device), target.to(device)
optimizer.zero_grad()
output = model(data)
loss = F.nll_loss(output, target)
loss.backward()
optimizer.step()
# calculate training loss and accuracy
running_loss += loss.item()
pred = output.argmax(dim=1, keepdim=True) # get the index of the max log-probability
correct += pred.eq(target.view_as(pred)).sum().item()
if batch_idx % args.log_interval == 0:
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
epoch, batch_idx * len(data), len(train_loader.dataset),
100. * batch_idx / len(train_loader), loss.item()))
# log the running loss and accuracy
if batch_idx != 0:
global_step = (epoch - 1) * len(train_loader) + batch_idx
writer.add_scalar('Loss/train', running_loss / (args.batch_size * args.log_interval), global_step)
writer.add_scalar('Accuracy/train', 100. * correct / (args.batch_size * args.log_interval), global_step)
running_loss = 0.0
correct = 0.0
def test(model, device, test_loader):
model.eval()
test_loss = 0
correct = 0
with torch.no_grad():
for data, target in test_loader:
data, target = data.to(device), target.to(device)
output = model(data)
test_loss += F.nll_loss(output, target, reduction='sum').item() # sum up batch loss
pred = output.argmax(dim=1, keepdim=True) # get the index of the max log-probability
correct += pred.eq(target.view_as(pred)).sum().item()
test_loss /= len(test_loader.dataset)
print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
test_loss, correct, len(test_loader.dataset),
100. * correct / len(test_loader.dataset)))
def main():
# Training settings
parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
parser.add_argument('--batch-size', type=int, default=64, metavar='N',
help='input batch size for training (default: 64)')
parser.add_argument('--test-batch-size', type=int, default=1000, metavar='N',
help='input batch size for testing (default: 1000)')
parser.add_argument('--epochs', type=int, default=14, metavar='N',
help='number of epochs to train (default: 14)')
parser.add_argument('--lr', type=float, default=1.0, metavar='LR',
help='learning rate (default: 1.0)')
parser.add_argument('--gamma', type=float, default=0.7, metavar='M',
help='Learning rate step gamma (default: 0.7)')
parser.add_argument('--no-cuda', action='store_true', default=False,
help='disables CUDA training')
parser.add_argument('--seed', type=int, default=1, metavar='S',
help='random seed (default: 1)')
parser.add_argument('--log-interval', type=int, default=10, metavar='N',
help='how many batches to wait before logging training status')
parser.add_argument('--save-model', action='store_true', default=False,
help='For Saving the current Model')
args = parser.parse_args()
use_cuda = not args.no_cuda and torch.cuda.is_available()
torch.manual_seed(args.seed)
device = torch.device("cuda" if use_cuda else "cpu")
kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}
train_loader = torch.utils.data.DataLoader(
datasets.MNIST('../data', train=True, download=True,
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])),
batch_size=args.batch_size, shuffle=True, **kwargs)
test_loader = torch.utils.data.DataLoader(
datasets.MNIST('../data', train=False, transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])),
batch_size=args.test_batch_size, shuffle=True, **kwargs)
    # get some random training images
    dataiter = iter(train_loader)
    images, labels = next(dataiter)  # use next(); DataLoader iterators no longer expose .next()
images = images.to(device)
# show batch images
grid = torchvision.utils.make_grid(images)
writer.add_image('images', grid, 0)
model = Net().to(device)
optimizer = optim.Adadelta(model.parameters(), lr=args.lr)
# show model graph
writer.add_graph(model, images)
scheduler = StepLR(optimizer, step_size=1, gamma=args.gamma)
for epoch in range(1, args.epochs + 1):
train(args, model, device, train_loader, optimizer, epoch)
test(model, device, test_loader)
scheduler.step()
if args.save_model:
torch.save(model.state_dict(), "mnist_cnn.pt")
# close writer
writer.close()
if __name__ == '__main__':
main()
|
AI-System/Labs/BasicLabs/Lab1/mnist_tensorboard.py/0
|
{
"file_path": "AI-System/Labs/BasicLabs/Lab1/mnist_tensorboard.py",
"repo_id": "AI-System",
"token_count": 3374
}
| 12 |
from __future__ import print_function
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
from torch.optim.lr_scheduler import StepLR
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 32, 3, 1)
self.conv2 = nn.Conv2d(32, 64, 3, 1)
self.dropout1 = nn.Dropout(0.25)
self.dropout2 = nn.Dropout(0.5)
self.fc1 = nn.Linear(9216, 128)
self.fc2 = nn.Linear(128, 10)
def forward(self, x):
x = self.conv1(x)
x = F.relu(x)
x = self.conv2(x)
x = F.relu(x)
x = F.max_pool2d(x, 2)
x = self.dropout1(x)
x = torch.flatten(x, 1)
x = self.fc1(x)
x = F.relu(x)
x = self.dropout2(x)
x = self.fc2(x)
output = F.log_softmax(x, dim=1)
return output
def train(args, model, device, train_loader, optimizer, epoch):
model.train()
for batch_idx, (data, target) in enumerate(train_loader):
data, target = data.to(device), target.to(device)
optimizer.zero_grad()
output = model(data)
loss = F.nll_loss(output, target)
loss.backward()
optimizer.step()
if batch_idx % args.log_interval == 0:
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
epoch, batch_idx * len(data), len(train_loader.dataset),
100. * batch_idx / len(train_loader), loss.item()))
if args.dry_run:
break
def test(model, device, test_loader):
model.eval()
test_loss = 0
correct = 0
with torch.no_grad():
for data, target in test_loader:
data, target = data.to(device), target.to(device)
output = model(data)
test_loss += F.nll_loss(output, target, reduction='sum').item() # sum up batch loss
pred = output.argmax(dim=1, keepdim=True) # get the index of the max log-probability
correct += pred.eq(target.view_as(pred)).sum().item()
test_loss /= len(test_loader.dataset)
print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
test_loss, correct, len(test_loader.dataset),
100. * correct / len(test_loader.dataset)))
def main():
# Training settings
parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
parser.add_argument('--batch-size', type=int, default=64, metavar='N',
help='input batch size for training (default: 64)')
parser.add_argument('--test-batch-size', type=int, default=1000, metavar='N',
help='input batch size for testing (default: 1000)')
parser.add_argument('--epochs', type=int, default=14, metavar='N',
help='number of epochs to train (default: 14)')
parser.add_argument('--lr', type=float, default=1.0, metavar='LR',
help='learning rate (default: 1.0)')
parser.add_argument('--gamma', type=float, default=0.7, metavar='M',
help='Learning rate step gamma (default: 0.7)')
parser.add_argument('--no-cuda', action='store_true', default=False,
help='disables CUDA training')
parser.add_argument('--dry-run', action='store_true', default=False,
help='quickly check a single pass')
parser.add_argument('--seed', type=int, default=1, metavar='S',
help='random seed (default: 1)')
parser.add_argument('--log-interval', type=int, default=10, metavar='N',
help='how many batches to wait before logging training status')
parser.add_argument('--save-model', action='store_true', default=False,
help='For Saving the current Model')
args = parser.parse_args()
use_cuda = not args.no_cuda and torch.cuda.is_available()
torch.manual_seed(args.seed)
device = torch.device("cuda" if use_cuda else "cpu")
train_kwargs = {'batch_size': args.batch_size}
test_kwargs = {'batch_size': args.test_batch_size}
if use_cuda:
cuda_kwargs = {'num_workers': 1,
'pin_memory': True,
'shuffle': True}
train_kwargs.update(cuda_kwargs)
test_kwargs.update(cuda_kwargs)
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])
dataset1 = datasets.MNIST('../data', train=True, download=True,
transform=transform)
dataset2 = datasets.MNIST('../data', train=False,
transform=transform)
train_loader = torch.utils.data.DataLoader(dataset1,**train_kwargs)
test_loader = torch.utils.data.DataLoader(dataset2, **test_kwargs)
model = Net().to(device)
optimizer = optim.Adadelta(model.parameters(), lr=args.lr)
scheduler = StepLR(optimizer, step_size=1, gamma=args.gamma)
for epoch in range(1, args.epochs + 1):
train(args, model, device, train_loader, optimizer, epoch)
test(model, device, test_loader)
scheduler.step()
if args.save_model:
torch.save(model.state_dict(), "mnist_cnn.pt")
if __name__ == '__main__':
main()
|
AI-System/Labs/BasicLabs/Lab5/pytorch_mnist_basic.py/0
|
{
"file_path": "AI-System/Labs/BasicLabs/Lab5/pytorch_mnist_basic.py",
"repo_id": "AI-System",
"token_count": 2471
}
| 13 |
<!--Copyright © Microsoft Corporation. All rights reserved.
Licensed under the [License](https://github.com/microsoft/AI-System/blob/main/LICENSE)-->
# 10.2.3 Distributed Reinforcement Learning Frameworks
- [10.2.3 Distributed Reinforcement Learning Frameworks](#1023-distributed-reinforcement-learning-frameworks)
  - [RLlib](#rllib)
    - [Hierarchically Controlled Distributed Reinforcement Learning](#hierarchically-controlled-distributed-reinforcement-learning)
      - [Irregularity of Reinforcement Learning Computation Patterns](#irregularity-of-reinforcement-learning-computation-patterns)
      - [Current Practice and Solutions](#current-practice-and-solutions)
    - [A General Reinforcement Learning Paradigm](#a-general-reinforcement-learning-paradigm)
    - [Evaluating Reinforcement Learning Systems](#evaluating-reinforcement-learning-systems)
    - [Summary](#summary)
    - [Questions to Consider](#questions-to-consider)
  - [Summary and Discussion](#summary-and-discussion)
  - [References](#references)
In the previous section we discussed the challenges and requirements that distributed reinforcement learning frameworks face. In this section we go a step further and analyze, through examples, some frameworks at the frontier of the field.
The number of reinforcement learning frameworks on GitHub is enormous; we can roughly group them by the functionality they support:
- Support for certain specific environments/simulators. For example, gym[<sup>[2]</sup>](#gym) supports a set of simple 2D games (such as Atari). ELF[<sup>[3]</sup>](#elf) supports a series of concurrent real-time strategy games and provides efficient C++ and Python interfaces. On the C++ side, ELF hosts multiple games in parallel with C++ multithreading to improve simulator efficiency; on the Python side, similarly, ELF returns game states in batches to raise simulator throughput.
- Support for general reinforcement learning algorithms. For example, toolkits such as Baselines[<sup>[4]</sup>](#baselines) implement a series of non-distributed RL algorithms.
- Support for distributed RL algorithms. For example, toolkits such as ACME[<sup>[5]</sup>](#acme) and RLlib[<sup>[1]</sup>](#rllib) all help users, to varying degrees, quickly deploy distributed RL algorithms across multiple nodes.
- Projects developed around a single algorithm, with no generality. For example, TorchBeast[<sup>[6]</sup>](#torchbeast) is an efficient implementation of IMPALA[<sup>[7]</sup>](#impala) that supports distributed deployment, but it supports no algorithm other than IMPALA.
The table below lists some representative, popular reinforcement learning platforms:
| Open-source framework | General algorithm interface | Built around environments | Distributed support | Popularity (GitHub stars) |
| ---- | ---- | ---- | ---- | ---- |
| ACME[<sup>[5]</sup>](#acme) + Reverb | ✔ | x | x | 2.1k |
| ELF[<sup>[3]</sup>](#elf) | x | ✔ | ✔ | 2k |
| Ray + RLlib[<sup>[1]</sup>](#rllib) | ✔ | x | ✔ | 16.4k |
| Gym[<sup>[2]</sup>](#gym) | x | ✔ | x | 24.5k |
| Baselines[<sup>[4]</sup>](#baselines) | ✔ | x | x | 11.6k |
| TorchBeast[<sup>[6]</sup>](#torchbeast) | x | x | ✔ | 553 |
| SEED RL[<sup>[14]</sup>](#seedrl) | x | x | ✔ | 617 |
| Tianshou[<sup>[15]</sup>](#tianshou) | ✔ | x | ? | 3.2k |
| Keras-RL[<sup>[16]</sup>](#keras-rl) | ✔ | x | x | 5.1k |
As the table shows, user demand for general-purpose RL frameworks is strong, and among them, frameworks and systems that support distributed execution attract even more users. This hints that future RL frameworks will evolve toward generality, scalability, and large-scale distribution.
Beyond this, RL frameworks can also be classified by whether they support multi-agent tasks. For example, MARO[<sup>[8]</sup>](#maro) is a multi-agent resource optimization platform that can be applied to real-world resource optimization.
Taking some of the most popular classic platforms from the table above as examples, we walk through the papers behind these systems, discuss some of the considerations in their system design, and hope those considerations can provide reference and inspiration for future designs.
## RLlib
When RLlib[<sup>[1]</sup>](#rllib) was designed, there were not yet many unified reinforcement learning platforms to draw on.
Although great progress had been made on systems and abstractions for deep learning (e.g., TensorFlow[<sup>[9]</sup>](#tensorflow), PyTorch[<sup>[10]</sup>](#pytorch)), progress on systems and abstractions for reinforcement learning was comparatively limited. Nevertheless, many of the challenges in RL stem from the need to scale both learning and simulation while integrating a rapidly growing set of algorithms and models, so designing such a system was clearly necessary.
The main ideas RLlib proposed that distinguish it from other frameworks are:
- adopt top-down hierarchical control of distributed RL algorithms, so that parallel compute resources can be scheduled more effectively for these tasks;
- define a general RL paradigm that lets a whole family of RL algorithms achieve scalability and extensive code reuse.
Below we examine RLlib's details starting from these two points.
### Hierarchically Controlled Distributed Reinforcement Learning
1) Irregularity of reinforcement learning's computation patterns
The **computation patterns** of today's RL algorithms are highly diverse. As the table below shows, they have outgrown the computation models supported by today's popular distributed frameworks.
Depending on the algorithm, this irregularity arises at the following levels:
- The **duration and resource requirements** of different RL tasks differ by orders of magnitude; for example, an A3C update may take a few milliseconds, while other algorithms, such as PPO, operate at much coarser time granularity.
- The **communication patterns** of RL algorithms vary widely, from synchronous to asynchronous gradient optimization, to the multiple kinds of asynchronous tasks found in high-throughput off-policy algorithms such as Ape-X[<sup>[12]</sup>](#apex) and IMPALA[<sup>[7]</sup>](#impala).
- The **module composition** of RL algorithms is highly diverse. Because RL is combined with deep learning training, there are needs for hyperparameter tuning, or for nested computation that combines derivative-free and gradient-based optimization within a single algorithm. RL algorithms therefore often need to maintain and update a large amount of state, including policy parameters, replay buffers, and even external simulators.
The table below shows, along several dimensions, the huge gap between single-machine DQN and IMPALA+PBT on a large cluster.
| Dimension | Single-machine DQN | IMPALA+PBT on a large cluster |
| ---- | ---- | ---- |
| Duration of a single task | ~1 ms | minutes |
| Resources for a single task | 1 CPU | several CPUs and GPUs |
| Total resources required | 1 CPU | hundreds of CPUs and GPUs |
| Nesting depth | 1 level | >3 levels |
| Memory required | MBs | hundreds of GBs |
| Execution mode | synchronous | asynchronous, highly concurrent |
2) Current practice and solutions
Developers are therefore left to implement their algorithms with a hodgepodge of frameworks, including parameter servers, collective-communication primitives from MPI-like frameworks, task queues, and so on.
For more complex algorithms, the common practice is to build a custom distributed system in which processes compute and coordinate independently, with no central control. While this approach can reach high performance, it is costly to develop and evaluate, not only because the distributed program must be implemented and debugged, but also because the composition of these algorithms further complicates their implementation.
Furthermore, today's computation frameworks (e.g., Spark[<sup>[11]</sup>](#spark), MPI) generally assume regular computation patterns, and they lose performance when subtasks differ in duration, resource requirements, or nesting.
RLlib therefore aims for a single programming model that can satisfy all the requirements of RL training, achievable without giving up structured computation.
<div align="center">
<img src="./img/rllib_paper_11.png" ch="500" width="80%"/>
</div>
<div align=center>Figure 10.2.10 Most RL algorithms today are implemented as in (a). RLlib proposes a hierarchical control model (c), which extends (b) to support nesting and hyperparameter-tuning workloads in RL, simplifying and unifying the programming model used for implementation.</div>
<br />
For example, for each distributed RL algorithm, RLlib writes an equivalent algorithm that exhibits logically centralized program control (figure (b)). That is, instead of letting independently executing processes (A, B, C, and D in figure (a)) coordinate with one another (e.g., via RPC, shared memory, parameter servers, or collective communication), a single driver program (D in figures (b) and (c)) delegates the algorithm's subtasks to other processes for parallel execution. In this working mode, the worker processes A, B, and C passively hold state (such as policy or simulator state) but perform no computation until invoked by D. To support nested computation, the centralized control model is extended into a hierarchical delegation control model (figure (c)), which allows worker processes (e.g., B, C) to further delegate their own work (e.g., simulation, gradient computation) to their own sub-workers while executing a task.
Building an RL framework on such a logically centralized, hierarchical control model has the following important **advantages**:
- Equivalent algorithms are often easier to implement in practice, because the distributed control logic is fully encapsulated in a single process rather than spread over multiple concurrently executing processes.
- Separating algorithm components into distinct subroutines (e.g., performing convolutions, computing gradients versus computing the gradient of some policy's objective) enables code reuse across different execution modes. Subtasks with different resource requirements (CPU tasks or GPU tasks) can be placed on different machines, lowering compute cost.
- Distributed algorithms written in this model can be nested within one another seamlessly, satisfying the principle of encapsulated parallelism.
A concrete example illustrates the difference in programming models:
<div align="center">
<img src="./img/rllib_paper_4.png" ch="500" width="60%"/>
</div>
<div align=center>Figure 10.2.11 The difference between the distributed and hierarchically controlled programming models. Code highlighted in the same color can be grouped as modules serving the same purpose.</div>
<br />
As Figure 10.2.11 shows, composing distributed hyperparameter search with functions that are themselves distributed involves complex nested parallel computation patterns. Using MPI (a) as the underlying layer for parallelizing RL algorithm code requires custom code changes to adapt each algorithm, which limits how quickly new distributed RL algorithms can be developed. With hierarchical control (b), the components can remain unchanged and are simply invoked as remote tasks.
Although the example in Figure 10.2.11 is simple, algorithms such as HyperBand and PBT, which require long-running, fine-grained hyperparameter tuning, increasingly need fine-grained control over training.
RLlib therefore builds its library of RL algorithms with hierarchical control and logical centralization on top of a flexible task-based programming model (such as Ray[<sup>[13]</sup>](#ray)). A task-based system allows subroutines to be scheduled and executed asynchronously on worker processes at a fine granularity, and allows results to be retrieved or passed between processes; a minimal sketch follows.
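To make the hierarchical control idea concrete, below is a small sketch in the spirit of Ray's task model (assuming the `ray` package; the `RolloutWorker` class and its methods are illustrative stand-ins, not RLlib's actual API):

```python
import ray

ray.init()

@ray.remote
class RolloutWorker:
    """Passively holds simulator/policy state; computes only when the driver calls it."""
    def __init__(self, env_name):
        self.env_name = env_name
        self.weights = None

    def set_weights(self, weights):
        self.weights = weights

    def sample(self, num_steps):
        # Placeholder rollout: a real worker would step a simulator here.
        return [0.0] * num_steps

# Logically centralized driver: it delegates subtasks; workers never coordinate directly.
workers = [RolloutWorker.remote("CartPole-v1") for _ in range(4)]
weights = {"w": 0.0}
for iteration in range(10):
    ray.get([w.set_weights.remote(weights) for w in workers])   # broadcast parameters
    batches = ray.get([w.sample.remote(100) for w in workers])  # parallel rollouts
    weights = {"w": weights["w"] + 0.01 * len(batches)}          # toy "update" step
```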
### A General Reinforcement Learning Paradigm
Because the differences between RL algorithms are large, before RLlib there was hardly any framework with a unified RL interface. Most code was therefore poorly reusable, making RL development and learning costly.
RLlib abstracts the agent in an RL algorithm into two main parts: the algorithm-specific policy and the algorithm-agnostic policy optimizer. The policy comprises a policy graph and a policy model. The policy graph defines how the algorithm explores and exploits and how the model is trained with sampled data, while the policy model defines the network structure of the algorithm's model. Both parts support user customization. The policy optimizer is the algorithm-independent part; it is responsible for performance-critical tasks such as distributed sampling, parameter updates, and managing the replay buffer.
Abstracting the policy optimizer this way has the following **advantages**:
By separating the execution strategy from the definition of the policy and optimizer functions, a variety of optimizers can be swapped in to exploit different hardware and algorithmic characteristics without changing the rest of the algorithm. The policy graph class encapsulates interaction with the deep learning framework, so users avoid mixing distributed-systems code with numerical computation, and optimizer implementations can be improved and reused across different deep learning frameworks.
<div align="center">
<img src="./img/rllib_paper_6.png" ch="500"/>
</div>
<div align=center>Figure 10.2.13 Pseudocode of the step methods of four RLlib policy optimizers. Each time the optimization function is invoked, it runs over the local policy graph and an array of remote evaluator replicas. Ray remote-execution calls are highlighted in orange, and other Ray calls in blue.</div>
<br />
As Figure 10.2.13 shows, by exploiting centralized control, the policy optimizer concisely abstracts several of the choices in RL optimization: synchronous versus asynchronous execution, all-reduce versus parameter server, and the use of GPUs versus CPUs.
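As a rough illustration of one such variant (not RLlib's real classes; `local_graph`, `evaluators`, and their methods are assumed interfaces), a synchronous all-reduce-style step might look like:

```python
import ray

def sync_allreduce_step(local_graph, evaluators):
    """One optimizer step: sample in parallel, average gradients, apply locally.

    Assumes `local_graph` exposes get_weights/compute_gradients/apply_gradients,
    and `evaluators` are remote actors exposing set_weights()/sample().
    """
    weights = local_graph.get_weights()
    ray.get([ev.set_weights.remote(weights) for ev in evaluators])  # broadcast
    samples = ray.get([ev.sample.remote() for ev in evaluators])    # parallel sampling
    grads = [local_graph.compute_gradients(batch) for batch in samples]
    avg_grad = [sum(g) / len(g) for g in zip(*grads)]  # element-wise average per layer
    local_graph.apply_gradients(avg_grad)
```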
### Evaluating Reinforcement Learning Systems
So, once the design of an RL system is complete, how should it be evaluated?
- Algorithm completeness.
RLlib currently supports most algorithm families, including DQN variants, policy-gradient methods, evolution strategies, AlphaGo-style training, and multi-agent algorithms. It also supports most RL modules and components, and supports users building on top of them.
- Performance efficiency.
  - Sampling efficiency, usually measured by how many samples can be collected per unit time.
RLlib benchmarks the scalability of sample collection across sampling processes.
<div align="center">
<img src="./img/rllib_paper_10.png" ch="500" width="80%"/>
</div>
<div align=center>Figure 10.2.14 Policy-evaluation throughput scales nearly linearly from 1 to 128 cores.</div>
<br />
- Whether large-scale tasks are supported effectively. This is usually an important indicator of whether a framework has efficient scalability.
RLlib was evaluated on the ES, PPO, and A3C algorithms using Redis, OpenMPI, and distributed TensorFlow, and compared against specialized systems purpose-built for those algorithms. The same hyperparameters were used in all experiments. The results show that, with the same resources, RLlib achieves better results than MPI-based implementations and the like.
<div align="center">
<img src="./img/rllib_paper_9.png" ch="500" width="75%"/>
</div>
<div align=center>Figure 10.2.15 Time required to reach a reward of 6000 on the Humanoid-v1 task. RLlib's implementations of ES and PPO outperform existing implementations.</div>
<br />
- Whether training itself scales, e.g., whether efficient multi-GPU training is supported.
RLlib measured, under the all-reduce and local multi-GPU optimizer modes, with 4 and 16 GPUs, on different tasks (Pong-v0 and Humanoid-v1), how much data per second the optimizer can process.
| Policy optimizer | Gradient compute resources | Task | SGD throughput per second |
| :------| ------: | :------: | :------: |
| All-reduce | 4 GPUs | Humanoid-v1 | 330,000 |
| All-reduce | 4 GPUs | Pong-v0 | 230,000 |
| All-reduce | 16 GPUs | Humanoid-v1 | 440,000 |
| All-reduce | 16 GPUs | Pong-v0 | 100,000 |
| Local multi-GPU | 4 GPUs | Humanoid-v1 | 2,100,000 |
| Local multi-GPU | 4 GPUs | Pong-v0 | N/A (does not fit in memory) |
| Local multi-GPU | 16 GPUs | Humanoid-v1 | 1,700,000 |
| Local multi-GPU | 16 GPUs | Pong-v0 | 150,000 |
The table shows that a dedicated multi-GPU policy optimizer outperforms all-reduce when the data fits entirely in GPU memory. In fact, different strategies perform better under different conditions, which suggests that the policy optimizer is a useful abstraction.
### Summary
RLlib is an open-source reinforcement learning framework that uses fine-grained nested parallelism to achieve optimal performance across a variety of RL tasks. It provides both a collection of standard RL algorithms and scalable interfaces for conveniently writing new ones. Even so, parts of RLlib still leave room for improvement, and much follow-up work has improved upon it.
### Questions to Consider
Which parts of RLlib's design are not ideal?
## Summary and Discussion
This section introduced distributed reinforcement learning frameworks, RLlib, hierarchically controlled distributed reinforcement learning, a general reinforcement learning paradigm, and the evaluation of reinforcement learning systems.
## References
<div id="rllib"></div>
1. Liang E, Liaw R, Nishihara R, et al. RLlib: Abstractions for distributed reinforcement learning[C]//International Conference on Machine Learning. PMLR, 2018: 3053-3062.
<div id="gym"></div>
2. Brockman G, Cheung V, Pettersson L, et al. OpenAI Gym[J]. arXiv preprint arXiv:1606.01540, 2016.
<div id="elf"></div>
3. Tian Y, Gong Q, Shang W, et al. ELF: An extensive, lightweight and flexible research platform for real-time strategy games[J]. Advances in Neural Information Processing Systems, 2017, 30.
<div id="baselines"></div>
4. https://github.com/openai/baselines
<div id="acme"></div>
5. Hoffman M, Shahriari B, Aslanides J, et al. Acme: A research framework for distributed reinforcement learning[J]. arXiv preprint arXiv:2006.00979, 2020.
<div id="torchbeast"></div>
6. Küttler H, Nardelli N, Lavril T, et al. TorchBeast: A PyTorch platform for distributed RL[J]. arXiv preprint arXiv:1910.03552, 2019.
<div id="impala"></div>
7. Espeholt L, Soyer H, Munos R, et al. IMPALA: Scalable distributed deep-RL with importance weighted actor-learner architectures[C]//International Conference on Machine Learning. PMLR, 2018: 1407-1416.
<div id="maro"></div>
8. https://github.com/microsoft/maro
<div id="tensorflow"></div>
9. Abadi M, Barham P, Chen J, et al. TensorFlow: A system for large-scale machine learning[C]//12th USENIX Symposium on Operating Systems Design and Implementation (OSDI 16). 2016: 265-283.
<div id="pytorch"></div>
10. Paszke A, Gross S, Massa F, et al. PyTorch: An imperative style, high-performance deep learning library[J]. Advances in Neural Information Processing Systems, 2019, 32.
<div id="spark"></div>
11. https://github.com/apache/spark
<div id="apex"></div>
12. Horgan D, Quan J, Budden D, et al. Distributed prioritized experience replay[J]. arXiv preprint arXiv:1803.00933, 2018.
<div id="ray"></div>
13. Moritz P, Nishihara R, Wang S, et al. Ray: A distributed framework for emerging AI applications[C]//13th USENIX Symposium on Operating Systems Design and Implementation (OSDI 18). 2018: 561-577.
<div id="seedrl"></div>
14. https://github.com/google-research/seed_rl
<div id="tianshou"></div>
15. https://github.com/thu-ml/tianshou
<div id="keras-rl"></div>
16. https://github.com/keras-rl/keras-rl
|
AI-System/Textbook/第10章-强化学习系统/10.2.3-分布式强化学习框架和应用.md/0
|
{
"file_path": "AI-System/Textbook/第10章-强化学习系统/10.2.3-分布式强化学习框架和应用.md",
"repo_id": "AI-System",
"token_count": 11645
}
| 14 |
<!--Copyright © Microsoft Corporation. All rights reserved.
Licensed under the [License](https://github.com/microsoft/AI-System/blob/main/LICENSE)-->
- [3.1 Deep Learning Frameworks Based on Dataflow Graphs](#31-deep-learning-frameworks-based-on-dataflow-graphs)
  - [3.1.1 An Overview of the Evolution of Deep Learning Frameworks](#311-an-overview-of-the-evolution-of-deep-learning-frameworks)
  - [3.1.2 Programming Paradigms: Declarative and Imperative](#312-programming-paradigms-declarative-and-imperative)
  - [3.1.3 Dataflow Graphs](#313-dataflow-graphs)
    - [Tensors and Tensor Operations](#tensors-and-tensor-operations)
    - [Automatic Differentiation Basics](#automatic-differentiation-basics)
      - [Forward-Mode Differentiation](#forward-mode-differentiation)
      - [Reverse-Mode Differentiation](#reverse-mode-differentiation)
    - [Automatic Differentiation on Dataflow Graphs](#automatic-differentiation-on-dataflow-graphs)
  - [3.1.4 Scheduling and Execution of Dataflow Graphs](#314-scheduling-and-execution-of-dataflow-graphs)
    - [Single-Device Inter-Operator Scheduling](#single-device-inter-operator-scheduling)
    - [Graph Partitioning and Multi-Device Execution](#graph-partitioning-and-multi-device-execution)
  - [3.1.5 Summary and Discussion](#315-summary-and-discussion)
  - [References](#references)
# 3.1 Deep Learning Frameworks Based on Dataflow Graphs
## 3.1.1 An Overview of the Evolution of Deep Learning Frameworks
Neural networks are a concrete algorithmic branch of machine learning. By stacking basic processing units into network topologies with width and depth, they correspond to highly complex non-convex functions that can fit the statistical regularities underlying all kinds of data distributions. Traditional machine learning methods, when facing different applications, often need to re-select the function space and design new learning objectives to achieve the desired results. In contrast, neural network methods can solve a wide range of application tasks from a fairly unified algorithm-design perspective, by adjusting the processing units that make up the network, the way those units are stacked, and the network's learning algorithm; this greatly reduces the difficulty of choosing among machine learning algorithm designs. At the same time, neural networks and deep learning methods can fit massive amounts of data and have achieved breakthrough progress on image classification, speech recognition, and natural language processing tasks. These successes revealed that learning from large-scale data by building ever larger neural networks is an effective learning strategy and direction.
However, developing deep neural network applications requires programming across every abstraction layer of the software stack, which places high demands on both the development efficiency of new algorithms and on compute power, and this in turn spurred the development of deep learning frameworks. A deep learning framework is a programmable system designed to train deep neural networks efficiently on accelerators and clusters; it must simultaneously balance three mutually constraining design goals:
1. **Programmability**: provide easy-to-use programming interfaces that describe, with high-level semantics, the computation and training algorithms of all mainstream deep learning models.
2. **Performance**: provide efficient implementations of reusable processing units; support multi-device and distributed computation.
3. **Extensibility**: lower the development cost of new models; when adding support for new hardware, lower the cost of adding compute primitives and performing compute optimizations.
Mainstream deep learning frameworks have gone through three main generations of development, and the choice of framework also shapes the optimizations it can adopt and the performance it can reach:
- The main driving force behind early deep learning tools was to improve the efficiency of researching and validating new neural network algorithms in the lab. Researchers began trying to run neural network training programs on the then-emerging graphics processing units (GPUs) or on clusters to accelerate the training of complex networks. Pioneering frameworks such as cuda-convnet[<sup>[1]</sup>](#convnet-1), Theano[<sup>[2]</sup>](#theano-2), and DistBelief[<sup>[3]</sup>](#distbelief-3) emerged. These early works defined the basic capabilities a deep learning framework must support, such as basic neural network compute units, automatic differentiation, and even compile-time optimization. The design ideas behind them deeply influenced today's mainstream frameworks, especially TensorFlow.
- The first generation of widely influential frameworks represented a deep neural network model as a stack of consecutive layers, with each layer registering both a forward computation and a gradient computation. The popular neural network algorithms of this period were still structurally simple, dominated by feed-forward networks such as deep fully connected networks and convolutional networks; open-source tools represented by Caffe[<sup>[4]</sup>](#caffe-4) and MXNet[<sup>[5]</sup>](#mxnet-5) appeared. The development style these frameworks offered was deeply bound to C/C++ programming interfaces, enabling more researchers to use the frameworks' infrastructure to quickly add high-performance new network layers and training algorithms, exploiting GPUs to raise training speed. These works further strengthened and validated the modular abstraction of neural network algorithms in system design, and promoted the further development of new network processing units and network structures.
- Earlier practice eventually gave birth to the second generation of industrial-grade deep learning frameworks, represented by TensorFlow[<sup>[6]</sup>](#tensorflow-6) and PyTorch[<sup>[9]</sup>](#pytorch-9), whose core abstracts and describes deep neural networks with dataflow graphs. This period was accompanied by many experimental projects, such as DyNet[<sup>[7]</sup>](#dynet-7) and Chainer[<sup>[8]</sup>](#chainer-8), that inspired framework design. TensorFlow and PyTorch represent two different design paths among today's frameworks: prioritize system performance and then improve flexibility, versus prioritize flexibility and ease of use and then improve system performance. As neural network research and applications developed further, these two choices gradually fragmented the solution space.
<p align="center">
<img src="img/frameworks-evolution.png" width=70%><br>
Figure 1. The evolution of deep learning frameworks
</p>
At the current stage, neural network model structures have become ever more varied, and a large number of framework designs showing a convergence of design choices have emerged, such as TensorFlow Eager[<sup>[10]</sup>](#tf-eager-10), TensorFlow AutoGraph[<sup>[12]</sup>](#auto-graph-12), PyTorch JIT, and JAX[<sup>[11]</sup>](#jax-11). These projects all adopt the approach of designing a domain-specific language (DSL): while improving the expressive power and programming flexibility for describing neural network algorithms, they use compile-time optimization techniques to improve runtime performance.
A deep neural network computation task usually spans two life-cycle phases: training and inference. The former runs on servers or clusters with ample compute and storage; the latter serves user requests and often runs in the cloud or at the edge, where resources are constrained and response-time requirements are stricter. Their demands on system design differ, giving rise to training frameworks for training and inference frameworks for deployment. [Chapter 8](https://github.com/microsoft/AI-System/blob/main/Textbook/%E7%AC%AC8%E7%AB%A0-%E6%B7%B1%E5%BA%A6%E5%AD%A6%E4%B9%A0%E6%8E%A8%E7%90%86%E7%B3%BB%E7%BB%9F/8-%E5%89%8D%E8%A8%80.md) covers inference systems in detail; in this chapter, we introduce training-framework design centered on the programming model, automatic differentiation, the runtime system that manages memory resources and schedules computation, and multi-device support.
## 3.1.2 Programming Paradigms: Declarative and Imperative
Deep learning frameworks offer frontend users two programming paradigms for defining neural network computation: declarative programming and imperative programming.
Under the declarative model, expressions in the frontend language are not executed directly; instead, a complete representation of the forward computation is first constructed, serialized, and sent to the backend system, which optimizes this representation of the computation before executing it. This is also called define-and-run, or static graphs. Under the imperative model, high-performance reusable backend modules are deeply integrated with the frontend via language bindings; the frontend language directly drives backend operator execution, and user expressions are evaluated immediately. This is also called define-by-run, or dynamic graphs.
Imperative programming has the advantages of easy debugging and high flexibility, but because there is no unified description of the algorithm before execution, it loses opportunities for compile-time optimization (for example, global optimization over the dataflow graph). By comparison, declarative programming imposes stronger static constraints on data and control flow, but because the full program description is available before execution, it creates the opportunity for ahead-of-time (AOT) optimization. Classic TensorFlow provides the declarative programming experience, while Chainer and PyTorch provide the imperative one. The boundary between the two models is not absolute, however: multi-stage programming and just-in-time (JIT) compilation can blend the two modes. With the arrival of TensorFlow Eager and PyTorch JIT, mainstream deep learning frameworks have all chosen to support hybrid programming to combine the strengths of both.
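A minimal sketch of the two styles using PyTorch (assuming PyTorch is installed): eager execution is define-by-run, while `torch.jit.script` captures a whole-program graph in a define-and-run spirit:

```python
import torch

# Imperative (define-by-run): each expression executes immediately.
x = torch.tensor(2.0)
y = torch.tensor(3.0)
z = x * y + torch.sin(x)   # evaluated right here
print(z)

# Declarative-style (define-and-run): capture the whole computation first,
# so the backend can optimize the graph before execution.
@torch.jit.script
def f(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
    return x * y + torch.sin(x)

print(f.graph)   # inspect the captured graph representation
print(f(x, y))   # execute the compiled graph
```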
## 3.1.3 Dataflow Graphs
To train a complex neural network efficiently, a framework must solve many problems, for example: how to implement automatic differentiation; how to use compile-time analysis to simplify, fuse, and transform the network's computation; how to plan the execution of basic compute units on accelerators; how to dispatch basic processing units to specific high-performance backend implementations; and how to pre-allocate and manage memory. The drive to solve these problems in a unified way pushes framework designers to provide a unified description for all kinds of neural network computation, so that before the computation runs, compile-time analysis can infer as much as possible about the entire process, complete the user program with the backward computation, and plan execution, thereby minimizing runtime overhead.
Mainstream deep learning frameworks all choose dataflow graphs to abstract neural network computation; Figure 2 shows the component breakdown of a dataflow-graph-based deep learning framework.
<p align="center">
<img src="img/framework-overview.png" width=35%><br>
Figure 2. Basic components of a dataflow-graph-based deep learning framework
</p>
A dataflow graph is a classic way of describing computation, widely used in scientific computing systems. To avoid circular dependencies when scheduling and executing a dataflow graph, it is usually a directed acyclic graph. In a deep learning framework, the nodes of the graph are primitive operations supported by the framework backend: they are stateless and side-effect-free, and a node's behavior is fully determined by its inputs and outputs; the edges between nodes explicitly represent the data dependencies between primitives. The left side of Figure 3 is a dataflow-graph instance for the expression $x*y+sin(x)$; the right side is the TensorFlow code that defines this dataflow graph. The circles in Figure 3 are the data flowing along the graph's edges, and the squares are the graph's basic operations.
<p align="center">
<img src="img/forward-computation-graph.png" width=40%><br>
Figure 3. A dataflow-graph instance for x*y+sin(x) and the TensorFlow code that defines it
</p>
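The graph-construction code the figure refers to is not reproduced in the text; the following sketch, written against the TensorFlow 1.x graph-mode API (an assumption of this example), shows what it would look like:

```python
import tensorflow as tf  # assumes the TensorFlow 1.x style graph mode

x = tf.placeholder(tf.float32, name="x")
y = tf.placeholder(tf.float32, name="y")
a = tf.multiply(x, y)       # node: x * y
b = tf.sin(x)               # node: sin(x)
z = tf.add(a, b)            # node: a + b

with tf.Session() as sess:  # the backend optimizes and executes the captured graph
    print(sess.run(z, feed_dict={x: 2.0, y: 3.0}))
```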
### Tensors and Tensor Operations
Let us look more closely at the concrete type of the data in a dataflow graph. In scientific computing tasks, data is often organized as a high-dimensional array, which in deep learning frameworks is called a tensor, a generalization of scalars, vectors, and matrices. The vast majority of a computation's time is spent in numerical operations over these high-dimensional arrays. High-dimensional arrays and the numerical computation over them are the core concern of neural networks, and this numerical computation constitutes the most important class of operation primitives in the dataflow graph: numerical computation over tensors. In this section we first consider the most commonly used dense arrays; computation over sparse arrays is covered in the chapter on sparsity.
A tensor, as the frontend user sees it, is defined by the following important attributes:
1. **Element data type**: within one tensor, all elements share the same data type (for example, 32-bit floating point).
1. **Shape**: a tensor is a high-dimensional array in which each dimension has a fixed length. A tensor's shape is a tuple of integers describing how many dimensions the tensor has and the length of each (for example, [224,224,3] is the shape of one ImageNet image, with three dimensions of lengths 224, 224, and 3).
1. **Device**: determines where the tensor is stored, such as a CPU (Central Processing Unit) or GPU (Graphics Processing Unit).
Scalars, vectors, and matrices are 0-, 1-, and 2-dimensional tensors, respectively. The left side of Figure 4 illustrates an integer tensor declared on the CPU with shape [5, 3]; the right side illustrates a floating-point tensor declared on the GPU with shape [8, 4, 4]. A code sketch of such declarations follows the figure.
<p align="center">
<img src="img/tensor.png" width=35%><br>
Figure 4. Illustrations of a 2-D tensor and a 3-D tensor
</p>
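A small sketch of such declarations in PyTorch (one possible frontend; the choice of PyTorch here is an assumption of this example):

```python
import torch

# The two tensors illustrated in Figure 4:
cpu_int = torch.zeros([5, 3], dtype=torch.int32, device="cpu")  # 2-D integer tensor on the CPU
print(cpu_int.dtype, cpu_int.shape, cpu_int.device)

if torch.cuda.is_available():  # guard so the sketch stays runnable without a GPU
    gpu_float = torch.zeros([8, 4, 4], dtype=torch.float32, device="cuda")
    print(gpu_float.dtype, gpu_float.shape, gpu_float.device)
```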
Tensors are container types over basic data types such as integers, floating-point numbers, booleans, and characters. The tensor type organizes data elements of the same type into a regular shape, giving users a logically intuitive way of organizing data. For example, image tasks usually organize an image as a three-dimensional tensor whose dimensions correspond to the image's height, width, and number of channels. In natural language processing tasks, a sentence is organized as a two-dimensional tensor whose dimensions correspond to the word-vector dimension and the sentence length. Multiple images or sentences simply require adding one more batch dimension to the tensor. This way of organizing data greatly improves the readability and programmability of neural network frontend programs: the frontend user only needs to reference elements via their logical storage addresses when describing computation, and the backend, when generating efficient implementations for tensor computation, automatically maps logical addresses to physical storage addresses. More importantly, tensor operations treat large numbers of homogeneous elements as a whole and operate on them in bulk, which usually implies high data parallelism, so tensor computation is very well suited to acceleration on SIMD (Single Instruction, Multiple Data) accelerators.
Specialized code optimization for numerical computation over high-dimensional arrays has a long research history in scientific and high-performance computing, tracing back to the early scientific computing language Fortran. Deep learning framework design naturally inherits tensors and tensor operations as the basic description units for constructing complex neural networks: frontend users can reuse backend-optimized tensor operations from the frontend scripting language without getting into backend implementation details, while kernel-library developers can isolate themselves from neural network algorithm details, treat tensor computation as an independent performance domain, and apply hardware-specific optimizations using lower-level programming models and languages.
We can now refine the edges and nodes of the computation graph: mainstream deep learning frameworks abstract neural network computation as a dataflow graph, also called a computation graph, whose nodes are the tensor-operation primitives supported by the backend and along whose edges tensors flow. The most important class of operations is numerical computation over tensors, which usually exhibits very high data parallelism and can be accelerated by hardware accelerators.
### Automatic Differentiation Basics
Training a neural network mainly involves three computational phases: forward computation, backward computation, and updating the learnable weights. When a user finishes constructing a deep neural network, the network mathematically corresponds to a complex, highly non-convex parameterized function, and solving for its learnable parameters relies on iterative updates based on first-order gradients. Hand-computing the first-order gradients of complex functions is very error-prone; automatic differentiation (auto-diff) is an automated method designed precisely to solve this problem. The problem automatic differentiation addresses is: given a complex program composed of atomic operations, automatically generate its gradient-computation program. By working mode, automatic differentiation divides into forward-mode and reverse-mode automatic differentiation. By implementation, it divides into forward-mode differentiation based on dual numbers, reverse-mode differentiation based on tapes, and reverse-mode differentiation based on source-code transformation. Deep learning systems rarely use forward mode; dual-number-based automatic differentiation is often implemented in programming-language-level autodiff systems[<sup>[14]</sup>](#Julia-autodiff-14); tape-based reverse differentiation is typically implemented in define-by-run, dynamic-graph deep learning systems represented by PyTorch; and source-transformation-based reverse differentiation is typically implemented in define-and-run, static-graph systems represented by TensorFlow.
Automatic differentiation is one of the core components of a deep learning framework. Before expanding on how deep learning frameworks implement automatic differentiation, let us first understand its basic principle through the simple example below.
**Example 3.1**: $z=x*y+sin(x)$ is a simple composite function; Figure 5 shows its expression tree.
<p align="center">
<img src="img/expression-tree-for-example.png" width=10%><br>
Figure 5. Expression tree for evaluating z
</p>
Given the composite function $z=x*y+sin(x)$, where $x$ and $y$ are both scalars, let us consider two questions: first, how would a computer program evaluate $z$ through a sequence of atomic operations; and second, how do we compute the gradients of $z$ with respect to $x$ and $y$? The first question is straightforward. To evaluate $z$, we can follow the computation order defined by the expression tree and decompose the composite function $z$ into the evaluation sequence (a.1) through (a.5). We call such an evaluation sequence, which computes outputs step by step from given inputs, the forward computation.
$$\begin{align}
x &= ? \tag{a.1}\\
y &= ? \tag{a.2}\\
a &= x * y \tag{a.3}\\
b &= sin(x) \tag{a.4}\\
z &= a+b \tag{a.5}
\end{align}$$
#### Forward-Mode Differentiation
To answer the question of how to compute the gradients of $z$ with respect to $x$ and $y$, we introduce a yet-unassigned variable $t$ and, by the chain rule for composite functions, differentiate each of the five expressions above with respect to $t$, in order from (a.1) to (a.5), obtaining the evaluation sequence (b.1) through (b.5):
$$
\begin{align}
\frac{\partial x}{\partial t} &= ? \tag{b.1}\\
\frac{\partial y}{\partial t} &= ? \tag{b.2}\\
\frac{\partial a}{\partial t} &= y *\frac{\partial x}{\partial t} + x * \frac{\partial y}{\partial t} \tag{b.3}\\
\frac{\partial b}{\partial t} &= cos(x) * \frac{\partial x}{\partial t} \tag{b.4}\\
\frac{\partial z}{\partial t} &= \frac{\partial a}{\partial t} + \frac{\partial b}{\partial t} \tag{b.5}\\
\end{align}
$$
Introducing the derivative variable $dxx \triangleq \frac{\partial xx}{\partial t}$, which denotes the derivative of $xx$ with respect to $t$, and setting $t=x$, substitution into (b.1) through (b.5) yields (c.1) through (c.5):
$$
\begin{align}
\text{dx} &= 1 \tag{c.1}\\
\text{dy} &= 0 \tag{c.2}\\
\text{da} &= y \tag{c.3}\\
\text{db} &= cos(x) \tag{c.4}\\
\text{dz} &= y + cos(x) \tag{c.5}\\
\end{align}
$$
Similarly, setting $t=y$ and substituting into (b.1) through (b.5) yields (d.1) through (d.5):
$$
\begin{align}
\text{dx} &= 0 \tag{d.1}\\
\text{dy} &= 1 \tag{d.2}\\
\text{da} &= x \tag{d.3}\\
\text{db} &= 0 \tag{d.4}\\
\text{dz} &= x \tag{d.5}\\
\end{align}
$$
Across the two passes (c.1)-(c.5) and (d.1)-(d.5) we can observe that computing outputs from given inputs (forward computation) and computing the derivatives of outputs with respect to inputs can proceed in exactly the same evaluation order; that is, the evaluation order of the derivative expressions coincides exactly with that of the forward expressions. Running (c.1)-(c.5) and (d.1)-(d.5) is called forward-mode differentiation.
Derivative computation usually depends on the results of the forward computation, and because forward mode computes derivatives in the same order as forward evaluation, it does not need to store intermediate forward results: derivatives can be computed alongside the forward pass, saving a large amount of memory. This is forward mode's great advantage, and exploiting this fact, forward-mode differentiation admits a simple and efficient implementation based on dual numbers[<sup>[14]</sup>](#Julia-autodiff-14). We can also observe that forward mode's time complexity is $O(n)$, where $n$ is the number of input variables. In the example above there are two input variables, so forward differentiation must run twice to compute the output's derivatives with respect to the inputs. In neural network learning, however, the number of input parameters $n$ is usually far greater than one; computing the derivatives of intermediate results and inputs with forward mode would require running the program many times, which is an important reason forward mode is, in most cases, hard to apply to neural network training. A small sketch of this mode follows.
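A minimal dual-number sketch of forward mode for Example 3.1 (illustrative plain Python, not any framework's implementation):

```python
import math

class Dual:
    """Dual number (value, derivative): carries d(value)/dt alongside the value."""
    def __init__(self, val, dot):
        self.val, self.dot = val, dot

    def __mul__(self, other):
        return Dual(self.val * other.val,
                    self.dot * other.val + self.val * other.dot)  # product rule

    def __add__(self, other):
        return Dual(self.val + other.val, self.dot + other.dot)

def dual_sin(u):
    return Dual(math.sin(u.val), math.cos(u.val) * u.dot)  # chain rule through sin

def z(x, y):
    return x * y + dual_sin(x)

# One forward pass per input: t = x gives dz/dx, t = y gives dz/dy.
print(z(Dual(2.0, 1.0), Dual(3.0, 0.0)).dot)  # dz/dx = y + cos(x)
print(z(Dual(2.0, 0.0), Dual(3.0, 1.0)).dot)  # dz/dy = x
```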
#### Reverse-Mode Differentiation
To overcome forward mode's limitation in algorithmic complexity and find a more efficient method for computing derivatives, we can look more closely at the chain rule: the chain rule is symmetric when computing derivatives. When computing the derivative $\frac{\partial \text{xx}}{\partial \text{x}}$ of a variable $xx$ with respect to $x$, the chain rule does not care which variable serves as the denominator and which as the numerator. We therefore introduce another yet-unassigned variable $s$, and rewrite the chain rule by swapping the numerators and denominators in expressions (b.1) through (b.5), obtaining (e.1) through (e.5):
$$
\begin{align}
\frac{\partial s}{\partial z} &= ? \tag{e.1}\\
\frac{\partial s}{\partial b} &= \frac{\partial s}{\partial z} * \frac{\partial z}{\partial b} = \frac{\partial s}{\partial z} \tag{e.2}\\
\frac{\partial s}{\partial a} &= \frac{\partial s}{\partial z} * \frac{\partial z}{\partial a} = \frac{\partial s}{\partial z} \tag{e.3}\\
\frac{\partial s}{\partial y} &= \frac{\partial s}{\partial a} * \frac{\partial a }{\partial y} = \frac{\partial s}{\partial a} * x \tag{e.4}\\
\frac{\partial s}{\partial x} &= \frac{\partial s}{\partial a} * \frac{\partial a}{\partial x} + \frac{\partial s}{\partial b} * \frac{\partial b}{\partial x} = \frac{\partial s}{\partial a} * y + \frac{\partial s}{\partial b} * \text{cos}(x) \tag{e.5}
\end{align}
$$
Introduce the derivative variable $gxx \triangleq \frac{\partial s}{\partial \text{xx}}$, denoting the derivative of $s$ with respect to $xx$, called the adjoint variable of $xx$. Rewriting (e.1) through (e.5) gives:
$$
\begin{align}
\text{g}z &= ? \tag{f.1}\\
\text{g}b &= \text{g}z \tag{f.2} \\
\text{g}a &= \text{g}z \tag{f.3}\\
\text{g}y &= \text{g}a * x \tag{f.4}\\
\text{g}x &= \text{g}a * y + \text{g}b * \text{cos}(x) \tag{f.5} \\
\end{align}
$$
Setting $s = z$, we obtain:
$$
\begin{align}
\text{g}z &= 1 \tag{g.1} \\
\text{g}b &= 1 \tag{g.2} \\
\text{g}a &= 1 \tag{g.3} \\
\text{g}y &= x \tag{g.4} \\
\text{g}x &= y + \text{cos}(x) \tag{g.5} \\
\end{align}
$$
The evaluation of expressions (g.1) through (g.5) is called reverse-mode differentiation. We can observe that, in exact contrast to forward mode, in reverse mode the derivatives of the variables are computed in the reverse of the forward evaluation order, and the running time complexity is $O(m)$, where $m$ is the number of output variables. In neural networks and many machine learning algorithms trained with first-order-gradient methods, no matter how many input variables there are, the model output is a single scalar function, also called the loss function. This means that, as long as all intermediate results of the forward computation are kept, a single additional run of the program suffices to compute, with reverse-mode differentiation, the derivatives of the loss function with respect to every intermediate variable and input. The reverse pass behaves much like popping a stack: the forward results of all intermediate layers of the network must be kept; the results of layers closer to the input are pushed onto the stack first and popped last during the backward computation. Clearly, the deeper the network, the more memory reverse-mode differentiation consumes, leaving a large memory footprint.
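The tape-based reverse mode described above can be sketched in a few lines. The `tape` list, `Var` class, and operator helpers below are illustrative; a real framework such as PyTorch records an equivalent structure per operation.

```python
# A minimal tape-based reverse-mode AD sketch for Example 3.1.
import math

tape = []  # records (output, [(input, local_gradient), ...]) per op

class Var:
    def __init__(self, val):
        self.val, self.grad = val, 0.0

def mul(a, b):
    out = Var(a.val * b.val)
    tape.append((out, [(a, b.val), (b, a.val)]))  # d(ab)/da = b, d(ab)/db = a
    return out

def sin(a):
    out = Var(math.sin(a.val))
    tape.append((out, [(a, math.cos(a.val))]))
    return out

def add(a, b):
    out = Var(a.val + b.val)
    tape.append((out, [(a, 1.0), (b, 1.0)]))
    return out

x, y = Var(2.0), Var(3.0)
z = add(mul(x, y), sin(x))      # the forward pass records the tape

z.grad = 1.0                    # seed gz = 1, as in (g.1)
for out, inputs in reversed(tape):   # walk the tape in reverse order
    for inp, local_grad in inputs:
        inp.grad += out.grad * local_grad

print(x.grad, y.grad)  # y + cos(x) and x, matching (g.5) and (g.4)
```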
At this point, let us summarize the two modes of automatic differentiation:
1. Forward mode has time complexity $O(n)$, where $n$ is the number of input variables; reverse mode has time complexity $O(m)$, where $m$ is the number of output variables. When $n<m$, forward mode has lower complexity; when $n>m$, reverse mode has lower complexity. Since neural network training always outputs a scalar loss, $m=1$ and reverse mode is better suited to neural network training.
2. When $n=m$, the two modes have the same time complexity. In forward mode, however, derivative computation can be fused with the forward computation in a single pass, so no intermediate results need to be stored; in concrete, efficient implementations this also yields better locality when accessing storage devices, so forward mode has the advantage.
3. Although reverse mode is more reasonable for training a network as a whole in the vast majority of cases, applying forward mode to local sub-networks is still possible.
### Automatic Differentiation on Dataflow Graphs
Although Example 3.1 in the [Automatic Differentiation Basics](#automatic-differentiation-basics) section used scalar expressions to show the difference between reverse-mode and forward-mode differentiation, this does not prevent us from understanding how reverse-mode differentiation works in a real tensor-based neural network training process. In real training, every basic expression in the example above can be understood as a node in the dataflow graph; the node generalizes the scalar expression of Example 3.1 to tensors and corresponds to a tensor operation supported by the framework back end.
Suppose $\mathbf{Y} = G(\mathbf{X})$ is a basic differentiable primitive, where $\mathbf{Y} = [y_1\ \cdots \ y_m]$ and $\mathbf{X}=[x_1 \ \cdots \ x_n]$ are both vectors. The derivative of $\mathbf{Y}$ with respect to $\mathbf{X}$ is then no longer a scalar but the Jacobian matrix $J$ of partial derivatives:
$$
J = \left[\frac{\partial \mathbf{Y}}{\partial x_1}, \cdots, \frac{\partial \mathbf{Y}}{\partial x_n} \right] = \begin{bmatrix}
\frac{\partial y_1}{\partial x_1} \quad \cdots \quad \frac{\partial y_1}{\partial x_n} \\
\vdots \quad \ddots \quad \vdots \\
\frac{\partial y_m}{\partial x_1} \quad \cdots \quad \frac{\partial y_m}{\partial x_n}
\end{bmatrix}
$$
In the backward pass of the back-propagation algorithm (which is also the backward pass of reverse-mode differentiation), the intermediate layer $\mathbf{Y} = G(\mathbf{X})$ receives the derivative of the loss function with respect to the layer's output: $\mathbf{v} = \frac{\partial l}{\partial \mathbf{Y}} = \left[\frac{\partial l}{\partial y_1} \ \cdots \ \frac{\partial l}{\partial y_m} \right]$, and then multiplies this derivative by the Jacobian $J$ of the layer's output with respect to its input. The result of this multiplication is the vector-Jacobian product, itself a vector. Storing Jacobian matrices directly during back propagation would consume a great deal of memory; storing only the vector-Jacobian products instead reduces storage without affecting the derivative computation. Therefore, when implementing automatic differentiation, deep learning frameworks store the vector-Jacobian product for each intermediate layer rather than the Jacobian matrix.
$$
\mathbf{v} \cdot J = \begin{bmatrix}
\frac{\partial l}{\partial y_1} \cdots \frac{\partial l}{\partial y_m}
\end{bmatrix} \begin{bmatrix}
\frac{\partial y_1}{\partial x_1} \quad \cdots \quad \frac{\partial y_1}{\partial x_n} \\
\vdots \quad \ddots \quad \vdots \\
\frac{\partial y_m}{\partial x_1} \quad \cdots \quad \frac{\partial y_m}{\partial x_n}
\end{bmatrix} = \begin{bmatrix}
\frac{\partial l}{\partial x_1} \cdots \frac{\partial l}{\partial x_n}
\end{bmatrix}
$$
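As a concrete illustration of the vector-Jacobian product, the following sketch uses PyTorch's autograd, whose backward pass propagates exactly $\mathbf{v} \cdot J$ per operation; the shapes and functions here are arbitrary examples.

```python
# Computing a vector-Jacobian product without materializing the Jacobian.
import torch

x = torch.randn(4, requires_grad=True)
y = torch.sin(x) * 2.0          # an intermediate layer Y = G(X), m = n = 4

v = torch.ones_like(y)          # v = dl/dY arriving from downstream
# grad_outputs=v asks autograd for v.J directly: a length-4 vector,
# not the 4x4 Jacobian matrix.
vjp, = torch.autograd.grad(y, x, grad_outputs=v)
print(vjp)                      # equals v * 2*cos(x) elementwise here
```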
We continue with the forward dataflow graph shown in Figure 3 as an example; Figure 6 completes the corresponding backward-operation dataflow graph. Like the forward graph, every node in the backward dataflow graph is a stateless tensor operation; a node's incoming edges represent the operation's inputs and its outgoing edges represent the operation's outputs. Every differentiable tensor operation in the dataflow graph registers both a forward computation and a backward (derivative) computation when implemented. The forward node takes inputs and computes outputs; the backward node takes the gradient $\mathbf{v}$ of the loss function with respect to the operation's output, together with the operation's inputs and outputs, and computes the vector-Jacobian product for each input of the operation.
<p align="center">
<img src="img/backward-computation-graph.png" width=20%><br>
图6. Figure 6. An example backward-computation dataflow graph
</p>
From Figure 6 we can observe that the forward and backward dataflow graphs have exactly the same structure, differing only in the direction of data flow. Moreover, since gradients usually depend on the inputs or results of the forward computation, the backward graph contains some extra edges from the input and output tensors of the forward graph to the derivative-computation nodes in the backward graph. In dataflow-graph-based deep learning frameworks, computing gradients with reverse-mode differentiation is usually implemented as an optimization pass over the dataflow graph: given the forward graph, a breadth-first traversal of the forward graph rooted at the loss function automatically generates the derivative dataflow graph following the dual structure.
## 3.1.4 Scheduling and Executing the Dataflow Graph
Training a neural network involves the following five phases: forward computation, backward computation, gradient clipping, applying regularization, and updating the learnable parameters. Gradient clipping and regularization may be skipped depending on whether the user has configured them.
```python
for batch in TrainDataset:
    phase 1: forward computation
    phase 2: backward computation
    phase 3: gradient clipping
    phase 4: apply regularization
    phase 5: update learnable parameters
```
In dataflow-graph-based deep learning frameworks, these five phases are uniformly represented as a dataflow graph composed of primitive operators; an operator is a node in the dataflow graph, implemented efficiently by the back end. The front-end user only needs to specify the forward computation; the framework automatically completes the remaining phases from the forward dataflow graph, generating the full graph. Training the neural network then corresponds to executing this dataflow graph. Operator scheduling determines the execution order of operators according to the data dependencies described by the graph, and the runtime system dispatches graph nodes to devices for execution.
### Inter-Operator Scheduling on a Single Device
In a single-device execution environment, the key constraint on scheduling the nodes of the dataflow graph is the dataflow dependency between nodes. In this case the runtime's scheduling policy is straightforward: initially, the runtime places all nodes with in-degree 0 into a FIFO (First-In-First-Out) ready queue, then pops a node from the ready queue and assigns it to a thread in the thread pool for execution. When the node finishes executing, its successor nodes are added to the ready queue. The runtime keeps processing nodes in the ready queue until the queue is empty. Taking TensorFlow's default scheduling policy as an example, the graph's nodes are classified into low-cost nodes (generally CPU-only glue nodes such as concatenation) and high-cost nodes (tensor-computation nodes). When a node from the ready queue is assigned to a thread in the thread pool for execution, that thread executes all reachable low-cost nodes in one go, and when it encounters a high-cost node it dispatches that node to another idle thread in the pool.
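The ready-queue policy described above can be sketched as a simple topological traversal. The graph encoding below (`successors` and `in_degree` dictionaries) is illustrative, and thread-pool dispatch is elided for brevity.

```python
# A minimal FIFO ready-queue scheduler over a dataflow graph.
from collections import deque

def schedule(successors, in_degree):
    ready = deque(n for n, d in in_degree.items() if d == 0)
    order = []
    while ready:
        node = ready.popleft()     # pop a ready node and "execute" it
        order.append(node)
        for succ in successors[node]:
            in_degree[succ] -= 1   # one input dependency satisfied
            if in_degree[succ] == 0:
                ready.append(succ) # all inputs ready: enqueue successor
    return order

# The simple graph of Example 3.1: x -> {a, b}, y -> a, {a, b} -> z
succ = {"x": ["a", "b"], "y": ["a"], "a": ["z"], "b": ["z"], "z": []}
indeg = {"x": 0, "y": 0, "a": 2, "b": 1, "z": 2}
print(schedule(succ, indeg))  # one valid execution order
```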
<p align="center">
<img src="img/op-schedule.png" width=50%><br>
图7. Figure 7. A serial execution schedule of a simple dataflow graph
</p>
Figure 7 shows one possible schedule for executing the dataflow graph corresponding to Example 3.2 under the dataflow constraints.
### Graph Partitioning and Multi-Device Execution
For a simple neural network like the one in Example 3.2, only a serial schedule exists under dataflow dependencies. But many more complex neural network models contain multiple branches; a typical representative is GoogLeNet[<sup>[13]</sup>](#GoogLeNet-13). Figure 8 shows the Inception module that makes up GoogLeNet. If the back end has multiple compute devices, the runtime system tries, when scheduling the dataflow graph, to dispatch parallelizable operators onto parallel devices to improve the utilization of computing resources.
<p align="center">
<img src="img/inception-parallel-scheduling.png" width=50%><br>
图8. Figure 8. A basic execution strategy for the Inception module
</p>
To execute a dataflow graph in a multi-device environment, the runtime system must solve two problems: how to place the graph's nodes onto different devices, and how to manage cross-device data transfer:
1. **Dataflow graph partitioning**: given a dataflow graph, partition it and place the parts on multiple compute devices, so that each device owns a portion of the graph.
2. **Inserting cross-device communication**: after partitioning, the dataflow graph is split into several subgraphs, each placed on one device; some edges then have their head and tail placed on different devices. The runtime system automatically removes such edges and replaces each with a pair of *Send* and *Receive* operators that implement the cross-device data transfer. All implementation details of the transfer are hidden behind *Send* and *Receive*.
Figure 9 illustrates these two steps.
<p align="center">
<img src="img/op-schedule-multi-devices.png" width=40%><br>
图9. Figure 9. Dataflow graph partitioning and insertion of cross-device data transfer
</p>
In practice, partitioning and mapping a dataflow graph onto multiple devices well is a complex combinatorial optimization problem. With the aid of a cost model, it must estimate the time consumed by cross-device communication and how each operator's runtime on a device varies with the sizes of its input and output tensors, and finally, under dataflow-dependency constraints, balance the competing factors of parallel execution and data communication. Chapter 6 introduces strategy selection in parallel computing in more detail.
## 3.1.5 Summary and Discussion
Mainstream deep learning frameworks all adopt the dataflow graph as the high-level abstraction of neural network computation. The dataflow graph is a directed acyclic graph whose nodes are tensor-operation primitives supported by the framework back end; tensors flow along the edges between nodes, explicitly representing the data dependencies between them.
1. The dataflow graph describes the whole process of training a complex neural network in a unified way, giving the back-end system the opportunity to analyze the data dependencies of the entire computation before the program runs, and to improve runtime performance through graph simplification, memory optimization, and precomputed static inter-operator scheduling strategies.
2. Based on the dataflow-graph description, deep learning frameworks carve out three decoupled optimization layers: dataflow-graph optimization, runtime scheduling strategy, and operator optimization. When a new neural network architecture or training algorithm appears, it can be supported in three steps: (1) add new operators; (2) optimize the operators' kernel functions for different devices and different hyper-parameters; (3) register the operators and kernel functions, and let the runtime system dispatch to the desired implementation at run time.
3. When dataflow-graph-based frameworks were first designed, the hope was that decoupling these three optimization layers would speed up iteration of the deep learning software stack. However, as the computational scale of neural network models grows, more and more customized operators appear and the demand for multi-device support increases, and the abstraction boundaries between these three layers are frequently broken. We discuss this further in subsequent chapters.
# References
<div id="convnet-1"></div>
1. Cuda-convnet, High-performance C++/CUDA implementation of convolution of neural networks, [https://code.google.com/p/cuda-convnet/](https://code.google.com/p/cuda-convnet/)
<div id="theano-2"></div>
2. Al-Rfou, R., Alain, G., Almahairi, A., Angermueller, C., Bahdanau, D., Ballas, N., ... & Zhang, Y. (2016). [Theano: A Python framework for fast computation of mathematical expressions](https://arxiv.org/pdf/1605.02688.pdf). arXiv e-prints, arXiv-1605.
<div id="distbelief-3"></div>
3. Dean, J., Corrado, G., Monga, R., Chen, K., Devin, M., Mao, M., ... & Ng, A. (2012). [Large scale distributed deep networks](https://proceedings.neurips.cc/paper/2012/file/6aca97005c68f1206823815f66102863-Paper.pdf). Advances in neural information processing systems, 25.
<div id="caffe-4"></div>
4. Jia, Y., Shelhamer, E., Donahue, J., Karayev, S., Long, J., Girshick, R., ... & Darrell, T. (2014, November). [Caffe: Convolutional architecture for fast feature embedding](https://arxiv.org/pdf/1408.5093.pdf?ref=https://codemonkey.link). In Proceedings of the 22nd ACM international conference on Multimedia (pp. 675-678).
<div id="mxnet-5"></div>
5. Chen, T., Li, M., Li, Y., Lin, M., Wang, N., Wang, M., ... & Zhang, Z. (2015). [Mxnet: A flexible and efficient machine learning library for heterogeneous distributed systems](https://arxiv.org/pdf/1512.01274.pdf). arXiv preprint arXiv:1512.01274.
<div id="tensorflow-6"></div>
6. Abadi, M., Barham, P., Chen, J., Chen, Z., Davis, A., Dean, J., ... & Zheng, X. (2016). [TensorFlow: A System for Large-Scale Machine Learning](https://www.usenix.org/system/files/conference/osdi16/osdi16-abadi.pdf). In 12th USENIX symposium on operating systems design and implementation (OSDI 16) (pp. 265-283).
<div id="dynet-7"></div>
7. Neubig, G., Dyer, C., Goldberg, Y., Matthews, A., Ammar, W., Anastasopoulos, A., ... & Yin, P. (2017). [Dynet: The dynamic neural network toolkit](https://arxiv.org/pdf/1701.03980.pdf). arXiv preprint arXiv:1701.03980.
<div id="chainer-8"></div>
8. Tokui, S., Oono, K., Hido, S., & Clayton, J. (2015, December). [Chainer: a next-generation open source framework for deep learning](http://learningsys.org/papers/LearningSys_2015_paper_33.pdf). In Proceedings of workshop on machine learning systems (LearningSys) in the twenty-ninth annual conference on neural information processing systems (NIPS) (Vol. 5, pp. 1-6).
<div id="pytorch-9"></div>
9. Paszke, A., Gross, S., Massa, F., Lerer, A., Bradbury, J., Chanan, G., ... & Chintala, S. (2019). [Pytorch: An imperative style, high-performance deep learning library](https://proceedings.neurips.cc/paper/2019/file/bdbca288fee7f92f2bfa9f7012727740-Paper.pdf). Advances in neural information processing systems, 32.
<div id="tf-eager-10"></div>
10. Agrawal, A., Modi, A., Passos, A., Lavoie, A., Agarwal, A., Shankar, A., ... & Cai, S. (2019). [TensorFlow Eager: A multi-stage, Python-embedded DSL for machine learning](https://proceedings.mlsys.org/paper/2019/file/2a38a4a9316c49e5a833517c45d31070-Paper.pdf). Proceedings of Machine Learning and Systems, 1, 178-189.
<div id="jax-11"></div>
11. Frostig, R., Johnson, M. J., & Leary, C. (2018). [Compiling machine learning programs via high-level tracing](https://mlsys.org/Conferences/doc/2018/146.pdf). Systems for Machine Learning, 23-24.
<div id="auto-graph-12"></div>
12. Moldovan, D., Decker, J. M., Wang, F., Johnson, A. A., Lee, B. K., Nado, Z., ... & Wiltschko, A. B. (2018). [AutoGraph: Imperative-style Coding with Graph-based Performance](https://arxiv.org/pdf/1810.08061.pdf).(oct 2018). arXiv preprint arXiv:1810.08061.
<div id="GoogLeNet-13"></div>
13. Szegedy, Christian, et al. "[Going deeper with convolutions](https://www.cv-foundation.org/openaccess/content_cvpr_2015/papers/Szegedy_Going_Deeper_With_2015_CVPR_paper.pdf)." Proceedings of the IEEE conference on computer vision and pattern recognition. 2015.
<div id="Julia-autodiff-14"></div>
14. Revels, Jarrett, Miles Lubin, and Theodore Papamarkou. "[Forward-mode automatic differentiation in Julia](https://arxiv.org/pdf/1607.07892.pdf)." arXiv preprint arXiv:1607.07892 (2016).
|
AI-System/Textbook/第3章-深度学习框架基础/3.1-基于数据流图的深度学习框架.md/0
|
{
"file_path": "AI-System/Textbook/第3章-深度学习框架基础/3.1-基于数据流图的深度学习框架.md",
"repo_id": "AI-System",
"token_count": 24368
}
| 15 |
<!--Copyright © Microsoft Corporation. All rights reserved.
适用于[License](https://github.com/microsoft/AI-System/blob/main/LICENSE)版权许可-->
# Matrix Computation and Computer Architecture
# Introduction
The evolution of computer architecture often goes hand in hand with the evolution of upper-layer applications: on one hand, the characteristics of computational tasks determine the design and optimization directions of the architecture; on the other hand, architectural iteration pushes applications to evolve in directions better suited to the architecture. Likewise, the emergence and development of deep learning has not only been deeply shaped by architectures such as the GPU, but has also driven the emergence and evolution of a series of new computer architectures. The prerequisite for understanding architectural change is understanding the essential characteristics of the computational patterns of upper-layer applications; only then can one understand the trade-offs an architecture makes to accommodate those patterns.
Therefore, focusing on deep learning computation, this chapter first analyzes and organizes the core computational patterns in the deep learning models popular in recent years, and then, in chronological order, introduces the roles different computer architectures have played in supporting deep learning computation, including traditional CPU-centric architectures, general-purpose graphics processors (GPUs), and some representative dedicated hardware accelerators (such as the TPU). Because the content spanning computer architecture and deep learning applications covers a wide range and a deep body of knowledge, this book does not detail the design of every architecture; instead it focuses on the parts related to deep learning computation and highlights the differences between architectures to reveal the trends in architectural change, helping readers better understand how deep learning computation can make better use of hardware resources and guiding them to think about and analyze future trends in both deep learning models and architectures.
# Content Overview
This chapter contains the following sections:
- [4.1 Computational Patterns of Deep Learning](4.1-深度学习的计算模式.md)
- [4.2 Computer Architecture and Matrix Computation](4.2-计算机体系结构与矩阵运算.md)
- [4.3 GPU Architecture and Matrix Computation](4.3-GPU体系结构与矩阵运算.md)
|
AI-System/Textbook/第4章-矩阵运算与计算机体系结构/4-前言.md/0
|
{
"file_path": "AI-System/Textbook/第4章-矩阵运算与计算机体系结构/4-前言.md",
"repo_id": "AI-System",
"token_count": 1471
}
| 16 |
<!--Copyright © Microsoft Corporation. All rights reserved.
适用于[License](https://github.com/microsoft/AI-System/blob/main/LICENSE)版权许可-->
# 7.3 Scheduling
<center> <img src="./img/3/7-3-10-schedulemodule.png" /></center>
<center>Figure 7.3.1 The platform scheduler</center>
In the previous sections we introduced the runtime of cluster management; but before a job process starts, the platform itself must decide which servers and GPUs the job will run on and which job may execute first, thereby making scheduling decisions. As shown in the figure, this section covers the abstraction and optimization objectives of the scheduling problem, together with classical scheduling algorithms applicable to deep learning job scheduling, in the hope that readers come to understand the classical problems and solution methods of job scheduling.
- [7.3 Scheduling](#73-scheduling)
  - [7.3.1 Optimization Objectives of the Scheduling Problem](#731-optimization-objectives-of-the-scheduling-problem)
  - [7.3.2 Single-Job Scheduling: Gang Scheduling](#732-single-job-scheduling-gang-scheduling)
  - [7.3.3 Inter-Job Scheduling: Dominant Resource Fairness (DRF)](#733-inter-job-scheduling-dominant-resource-fairness-drf)
  - [7.3.4 Inter-Group Job Scheduling: Capacity Scheduling](#734-inter-group-job-scheduling-capacity-scheduling)
  - [7.3.5 The Virtual Cluster Mechanism](#735-the-virtual-cluster-mechanism)
  - [7.3.6 Preemptive Scheduling](#736-preemptive-scheduling)
  - [7.3.7 Experiments and Simulation Studies of Deep Learning Scheduling Algorithms](#737-experiments-and-simulation-studies-of-deep-learning-scheduling-algorithms)
    - [7.3.7.1 Reading the Data](#7371-reading-the-data)
    - [7.3.7.2 Choosing Evaluation Metrics](#7372-choosing-evaluation-metrics)
    - [7.3.7.3 Algorithm Implementation and Evaluation](#7373-algorithm-implementation-and-evaluation)
  - [Summary and Discussion](#summary-and-discussion)
  - [References](#references)
## 7.3.1 Optimization Objectives of the Scheduling Problem
[Scheduling](https://en.wikipedia.org/wiki/Scheduling_(computing)) is the action of allocating resources to perform tasks. In a deep learning platform, the resources can be processors, GPUs, memory, and so on, and the tasks are the jobs submitted by users.
The scheduling activity is performed by a process called the scheduler.
Scheduling algorithms in the scheduler are usually designed to keep all computing resources busy, to let multiple users efficiently ***share*** system resources, or to achieve a target quality of service.
On the cluster servers that run deep learning jobs, an "operating system" is deployed for job management and scheduling, namely the heterogeneous resource management system, also called the deep learning platform. Compared with a traditional operating system, its characteristic is that the "processes" it runs are generally deep learning jobs, so it is a special-purpose operating system. The resources it manages are not a single machine but a resource pool composed of multiple servers, each mounting multiple commodity GPUs, InfiniBand NICs, and other heterogeneous hardware. Like an operating system, the deep learning platform provides multiplexing of the managed hardware at a certain level of abstraction. Moreover, since more than one user submits multiple jobs and the whole resource pool is shared by multiple internal groups and users of the organization, it is what we call a multi-tenant system.
The platform usually schedules batches of user-submitted jobs, commonly considering the following metrics:
Job latency and throughput metrics:
- Queuing delay: the time a job spends waiting in the scheduler queue for resource allocation. The lower the queuing delay, the shorter users' jobs must wait and the more efficient the system. It is mainly affected by two factors: fairness, when a user's jobs have used up the allocated quota; and locality and resource fragmentation, which prevent resources from being allocated and cause waiting.
- Average response time: the average amount of time from submitting a request to producing the first response. The platform wants the average response time to be as short as possible.
- Average job completion time (JCT): the average completion time of a batch of jobs; this metric can represent system performance. For example, the locality of a distributed job affects its communication time, which in turn affects JCT.
- Makespan: for a batch of jobs, the overall time from the arrival of the first job to the completion of the last; the smaller the better. Some scheduling algorithms take the overall makespan of all jobs as the optimization objective, since minimizing makespan is equivalent to maximizing resource efficiency.
- Throughput: the number of jobs that can be completed per unit of time. The platform wants throughput to be as high as possible.
Platform resource utilization metrics:
- Utilization: the percentage of total resources that are used for jobs. The platform wants utilization to be as high as possible.
- Fragmentation: after jobs are allocated, some nodes' remaining resources can no longer be allocated, producing fragmentation. The less fragmentation, the less resource waste; this metric is also related to resource utilization.
Fairness and service-level metrics:
- Fairness: resource usage is divided among platform users or groups equally or in proportion to specified quotas.
- Service-level agreement (SLA): an SLA is a commitment between the platform and its users. For example, the fairness, quality, availability, and responsibilities of the platform service are negotiated and agreed upon between the platform and its users.
As shown in Figure 7.3.2, the platform contains the following hierarchy of clusters and jobs, and different levels of the hierarchy embed different scheduling problems; the deep-learning-oriented scheduling algorithms discussed later can also be mapped onto the problems at these levels. As you read Section 7.4, consider which level in the figure each deep-learning-oriented scheduling algorithm belongs to.
- Job scheduling across virtual clusters: jobs across virtual clusters can be scheduled with capacity scheduling, preemptive scheduling, and similar approaches. The focus is fairness, while offering bonus resources combined with job preemption improves efficiency and still guarantees the service-level agreement.
- Job scheduling within a virtual cluster: jobs submitted by users within a virtual cluster can be scheduled with algorithms such as first-in-first-out (FIFO) or dominant resource fairness (DRF). The focus is preventing job starvation, reducing queuing time, improving job completion time, and reducing resource fragmentation to raise utilization.
- Scheduling among one user's jobs: for this class of problems, readers can refer to Chapter 9 on automated machine learning systems. For hyper-parameter tuning, a user submits multiple jobs that vary certain hyper-parameters. The focus is the makespan, response time, and throughput of the whole set of jobs.
- Intra-job scheduling: a multi-GPU or distributed job launches multiple tasks; within a job, the tasks can be scheduled with gang scheduling. The focus is job semantics and correctness, as well as job completion time.
<center> <img src="./img/3/7-3-11-schedulingoverview.png" /></center>
<center>Figure 7.3.2 Overview of job-scheduling problems in the platform</center>
Next, through classical scheduling algorithms, let us see how the algorithms commonly used by platforms solve the problems encountered.
## 7.3.2 Single-Job Scheduling: Gang Scheduling
The Wikipedia definition of gang scheduling[<sup>[1]</sup>](#gang) is: a scheduling algorithm for parallel systems that schedules related threads or processes to start simultaneously and run on different processors.
Deep learning jobs usually run for hours, some even for weeks. They typically require gang scheduling: the training process cannot begin until all required accelerator devices have been granted.
What problems arise without gang scheduling? A deep learning job executes multiple tasks simultaneously; if some dependent tasks have not started, the tasks that did start busy-wait at synchronization points or context-switch frequently (as shown in the figure below). First, the training task cannot make progress while waiting for tasks that cannot start: as the figure shows, both jobs have been allocated part of their resources but each needs more to start, producing deadlock. Meanwhile, the started tasks do not release their resources, wasting them.
<center> <img src="./img/3/7-3-2-gangscheduleproblem.png" /></center>
<center>Figure 7.3.3 Problems that can arise when launching tasks of parallel jobs independently</center>
Next, for the problem instance above, we use gang scheduling to start the processes of each deep learning job together. Jobs A, B, and C in the figure can then execute in turn, guaranteeing that all tasks run to completion.
<center> <img src="./img/3/7-3-3-gangschedule.png" /></center>
<center>Figure 7.3.4 Executing jobs with gang scheduling</center>
Of course, gang scheduling has its own limitations: it increases the risk of resource fragmentation, and utilization is low in shared clusters. As shown in the figure, GPUs 7 and 8 are idle and wasted during the periods t1 and t2.
## 7.3.3 Inter-Job Scheduling: Dominant Resource Fairness (DRF)
Today's deep learning platforms contain multiple kinds of heterogeneous resources (CPU, GPU, host memory, etc.) and are multi-tenant environments used by many users. During scheduling, users request fine-grained amounts of different resources; while satisfying these heterogeneous resource demands, we also want to maintain a degree of fairness in the multi-tenant environment.
- Problem: how to schedule resources fairly across multiple jobs in a system containing heterogeneous resource types?
- Challenge: compared with traditional single-resource fair scheduling, deep learning jobs need multiple heterogeneous resources (CPU, host memory, etc.) and also require scheduling of GPUs and GPU memory.
[Dominant Resource Fairness (DRF)](https://cs.stanford.edu/~matei/papers/2011/nsdi_drf.pdf)[<sup>[2]</sup>](#drf) scheduling uses the notion of a dominant resource to compare multi-dimensional (CPU, GPU, memory, etc.) resources. The idea is that in a multi-resource environment, the allocation for a job (user or queue) should be determined by the job's dominant share, which is the largest share among all resources (memory, CPU, etc.) already allocated to the job. As the paper shows, unlike other possible policies, DRF satisfies several desirable properties.
First, DRF incentivizes users to share resources and thereby guarantees fairness.
Second, DRF is [strategy-proof](https://en.wikipedia.org/wiki/Strategyproofness): honesty pays off, because users have no incentive to inflate their jobs' allocations by lying about their demands. Under max-min fairness, lying to claim more resources simply means more queuing time.
Moreover, DRF is [envy-free](https://en.wikipedia.org/wiki/Envy-freeness): no user envies another user's allocation. Another user's allocation, even if served faster, does not necessarily fit one's own resource needs, which reflects fairness.
Finally, DRF allocations are Pareto-efficient. A feasible allocation is Pareto-efficient if no participant can be made better off without making some other participant worse off; here, it is impossible to increase one user's quota without decreasing another user's. The authors implemented DRF in the Mesos cluster resource manager.
In essence, DRF's optimization objective is to maximize the smallest dominant share across all entities.
A brief summary of the DRF scheduling policy:
1. Determine each job's dominant resource by the share each resource type occupies of the cluster's total resources.
2. Schedule with a max-min fairness algorithm over multiple resource types (e.g., GPU, CPU).
In the example shown in the figure below, Job 1 and Job 2 each launch multiple tasks and request multiple resources. The first step computes each job's dominant resource: Job 1's dominant resource is memory, and Job 2's is GPU. Job 1 has higher priority than Job 2 because Job 1's share, 0.4, is smaller than Job 2's share, 0.5.
<center> <img src="./img/3/7-3-1-drf.png" /></center>
<center>Figure 7.3.5 A DRF scheduling example with two jobs (<a href="https://github.com/volcano-sh/volcano/blob/master/docs/design/drf.md">figure credit: Volcano</a>)</center>
$$Cluster \ Resources: [10 \ GPU, 20GB \ RAM] $$
For the following resource demand, the dominant resource is memory.
$$Job \ 1:$$
$$Total \ GPU \ 1 + 1 = 2 \ GPU$$
$$GPU \ Share \ \frac{2}{10} = 0.2$$
$$Total \ Memory \ 4 + 4 = 8GB$$
$$Memory \ Share \ \frac{8}{20} = 0.4$$
$$Dominant \ resource \ is \ Memory$$
For the following resource demand, the dominant resource is GPU.
$$Job \ 2:$$
$$Total \ GPU \ 2 + 3 = 5 \ GPU$$
$$GPU \ Share \ \frac{5}{10} = 0.5$$
$$Total \ Memory \ 2 + 2 = 4GB$$
$$Memory \ Share \ \frac{4}{20} = 0.2$$
$$Dominant \ resource \ is \ GPU$$
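The core DRF loop for the two-job example can be sketched as follows. The per-task demand vectors are simplified to be uniform within each job, and for brevity the loop stops at the first job that no longer fits; both are simplifications of the algorithm in the DRF paper.

```python
# A simplified DRF allocation loop over two resource types.
cluster = {"gpu": 10, "ram": 20}
jobs = {
    "job1": {"demand": {"gpu": 1, "ram": 4}, "alloc": {"gpu": 0, "ram": 0}},
    "job2": {"demand": {"gpu": 2, "ram": 2}, "alloc": {"gpu": 0, "ram": 0}},
}

def dominant_share(job):
    # The largest share, over all resource types, of what the job holds.
    return max(job["alloc"][r] / cluster[r] for r in cluster)

free = dict(cluster)
while True:
    # Max-min fairness: serve the job with the smallest dominant share.
    name = min(jobs, key=lambda n: dominant_share(jobs[n]))
    demand = jobs[name]["demand"]
    if any(free[r] < demand[r] for r in cluster):
        break  # simplification: stop once the selected job no longer fits
    for r in cluster:
        free[r] -= demand[r]
        jobs[name]["alloc"][r] += demand[r]

for name, job in jobs.items():
    print(name, job["alloc"], round(dominant_share(job), 2))
```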
## 7.3.4 Inter-Group Job Scheduling: Capacity Scheduling
Beyond fair allocation among jobs, platform administrators must also consider fairness between groups when multiple groups share the platform.
How can multiple groups share a cluster?
- Can cluster resources be shared by multiple organizations?
- While sharing cluster resources, how can each organization be guaranteed a minimum capacity?
- Can idle resources be used elastically by other organizations?
Challenge: compared with traditional capacity scheduling, deep learning jobs also require scheduling of GPUs and GPU memory.
The Capacity Scheduler[<sup>[3]</sup>](#yarn) is commonly used as a mainstream scheduler on big-data platforms. From the [perspective of job types](https://research.google/pubs/pub43438/), both big-data jobs and deep learning training jobs can be seen as batch jobs. It allows multiple tenants to share a large cluster securely, allocating resources to their applications in a timely manner under the constraints of allocated capacities.
Consider the figure below: teams A, B, and C share the cluster, each team has multiple users, and every user submits jobs that consume cluster resources. Without inter-group fairness, once Team A has claimed 45% of the resources, any unused portion is wasted while Team C can no longer obtain resources, leading to [starvation](https://en.wikipedia.org/wiki/Starvation_(computer_science)).
<center> <img src="./img/3/7-3-4-capaproblem.png" /></center>
<center>Figure 7.3.6 Excessive resource occupation prevents other groups from allocating resources</center>
Therefore, to support multi-tenant resource sharing, capacity scheduling designs the following set of strategies:
- Improving utilization:
  - Virtual clusters: what a group sees is a view of virtual resources not bound to specific machines; the actual resources are allocated after a job starts, which helps improve resource utilization.
  - Hierarchical queues: a hierarchy of queues ensures that resources are shared among an organization's sub-queues before other queues are allowed to use idle resources, providing more control and predictability.
  - Within a queue, other inter-job scheduling algorithms, such as FIFO or DRF, can be composed orthogonally. For heterogeneous computing scenarios, schedulers suited to multi-dimensional resources, or other custom schedulers, can still be used; subject to the constraints described below, policies such as DRF can still perform the scheduling and resource allocation among concrete jobs.
- Multi-tenancy and improving fairness:
  - Multi-tenancy and the user limit factor:
    - In a sense, queues are allocated a fraction of the grid capacity insofar as they may use resources up to a certain capacity. All applications submitted to a queue have access to the capacity allocated to that queue. Administrators can configure soft limits and optional hard limits on the capacity allocated to each queue.
    - Multiple users and groups use the cluster in a multi-tenant fashion. The maximum resources a single user may consume are capped, preventing excessive occupation that would keep other processes from obtaining resources.
- Elasticity and SLA:
  - Bonus resources: resources unused by other groups can be lent temporarily and free of charge to teams that need them; when the resource owner needs them back, they are preempted and returned to the owner.
  - Preemption: used together with bonus resources to guarantee the service-level agreement (SLA) offered to users.
As shown in the figure below, when the administrator configures minimum and maximum resource quotas for each group, every group is guaranteed some usable resources.
<center> <img src="./img/3/7-3-5-capascheduling.png" /></center>
<center>Figure 7.3.7 Capacity scheduling</center>
***Classics revisited***
Operating-system [fair-share scheduling](https://en.wikipedia.org/wiki/Fair-share_scheduling): "a scheduling algorithm for operating systems in which the CPU is divided equally among system users or groups (or, evolving toward something like capacity scheduling, among groups in proportion to quotas), as opposed to equal distribution of resources among processes." A common way to logically implement a fair-share scheduling policy is to recursively apply round-robin at each level of abstraction (process, user, group, and so on): for example, two groups receive fair shares, and within each group users are scheduled by round-robin or another scheduling algorithm. So scheduling algorithms have a certain orthogonality and can be nested; the key is to apply different policies at different levels of abstraction.
## 7.3.5 The Virtual Cluster Mechanism
Within a cluster, the resource quotas seen by groups and users are generally not bound to specific physical machines; the physical machines a job runs on are determined after scheduling. Behind this is a virtual cluster mapping, whose design closely resembles the control groups (cgroups) introduced earlier. Many problems arising in clusters have analogous design problems and principles in traditional operating systems.
As shown in the figure below, the virtual cluster configures each group's quota and view, and the physical cluster is bound at run time after scheduling. This substantially improves resource utilization and reduces resource fragmentation.
<center> <img src="./img/3/7-3-6-vc-bind.png"/></center>
<center>Figure 7.3.8 Mapping and binding between virtual and physical clusters (<a href="https://www.usenix.org/sites/default/files/conference/protected-files/osdi20_slides_zhao.pdf">figure credit: HiveD</a>)</center>
As shown in the figure, the virtual-cluster strategy for deep learning job scheduling can be summarized as:
- Virtual clusters are defined by group quotas
- Each tenant constitutes a virtual cluster (VC)
- Resources are allocated to tenants
- Virtual clusters are bound to the physical cluster
<center> <img src="./img/3/7-3-7-vc-define.png" /></center>
<center>Figure 7.3.9 Virtual-cluster resource allocation <a href="https://dl.acm.org/doi/10.5555/3488766.3488795">figure credit: HiveD, OSDI '20</a> </center>
***Classics revisited***
The design idea of virtual clusters resembles control groups in traditional operating systems: constrain resources while decoupling them from physical bindings. OS [control groups (cgroups)](https://man7.org/linux/man-pages/man7/cgroups.7.html) give the processes of different organizations and users corresponding resource constraints inside the operating system, without binding them to specific hardware resources, which are instead allocated by runtime scheduling.
## 7.3.6 Preemptive Scheduling
To reduce the waste of idle resources within groups, some cluster administrators want policies that share a virtual cluster's idle resources. But simply lending resources without guaranteeing that the original owner can reclaim the corresponding quota at any time creates a new problem: the SLA (Service Level Agreement) for the original user can no longer be guaranteed. This is generally solved with preemptive scheduling: when the original owner of the resources needs them, the job processes using the bonus resources are terminated and the resources are returned to the quota owner.
Preemptive scheduling is generally used in the following scenarios:
(1) Letting resource-starved jobs or short jobs preempt some resources, reducing the average job response time. Because the resilience of deep learning jobs is not yet mature, preemptive scheduling is generally not used for this purpose in deep learning.
As shown in the figure below, APP2 cannot obtain resources for a long time and therefore cannot run, even though its actual execution time is short. A preemption mechanism is needed to let APP2 obtain some resources and execute, keeping the average response time low.
<center> <img src="./img/3/7-3-8-preemptive.png" /></center>
<center>Figure 7.3.10 The problem of excessively long job waiting time <a href="https://www.slideshare.net/Hadoop_Summit/enabling-diverse-workload-scheduling-in-yarn">figure credit: Hortonworks Talk '15</a> </center>
(2) Lending a virtual cluster's idle resources as bonus resources to jobs in other virtual clusters, improving overall resource utilization. In deep learning, preemptive scheduling is usually used for this reason.
In the figure below, queue A is configured with 10 available resources, but because the cluster has idle capacity it additionally uses 20 bonus resources for C6 and C7. Now queue C needs those 20 resources, and the cluster must guarantee them, so preemption is triggered. Once the resources used by APP1's C6 and C7 are marked preemptible, they can be reclaimed through the following steps:
1. Obtain the containers to be preempted from the over-using queue (queue A's C6 and C7).
2. Notify the job controller (of queue A) that preemption is about to be triggered.
3. Wait until the preempted containers terminate.
<center> <img src="./img/3/7-3-9-preemptive.png" /></center>
<center>Figure 7.3.11 Preemptive scheduling <a href="https://blog.cloudera.com/better-slas-via-resource-preemption-in-yarns-capacityscheduler/">figure credit: Cloudera YARN Capacity Scheduler '15</a></center>
The challenge preemptive scheduling poses for deep learning jobs: today a preempted deep learning job can only fail; by default it cannot context-switch the way a traditional operating system does. Some existing work provides checkpoint mechanisms at the framework or device-driver-library level that cooperate with the scheduler to implement preemption and recovery, but since this is not natively supported it carries overhead, supports only a limited set of frameworks and scenarios, and is not widely used. In the future, better deep learning checkpoint-and-restore techniques could reduce the wasted work caused by failing preempted jobs.
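As a hedged sketch of the checkpoint-based approach mentioned above, a training process can save state when the platform signals imminent preemption and resume from the checkpoint on restart. The signal choice (SIGTERM), the checkpoint path, and the training-loop structure below are assumptions for illustration, not a description of any specific platform's mechanism.

```python
# A sketch of preemption-tolerant training via framework-level checkpoints.
import os
import signal
import torch

CKPT = "/tmp/job_ckpt.pt"  # illustrative path

def train(model, optimizer, data_loader, epochs):
    epoch = 0
    if os.path.exists(CKPT):                 # resume after a preemption
        state = torch.load(CKPT)
        model.load_state_dict(state["model"])
        optimizer.load_state_dict(state["optim"])
        epoch = state["epoch"]

    def on_preempt(signum, frame):
        # Assume the platform signals the process before reclaiming
        # bonus resources; persist state, then exit cleanly.
        torch.save({"model": model.state_dict(),
                    "optim": optimizer.state_dict(),
                    "epoch": epoch}, CKPT)
        raise SystemExit(0)

    signal.signal(signal.SIGTERM, on_preempt)
    while epoch < epochs:
        for batch in data_loader:
            pass  # forward / backward / optimizer.step() as usual
        epoch += 1
```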
***Classics revisited***
- [OS preemptive scheduling](https://en.wikipedia.org/wiki/Preemption_(computing)): "Preemption is the act of temporarily interrupting an executing task, with the intention of giving the executing task's resources to other tasks. This interrupt is performed by an external scheduler, and the resulting change of the processor's currently executing task is called a context switch." Today nearly all operating systems support preemptive multitasking, for example Windows, macOS, and Linux. Readers may consider: can deep learning jobs support context switching, that is, restoring or migrating preempted processes?
## 7.3.7 Experiments and Simulation Studies of Deep Learning Scheduling Algorithms
Studying scheduling algorithms by running real jobs takes too long and costs too much, so scheduling-algorithm research is generally based on historical job logs and validated through simulators.
Through the exercise in this example, readers can use real deep learning job traces from a production platform to model the scheduling algorithms above, or to study new scheduling algorithms tailored to deep learning.
The open-source dataset [philly-traces](https://github.com/msr-fiddle/philly-traces) contains a representative subset of first-party deep learning training workloads on Microsoft's internal Philly clusters. The data is a sanitized subset of the workloads described in "Analysis of large-scale multi-tenant GPU clusters for DNN training workloads" (ATC '19)[<sup>[4]</sup>](#philly).
### 7.3.7.1 Reading the Data
Readers can use the scripts provided in the repository to read the data and understand its schema.
### 7.3.7.2 Choosing Evaluation Metrics
Readers can design optimization objectives based on the metrics introduced at the beginning of this section.
### 7.3.7.3 Algorithm Implementation and Evaluation
Readers can use the classical algorithms introduced above as baselines, design new algorithms, simulate them with real platform data to see whether they improve the chosen objectives and surpass the baselines, and write up the analysis as a report or paper.
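A minimal event-driven simulator skeleton for such studies might look as follows, assuming each trace record reduces to (submit_time, duration, gpus); the field names are illustrative and should be adapted to the philly-traces schema.

```python
# A toy event-driven FIFO simulator over a job trace.
import heapq

def simulate_fifo(jobs, total_gpus):
    """Replay jobs under FIFO; return (average JCT, makespan)."""
    jobs = sorted(jobs)                  # order by submit_time
    free, now, ends, jcts = total_gpus, 0.0, [], []
    for submit, duration, gpus in jobs:
        now = max(now, submit)
        # Wait for running jobs to finish until enough GPUs are free.
        while free < gpus:
            end, g = heapq.heappop(ends)
            now, free = max(now, end), free + g
        heapq.heappush(ends, (now + duration, gpus))
        free -= gpus
        jcts.append(now + duration - submit)
    makespan = max(e for e, _ in ends) - jobs[0][0]
    return sum(jcts) / len(jcts), makespan

trace = [(0, 100, 2), (10, 50, 4), (20, 200, 2)]  # toy trace
print(simulate_fifo(trace, total_gpus=4))
```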
## Summary and Discussion
This section introduced traditional classical algorithms applicable to heterogeneous cluster management systems. These algorithms still play a role in deep learning cluster management, showing the appeal of classical theory and techniques that stand the test of time.
Readers may consider: what shortcomings do current scheduling algorithms still have in deep learning job scenarios, and what optimizations are possible?
## References
<div id="gang"></div>
1. [Wikipedia contributors. "Gang scheduling." Wikipedia, The Free Encyclopedia. Wikipedia, The Free Encyclopedia, 11 Jan. 2021. Web. 3 Jul. 2022.](https://en.wikipedia.org/wiki/Gang_scheduling)
<div id="drf"></div>
2. [Ali Ghodsi, Matei Zaharia, Benjamin Hindman, Andy Konwinski, Scott Shenker, and Ion Stoica. 2011. Dominant resource fairness: fair allocation of multiple resource types. In Proceedings of the 8th USENIX conference on Networked systems design and implementation (NSDI'11). USENIX Association, USA, 323–336.](https://dl.acm.org/doi/10.5555/1972457.1972490)
<div id="yarn"></div>
3. [Hadoop: Capacity Scheduler](https://hadoop.apache.org/docs/stable/hadoop-yarn/hadoop-yarn-site/CapacityScheduler.html)
<div id="philly"></div>
4. [Myeongjae Jeon, Shivaram Venkataraman, Amar Phanishayee, Junjie Qian, Wencong Xiao, and Fan Yang. 2019. Analysis of large-scale multi-tenant GPU clusters for DNN training workloads. In Proceedings of the 2019 USENIX Conference on Usenix Annual Technical Conference (USENIX ATC '19). USENIX Association, USA, 947–960.](https://dl.acm.org/doi/10.5555/3358807.3358888)
|
AI-System/Textbook/第7章-异构计算集群调度与资源管理系统/7.3-调度.md/0
|
{
"file_path": "AI-System/Textbook/第7章-异构计算集群调度与资源管理系统/7.3-调度.md",
"repo_id": "AI-System",
"token_count": 16130
}
| 17 |
# Starter pipeline
# Start with a minimal pipeline that you can customize to build and deploy your code.
# Add steps that build, run tests, deploy, and more:
# https://aka.ms/yaml
stages:
- stage: dev
jobs:
- template: steps/deploy.yml
parameters:
deployment_name: RecoPySparkRTS
template: RecoPySparkRTS.yml
azureSubscription: $(devsub)
azure_subscription: $(devsubid)
azureresourcegroup: tridant-pyreco
workspacename: tridantpyreco
azureregion: $(azureregion)
aksimagename: tridantpyrecoaksimage
doCleanup: False
project: "e2etestharness"
expires : "DnD"
alias: $(Build.RequestedForId)
agent: $(agent)
|
AI/.ci/realtime-serving-pyspark-reco-dev.yml/0
|
{
"file_path": "AI/.ci/realtime-serving-pyspark-reco-dev.yml",
"repo_id": "AI",
"token_count": 284
}
| 18 |
parameters:
Agent: Hosted Ubuntu 1604
Demands: "python3"
jobTimeoutInMinutes: 180
jobDisplayName: 'defaultDisplayName'
TridentWorkloadTypeShort: #
DeployLocation: #
DefaultWorkingDirectory: #
Template: #
ProjectLocation: #
PythonPath: #
workload_vars: #
doCleanup: True
sp_appid: #
sp_password: #
stages:
- template: ../stage/deploy_container_stage_v2.yml
parameters:
Agent: ${{parameters.Agent}}
Demands: ${{parameters.Demands}}
stageName: 'stable'
jobDisplayName: ${{parameters.jobDisplayName}}
jobTimeoutInMinutes: ${{parameters.jobTimeoutInMinutes}}
TridentWorkloadTypeShort: ${{parameters.TridentWorkloadTypeShort}}
DeployLocation: ${{parameters.DeployLocation}}
TestPostfix: "-stable"
DefaultWorkingDirectory: ${{parameters.DeployLocation}}
Template: ${{parameters.Template}}
ProjectLocation: ${{parameters.ProjectLocation}}
PythonPath: ${{parameters.PythonPath}}
workload_vars: ${{parameters.workload_vars}}
sp_appid: ${{parameters.sp_appid}}
sp_password: ${{parameters.sp_password}}
doCleanup: ${{parameters.doCleanup}}
|
AI/.ci/stages/deploy_container_stages_v2.yml/0
|
{
"file_path": "AI/.ci/stages/deploy_container_stages_v2.yml",
"repo_id": "AI",
"token_count": 398
}
| 19 |
# AI Architecture Template TODO: update title
#
# A Github Service Connection must also be created with the name "AIArchitecturesAndPractices-GitHub"
# https://docs.microsoft.com/en-us/azure/devops/pipelines/process/demands?view=azure-devops&tabs=yaml
#
# An Agent_Name Variable must be creating in the Azure DevOps UI.
# https://docs.microsoft.com/en-us/azure/devops/pipelines/process/variables?view=azure-devops&tabs=yaml%2Cbatch#secret-variables
#
# This must point to an Agent Pool, with a Self-Hosted Linux VM with a Docker.
# https://docs.microsoft.com/en-us/azure/devops/pipelines/agents/v2-linux?view=azure-devops
parameters:
azureSubscription: ''
azure_subscription: ''
azureresourcegroup: ''
workspacename: ''
azureregion: westus2
aksimagename: ''
aks_name: ''
aks_service_name: myimage
conda: ai-architecture-template
doCleanup: true
flighting_release: false
flighting_preview: false
flighting_master: false
fresh_install: false
steps:
- template: config_conda.yml
parameters:
conda_location: .
azureSubscription: ${{parameters.azureSubscription}}
conda: ${{parameters.conda}}
flighting_release: ${{parameters.flighting_release}}
flighting_preview: ${{parameters.flighting_preview}}
flighting_master: ${{parameters.flighting_master}}
fresh_install: ${{parameters.fresh_install}}
- template: pytest_steps.yml
parameters:
azureSubscription: ${{parameters.azureSubscription}}
azure_subscription: ${{parameters.azure_subscription}}
azureresourcegroup: ${{parameters.azureresourcegroup}}
workspacename: aiarchtemplate
conda: ${{parameters.conda}}
azureregion: ${{parameters.azureregion}}
- template: cleanuptask.yml
parameters:
azureSubscription: ${{parameters.azureSubscription}}
azure_subscription: ${{parameters.azure_subscription}}
conda: ${{parameters.conda}}
azureresourcegroup: ${{parameters.azureresourcegroup}}
doCleanup: ${{parameters.doCleanup}}
|
AI/.ci/steps/ai-architecture-template.yml/0
|
{
"file_path": "AI/.ci/steps/ai-architecture-template.yml",
"repo_id": "AI",
"token_count": 706
}
| 20 |
parameters:
template: ''
azureSubscription: 'x'
azure_subscription: 'x'
azureresourcegroup: 'x'
workspacename: 'x'
azureregion: 'x'
aksimagename: 'x'
aks_name: "mlaks"
location: "" #Root Dir of Project
python_path: "" #Root Dir of Python Env
cluster_name: "-"
flighting_release: false
flighting_preview: false
sp_appid: #
sp_password: #
doCleanup: True
steps:
- template: ${{parameters.template}}
parameters:
azureSubscription: ${{parameters.azureSubscription}}
azure_subscription: ${{parameters.azure_subscription}}
azureresourcegroup: ${{parameters.azureresourcegroup}}
workspacename: ${{parameters.workspacename}}
azureregion: ${{parameters.azureregion}}
aksimagename: ${{parameters.aksimagename}}
aks_name: ${{parameters.aks_name}}
location: ${{parameters.location}}
python_path: ${{parameters.python_path}}
cluster_name: ${{parameters.cluster_name}}
flighting_release: ${{parameters.flighting_release}}
flighting_preview: ${{parameters.flighting_preview}}
sp_appid: ${{parameters.sp_appid}}
sp_password: ${{parameters.sp_password}}
doCleanup: ${{parameters.doCleanup}}
|
AI/.ci/steps/deploy_container_steps_v2.yml/0
|
{
"file_path": "AI/.ci/steps/deploy_container_steps_v2.yml",
"repo_id": "AI",
"token_count": 448
}
| 21 |
parameters:
Input_String: ""
Output_Variable: ""
cut_length: 12
steps:
- bash: |
    # Truncate the input string to the first `cut_length` characters.
    shortened=$(echo "${{ parameters.Input_String }}" | cut -c1-${{ parameters.cut_length }})
    # Publish the truncated value as a pipeline variable under the requested name.
    echo "##vso[task.setvariable variable=${{ parameters.Output_Variable }}]$shortened"
|
AI/.ci/steps/shorten_string.yml/0
|
{
"file_path": "AI/.ci/steps/shorten_string.yml",
"repo_id": "AI",
"token_count": 91
}
| 22 |
{
"$schema": "https://schema.management.azure.com/schemas/2019-04-01/deploymentTemplate.json#",
"contentVersion": "1.0.0.0",
"parameters": {
"accountName": {
"type": "string"
},
"databaseName": {
"type": "string"
},
"containerName": {
"type": "string"
},
"partitionKey": {
"type": "string"
},
"location": {
"type": "string"
},
"keyVaultKeyUri": {
"type": "string",
"metadata": {
"description": "The uri to a key in your Key Vault to add a second layer of encryption on top of what is provided by default"
},
"defaultValue": ""
}
},
"resources": [
{
"comments": "Create a Cosmos DB account, database and container.",
"name": "[parameters('accountName')]",
"type": "Microsoft.DocumentDB/databaseAccounts",
"apiVersion": "2020-04-01",
"location": "[parameters('location')]",
"properties": {
"locations": [
{
"locationName": "[parameters('location')]"
}
],
"databaseAccountOfferType": "Standard",
"keyVaultKeyUri": "[parameters('keyVaultKeyUri')]"
        }
      },
      {
"name": "[parameters('databaseName')]",
"type": "Microsoft.DocumentDB/sqlDatabases",
"apiVersion": "2020-04-01",
"dependsOn": [
"[parameters('accountName')]"
],
"properties": {
"resource": {
"id": "[parameters('databaseName')]"
},
"options": {
"throughput": 400
}
}
      },
      {
"name": "[parameters('containerName')]",
"type": "Microsoft.DocumentDB/containers",
"apiVersion": "2020-04-01",
"dependsOn": [
"[parameters('databaseName')]"
],
"properties": {
"resource": {
"id": "[parameters('containerName')]",
"partitionKey": {
"paths": [
"[parameters('partitionKey')]"
],
"kind": "Hash"
}
}
}
}
],
"outputs": {
"resourceId": {
"type": "string",
"value": "[resourceId('Microsoft.DocumentDB/databaseAccounts', parameters('accountName'))]"
}
}
}
|
AI/AzureDeployment/CosmosDB/cosmosdb.json/0
|
{
"file_path": "AI/AzureDeployment/CosmosDB/cosmosdb.json",
"repo_id": "AI",
"token_count": 1040
}
| 23 |
import torch
def pad_ids(input_ids, attention_mask, token_type_ids, max_length, pad_token, mask_padding_with_zero, pad_token_segment_id, pad_on_left=False):
    """Pad the token ids, attention mask, and token type ids to `max_length`,
    padding on the left or the right as requested."""
    padding_length = max_length - len(input_ids)
if pad_on_left:
input_ids = ([pad_token] * padding_length) + input_ids
attention_mask = ([0 if mask_padding_with_zero else 1]
* padding_length) + attention_mask
token_type_ids = ([pad_token_segment_id] *
padding_length) + token_type_ids
else:
input_ids += [pad_token] * padding_length
attention_mask += [0 if mask_padding_with_zero else 1] * padding_length
token_type_ids += [pad_token_segment_id] * padding_length
return input_ids, attention_mask, token_type_ids
def dual_process_fn(line, i, tokenizer, args):
features = []
cells = line.split("\t")
if len(cells) == 2:
# this is for training and validation
# id, passage = line
mask_padding_with_zero = True
pad_token_segment_id = 0
pad_on_left = False
text = cells[1].strip()
input_id_a = tokenizer.encode(
text, add_special_tokens=True, max_length=args.max_seq_length,)
token_type_ids_a = [0] * len(input_id_a)
attention_mask_a = [
1 if mask_padding_with_zero else 0] * len(input_id_a)
input_id_a, attention_mask_a, token_type_ids_a = pad_ids(
input_id_a, attention_mask_a, token_type_ids_a, args.max_seq_length, tokenizer.pad_token_id, mask_padding_with_zero, pad_token_segment_id, pad_on_left)
features += [torch.tensor(input_id_a, dtype=torch.int), torch.tensor(
attention_mask_a, dtype=torch.bool), torch.tensor(token_type_ids_a, dtype=torch.uint8)]
qid = int(cells[0])
features.append(qid)
else:
raise Exception(
"Line doesn't have correct length: {0}. Expected 2.".format(str(len(cells))))
return [features]
def triple_process_fn(line, i, tokenizer, args):
features = []
cells = line.split("\t")
if len(cells) == 3:
# this is for training and validation
# query, positive_passage, negative_passage = line
mask_padding_with_zero = True
pad_token_segment_id = 0
pad_on_left = False
for text in cells:
input_id_a = tokenizer.encode(
text.strip(), add_special_tokens=True, max_length=args.max_seq_length,)
token_type_ids_a = [0] * len(input_id_a)
attention_mask_a = [
1 if mask_padding_with_zero else 0] * len(input_id_a)
input_id_a, attention_mask_a, token_type_ids_a = pad_ids(
input_id_a, attention_mask_a, token_type_ids_a, args.max_seq_length, tokenizer.pad_token_id, mask_padding_with_zero, pad_token_segment_id, pad_on_left)
features += [torch.tensor(input_id_a, dtype=torch.int),
torch.tensor(attention_mask_a, dtype=torch.bool)]
else:
raise Exception(
"Line doesn't have correct length: {0}. Expected 3.".format(str(len(cells))))
return [features]
def triple2dual_process_fn(line, i, tokenizer, args):
ret = []
cells = line.split("\t")
if len(cells) == 3:
# this is for training and validation
# query, positive_passage, negative_passage = line
# return 2 entries per line, 1 pos + 1 neg
mask_padding_with_zero = True
pad_token_segment_id = 0
pad_on_left = False
pos_feats = []
neg_feats = []
for i, text in enumerate(cells):
input_id_a = tokenizer.encode(
text.strip(), add_special_tokens=True, max_length=args.max_seq_length,)
token_type_ids_a = [0] * len(input_id_a)
attention_mask_a = [
1 if mask_padding_with_zero else 0] * len(input_id_a)
input_id_a, attention_mask_a, token_type_ids_a = pad_ids(
input_id_a, attention_mask_a, token_type_ids_a, args.max_seq_length, tokenizer.pad_token_id, mask_padding_with_zero, pad_token_segment_id, pad_on_left)
if i == 0:
pos_feats += [torch.tensor(input_id_a, dtype=torch.int),
torch.tensor(attention_mask_a, dtype=torch.bool)]
neg_feats += [torch.tensor(input_id_a, dtype=torch.int),
torch.tensor(attention_mask_a, dtype=torch.bool)]
elif i == 1:
pos_feats += [torch.tensor(input_id_a, dtype=torch.int),
torch.tensor(attention_mask_a, dtype=torch.bool), 1]
else:
neg_feats += [torch.tensor(input_id_a, dtype=torch.int),
torch.tensor(attention_mask_a, dtype=torch.bool), 0]
ret = [pos_feats, neg_feats]
else:
raise Exception(
"Line doesn't have correct length: {0}. Expected 3.".format(str(len(cells))))
return ret
|
ANCE/data/process_fn.py/0
|
{
"file_path": "ANCE/data/process_fn.py",
"repo_id": "ANCE",
"token_count": 2423
}
| 24 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Callable, Optional, Dict, Tuple, Any, NamedTuple, List
import math

import torch
import torch.nn as nn
from .modules import (
FairseqDropout,
LayerDropModuleList,
LayerNorm,
MultiheadAttention,
PositionalEmbedding,
TransformerSentenceEncoderLayer,
#with_incremental_state,
AdaptiveSoftmax,
SinusoidalPositionalEmbedding,
TransformerDecoderLayer,
softmax,
log_softmax,
)
from torch import Tensor
from .modules import quant_noise as apply_quant_noise_
# from transformers.utils import logging
# logger = logging.get_logger(__name__)
import logging
logger = logging.getLogger(__name__)
EncoderOut = NamedTuple(
"EncoderOut",
[
("encoder_out", Tensor), # T x B x C
("encoder_padding_mask", Optional[Tensor]), # B x T
("encoder_embedding", Optional[Tensor]), # B x T x C
("encoder_states", Optional[List[Tensor]]), # List[T x B x C]
("src_tokens", Optional[Tensor]), # B x T
("src_lengths", Optional[Tensor]), # B x 1
],
)
class FairseqDecoder(nn.Module):
"""Base class for decoders."""
def __init__(self):
super().__init__()
self.onnx_trace = False
def forward(self, prev_output_tokens, encoder_out=None, **kwargs):
"""
Args:
prev_output_tokens (LongTensor): shifted output tokens of shape
`(batch, tgt_len)`, for teacher forcing
encoder_out (dict, optional): output from the encoder, used for
encoder-side attention
Returns:
tuple:
- the decoder's output of shape `(batch, tgt_len, vocab)`
- a dictionary with any model-specific outputs
"""
x, extra = self.extract_features(
prev_output_tokens, encoder_out=encoder_out, **kwargs
)
x = self.output_layer(x)
return x, extra
def extract_features(self, prev_output_tokens, encoder_out=None, **kwargs):
"""
Returns:
tuple:
- the decoder's features of shape `(batch, tgt_len, embed_dim)`
- a dictionary with any model-specific outputs
"""
raise NotImplementedError
def output_layer(self, features, **kwargs):
"""
Project features to the default output size, e.g., vocabulary size.
Args:
features (Tensor): features returned by *extract_features*.
"""
raise NotImplementedError
def get_normalized_probs(
self,
net_output: Tuple[Tensor, Optional[Dict[str, List[Optional[Tensor]]]]],
log_probs: bool,
sample: Optional[Dict[str, Tensor]] = None,
):
"""Get normalized probabilities (or log probs) from a net's output."""
if hasattr(self, "adaptive_softmax") and self.adaptive_softmax is not None:
if sample is not None:
assert "target" in sample
target = sample["target"]
else:
target = None
out = self.adaptive_softmax.get_log_prob(net_output[0], target=target)
return out.exp_() if not log_probs else out
logits = net_output[0]
if log_probs:
return log_softmax(logits, dim=-1, onnx_trace=self.onnx_trace)
else:
return softmax(logits, dim=-1, onnx_trace=self.onnx_trace)
def max_positions(self):
"""Maximum input length supported by the decoder."""
return 1e6 # an arbitrary large number
def upgrade_state_dict(self, state_dict):
"""Upgrade a (possibly old) state dict for new versions of fairseq."""
return state_dict
def prepare_for_onnx_export_(self):
self.onnx_trace = True
#@with_incremental_state
class FairseqIncrementalDecoder(FairseqDecoder):
"""Base class for incremental decoders.
Incremental decoding is a special mode at inference time where the Model
only receives a single timestep of input corresponding to the previous
output token (for teacher forcing) and must produce the next output
*incrementally*. Thus the model must cache any long-term state that is
needed about the sequence, e.g., hidden states, convolutional states, etc.
Compared to the standard :class:`FairseqDecoder` interface, the incremental
decoder interface allows :func:`forward` functions to take an extra keyword
argument (*incremental_state*) that can be used to cache state across
time-steps.
The :class:`FairseqIncrementalDecoder` interface also defines the
:func:`reorder_incremental_state` method, which is used during beam search
to select and reorder the incremental state based on the selection of beams.
To learn more about how incremental decoding works, refer to `this blog
<http://www.telesens.co/2019/04/21/understanding-incremental-decoding-in-fairseq/>`_.
"""
def __init__(self ):
super().__init__()
def forward(self, prev_output_tokens, encoder_out=None, incremental_state=None, **kwargs):
"""
Args:
prev_output_tokens (LongTensor): shifted output tokens of shape
`(batch, tgt_len)`, for teacher forcing
encoder_out (dict, optional): output from the encoder, used for
encoder-side attention
incremental_state (dict, optional): dictionary used for storing
state during :ref:`Incremental decoding`
Returns:
tuple:
- the decoder's output of shape `(batch, tgt_len, vocab)`
- a dictionary with any model-specific outputs
"""
raise NotImplementedError
def extract_features(self, prev_output_tokens, encoder_out=None, incremental_state=None, **kwargs):
"""
Returns:
tuple:
- the decoder's features of shape `(batch, tgt_len, embed_dim)`
- a dictionary with any model-specific outputs
"""
raise NotImplementedError
def reorder_incremental_state(
self,
incremental_state: Dict[str, Dict[str, Optional[Tensor]]],
new_order: Tensor,
):
"""Reorder incremental state.
This will be called when the order of the input has changed from the
previous time step. A typical use case is beam search, where the input
order changes between time steps based on the selection of beams.
"""
pass
def reorder_incremental_state_scripting(
self,
incremental_state: Dict[str, Dict[str, Optional[Tensor]]],
new_order: Tensor,
):
"""Main entry point for reordering the incremental state.
Due to limitations in TorchScript, we call this function in
:class:`fairseq.sequence_generator.SequenceGenerator` instead of
calling :func:`reorder_incremental_state` directly.
"""
for module in self.modules():
if hasattr(module, 'reorder_incremental_state'):
result = module.reorder_incremental_state(incremental_state, new_order)
if result is not None:
incremental_state = result
def set_beam_size(self, beam_size):
"""Sets the beam size in the decoder and all children."""
if getattr(self, '_beam_size', -1) != beam_size:
seen = set()
def apply_set_beam_size(module):
if module != self and hasattr(module, 'set_beam_size') \
and module not in seen:
seen.add(module)
module.set_beam_size(beam_size)
self.apply(apply_set_beam_size)
self._beam_size = beam_size
def fill_with_neg_inf(t):
"""FP16-compatible function that fills a tensor with -inf."""
return t.float().fill_(float("-inf")).type_as(t)
def item(tensor):
if hasattr(tensor, "item"):
return tensor.item()
if hasattr(tensor, "__getitem__"):
return tensor[0]
return tensor
class TransformerDecoder(FairseqIncrementalDecoder):
"""
Transformer decoder consisting of *args.decoder_layers* layers. Each layer
is a :class:`TransformerDecoderLayer`.
Args:
args (argparse.Namespace): parsed command-line arguments
dictionary (~fairseq.data.Dictionary): decoding dictionary
embed_tokens (torch.nn.Embedding): output embedding
no_encoder_attn (bool, optional): whether to attend to encoder outputs
(default: False).
"""
def __init__(self, args, embed_tokens, no_encoder_attn=False):
self.args = args
super().__init__()
self.register_buffer("version", torch.Tensor([3]))
self._future_mask = torch.empty(0)
self.dropout_module = FairseqDropout(args.dropout, module_name=self.__class__.__name__)
self.decoder_layerdrop = args.decoder_layerdrop
self.share_input_output_embed = args.share_decoder_input_output_embed
input_embed_dim = embed_tokens.embedding_dim
embed_dim = args.decoder_embed_dim
self.embed_dim = embed_dim
self.output_embed_dim = args.decoder_output_dim
self.padding_idx = embed_tokens.padding_idx
self.max_target_positions = args.max_target_positions
self.embed_tokens = embed_tokens
self.embed_scale = 1.0 if args.no_scale_embedding else math.sqrt(embed_dim)
if not args.adaptive_input and args.quant_noise_pq > 0:
self.quant_noise = apply_quant_noise_(
nn.Linear(embed_dim, embed_dim, bias=False),
args.quant_noise_pq,
args.quant_noise_pq_block_size,
)
else:
self.quant_noise = None
self.project_in_dim = (
Linear(input_embed_dim, embed_dim, bias=False)
if embed_dim != input_embed_dim
else None
)
self.embed_positions = (
PositionalEmbedding(
args.max_target_positions,
embed_dim,
self.padding_idx,
learned=args.decoder_learned_pos,
)
if not args.no_token_positional_embeddings
else None
)
if getattr(args, "layernorm_embedding", False):
self.layernorm_embedding = LayerNorm(embed_dim)
else:
self.layernorm_embedding = None
self.cross_self_attention = getattr(args, "cross_self_attention", False)
if self.decoder_layerdrop > 0.0:
self.layers = LayerDropModuleList(p=self.decoder_layerdrop)
else:
self.layers = nn.ModuleList([])
self.layers.extend(
[
self.build_decoder_layer(args, no_encoder_attn)
for _ in range(args.decoder_layers)
]
)
self.num_layers = len(self.layers)
if args.decoder_normalize_before and not getattr(
args, "no_decoder_final_norm", False
):
self.layer_norm = LayerNorm(embed_dim)
else:
self.layer_norm = None
self.project_out_dim = (
Linear(embed_dim, self.output_embed_dim, bias=False)
if embed_dim != self.output_embed_dim and not args.tie_adaptive_weights
else None
)
self.adaptive_softmax = None
self.output_projection = None
if args.adaptive_softmax_cutoff is not None:
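            # NOTE: `options.eval_str_list` below refers to fairseq's
            # `options` module, which is not imported in this stripped-down
            # file; this branch is only reached when adaptive softmax is
            # configured via `adaptive_softmax_cutoff`.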
self.adaptive_softmax = AdaptiveSoftmax(
args.vocab_size,
self.output_embed_dim,
options.eval_str_list(args.adaptive_softmax_cutoff, type=int),
dropout=args.adaptive_softmax_dropout,
adaptive_inputs=embed_tokens if args.tie_adaptive_weights else None,
factor=args.adaptive_softmax_factor,
tie_proj=args.tie_adaptive_proj,
)
elif self.share_input_output_embed:
self.output_projection = nn.Linear(
self.embed_tokens.weight.shape[1],
self.embed_tokens.weight.shape[0],
bias=False,
)
self.output_projection.weight = self.embed_tokens.weight
else:
self.output_projection = nn.Linear(
self.output_embed_dim, args.vocab_size, bias=False
)
nn.init.normal_(
self.output_projection.weight, mean=0, std=self.output_embed_dim ** -0.5
)
def build_decoder_layer(self, args, no_encoder_attn=False):
return TransformerDecoderLayer(args, no_encoder_attn)
def forward(
self,
prev_output_tokens,
encoder_out: Optional[EncoderOut] = None,
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
features_only: bool = False,
alignment_layer: Optional[int] = None,
alignment_heads: Optional[int] = None,
src_lengths: Optional[Any] = None,
return_all_hiddens: bool = False,
local_attn_mask=None,
origin=None,
keep_residual=0,
):
"""
Args:
prev_output_tokens (LongTensor): previous decoder outputs of shape
`(batch, tgt_len)`, for teacher forcing
encoder_out (optional): output from the encoder, used for
encoder-side attention
incremental_state (dict): dictionary used for storing state during
:ref:`Incremental decoding`
features_only (bool, optional): only return features without
applying output layer (default: False).
Returns:
tuple:
- the decoder's output of shape `(batch, tgt_len, vocab)`
- a dictionary with any model-specific outputs
"""
x, extra = self.extract_features(
prev_output_tokens,
encoder_out=encoder_out,
incremental_state=incremental_state,
alignment_layer=alignment_layer,
alignment_heads=alignment_heads,
local_attn_mask=local_attn_mask,
keep_residual=keep_residual,
)
x_orign=x
if not features_only:
x = self.output_layer(x)
if origin is not None:
return x, x_orign, extra
else:
return x,extra
def extract_features(
self,
prev_output_tokens,
encoder_out: Optional[EncoderOut] = None,
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
full_context_alignment: bool = False,
alignment_layer: Optional[int] = None,
alignment_heads: Optional[int] = None,
local_attn_mask=None,
keep_residual=0,
):
return self.extract_features_scriptable(
prev_output_tokens,
encoder_out,
incremental_state,
full_context_alignment,
alignment_layer,
alignment_heads,
local_attn_mask,
keep_residual,
)
"""
A scriptable subclass of this class has an extract_features method and calls
super().extract_features, but super() is not supported in torchscript. A copy of
this function is made to be used in the subclass instead.
"""
def extract_features_scriptable(
self,
prev_output_tokens,
encoder_out: Optional[EncoderOut] = None,
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
full_context_alignment: bool = False,
alignment_layer: Optional[int] = None,
alignment_heads: Optional[int] = None,
local_attn_mask=None,
keep_residual=0,
):
"""
Similar to *forward* but only return features.
Includes several features from "Jointly Learning to Align and
Translate with Transformer Models" (Garg et al., EMNLP 2019).
Args:
full_context_alignment (bool, optional): don't apply
auto-regressive mask to self-attention (default: False).
alignment_layer (int, optional): return mean alignment over
heads at this layer (default: last layer).
alignment_heads (int, optional): only average alignment over
this many heads (default: all heads).
Returns:
tuple:
- the decoder's features of shape `(batch, tgt_len, embed_dim)`
- a dictionary with any model-specific outputs
"""
if alignment_layer is None:
alignment_layer = self.num_layers - 1
# embed positions
positions = (
self.embed_positions(
prev_output_tokens, incremental_state=incremental_state
)
if self.embed_positions is not None
else None
)
if incremental_state is not None:
prev_output_tokens = prev_output_tokens[:, -1:]
if positions is not None:
positions = positions[:, -1:]
# embed tokens and positions
# print('???',prev_output_tokens)
# assert 1==0
if prev_output_tokens.eq(-1).any():
assert len(prev_output_tokens.shape)==2
prev_output_tokens_t=torch.ones((prev_output_tokens.shape[0],prev_output_tokens.shape[1])).long().cuda()
prev_output_tokens_t[:,0]=0
# print('???',prev_output_tokens_t)
# assert 1==0
x=self.embed_scale * self.embed_tokens(prev_output_tokens_t)
else:
x = self.embed_scale * self.embed_tokens(prev_output_tokens)
if self.quant_noise is not None:
x = self.quant_noise(x)
if self.project_in_dim is not None:
x = self.project_in_dim(x)
if positions is not None:
x += positions
# print('????',positions)
# assert 1==0
if self.layernorm_embedding is not None:
x = self.layernorm_embedding(x)
x = self.dropout_module(x)
# B x T x C -> T x B x C
x = x.transpose(0, 1)
self_attn_padding_mask: Optional[Tensor] = None
if self.cross_self_attention or prev_output_tokens.eq(self.padding_idx).any():
self_attn_padding_mask = prev_output_tokens.eq(self.padding_idx)
# print('???',self_attn_padding_mask)
# assert 1==0
# decoder layers
attn: Optional[Tensor] = None
inner_states: List[Optional[Tensor]] = [x]
for idx, layer in enumerate(self.layers):
if incremental_state is None and not full_context_alignment:
self_attn_mask = self.buffered_future_mask(x,local_attn_mask)
else:
self_attn_mask = None
# if local_attn_mask is not None:
# self_attn_mask+=local_attn_mask
#print('???',x.shape,encoder_out.encoder_out.shape)
# print('???self_attn_mask',self_attn_mask)
# assert 1==0
x, layer_attn, _ = layer(
x,
encoder_out.encoder_out if encoder_out is not None else None,
encoder_out.encoder_padding_mask if encoder_out is not None else None,
incremental_state,
self_attn_mask=self_attn_mask,
self_attn_padding_mask=self_attn_padding_mask,
need_attn=bool((idx == alignment_layer)),
need_head_weights=bool((idx == alignment_layer)),
keep_residual=keep_residual,
)
inner_states.append(x)
if layer_attn is not None and idx == alignment_layer:
attn = layer_attn.float().to(x)
if attn is not None:
if alignment_heads is not None:
attn = attn[:alignment_heads]
# average probabilities over heads
attn = attn.mean(dim=0)
if self.layer_norm is not None:
x = self.layer_norm(x)
# T x B x C -> B x T x C
x = x.transpose(0, 1)
if self.project_out_dim is not None:
x = self.project_out_dim(x)
return x, {"attn": [attn], "inner_states": inner_states}
def output_layer(self, features):
"""Project features to the vocabulary size."""
if self.adaptive_softmax is None:
# project back to size of vocabulary
return self.output_projection(features)
else:
return features
def max_positions(self):
"""Maximum output length supported by the decoder."""
if self.embed_positions is None:
return self.max_target_positions
return min(self.max_target_positions, self.embed_positions.max_positions)
def buffered_future_mask(self, tensor,local_attn_mask=None):
dim = tensor.size(0)
# self._future_mask.device != tensor.device is not working in TorchScript. This is a workaround.
if (
self._future_mask.size(0) == 0
or (not self._future_mask.device == tensor.device)
or self._future_mask.size(0) < dim
):
if local_attn_mask is None:
self._future_mask = torch.triu(
fill_with_neg_inf(torch.zeros([dim, dim])), 1
)
else:
self._future_mask = torch.triu(
fill_with_neg_inf(torch.zeros([dim, dim])), 1
)+torch.triu(
fill_with_neg_inf(torch.zeros([dim, dim])), local_attn_mask
).transpose(0,1)
self._future_mask[:,0]=0
# self._future_mask =utils.fill_with_neg_inf(torch.zeros([dim, dim]))
# for i in range(dim):
# start=i-int(local_attn_mask)+1
# if start<0:
# start=0
# self._future_mask[i][start:i+1]=0
#self._future_mask.scatter_(1,torch.LongTensor(),)
# +torch.triu(
# utils.fill_with_neg_inf(torch.zeros([dim, dim])), local_attn_mask
# ).transpose(0,1)
self._future_mask = self._future_mask.to(tensor)
return self._future_mask[:dim, :dim]
def upgrade_state_dict_named(self, state_dict, name):
"""Upgrade a (possibly old) state dict for new versions of fairseq."""
if isinstance(self.embed_positions, SinusoidalPositionalEmbedding):
weights_key = "{}.embed_positions.weights".format(name)
if weights_key in state_dict:
del state_dict[weights_key]
state_dict[
"{}.embed_positions._float_tensor".format(name)
] = torch.FloatTensor(1)
if f"{name}.output_projection.weight" not in state_dict:
if self.share_input_output_embed:
embed_out_key = f"{name}.embed_tokens.weight"
else:
embed_out_key = f"{name}.embed_out"
if embed_out_key in state_dict:
state_dict[f"{name}.output_projection.weight"] = state_dict[
embed_out_key
]
if not self.share_input_output_embed:
del state_dict[embed_out_key]
for i in range(self.num_layers):
# update layer norms
layer_norm_map = {
"0": "self_attn_layer_norm",
"1": "encoder_attn_layer_norm",
"2": "final_layer_norm",
}
for old, new in layer_norm_map.items():
for m in ("weight", "bias"):
k = "{}.layers.{}.layer_norms.{}.{}".format(name, i, old, m)
if k in state_dict:
state_dict[
"{}.layers.{}.{}.{}".format(name, i, new, m)
] = state_dict[k]
del state_dict[k]
version_key = "{}.version".format(name)
if item(state_dict.get(version_key, torch.Tensor([1]))[0]) <= 2:
# earlier checkpoints did not normalize after the stack of layers
self.layer_norm = None
self.normalize = False
state_dict[version_key] = torch.Tensor([1])
return state_dict
def init_bert_params(module):
"""
Initialize the weights specific to the BERT Model.
This overrides the default initializations depending on the specified arguments.
1. If normal_init_linear_weights is set then weights of linear
layer will be initialized using the normal distribution and
bais will be set to the specified value.
2. If normal_init_embed_weights is set then weights of embedding
layer will be initialized using the normal distribution.
3. If normal_init_proj_weights is set then weights of
in_project_weight for MultiHeadAttention initialized using
the normal distribution (to be validated).
"""
if isinstance(module, nn.Linear):
module.weight.data.normal_(mean=0.0, std=0.02)
if module.bias is not None:
module.bias.data.zero_()
if isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=0.02)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
if isinstance(module, MultiheadAttention):
module.q_proj.weight.data.normal_(mean=0.0, std=0.02)
module.k_proj.weight.data.normal_(mean=0.0, std=0.02)
module.v_proj.weight.data.normal_(mean=0.0, std=0.02)
class TransformerSentenceEncoder(nn.Module):
"""
Implementation for a Bi-directional Transformer based Sentence Encoder used
in BERT/XLM style pre-trained models.
This first computes the token embedding using the token embedding matrix,
position embeddings (if specified) and segment embeddings
(if specified). After applying the specified number of
TransformerEncoderLayers, it outputs all the internal states of the
encoder as well as the final representation associated with the first
token (usually CLS token).
Input:
- tokens: B x T matrix representing sentences
- segment_labels: B x T matrix representing segment label for tokens
Output:
- a tuple of the following:
- a list of internal model states used to compute the
predictions where each tensor has shape T x B x C
- sentence representation associated with first input token
in format B x C.
"""
def __init__(
self,
padding_idx: int,
vocab_size: int,
num_encoder_layers: int = 6,
embedding_dim: int = 768,
ffn_embedding_dim: int = 3072,
num_attention_heads: int = 8,
dropout: float = 0.1,
attention_dropout: float = 0.1,
activation_dropout: float = 0.1,
layerdrop: float = 0.0,
max_seq_len: int = 256,
num_segments: int = 2,
use_position_embeddings: bool = True,
offset_positions_by_padding: bool = True,
encoder_normalize_before: bool = False,
apply_bert_init: bool = False,
activation_fn: str = "relu",
learned_pos_embedding: bool = True,
embed_scale: float = None,
freeze_embeddings: bool = False,
n_trans_layers_to_freeze: int = 0,
export: bool = False,
traceable: bool = False,
q_noise: float = 0.0,
qn_block_size: int = 8,
) -> None:
super().__init__()
self.padding_idx = padding_idx
self.vocab_size = vocab_size
self.dropout_module = FairseqDropout(dropout, module_name=self.__class__.__name__)
self.layerdrop = layerdrop
self.max_seq_len = max_seq_len
self.embedding_dim = embedding_dim
self.num_segments = num_segments
self.use_position_embeddings = use_position_embeddings
self.apply_bert_init = apply_bert_init
self.learned_pos_embedding = learned_pos_embedding
self.traceable = traceable
self.tpu = False # whether we're on TPU
self.embed_tokens = self.build_embedding(
self.vocab_size, self.embedding_dim, self.padding_idx
)
self.embed_scale = embed_scale
if q_noise > 0:
self.quant_noise = apply_quant_noise_(
nn.Linear(self.embedding_dim, self.embedding_dim, bias=False),
q_noise,
qn_block_size,
)
else:
self.quant_noise = None
self.segment_embeddings = (
nn.Embedding(self.num_segments, self.embedding_dim, padding_idx=None)
if self.num_segments > 0
else None
)
self.embed_positions = (
PositionalEmbedding(
self.max_seq_len,
self.embedding_dim,
padding_idx=(self.padding_idx if offset_positions_by_padding else None),
learned=self.learned_pos_embedding,
)
if self.use_position_embeddings
else None
)
if self.layerdrop > 0.0:
self.layers = LayerDropModuleList(p=self.layerdrop)
else:
self.layers = nn.ModuleList([])
self.layers.extend([
self.build_transformer_sentence_encoder_layer(
embedding_dim=self.embedding_dim,
ffn_embedding_dim=ffn_embedding_dim,
num_attention_heads=num_attention_heads,
dropout=self.dropout_module.p,
attention_dropout=attention_dropout,
activation_dropout=activation_dropout,
activation_fn=activation_fn,
export=export,
q_noise=q_noise,
qn_block_size=qn_block_size,
)
for _ in range(num_encoder_layers)
])
if encoder_normalize_before:
self.emb_layer_norm = LayerNorm(self.embedding_dim, export=export)
else:
self.emb_layer_norm = None
# Apply initialization of model params after building the model
if self.apply_bert_init:
self.apply(init_bert_params)
def freeze_module_params(m):
if m is not None:
for p in m.parameters():
p.requires_grad = False
if freeze_embeddings:
freeze_module_params(self.embed_tokens)
freeze_module_params(self.segment_embeddings)
freeze_module_params(self.embed_positions)
freeze_module_params(self.emb_layer_norm)
for layer in range(n_trans_layers_to_freeze):
freeze_module_params(self.layers[layer])
def build_embedding(self, vocab_size, embedding_dim, padding_idx):
return nn.Embedding(vocab_size, embedding_dim, padding_idx)
def build_transformer_sentence_encoder_layer(
self,
embedding_dim,
ffn_embedding_dim,
num_attention_heads,
dropout,
attention_dropout,
activation_dropout,
activation_fn,
export,
q_noise,
qn_block_size,
):
return TransformerSentenceEncoderLayer(
embedding_dim=embedding_dim,
ffn_embedding_dim=ffn_embedding_dim,
num_attention_heads=num_attention_heads,
dropout=dropout,
attention_dropout=attention_dropout,
activation_dropout=activation_dropout,
activation_fn=activation_fn,
export=export,
q_noise=q_noise,
qn_block_size=qn_block_size,
)
def prepare_for_tpu_(self, **kwargs):
self.tpu = True
def forward(
self,
tokens: torch.Tensor,
segment_labels: torch.Tensor = None,
last_state_only: bool = False,
positions: Optional[torch.Tensor] = None,
attn_mask: Optional[torch.Tensor] = None,
) -> Tuple[torch.Tensor, torch.Tensor]:
# compute padding mask. This is needed for multi-head attention
padding_mask = tokens.eq(self.padding_idx)
if not self.traceable and not self.tpu and not padding_mask.any():
padding_mask = None
x = self.embed_tokens(tokens)
if self.embed_scale is not None:
x *= self.embed_scale
if self.embed_positions is not None:
x += self.embed_positions(tokens, positions=positions)
if self.segment_embeddings is not None and segment_labels is not None:
x += self.segment_embeddings(segment_labels)
if self.quant_noise is not None:
x = self.quant_noise(x)
if self.emb_layer_norm is not None:
x = self.emb_layer_norm(x)
x = self.dropout_module(x)
# account for padding while computing the representation
if padding_mask is not None:
x *= 1 - padding_mask.unsqueeze(-1).type_as(x)
# B x T x C -> T x B x C
x = x.transpose(0, 1)
inner_states = []
if not last_state_only:
inner_states.append(x)
for layer in self.layers:
            x, _ = layer(x, self_attn_padding_mask=padding_mask, self_attn_mask=attn_mask)
if not last_state_only:
inner_states.append(x)
sentence_rep = x[0, :, :]
if last_state_only:
inner_states = [x]
if self.traceable:
return torch.stack(inner_states), sentence_rep
else:
return inner_states, sentence_rep
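def _demo_sentence_encoder():
    # Minimal usage sketch (illustrative addition): shapes follow the class
    # docstring: `tokens` is B x T, each inner state is T x B x C, and the
    # sentence representation is B x C.
    encoder = TransformerSentenceEncoder(
        padding_idx=1, vocab_size=100, num_encoder_layers=2,
        embedding_dim=64, ffn_embedding_dim=128, num_attention_heads=4,
    )
    tokens = torch.randint(2, 100, (3, 16))  # 3 sequences of length 16, no padding
    inner_states, sentence_rep = encoder(tokens)
    assert sentence_rep.shape == (3, 64)  # B x C
    return inner_states, sentence_rep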
|
ANCE/model/SEED_Encoder/transformer_sentence_encoder.py/0
|
{
"file_path": "ANCE/model/SEED_Encoder/transformer_sentence_encoder.py",
"repo_id": "ANCE",
"token_count": 15925
}
| 25 |
from .preprocessed_dataset import PreprocessedDataset
|
ASTRA/astra/dataset/__init__.py/0
|
{
"file_path": "ASTRA/astra/dataset/__init__.py",
"repo_id": "ASTRA",
"token_count": 16
}
| 26 |
#!/bin/bash
echo "creating e2e datasets..."
path=data/e2e
echo "train..."
python src/format_converting_e2e.py $path/train.txt $path/train_formatted.jsonl
python src/gpt2_encode.py --vocab vocab --input $path/train_formatted.jsonl --output $path/train.jsonl --add_bos --add_eos
echo "test..."
python src/format_converting_e2e.py $path/test.txt $path/test_formatted.jsonl
python src/gpt2_encode.py --vocab vocab --input $path/test_formatted.jsonl --output $path/test.jsonl --add_bos --add_eos
echo "valid..."
python src/format_converting_e2e.py $path/valid.txt $path/valid_formatted.jsonl
python src/gpt2_encode.py --vocab vocab --input $path/valid_formatted.jsonl --output $path/valid.jsonl --add_bos --add_eos
echo "creating webnlg datasets..."
path=data/webnlg_challenge_2017
echo "train..."
python src/format_converting_webnlg.py $path/train.json $path/train_formatted.jsonl
python src/gpt2_encode.py --vocab vocab --input $path/train_formatted.jsonl --output $path/train.jsonl --add_bos --add_eos
echo "test..."
python src/format_converting_webnlg.py $path/test.json $path/test_formatted.jsonl
python src/gpt2_encode.py --vocab vocab --input $path/test_formatted.jsonl --output $path/test.jsonl --add_bos --add_eos
echo "valid..."
python src/format_converting_webnlg.py $path/dev.json $path/valid_formatted.jsonl
python src/gpt2_encode.py --vocab vocab --input $path/valid_formatted.jsonl --output $path/valid.jsonl --add_bos --add_eos
echo "creating dart datasets..."
path=data/dart
echo "train..."
python src/format_converting_dart.py $path/dart-v1.1.1-full-train.json $path/train_formatted.jsonl
python src/gpt2_encode.py --vocab vocab --input $path/train_formatted.jsonl --output $path/train.jsonl --add_bos --add_eos
echo "test..."
python src/format_converting_dart.py $path/dart-v1.1.1-full-test.json $path/test_formatted.jsonl
python src/gpt2_encode.py --vocab vocab --input $path/test_formatted.jsonl --output $path/test.jsonl --add_bos --add_eos
echo "valid..."
python src/format_converting_dart.py $path/dart-v1.1.1-full-dev.json $path/valid_formatted.jsonl
python src/gpt2_encode.py --vocab vocab --input $path/valid_formatted.jsonl --output $path/valid.jsonl --add_bos --add_eos
echo "script complete!"
|
AdaMix/NLG/create_datasets.sh/0
|
{
"file_path": "AdaMix/NLG/create_datasets.sh",
"repo_id": "AdaMix",
"token_count": 864
}
| 27 |
# ------------------------------------------------------------------------------------------
# Copyright (c). All rights reserved.
# Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.
# ------------------------------------------------------------------------------------------
import torch
import torch.nn as nn
import torch.nn.functional as F
import copy
import math
from typing import Optional, List
class LoRALayer():
def __init__(
self,
r: int,
lora_alpha: int,
lora_dropout: float,
merge_weights: bool,
):
self.r = r
self.lora_alpha = lora_alpha
# Optional dropout
if lora_dropout > 0.:
self.lora_dropout = nn.Dropout(p=lora_dropout)
else:
self.lora_dropout = lambda x: x
# Mark the weight as unmerged
self.merged = False
self.merge_weights = merge_weights
class Embedding(nn.Embedding, LoRALayer):
# LoRA implemented in a dense layer
def __init__(
self,
num_embeddings: int,
embedding_dim: int,
r: int = 0,
lora_alpha: int = 1,
merge_weights: bool = True,
**kwargs
):
nn.Embedding.__init__(self, num_embeddings, embedding_dim, **kwargs)
LoRALayer.__init__(self, r=r, lora_alpha=lora_alpha, lora_dropout=0,
merge_weights=merge_weights)
# Actual trainable parameters
if r > 0:
self.lora_A = nn.Parameter(self.weight.new_zeros((r, num_embeddings)))
self.lora_B = nn.Parameter(self.weight.new_zeros((embedding_dim, r)))
self.scaling = self.lora_alpha / self.r
# Freezing the pre-trained weight matrix
self.weight.requires_grad = False
self.reset_parameters()
def reset_parameters(self):
nn.Embedding.reset_parameters(self)
if hasattr(self, 'lora_A'):
            # initialize A to zero and B with a normal distribution
            nn.init.zeros_(self.lora_A)
            nn.init.normal_(self.lora_B)
def train(self, mode: bool = True):
nn.Embedding.train(self, mode)
if self.merge_weights and self.merged:
# Make sure that the weights are not merged
if self.r > 0:
self.weight.data -= (self.lora_B @ self.lora_A).T * self.scaling
self.merged = False
    def eval(self):
        nn.Embedding.eval(self)
if self.merge_weights and not self.merged:
# Merge the weights and mark it
if self.r > 0:
                self.weight.data += (self.lora_B @ self.lora_A).T * self.scaling
self.merged = True
def forward(self, x: torch.Tensor):
if self.r > 0 and not self.merged:
result = nn.Embedding.forward(self, x)
if self.r > 0:
after_A = F.embedding(
x, self.lora_A.T, self.padding_idx, self.max_norm,
self.norm_type, self.scale_grad_by_freq, self.sparse
)
result += (after_A @ self.lora_B.T) * self.scaling
return result
else:
return nn.Embedding.forward(self, x)
class Linear(nn.Linear, LoRALayer):
# LoRA implemented in a dense layer
def __init__(
self,
in_features: int,
out_features: int,
r: int = 0,
lora_alpha: int = 1,
lora_dropout: float = 0.,
fan_in_fan_out: bool = False, # Set this to True if the layer to replace stores weight like (fan_in, fan_out)
merge_weights: bool = True,
**kwargs
):
nn.Linear.__init__(self, in_features, out_features, **kwargs)
LoRALayer.__init__(self, r=r, lora_alpha=lora_alpha, lora_dropout=lora_dropout,
merge_weights=merge_weights)
self.fan_in_fan_out = fan_in_fan_out
# Actual trainable parameters
if r > 0:
self.lora_A = nn.Parameter(self.weight.new_zeros((r, in_features)))
self.lora_B = nn.Parameter(self.weight.new_zeros((out_features, r)))
self.scaling = self.lora_alpha / self.r
# Freezing the pre-trained weight matrix
self.weight.requires_grad = False
self.reset_parameters()
if fan_in_fan_out:
self.weight.data = self.weight.data.T
def reset_parameters(self):
nn.Linear.reset_parameters(self)
if hasattr(self, 'lora_A'):
# initialize A the same way as the default for nn.Linear and B to zero
nn.init.kaiming_uniform_(self.lora_A, a=math.sqrt(5))
nn.init.zeros_(self.lora_B)
def train(self, mode: bool = True):
def T(w):
return w.T if self.fan_in_fan_out else w
nn.Linear.train(self, mode)
if self.merge_weights and self.merged:
# Make sure that the weights are not merged
if self.r > 0:
self.weight.data -= T(self.lora_B @ self.lora_A) * self.scaling
self.merged = False
def eval(self):
def T(w):
return w.T if self.fan_in_fan_out else w
nn.Linear.eval(self)
if self.merge_weights and not self.merged:
# Merge the weights and mark it
if self.r > 0:
self.weight.data += T(self.lora_B @ self.lora_A) * self.scaling
self.merged = True
def forward(self, x: torch.Tensor):
def T(w):
return w.T if self.fan_in_fan_out else w
if self.r > 0 and not self.merged:
result = F.linear(x, T(self.weight), bias=self.bias)
if self.r > 0:
result += (self.lora_dropout(x) @ self.lora_A.T @ self.lora_B.T) * self.scaling
return result
else:
return F.linear(x, T(self.weight), bias=self.bias)
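def _demo_lora_linear():
    # Minimal usage sketch (illustrative addition): a rank-4 LoRA adapter on a
    # frozen 768 -> 768 projection. Only the bias and the LoRA factors receive
    # gradients; the effective weight is W + (lora_B @ lora_A) * (lora_alpha / r).
    layer = Linear(768, 768, r=4, lora_alpha=16, lora_dropout=0.1)
    y = layer(torch.randn(2, 10, 768))  # unmerged path adds the low-rank update
    trainable = sorted(n for n, p in layer.named_parameters() if p.requires_grad)
    assert trainable == ['bias', 'lora_A', 'lora_B']
    return y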
class MergedLinear(nn.Linear, LoRALayer):
# LoRA implemented in a dense layer
def __init__(
self,
in_features: int,
out_features: int,
r: int = 0,
lora_alpha: int = 1,
lora_dropout: float = 0.,
enable_lora: List[bool] = [False],
fan_in_fan_out: bool = False,
merge_weights: bool = True,
**kwargs
):
nn.Linear.__init__(self, in_features, out_features, **kwargs)
LoRALayer.__init__(self, r=r, lora_alpha=lora_alpha, lora_dropout=lora_dropout,
merge_weights=merge_weights)
assert out_features % len(enable_lora) == 0, \
'The length of enable_lora must divide out_features'
self.enable_lora = enable_lora
self.fan_in_fan_out = fan_in_fan_out
# Actual trainable parameters
if r > 0 and any(enable_lora):
self.lora_A = nn.Parameter(
self.weight.new_zeros((r * sum(enable_lora), in_features)))
self.lora_B = nn.Parameter(
self.weight.new_zeros((out_features // len(enable_lora) * sum(enable_lora), r))
) # weights for Conv1D with groups=sum(enable_lora)
self.scaling = self.lora_alpha / self.r
# Freezing the pre-trained weight matrix
self.weight.requires_grad = False
# Compute the indices
self.lora_ind = self.weight.new_zeros(
(out_features, ), dtype=torch.bool
).view(len(enable_lora), -1)
self.lora_ind[enable_lora, :] = True
self.lora_ind = self.lora_ind.view(-1)
self.reset_parameters()
if fan_in_fan_out:
self.weight.data = self.weight.data.T
def reset_parameters(self):
nn.Linear.reset_parameters(self)
if hasattr(self, 'lora_A'):
# initialize A the same way as the default for nn.Linear and B to zero
nn.init.kaiming_uniform_(self.lora_A, a=math.sqrt(5))
nn.init.zeros_(self.lora_B)
def zero_pad(self, x):
result = x.new_zeros((*x.shape[:-1], self.out_features))
result = result.view(-1, self.out_features)
result[:, self.lora_ind] = x.reshape(
-1, self.out_features // len(self.enable_lora) * sum(self.enable_lora)
)
return result.view((*x.shape[:-1], self.out_features))
def train(self, mode: bool = True):
def T(w):
return w.T if self.fan_in_fan_out else w
nn.Linear.train(self, mode)
if self.merge_weights and self.merged:
# Make sure that the weights are not merged
if self.r > 0 and any(self.enable_lora):
delta_w = F.conv1d(
self.lora_A.data.unsqueeze(0),
self.lora_B.data.unsqueeze(-1),
groups=sum(self.enable_lora)
).squeeze(0)
self.weight.data -= self.zero_pad(T(delta_w * self.scaling))
self.merged = False
def eval(self):
def T(w):
return w.T if self.fan_in_fan_out else w
nn.Linear.eval(self)
if self.merge_weights and not self.merged:
# Merge the weights and mark it
if self.r > 0 and any(self.enable_lora):
delta_w = F.conv1d(
self.lora_A.data.unsqueeze(0),
self.lora_B.data.unsqueeze(-1),
groups=sum(self.enable_lora)
).squeeze(0)
self.weight.data += self.zero_pad(T(delta_w * self.scaling))
self.merged = True
def forward(self, x: torch.Tensor):
def T(w):
return w.T if self.fan_in_fan_out else w
if self.merged:
return F.linear(x, T(self.weight), bias=self.bias)
else:
result = F.linear(x, T(self.weight), bias=self.bias)
if self.r > 0:
after_A = F.linear(self.lora_dropout(x), self.lora_A)
after_B = F.conv1d(
after_A.transpose(-2, -1),
self.lora_B.unsqueeze(-1),
groups=sum(self.enable_lora)
).transpose(-2, -1)
result += self.zero_pad(after_B) * self.scaling
return result
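def _demo_merged_linear():
    # Minimal usage sketch (illustrative addition): the typical use of
    # MergedLinear is a fused QKV projection where LoRA is applied only to the
    # query and value thirds of the output, leaving the key projection frozen.
    qkv = MergedLinear(768, 3 * 768, r=8, enable_lora=[True, False, True])
    out = qkv(torch.randn(2, 10, 768))
    assert out.shape == (2, 10, 2304)  # zero_pad scatters LoRA into the Q/V slices
    return out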
class AdamixMergedLinear(MergedLinear):
def __init__(
self,
num_experts=1,
share_A=0,
share_B=0,
**kwargs
):
super().__init__(**kwargs)
if share_A == 1:
self.experts_lora_A = torch.nn.ParameterList([copy.deepcopy(self.lora_A) for i in range(1)])
else:
self.experts_lora_A = torch.nn.ParameterList([copy.deepcopy(self.lora_A) for i in range(num_experts)])
if share_B == 1:
self.experts_lora_B = torch.nn.ParameterList([copy.deepcopy(self.lora_B) for i in range(1)])
else:
self.experts_lora_B = torch.nn.ParameterList([copy.deepcopy(self.lora_B) for i in range(num_experts)])
self.share_A = share_A
self.share_B = share_B
# Remove original lora parameters
self.lora_A = None
self.lora_B = None
self.num_experts = num_experts
self.lora_expert_score_weight = torch.nn.Parameter(torch.zeros(self.num_experts), requires_grad=False)
self.lora_A_w = None
self.lora_B_w = None
def forward(self, x: torch.Tensor):
def T(w):
return w.T if self.fan_in_fan_out else w
if self.merged:
return F.linear(x, T(self.weight), bias=self.bias)
else:
result = F.linear(x, T(self.weight), bias=self.bias)
if self.r > 0:
if self.training and not self.lora_expert_score_weight.requires_grad:
if self.share_A == 1:
after_A = F.linear(self.lora_dropout(x), self.experts_lora_A[0])
else:
expert_idx = torch.randint(low=0, high=self.num_experts, size=(1,)).item() # selected expert
after_A = F.linear(self.lora_dropout(x), self.experts_lora_A[expert_idx])
if self.share_B == 1:
after_B = F.conv1d(
after_A.transpose(-2, -1),
self.experts_lora_B[0].unsqueeze(-1),
groups=sum(self.enable_lora)
).transpose(-2, -1)
else:
expert_idx = torch.randint(low=0, high=self.num_experts, size=(1,)).item() # selected expert
after_B = F.conv1d(
after_A.transpose(-2, -1),
self.experts_lora_B[expert_idx].unsqueeze(-1),
groups=sum(self.enable_lora)
).transpose(-2, -1)
else:
expert_weights = F.softmax(self.lora_expert_score_weight, dim=-1)
if not self.training:
if self.lora_A_w is None and self.lora_B_w is None:
self.lora_A_w = 0.
self.lora_B_w = 0.
if self.share_A == 1:
self.lora_A_w = self.experts_lora_A[0]
else:
for idx in range(self.num_experts):
self.lora_A_w += expert_weights[idx] * self.experts_lora_A[idx]
if self.share_B == 1:
self.lora_B_w = self.experts_lora_B[0]
else:
for idx in range(self.num_experts):
self.lora_B_w += expert_weights[idx] * self.experts_lora_B[idx]
lora_A_w = self.lora_A_w
lora_B_w = self.lora_B_w
else:
lora_A_w = 0.
lora_B_w = 0.
if self.share_A == 1:
lora_A_w = self.experts_lora_A[0]
else:
for idx in range(self.num_experts):
lora_A_w += expert_weights[idx] * self.experts_lora_A[idx]
if self.share_B == 1:
lora_B_w = self.experts_lora_B[0]
else:
for idx in range(self.num_experts):
lora_B_w += expert_weights[idx] * self.experts_lora_B[idx]
after_A = F.linear(self.lora_dropout(x), lora_A_w)
after_B = F.conv1d(
after_A.transpose(-2, -1),
lora_B_w.unsqueeze(-1),
groups=sum(self.enable_lora)
).transpose(-2, -1)
result += self.zero_pad(after_B) * self.scaling
return result
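def _demo_adamix_merged_linear():
    # Minimal usage sketch (illustrative addition). AdaMix keeps several LoRA
    # "experts": in training mode one expert is routed at random per forward
    # pass, while in eval mode the experts are combined with a softmax over
    # lora_expert_score_weight. merge_weights=False is passed because the
    # parent's merge logic references the removed self.lora_A / self.lora_B.
    layer = AdamixMergedLinear(
        num_experts=4, share_A=0, share_B=0,
        in_features=768, out_features=3 * 768, r=8,
        enable_lora=[True, False, True], merge_weights=False,
    )
    layer.train()
    y_train = layer(torch.randn(2, 10, 768))  # stochastic expert routing
    layer.eval()
    y_eval = layer(torch.randn(2, 10, 768))   # weighted average over experts
    return y_train, y_eval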
class Conv2d(nn.Conv2d, LoRALayer):
# LoRA implemented in a dense layer
def __init__(
self,
in_channels: int,
out_channels: int,
kernel_size: int,
r: int = 0,
lora_alpha: int = 1,
lora_dropout: float = 0.,
merge_weights: bool = True,
**kwargs
):
nn.Conv2d.__init__(self, in_channels, out_channels, kernel_size, **kwargs)
LoRALayer.__init__(self, r=r, lora_alpha=lora_alpha, lora_dropout=lora_dropout,
merge_weights=merge_weights)
assert type(kernel_size) is int
# Actual trainable parameters
if r > 0:
self.lora_A = nn.Parameter(
self.weight.new_zeros((r*kernel_size, in_channels*kernel_size))
)
self.lora_B = nn.Parameter(
self.weight.new_zeros((out_channels*kernel_size, r*kernel_size))
)
self.scaling = self.lora_alpha / self.r
# Freezing the pre-trained weight matrix
self.weight.requires_grad = False
self.reset_parameters()
def reset_parameters(self):
nn.Conv2d.reset_parameters(self)
if hasattr(self, 'lora_A'):
# initialize A the same way as the default for nn.Linear and B to zero
nn.init.kaiming_uniform_(self.lora_A, a=math.sqrt(5))
nn.init.zeros_(self.lora_B)
    def train(self, mode: bool = True):
        nn.Conv2d.train(self, mode)
        if self.merge_weights and self.merged:
            # Make sure that the weights are not merged
            if self.r > 0:  # guard: lora_A/lora_B only exist when r > 0
                self.weight.data -= (self.lora_B @ self.lora_A).view(self.weight.shape) * self.scaling
            self.merged = False
    def eval(self):
        nn.Conv2d.eval(self)
        if self.merge_weights and not self.merged:
            # Merge the weights and mark it
            if self.r > 0:  # guard: lora_A/lora_B only exist when r > 0
                self.weight.data += (self.lora_B @ self.lora_A).view(self.weight.shape) * self.scaling
            self.merged = True
def forward(self, x: torch.Tensor):
if self.r > 0 and not self.merged:
return F.conv2d(
x,
self.weight + (self.lora_B @ self.lora_A).view(self.weight.shape) * self.scaling,
self.bias, self.stride, self.padding, self.dilation, self.groups
)
return nn.Conv2d.forward(self, x)
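def _demo_lora_conv2d():
    # Minimal usage sketch (illustrative addition): rank-2 LoRA on a 3x3
    # convolution. lora_B @ lora_A forms an (out*k) x (in*k) update that is
    # reshaped to the convolution weight's shape inside forward.
    conv = Conv2d(16, 32, kernel_size=3, r=2, padding=1)
    y = conv(torch.randn(1, 16, 8, 8))
    assert y.shape == (1, 32, 8, 8)  # padding=1 preserves the spatial size
    return y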
|
AdaMix/NLG/loralib/layers.py/0
|
{
"file_path": "AdaMix/NLG/loralib/layers.py",
"repo_id": "AdaMix",
"token_count": 9565
}
| 28 |
sudo apt-get update
sudo apt-get -y install jq virtualenv
virtualenv -p `which python3` ./venv
. ./venv/bin/activate
pip install -r requirement.txt
source download_pretrained_checkpoints.sh
source create_datasets.sh
cd ./eval
source download_evalscript.sh
cd ..
|
AdaMix/NLG/setup.sh/0
|
{
"file_path": "AdaMix/NLG/setup.sh",
"repo_id": "AdaMix",
"token_count": 85
}
| 29 |
apiVersion: v1
kind: PersistentVolume
metadata:
name: huggingface-cluster-disk
spec:
storageClassName: ""
capacity:
storage: 500Gi
accessModes:
- ReadOnlyMany
claimRef:
namespace: default
name: huggingface-cluster-disk-claim
gcePersistentDisk:
pdName: huggingface-cluster-disk
fsType: ext4
readOnly: true
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: huggingface-cluster-disk-claim
spec:
# Specify "" as the storageClassName so it matches the PersistentVolume's StorageClass.
# A nil storageClassName value uses the default StorageClass. For details, see
# https://kubernetes.io/docs/concepts/storage/persistent-volumes/#class-1
storageClassName: ""
accessModes:
- ReadOnlyMany
resources:
requests:
storage: 1Ki
|
AdaMix/docker/transformers-pytorch-tpu/dataset.yaml/0
|
{
"file_path": "AdaMix/docker/transformers-pytorch-tpu/dataset.yaml",
"repo_id": "AdaMix",
"token_count": 274
}
| 30 |
..
Copyright 2020 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
BERTology
-----------------------------------------------------------------------------------------------------------------------
There is a growing field of study concerned with investigating the inner workings of large-scale transformers like BERT
(which some call "BERTology"). Some good examples of this field are:
* BERT Rediscovers the Classical NLP Pipeline by Ian Tenney, Dipanjan Das, Ellie Pavlick:
https://arxiv.org/abs/1905.05950
* Are Sixteen Heads Really Better than One? by Paul Michel, Omer Levy, Graham Neubig: https://arxiv.org/abs/1905.10650
* What Does BERT Look At? An Analysis of BERT's Attention by Kevin Clark, Urvashi Khandelwal, Omer Levy, Christopher D.
Manning: https://arxiv.org/abs/1906.04341
In order to help this new field develop, we have included a few additional features in the BERT/GPT/GPT-2 models to
help people access the inner representations, mainly adapted from the great work of Paul Michel
(https://arxiv.org/abs/1905.10650):
* accessing all the hidden-states of BERT/GPT/GPT-2,
* accessing all the attention weights for each head of BERT/GPT/GPT-2,
* retrieving head output values and gradients in order to compute head importance scores and prune heads, as explained
  in https://arxiv.org/abs/1905.10650.
To help you understand and use these features, we have added a specific example script: :prefix_link:`bertology.py
<examples/research_projects/bertology/run_bertology.py>`, which extracts information from and prunes a model
pre-trained on GLUE.
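For example, the following minimal sketch (the model name is illustrative only, and it assumes a recent version of the
library) requests both sets of tensors directly::

    from transformers import BertModel, BertTokenizer

    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    model = BertModel.from_pretrained(
        "bert-base-uncased", output_hidden_states=True, output_attentions=True
    )
    outputs = model(**tokenizer("Hello, world!", return_tensors="pt"))
    hidden_states = outputs.hidden_states  # one tensor per layer, plus the embeddings
    attentions = outputs.attentions        # one batch x heads x seq x seq tensor per layer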
|
AdaMix/docs/source/bertology.rst/0
|
{
"file_path": "AdaMix/docs/source/bertology.rst",
"repo_id": "AdaMix",
"token_count": 568
}
| 31 |
..
Copyright 2020 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
Models
-----------------------------------------------------------------------------------------------------------------------
The base classes :class:`~transformers.PreTrainedModel`, :class:`~transformers.TFPreTrainedModel`, and
:class:`~transformers.FlaxPreTrainedModel` implement the common methods for loading/saving a model either from a local
file or directory, or from a pretrained model configuration provided by the library (downloaded from HuggingFace's AWS
S3 repository).
:class:`~transformers.PreTrainedModel` and :class:`~transformers.TFPreTrainedModel` also implement a few methods which
are common among all the models to:
- resize the input token embeddings when new tokens are added to the vocabulary
- prune the attention heads of the model.
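For instance, a minimal sketch (the model name is illustrative only)::

    from transformers import BertModel

    model = BertModel.from_pretrained("bert-base-uncased")
    model.resize_token_embeddings(30524)  # two extra rows for newly added tokens
    model.prune_heads({0: [0, 2]})        # remove heads 0 and 2 of the first layer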
The other methods that are common to each model are defined in :class:`~transformers.modeling_utils.ModuleUtilsMixin`
(for the PyTorch models) and :class:`~transformers.modeling_tf_utils.TFModuleUtilsMixin` (for the TensorFlow models);
for text generation, see :class:`~transformers.generation_utils.GenerationMixin` (for the PyTorch models) and
:class:`~transformers.generation_tf_utils.TFGenerationMixin` (for the TensorFlow models).
PreTrainedModel
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autoclass:: transformers.PreTrainedModel
:members:
ModuleUtilsMixin
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autoclass:: transformers.modeling_utils.ModuleUtilsMixin
:members:
TFPreTrainedModel
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autoclass:: transformers.TFPreTrainedModel
:members:
TFModelUtilsMixin
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autoclass:: transformers.modeling_tf_utils.TFModelUtilsMixin
:members:
FlaxPreTrainedModel
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autoclass:: transformers.FlaxPreTrainedModel
:members:
Generation
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autoclass:: transformers.generation_utils.GenerationMixin
:members:
.. autoclass:: transformers.generation_tf_utils.TFGenerationMixin
:members:
|
AdaMix/docs/source/main_classes/model.rst/0
|
{
"file_path": "AdaMix/docs/source/main_classes/model.rst",
"repo_id": "AdaMix",
"token_count": 757
}
| 32 |